Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/acpi_lpss.c | 2
-rw-r--r--  drivers/acpi/bus.c | 13
-rw-r--r--  drivers/android/binder_alloc.c | 43
-rw-r--r--  drivers/ata/libata-core.c | 2
-rw-r--r--  drivers/ata/pata_ftide010.c | 27
-rw-r--r--  drivers/base/firmware_loader/main.c | 30
-rw-r--r--  drivers/base/memory.c | 20
-rw-r--r--  drivers/base/power/clock_ops.c | 2
-rw-r--r--  drivers/block/nbd.c | 3
-rw-r--r--  drivers/block/null_blk.h | 17
-rw-r--r--  drivers/block/null_blk_main.c | 45
-rw-r--r--  drivers/block/null_blk_zoned.c | 34
-rw-r--r--  drivers/block/rbd.c | 235
-rw-r--r--  drivers/block/xen-blkback/blkback.c | 99
-rw-r--r--  drivers/block/xen-blkback/common.h | 14
-rw-r--r--  drivers/block/xen-blkfront.c | 110
-rw-r--r--  drivers/bluetooth/Kconfig | 1
-rw-r--r--  drivers/bluetooth/btmtkuart.c | 8
-rw-r--r--  drivers/bluetooth/hci_ldisc.c | 2
-rw-r--r--  drivers/bus/ti-sysc.c | 37
-rw-r--r--  drivers/cdrom/cdrom.c | 2
-rw-r--r--  drivers/char/Kconfig | 4
-rw-r--r--  drivers/char/ipmi/ipmi_bt_sm.c | 92
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c | 53
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c | 17
-rw-r--r--  drivers/char/ipmi/ipmi_ssif.c | 32
-rw-r--r--  drivers/char/ipmi/kcs_bmc.c | 7
-rw-r--r--  drivers/char/random.c | 11
-rw-r--r--  drivers/clk/clk-npcm7xx.c | 4
-rw-r--r--  drivers/clk/x86/clk-pmc-atom.c | 18
-rw-r--r--  drivers/clk/x86/clk-st.c | 2
-rw-r--r--  drivers/cpuidle/governors/menu.c | 13
-rw-r--r--  drivers/crypto/caam/caamalg_qi.c | 6
-rw-r--r--  drivers/crypto/caam/caampkc.c | 20
-rw-r--r--  drivers/crypto/caam/jr.c | 3
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_dev.h | 3
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_lib.c | 1
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_reqmgr.c | 57
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls.h | 5
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls_main.c | 7
-rw-r--r--  drivers/crypto/vmx/aes_cbc.c | 30
-rw-r--r--  drivers/crypto/vmx/aes_xts.c | 21
-rw-r--r--  drivers/dax/device.c | 3
-rw-r--r--  drivers/dma/mic_x100_dma.c | 4
-rw-r--r--  drivers/firmware/arm_scmi/perf.c | 8
-rw-r--r--  drivers/fpga/dfl-fme-pr.c | 2
-rw-r--r--  drivers/gpio/gpio-adp5588.c | 24
-rw-r--r--  drivers/gpio/gpio-dwapb.c | 1
-rw-r--r--  drivers/gpio/gpiolib-acpi.c | 86
-rw-r--r--  drivers/gpio/gpiolib-of.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 70
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 35
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 49
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dpm.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gvt/dmabuf.c | 33
-rw-r--r--  drivers/gpu/drm/i915/gvt/fb_decoder.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gvt/fb_decoder.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 33
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio_context.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/opregion.c | 20
-rw-r--r--  drivers/gpu/drm/i915/gvt/sched_policy.c | 37
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_audio.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 17
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 15
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 33
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_mst.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_lspcon.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 228
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_ovl.c | 11
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_rdma.c | 92
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 47
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_crtc.h | 3
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_ddp.c | 18
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h | 9
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_drv.c | 27
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/disp.c | 67
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 110
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 44
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 21
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 57
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.h | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vga.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c | 54
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c | 18
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c | 24
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c | 2
-rw-r--r--  drivers/hid/hid-apple.c | 9
-rw-r--r--  drivers/hid/hid-core.c | 5
-rw-r--r--  drivers/hid/hid-ids.h | 6
-rw-r--r--  drivers/hid/hid-input.c | 5
-rw-r--r--  drivers/hid/hid-multitouch.c | 19
-rw-r--r--  drivers/hid/hid-saitek.c | 2
-rw-r--r--  drivers/hid/hid-sensor-hub.c | 23
-rw-r--r--  drivers/hid/i2c-hid/i2c-hid.c | 11
-rw-r--r--  drivers/hid/intel-ish-hid/ipc/hw-ish.h | 1
-rw-r--r--  drivers/hid/intel-ish-hid/ipc/pci-ish.c | 1
-rw-r--r--  drivers/hv/vmbus_drv.c | 3
-rw-r--r--  drivers/hwmon/adt7475.c | 25
-rw-r--r--  drivers/hwmon/ina2xx.c | 13
-rw-r--r--  drivers/hwmon/nct6775.c | 2
-rw-r--r--  drivers/hwmon/raspberrypi-hwmon.c | 1
-rw-r--r--  drivers/i2c/algos/i2c-algo-bit.c | 55
-rw-r--r--  drivers/i2c/busses/i2c-designware-master.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-designware-platdrv.c | 7
-rw-r--r--  drivers/i2c/busses/i2c-i801.c | 16
-rw-r--r--  drivers/i2c/busses/i2c-imx-lpi2c.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-sh_mobile.c | 15
-rw-r--r--  drivers/i2c/busses/i2c-uniphier-f.c | 7
-rw-r--r--  drivers/i2c/busses/i2c-uniphier.c | 7
-rw-r--r--  drivers/i2c/busses/i2c-xiic.c | 4
-rw-r--r--  drivers/i2c/i2c-core-base.c | 11
-rw-r--r--  drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c | 13
-rw-r--r--  drivers/iio/temperature/maxim_thermocouple.c | 1
-rw-r--r--  drivers/infiniband/core/cma.c | 12
-rw-r--r--  drivers/infiniband/core/rdma_core.c | 2
-rw-r--r--  drivers/infiniband/core/ucma.c | 6
-rw-r--r--  drivers/infiniband/core/uverbs_main.c | 5
-rw-r--r--  drivers/infiniband/hw/bnxt_re/ib_verbs.c | 2
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_fp.c | 2
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c | 6
-rw-r--r--  drivers/infiniband/hw/hfi1/pcie.c | 11
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c | 8
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c | 2
-rw-r--r--  drivers/irqchip/irq-gic-v3-its.c | 4
-rw-r--r--  drivers/md/dm-crypt.c | 10
-rw-r--r--  drivers/md/dm-integrity.c | 4
-rw-r--r--  drivers/md/dm-raid.c | 154
-rw-r--r--  drivers/md/dm-thin-metadata.c | 36
-rw-r--r--  drivers/md/dm-thin.c | 73
-rw-r--r--  drivers/md/dm-verity-target.c | 24
-rw-r--r--  drivers/md/md-cluster.c | 10
-rw-r--r--  drivers/md/raid10.c | 5
-rw-r--r--  drivers/md/raid5-log.h | 5
-rw-r--r--  drivers/md/raid5.c | 6
-rw-r--r--  drivers/memory/ti-aemif.c | 2
-rw-r--r--  drivers/misc/hmc6352.c | 2
-rw-r--r--  drivers/misc/ibmvmc.c | 2
-rw-r--r--  drivers/misc/mei/bus.c | 12
-rw-r--r--  drivers/misc/mei/client.c | 2
-rw-r--r--  drivers/misc/mei/hbm.c | 9
-rw-r--r--  drivers/mmc/core/queue.c | 12
-rw-r--r--  drivers/mmc/core/queue.h | 1
-rw-r--r--  drivers/mmc/host/android-goldfish.c | 4
-rw-r--r--  drivers/mmc/host/atmel-mci.c | 12
-rw-r--r--  drivers/mmc/host/meson-mx-sdio.c | 8
-rw-r--r--  drivers/mmc/host/omap_hsmmc.c | 1
-rw-r--r--  drivers/mmc/host/renesas_sdhi_internal_dmac.c | 10
-rw-r--r--  drivers/mtd/nand/raw/denali.c | 5
-rw-r--r--  drivers/mtd/nand/raw/docg4.c | 4
-rw-r--r--  drivers/net/appletalk/ipddp.c | 8
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global1.h | 2
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global1_atu.c | 2
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_com.c | 24
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_eth_com.c | 6
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_eth_com.h | 8
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.c | 82
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.h | 11
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 31
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 16
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | 20
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.h | 3
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmmii.c | 10
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 46
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 10
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c | 5
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hnae.h | 8
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c | 67
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c | 36
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 44
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h | 8
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | 29
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h | 3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c | 23
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c | 23
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_enet.c | 129
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 6
-rw-r--r--  drivers/net/ethernet/hp/hp100.c | 2
-rw-r--r--  drivers/net/ethernet/ibm/emac/core.c | 6
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 12
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 7
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 15
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h | 15
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 25
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c | 30
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_controlq.c | 29
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.c | 52
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 115
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_nvm.c | 5
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.h | 6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_type.h | 16
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 7
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_main.c | 5
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 36
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 31
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 1
-rw-r--r--  drivers/net/ethernet/lantiq_etop.c | 1
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/dev.c | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 19
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 76
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/health.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/wq.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/wq.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 20
-rw-r--r--  drivers/net/ethernet/microchip/lan743x_main.c | 6
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/action.c | 12
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/main.h | 1
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/match.c | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/offload.c | 11
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 48
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_init_ops.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.c | 187
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.h | 27
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_filter.c | 6
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_main.c | 23
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_7k.c | 76
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_spi.c | 110
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_spi.h | 5
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 56
-rw-r--r--  drivers/net/ethernet/renesas/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/renesas/Makefile | 1
-rw-r--r--  drivers/net/ethernet/renesas/ravb.h | 5
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c | 5
-rw-r--r--  drivers/net/ethernet/renesas/ravb_ptp.c | 6
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 49
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.h | 13
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/Kconfig | 12
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac.h | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 5
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 5
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c | 5
-rw-r--r--  drivers/net/ethernet/ti/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/ti/cpsw-phy-sel.c | 9
-rw-r--r--  drivers/net/hyperv/netvsc.c | 3
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 75
-rw-r--r--  drivers/net/phy/sfp.c | 20
-rw-r--r--  drivers/net/ppp/pppoe.c | 3
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 44
-rw-r--r--  drivers/net/usb/r8152.c | 4
-rw-r--r--  drivers/net/veth.c | 4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c | 50
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c | 12
-rw-r--r--  drivers/net/xen-netfront.c | 32
-rw-r--r--  drivers/nvme/host/pci.c | 8
-rw-r--r--  drivers/nvme/target/core.c | 4
-rw-r--r--  drivers/nvme/target/fcloop.c | 3
-rw-r--r--  drivers/nvme/target/rdma.c | 27
-rw-r--r--  drivers/of/base.c | 50
-rw-r--r--  drivers/of/platform.c | 4
-rw-r--r--  drivers/pci/controller/pci-hyperv.c | 37
-rw-r--r--  drivers/pci/hotplug/pciehp_hpc.c | 18
-rw-r--r--  drivers/pci/pci.c | 3
-rw-r--r--  drivers/pci/probe.c | 5
-rw-r--r--  drivers/pci/quirks.c | 6
-rw-r--r--  drivers/pci/switch/switchtec.c | 4
-rw-r--r--  drivers/pinctrl/cirrus/pinctrl-madera-core.c | 2
-rw-r--r--  drivers/pinctrl/pinctrl-ingenic.c | 4
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-msm.c | 24
-rw-r--r--  drivers/s390/crypto/ap_bus.c | 86
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 11
-rw-r--r--  drivers/s390/net/qeth_l2_main.c | 2
-rw-r--r--  drivers/s390/net/qeth_l3_main.c | 2
-rw-r--r--  drivers/scsi/Kconfig | 10
-rw-r--r--  drivers/scsi/aacraid/aacraid.h | 2
-rw-r--r--  drivers/scsi/csiostor/csio_hw.c | 71
-rw-r--r--  drivers/scsi/csiostor/csio_hw.h | 1
-rw-r--r--  drivers/scsi/csiostor/csio_mb.c | 6
-rw-r--r--  drivers/scsi/hosts.c | 24
-rw-r--r--  drivers/scsi/hpsa.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 8
-rw-r--r--  drivers/scsi/qedi/qedi.h | 7
-rw-r--r--  drivers/scsi/qedi/qedi_main.c | 28
-rw-r--r--  drivers/scsi/scsi_lib.c | 21
-rw-r--r--  drivers/spi/spi-fsl-dspi.c | 6
-rw-r--r--  drivers/spi/spi.c | 13
-rw-r--r--  drivers/staging/erofs/Kconfig | 2
-rw-r--r--  drivers/staging/erofs/super.c | 4
-rw-r--r--  drivers/staging/fbtft/TODO | 4
-rw-r--r--  drivers/staging/gasket/TODO | 13
-rw-r--r--  drivers/staging/vboxvideo/vbox_drv.c | 7
-rw-r--r--  drivers/staging/vboxvideo/vbox_mode.c | 5
-rw-r--r--  drivers/staging/wilc1000/Makefile | 3
-rw-r--r--  drivers/staging/wilc1000/linux_wlan.c | 6
-rw-r--r--  drivers/staging/wilc1000/wilc_debugfs.c | 7
-rw-r--r--  drivers/staging/wilc1000/wilc_wlan.c | 6
-rw-r--r--  drivers/staging/wilc1000/wilc_wlan_if.h | 2
-rw-r--r--  drivers/target/iscsi/cxgbit/cxgbit_ddp.c | 8
-rw-r--r--  drivers/target/iscsi/iscsi_target.c | 9
-rw-r--r--  drivers/target/iscsi/iscsi_target_login.c | 149
-rw-r--r--  drivers/target/iscsi/iscsi_target_login.h | 2
-rw-r--r--  drivers/thermal/of-thermal.c | 7
-rw-r--r--  drivers/thermal/qoriq_thermal.c | 27
-rw-r--r--  drivers/thermal/rcar_gen3_thermal.c | 11
-rw-r--r--  drivers/thermal/rcar_thermal.c | 16
-rw-r--r--  drivers/tty/hvc/hvc_console.c | 38
-rw-r--r--  drivers/usb/class/cdc-acm.c | 73
-rw-r--r--  drivers/usb/class/cdc-acm.h | 1
-rw-r--r--  drivers/usb/class/cdc-wdm.c | 2
-rw-r--r--  drivers/usb/common/common.c | 25
-rw-r--r--  drivers/usb/core/hcd-pci.c | 2
-rw-r--r--  drivers/usb/core/message.c | 11
-rw-r--r--  drivers/usb/core/of.c | 26
-rw-r--r--  drivers/usb/core/quirks.c | 7
-rw-r--r--  drivers/usb/dwc2/platform.c | 4
-rw-r--r--  drivers/usb/dwc3/dwc3-of-simple.c | 10
-rw-r--r--  drivers/usb/dwc3/dwc3-pci.c | 4
-rw-r--r--  drivers/usb/dwc3/gadget.c | 1
-rw-r--r--  drivers/usb/gadget/udc/fotg210-udc.c | 15
-rw-r--r--  drivers/usb/gadget/udc/net2280.c | 16
-rw-r--r--  drivers/usb/gadget/udc/renesas_usb3.c | 5
-rw-r--r--  drivers/usb/host/u132-hcd.c | 2
-rw-r--r--  drivers/usb/host/xhci-mem.c | 4
-rw-r--r--  drivers/usb/host/xhci-plat.c | 27
-rw-r--r--  drivers/usb/host/xhci.c | 30
-rw-r--r--  drivers/usb/misc/uss720.c | 4
-rw-r--r--  drivers/usb/misc/yurex.c | 8
-rw-r--r--  drivers/usb/mtu3/mtu3_core.c | 6
-rw-r--r--  drivers/usb/mtu3/mtu3_hw_regs.h | 1
-rw-r--r--  drivers/usb/serial/io_ti.h | 2
-rw-r--r--  drivers/usb/serial/ti_usb_3410_5052.c | 2
-rw-r--r--  drivers/usb/storage/scsiglue.c | 9
-rw-r--r--  drivers/usb/storage/uas.c | 21
-rw-r--r--  drivers/usb/storage/unusual_devs.h | 7
-rw-r--r--  drivers/usb/typec/bus.c | 7
-rw-r--r--  drivers/usb/typec/class.c | 1
-rw-r--r--  drivers/vhost/vhost.c | 2
-rw-r--r--  drivers/xen/Kconfig | 10
-rw-r--r--  drivers/xen/cpu_hotplug.c | 15
-rw-r--r--  drivers/xen/events/events_base.c | 2
-rw-r--r--  drivers/xen/gntdev.c | 26
-rw-r--r--  drivers/xen/manage.c | 6
-rw-r--r--  drivers/xen/mem-reservation.c | 4
-rw-r--r--  drivers/xen/xen-balloon.c | 3
-rw-r--r--  drivers/xen/xenbus/xenbus_probe.c | 9
383 files changed, 4336 insertions(+), 2581 deletions(-)
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 9706613eecf9..bf64cfa30feb 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -879,7 +879,7 @@ static void acpi_lpss_dismiss(struct device *dev)
 #define LPSS_GPIODEF0_DMA_LLP		BIT(13)
 
 static DEFINE_MUTEX(lpss_iosf_mutex);
-static bool lpss_iosf_d3_entered;
+static bool lpss_iosf_d3_entered = true;
 
 static void lpss_iosf_enter_d3_state(void)
 {
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 292088fcc624..d2e29a19890d 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -35,11 +35,11 @@
 #include <linux/delay.h>
 #ifdef CONFIG_X86
 #include <asm/mpspec.h>
+#include <linux/dmi.h>
 #endif
 #include <linux/acpi_iort.h>
 #include <linux/pci.h>
 #include <acpi/apei.h>
-#include <linux/dmi.h>
 #include <linux/suspend.h>
 
 #include "internal.h"
@@ -82,10 +82,6 @@ static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
 	},
 	{}
 };
-#else
-static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
-	{}
-};
 #endif
 
 /* --------------------------------------------------------------------------
@@ -1033,11 +1029,16 @@ void __init acpi_early_init(void)
 
 	acpi_permanent_mmap = true;
 
+#ifdef CONFIG_X86
 	/*
 	 * If the machine falls into the DMI check table,
-	 * DSDT will be copied to memory
+	 * DSDT will be copied to memory.
+	 * Note that calling dmi_check_system() here on other architectures
+	 * would not be OK because only x86 initializes dmi early enough.
+	 * Thankfully only x86 systems need such quirks for now.
 	 */
 	dmi_check_system(dsdt_dmi_table);
+#endif
 
 	status = acpi_reallocate_root_table();
 	if (ACPI_FAILURE(status)) {
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 3f3b7b253445..64fd96eada31 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -332,6 +332,35 @@ err_no_vma:
 	return vma ? -ENOMEM : -ESRCH;
 }
 
+
+static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
+		struct vm_area_struct *vma)
+{
+	if (vma)
+		alloc->vma_vm_mm = vma->vm_mm;
+	/*
+	 * If we see alloc->vma is not NULL, buffer data structures set up
+	 * completely. Look at smp_rmb side binder_alloc_get_vma.
+	 * We also want to guarantee new alloc->vma_vm_mm is always visible
+	 * if alloc->vma is set.
+	 */
+	smp_wmb();
+	alloc->vma = vma;
+}
+
+static inline struct vm_area_struct *binder_alloc_get_vma(
+		struct binder_alloc *alloc)
+{
+	struct vm_area_struct *vma = NULL;
+
+	if (alloc->vma) {
+		/* Look at description in binder_alloc_set_vma */
+		smp_rmb();
+		vma = alloc->vma;
+	}
+	return vma;
+}
+
 static struct binder_buffer *binder_alloc_new_buf_locked(
 				struct binder_alloc *alloc,
 				size_t data_size,
@@ -348,7 +377,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 	size_t size, data_offsets_size;
 	int ret;
 
-	if (alloc->vma == NULL) {
+	if (!binder_alloc_get_vma(alloc)) {
 		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
 				   "%d: binder_alloc_buf, no vma\n",
 				   alloc->pid);
@@ -723,9 +752,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 	buffer->free = 1;
 	binder_insert_free_buffer(alloc, buffer);
 	alloc->free_async_space = alloc->buffer_size / 2;
-	barrier();
-	alloc->vma = vma;
-	alloc->vma_vm_mm = vma->vm_mm;
+	binder_alloc_set_vma(alloc, vma);
 	mmgrab(alloc->vma_vm_mm);
 
 	return 0;
@@ -754,10 +781,10 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 	int buffers, page_count;
 	struct binder_buffer *buffer;
 
-	BUG_ON(alloc->vma);
-
 	buffers = 0;
 	mutex_lock(&alloc->mutex);
+	BUG_ON(alloc->vma);
+
 	while ((n = rb_first(&alloc->allocated_buffers))) {
 		buffer = rb_entry(n, struct binder_buffer, rb_node);
 
@@ -900,7 +927,7 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
  */
 void binder_alloc_vma_close(struct binder_alloc *alloc)
 {
-	WRITE_ONCE(alloc->vma, NULL);
+	binder_alloc_set_vma(alloc, NULL);
 }
 
 /**
@@ -935,7 +962,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 
 	index = page - alloc->pages;
 	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
-	vma = alloc->vma;
+	vma = binder_alloc_get_vma(alloc);
 	if (vma) {
 		if (!mmget_not_zero(alloc->vma_vm_mm))
 			goto err_mmget;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 172e32840256..599e01bcdef2 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -7394,4 +7394,4 @@ EXPORT_SYMBOL_GPL(ata_cable_unknown);
 EXPORT_SYMBOL_GPL(ata_cable_ignore);
 EXPORT_SYMBOL_GPL(ata_cable_sata);
 EXPORT_SYMBOL_GPL(ata_host_get);
-EXPORT_SYMBOL_GPL(ata_host_put);
\ No newline at end of file
+EXPORT_SYMBOL_GPL(ata_host_put);
diff --git a/drivers/ata/pata_ftide010.c b/drivers/ata/pata_ftide010.c
index 5d4b72e21161..569a4a662dcd 100644
--- a/drivers/ata/pata_ftide010.c
+++ b/drivers/ata/pata_ftide010.c
@@ -256,14 +256,12 @@ static struct ata_port_operations pata_ftide010_port_ops = {
 	.qc_issue = ftide010_qc_issue,
 };
 
-static struct ata_port_info ftide010_port_info[] = {
-	{
-		.flags = ATA_FLAG_SLAVE_POSS,
-		.mwdma_mask = ATA_MWDMA2,
-		.udma_mask = ATA_UDMA6,
-		.pio_mask = ATA_PIO4,
-		.port_ops = &pata_ftide010_port_ops,
-	},
+static struct ata_port_info ftide010_port_info = {
+	.flags = ATA_FLAG_SLAVE_POSS,
+	.mwdma_mask = ATA_MWDMA2,
+	.udma_mask = ATA_UDMA6,
+	.pio_mask = ATA_PIO4,
+	.port_ops = &pata_ftide010_port_ops,
 };
 
 #if IS_ENABLED(CONFIG_SATA_GEMINI)
@@ -349,6 +347,7 @@ static int pata_ftide010_gemini_cable_detect(struct ata_port *ap)
 }
 
 static int pata_ftide010_gemini_init(struct ftide010 *ftide,
+				     struct ata_port_info *pi,
 				     bool is_ata1)
 {
 	struct device *dev = ftide->dev;
@@ -373,7 +372,13 @@ static int pata_ftide010_gemini_init(struct ftide010 *ftide,
 
 	/* Flag port as SATA-capable */
 	if (gemini_sata_bridge_enabled(sg, is_ata1))
-		ftide010_port_info[0].flags |= ATA_FLAG_SATA;
+		pi->flags |= ATA_FLAG_SATA;
+
+	/* This device has broken DMA, only PIO works */
+	if (of_machine_is_compatible("itian,sq201")) {
+		pi->mwdma_mask = 0;
+		pi->udma_mask = 0;
+	}
 
 	/*
 	 * We assume that a simple 40-wire cable is used in the PATA mode.
@@ -435,6 +440,7 @@ static int pata_ftide010_gemini_init(struct ftide010 *ftide,
 }
 #else
 static int pata_ftide010_gemini_init(struct ftide010 *ftide,
+				     struct ata_port_info *pi,
 				     bool is_ata1)
 {
 	return -ENOTSUPP;
@@ -446,7 +452,7 @@ static int pata_ftide010_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct device_node *np = dev->of_node;
-	const struct ata_port_info pi = ftide010_port_info[0];
+	struct ata_port_info pi = ftide010_port_info;
 	const struct ata_port_info *ppi[] = { &pi, NULL };
 	struct ftide010 *ftide;
 	struct resource *res;
@@ -490,6 +496,7 @@ static int pata_ftide010_probe(struct platform_device *pdev)
 	 * are ATA0. This will also set up the cable types.
 	 */
 	ret = pata_ftide010_gemini_init(ftide,
+					&pi,
 					(res->start == 0x63400000));
 	if (ret)
 		goto err_dis_clk;
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index 0943e7065e0e..b3c0498ee433 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -209,21 +209,24 @@ static struct fw_priv *__lookup_fw_priv(const char *fw_name)
 static int alloc_lookup_fw_priv(const char *fw_name,
 				struct firmware_cache *fwc,
 				struct fw_priv **fw_priv, void *dbuf,
-				size_t size)
+				size_t size, enum fw_opt opt_flags)
 {
 	struct fw_priv *tmp;
 
 	spin_lock(&fwc->lock);
-	tmp = __lookup_fw_priv(fw_name);
-	if (tmp) {
-		kref_get(&tmp->ref);
-		spin_unlock(&fwc->lock);
-		*fw_priv = tmp;
-		pr_debug("batched request - sharing the same struct fw_priv and lookup for multiple requests\n");
-		return 1;
+	if (!(opt_flags & FW_OPT_NOCACHE)) {
+		tmp = __lookup_fw_priv(fw_name);
+		if (tmp) {
+			kref_get(&tmp->ref);
+			spin_unlock(&fwc->lock);
+			*fw_priv = tmp;
+			pr_debug("batched request - sharing the same struct fw_priv and lookup for multiple requests\n");
+			return 1;
+		}
 	}
+
 	tmp = __allocate_fw_priv(fw_name, fwc, dbuf, size);
-	if (tmp)
+	if (tmp && !(opt_flags & FW_OPT_NOCACHE))
 		list_add(&tmp->list, &fwc->head);
 	spin_unlock(&fwc->lock);
 
@@ -493,7 +496,8 @@ int assign_fw(struct firmware *fw, struct device *device,
  */
 static int
 _request_firmware_prepare(struct firmware **firmware_p, const char *name,
-			  struct device *device, void *dbuf, size_t size)
+			  struct device *device, void *dbuf, size_t size,
+			  enum fw_opt opt_flags)
 {
 	struct firmware *firmware;
 	struct fw_priv *fw_priv;
@@ -511,7 +515,8 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name,
 		return 0; /* assigned */
 	}
 
-	ret = alloc_lookup_fw_priv(name, &fw_cache, &fw_priv, dbuf, size);
+	ret = alloc_lookup_fw_priv(name, &fw_cache, &fw_priv, dbuf, size,
+				   opt_flags);
 
 	/*
 	 * bind with 'priv' now to avoid warning in failure path
@@ -571,7 +576,8 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
 		goto out;
 	}
 
-	ret = _request_firmware_prepare(&fw, name, device, buf, size);
+	ret = _request_firmware_prepare(&fw, name, device, buf, size,
+					opt_flags);
 	if (ret <= 0) /* error or already assigned */
 		goto out;
 
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index c8a1cb0b6136..817320c7c4c1 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -417,25 +417,23 @@ static ssize_t show_valid_zones(struct device *dev,
 	int nid;
 
 	/*
-	 * The block contains more than one zone can not be offlined.
-	 * This can happen e.g. for ZONE_DMA and ZONE_DMA32
-	 */
-	if (!test_pages_in_a_zone(start_pfn, start_pfn + nr_pages, &valid_start_pfn, &valid_end_pfn))
-		return sprintf(buf, "none\n");
-
-	start_pfn = valid_start_pfn;
-	nr_pages = valid_end_pfn - start_pfn;
-
-	/*
 	 * Check the existing zone. Make sure that we do that only on the
 	 * online nodes otherwise the page_zone is not reliable
 	 */
 	if (mem->state == MEM_ONLINE) {
+		/*
+		 * The block contains more than one zone can not be offlined.
+		 * This can happen e.g. for ZONE_DMA and ZONE_DMA32
+		 */
+		if (!test_pages_in_a_zone(start_pfn, start_pfn + nr_pages,
+					  &valid_start_pfn, &valid_end_pfn))
+			return sprintf(buf, "none\n");
+		start_pfn = valid_start_pfn;
 		strcat(buf, page_zone(pfn_to_page(start_pfn))->name);
 		goto out;
 	}
 
-	nid = pfn_to_nid(start_pfn);
+	nid = mem->nid;
 	default_zone = zone_for_pfn_range(MMOP_ONLINE_KEEP, nid, start_pfn, nr_pages);
 	strcat(buf, default_zone->name);
 
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 8e2e4757adcb..5a42ae4078c2 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -185,7 +185,7 @@ EXPORT_SYMBOL_GPL(of_pm_clk_add_clk);
 int of_pm_clk_add_clks(struct device *dev)
 {
 	struct clk **clks;
-	unsigned int i, count;
+	int i, count;
 	int ret;
 
 	if (!dev || !dev->of_node)
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 3863c00372bb..14a51254c3db 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -1239,6 +1239,9 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
 	case NBD_SET_SOCK:
 		return nbd_add_socket(nbd, arg, false);
 	case NBD_SET_BLKSIZE:
+		if (!arg || !is_power_of_2(arg) || arg < 512 ||
+		    arg > PAGE_SIZE)
+			return -EINVAL;
 		nbd_size_set(nbd, arg,
 			     div_s64(config->bytesize, arg));
 		return 0;
diff --git a/drivers/block/null_blk.h b/drivers/block/null_blk.h
index d81781f22dba..34e0030f0592 100644
--- a/drivers/block/null_blk.h
+++ b/drivers/block/null_blk.h
@@ -87,10 +87,10 @@ struct nullb {
 #ifdef CONFIG_BLK_DEV_ZONED
 int null_zone_init(struct nullb_device *dev);
 void null_zone_exit(struct nullb_device *dev);
-blk_status_t null_zone_report(struct nullb *nullb,
-			      struct nullb_cmd *cmd);
-void null_zone_write(struct nullb_cmd *cmd);
-void null_zone_reset(struct nullb_cmd *cmd);
+blk_status_t null_zone_report(struct nullb *nullb, struct bio *bio);
+void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
+		     unsigned int nr_sectors);
+void null_zone_reset(struct nullb_cmd *cmd, sector_t sector);
 #else
 static inline int null_zone_init(struct nullb_device *dev)
 {
@@ -98,11 +98,14 @@ static inline int null_zone_init(struct nullb_device *dev)
 }
 static inline void null_zone_exit(struct nullb_device *dev) {}
 static inline blk_status_t null_zone_report(struct nullb *nullb,
-					    struct nullb_cmd *cmd)
+					    struct bio *bio)
 {
 	return BLK_STS_NOTSUPP;
 }
-static inline void null_zone_write(struct nullb_cmd *cmd) {}
-static inline void null_zone_reset(struct nullb_cmd *cmd) {}
+static inline void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
+				   unsigned int nr_sectors)
+{
+}
+static inline void null_zone_reset(struct nullb_cmd *cmd, sector_t sector) {}
 #endif /* CONFIG_BLK_DEV_ZONED */
 #endif /* __NULL_BLK_H */
diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
index 6127e3ff7b4b..093b614d6524 100644
--- a/drivers/block/null_blk_main.c
+++ b/drivers/block/null_blk_main.c
@@ -1157,16 +1157,33 @@ static void null_restart_queue_async(struct nullb *nullb)
 	}
 }
 
+static bool cmd_report_zone(struct nullb *nullb, struct nullb_cmd *cmd)
+{
+	struct nullb_device *dev = cmd->nq->dev;
+
+	if (dev->queue_mode == NULL_Q_BIO) {
+		if (bio_op(cmd->bio) == REQ_OP_ZONE_REPORT) {
+			cmd->error = null_zone_report(nullb, cmd->bio);
+			return true;
+		}
+	} else {
+		if (req_op(cmd->rq) == REQ_OP_ZONE_REPORT) {
+			cmd->error = null_zone_report(nullb, cmd->rq->bio);
+			return true;
+		}
+	}
+
+	return false;
+}
+
 static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
 {
 	struct nullb_device *dev = cmd->nq->dev;
 	struct nullb *nullb = dev->nullb;
 	int err = 0;
 
-	if (req_op(cmd->rq) == REQ_OP_ZONE_REPORT) {
-		cmd->error = null_zone_report(nullb, cmd);
+	if (cmd_report_zone(nullb, cmd))
 		goto out;
-	}
 
 	if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
 		struct request *rq = cmd->rq;
@@ -1234,10 +1251,24 @@ static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
 	cmd->error = errno_to_blk_status(err);
 
 	if (!cmd->error && dev->zoned) {
-		if (req_op(cmd->rq) == REQ_OP_WRITE)
-			null_zone_write(cmd);
-		else if (req_op(cmd->rq) == REQ_OP_ZONE_RESET)
-			null_zone_reset(cmd);
+		sector_t sector;
+		unsigned int nr_sectors;
+		int op;
+
+		if (dev->queue_mode == NULL_Q_BIO) {
+			op = bio_op(cmd->bio);
+			sector = cmd->bio->bi_iter.bi_sector;
+			nr_sectors = cmd->bio->bi_iter.bi_size >> 9;
+		} else {
+			op = req_op(cmd->rq);
+			sector = blk_rq_pos(cmd->rq);
+			nr_sectors = blk_rq_sectors(cmd->rq);
+		}
+
+		if (op == REQ_OP_WRITE)
+			null_zone_write(cmd, sector, nr_sectors);
+		else if (op == REQ_OP_ZONE_RESET)
+			null_zone_reset(cmd, sector);
 	}
 out:
 	/* Complete IO by inline, softirq or timer */
diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c
index a979ca00d7be..7c6b86d98700 100644
--- a/drivers/block/null_blk_zoned.c
+++ b/drivers/block/null_blk_zoned.c
@@ -48,8 +48,8 @@ void null_zone_exit(struct nullb_device *dev)
 	kvfree(dev->zones);
 }
 
-static void null_zone_fill_rq(struct nullb_device *dev, struct request *rq,
-			      unsigned int zno, unsigned int nr_zones)
+static void null_zone_fill_bio(struct nullb_device *dev, struct bio *bio,
+			       unsigned int zno, unsigned int nr_zones)
 {
 	struct blk_zone_report_hdr *hdr = NULL;
 	struct bio_vec bvec;
@@ -57,7 +57,7 @@ static void null_zone_fill_rq(struct nullb_device *dev, struct request *rq,
 	void *addr;
 	unsigned int zones_to_cpy;
 
-	bio_for_each_segment(bvec, rq->bio, iter) {
+	bio_for_each_segment(bvec, bio, iter) {
 		addr = kmap_atomic(bvec.bv_page);
 
 		zones_to_cpy = bvec.bv_len / sizeof(struct blk_zone);
@@ -84,29 +84,24 @@ static void null_zone_fill_rq(struct nullb_device *dev, struct request *rq,
 	}
 }
 
-blk_status_t null_zone_report(struct nullb *nullb,
-			      struct nullb_cmd *cmd)
+blk_status_t null_zone_report(struct nullb *nullb, struct bio *bio)
 {
 	struct nullb_device *dev = nullb->dev;
-	struct request *rq = cmd->rq;
-	unsigned int zno = null_zone_no(dev, blk_rq_pos(rq));
+	unsigned int zno = null_zone_no(dev, bio->bi_iter.bi_sector);
 	unsigned int nr_zones = dev->nr_zones - zno;
-	unsigned int max_zones = (blk_rq_bytes(rq) /
-				  sizeof(struct blk_zone)) - 1;
+	unsigned int max_zones;
 
+	max_zones = (bio->bi_iter.bi_size / sizeof(struct blk_zone)) - 1;
 	nr_zones = min_t(unsigned int, nr_zones, max_zones);
-
-	null_zone_fill_rq(nullb->dev, rq, zno, nr_zones);
+	null_zone_fill_bio(nullb->dev, bio, zno, nr_zones);
 
 	return BLK_STS_OK;
 }
 
-void null_zone_write(struct nullb_cmd *cmd)
+void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
+		     unsigned int nr_sectors)
 {
 	struct nullb_device *dev = cmd->nq->dev;
-	struct request *rq = cmd->rq;
-	sector_t sector = blk_rq_pos(rq);
-	unsigned int rq_sectors = blk_rq_sectors(rq);
 	unsigned int zno = null_zone_no(dev, sector);
 	struct blk_zone *zone = &dev->zones[zno];
 
@@ -118,7 +113,7 @@ void null_zone_write(struct nullb_cmd *cmd)
 	case BLK_ZONE_COND_EMPTY:
 	case BLK_ZONE_COND_IMP_OPEN:
 		/* Writes must be at the write pointer position */
-		if (blk_rq_pos(rq) != zone->wp) {
+		if (sector != zone->wp) {
 			cmd->error = BLK_STS_IOERR;
 			break;
 		}
@@ -126,7 +121,7 @@ void null_zone_write(struct nullb_cmd *cmd)
 		if (zone->cond == BLK_ZONE_COND_EMPTY)
 			zone->cond = BLK_ZONE_COND_IMP_OPEN;
 
-		zone->wp += rq_sectors;
+		zone->wp += nr_sectors;
 		if (zone->wp == zone->start + zone->len)
 			zone->cond = BLK_ZONE_COND_FULL;
 		break;
@@ -137,11 +132,10 @@ void null_zone_write(struct nullb_cmd *cmd)
 	}
 }
 
-void null_zone_reset(struct nullb_cmd *cmd)
+void null_zone_reset(struct nullb_cmd *cmd, sector_t sector)
 {
 	struct nullb_device *dev = cmd->nq->dev;
-	struct request *rq = cmd->rq;
-	unsigned int zno = null_zone_no(dev, blk_rq_pos(rq));
+	unsigned int zno = null_zone_no(dev, sector);
 	struct blk_zone *zone = &dev->zones[zno];
 
 	zone->cond = BLK_ZONE_COND_EMPTY;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 7915f3b03736..73ed5f3a862d 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -4207,11 +4207,13 @@ static ssize_t rbd_parent_show(struct device *dev,
 
 	count += sprintf(&buf[count], "%s"
 			 "pool_id %llu\npool_name %s\n"
+			 "pool_ns %s\n"
 			 "image_id %s\nimage_name %s\n"
 			 "snap_id %llu\nsnap_name %s\n"
 			 "overlap %llu\n",
 			 !count ? "" : "\n", /* first? */
 			 spec->pool_id, spec->pool_name,
+			 spec->pool_ns ?: "",
 			 spec->image_id, spec->image_name ?: "(unknown)",
 			 spec->snap_id, spec->snap_name,
 			 rbd_dev->parent_overlap);
@@ -4584,47 +4586,177 @@ static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
 						&rbd_dev->header.features);
 }
 
+struct parent_image_info {
+	u64		pool_id;
+	const char	*pool_ns;
+	const char	*image_id;
+	u64		snap_id;
+
+	bool		has_overlap;
+	u64		overlap;
+};
+
+/*
+ * The caller is responsible for @pii.
+ */
+static int decode_parent_image_spec(void **p, void *end,
+				    struct parent_image_info *pii)
+{
+	u8 struct_v;
+	u32 struct_len;
+	int ret;
+
+	ret = ceph_start_decoding(p, end, 1, "ParentImageSpec",
+				  &struct_v, &struct_len);
+	if (ret)
+		return ret;
+
+	ceph_decode_64_safe(p, end, pii->pool_id, e_inval);
+	pii->pool_ns = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
+	if (IS_ERR(pii->pool_ns)) {
+		ret = PTR_ERR(pii->pool_ns);
+		pii->pool_ns = NULL;
+		return ret;
+	}
+	pii->image_id = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
+	if (IS_ERR(pii->image_id)) {
+		ret = PTR_ERR(pii->image_id);
+		pii->image_id = NULL;
+		return ret;
+	}
+	ceph_decode_64_safe(p, end, pii->snap_id, e_inval);
+	return 0;
+
+e_inval:
+	return -EINVAL;
+}
+
+static int __get_parent_info(struct rbd_device *rbd_dev,
+			     struct page *req_page,
+			     struct page *reply_page,
+			     struct parent_image_info *pii)
+{
+	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
+	size_t reply_len = PAGE_SIZE;
+	void *p, *end;
+	int ret;
+
+	ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
+			     "rbd", "parent_get", CEPH_OSD_FLAG_READ,
+			     req_page, sizeof(u64), reply_page, &reply_len);
+	if (ret)
+		return ret == -EOPNOTSUPP ? 1 : ret;
+
+	p = page_address(reply_page);
+	end = p + reply_len;
+	ret = decode_parent_image_spec(&p, end, pii);
+	if (ret)
+		return ret;
+
+	ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
+			     "rbd", "parent_overlap_get", CEPH_OSD_FLAG_READ,
+			     req_page, sizeof(u64), reply_page, &reply_len);
+	if (ret)
+		return ret;
+
+	p = page_address(reply_page);
+	end = p + reply_len;
+	ceph_decode_8_safe(&p, end, pii->has_overlap, e_inval);
+	if (pii->has_overlap)
+		ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
+
+	return 0;
+
+e_inval:
+	return -EINVAL;
+}
+
+/*
+ * The caller is responsible for @pii.
+ */
+static int __get_parent_info_legacy(struct rbd_device *rbd_dev,
+				    struct page *req_page,
+				    struct page *reply_page,
+				    struct parent_image_info *pii)
+{
+	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
+	size_t reply_len = PAGE_SIZE;
+	void *p, *end;
+	int ret;
+
+	ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
+			     "rbd", "get_parent", CEPH_OSD_FLAG_READ,
+			     req_page, sizeof(u64), reply_page, &reply_len);
+	if (ret)
+		return ret;
+
+	p = page_address(reply_page);
+	end = p + reply_len;
+	ceph_decode_64_safe(&p, end, pii->pool_id, e_inval);
+	pii->image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
+	if (IS_ERR(pii->image_id)) {
+		ret = PTR_ERR(pii->image_id);
+		pii->image_id = NULL;
+		return ret;
+	}
+	ceph_decode_64_safe(&p, end, pii->snap_id, e_inval);
+	pii->has_overlap = true;
+	ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
+
+	return 0;
+
+e_inval:
+	return -EINVAL;
+}
+
+static int get_parent_info(struct rbd_device *rbd_dev,
+			   struct parent_image_info *pii)
+{
+	struct page *req_page, *reply_page;
+	void *p;
+	int ret;
+
+	req_page = alloc_page(GFP_KERNEL);
+	if (!req_page)
+		return -ENOMEM;
+
+	reply_page = alloc_page(GFP_KERNEL);
+	if (!reply_page) {
+		__free_page(req_page);
+		return -ENOMEM;
+	}
+
+	p = page_address(req_page);
+	ceph_encode_64(&p, rbd_dev->spec->snap_id);
+	ret = __get_parent_info(rbd_dev, req_page, reply_page, pii);
+	if (ret > 0)
+		ret = __get_parent_info_legacy(rbd_dev, req_page, reply_page,
+					       pii);
+
+	__free_page(req_page);
+	__free_page(reply_page);
+	return ret;
+}
+
 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
 {
 	struct rbd_spec *parent_spec;
-	size_t size;
-	void *reply_buf = NULL;
-	__le64 snapid;
-	void *p;
-	void *end;
-	u64 pool_id;
-	char *image_id;
-	u64 snap_id;
-	u64 overlap;
+	struct parent_image_info pii = { 0 };
 	int ret;
 
 	parent_spec = rbd_spec_alloc();
 	if (!parent_spec)
 		return -ENOMEM;
 
-	size = sizeof (__le64) +				/* pool_id */
-	       sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX +		/* image_id */
-	       sizeof (__le64) +				/* snap_id */
-	       sizeof (__le64);					/* overlap */
-	reply_buf = kmalloc(size, GFP_KERNEL);
-	if (!reply_buf) {
-		ret = -ENOMEM;
+	ret = get_parent_info(rbd_dev, &pii);
+	if (ret)
 		goto out_err;
-	}
 
-	snapid = cpu_to_le64(rbd_dev->spec->snap_id);
-	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
-				  &rbd_dev->header_oloc, "get_parent",
-				  &snapid, sizeof(snapid), reply_buf, size);
-	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
-	if (ret < 0)
-		goto out_err;
+	dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
+	     __func__, pii.pool_id, pii.pool_ns, pii.image_id, pii.snap_id,
+	     pii.has_overlap, pii.overlap);
 
-	p = reply_buf;
-	end = reply_buf + ret;
-	ret = -ERANGE;
-	ceph_decode_64_safe(&p, end, pool_id, out_err);
-	if (pool_id == CEPH_NOPOOL) {
+	if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap) {
 		/*
 		 * Either the parent never existed, or we have
 		 * record of it but the image got flattened so it no
@@ -4633,6 +4765,10 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
 		 * overlap to 0. The effect of this is that all new
 		 * requests will be treated as if the image had no
 		 * parent.
+		 *
+		 * If !pii.has_overlap, the parent image spec is not
+		 * applicable. It's there to avoid duplication in each
+		 * snapshot record.
 		 */
 		if (rbd_dev->parent_overlap) {
 			rbd_dev->parent_overlap = 0;
@@ -4647,51 +4783,36 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
 	/* The ceph file layout needs to fit pool id in 32 bits */
 
 	ret = -EIO;
-	if (pool_id > (u64)U32_MAX) {
+	if (pii.pool_id > (u64)U32_MAX) {
 		rbd_warn(NULL, "parent pool id too large (%llu > %u)",
-			(unsigned long long)pool_id, U32_MAX);
+			(unsigned long long)pii.pool_id, U32_MAX);
 		goto out_err;
 	}
 
-	image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
-	if (IS_ERR(image_id)) {
-		ret = PTR_ERR(image_id);
-		goto out_err;
-	}
-	ceph_decode_64_safe(&p, end, snap_id, out_err);
-	ceph_decode_64_safe(&p, end, overlap, out_err);
-
 	/*
 	 * The parent won't change (except when the clone is
 	 * flattened, already handled that). So we only need to
 	 * record the parent spec we have not already done so.
 	 */
 	if (!rbd_dev->parent_spec) {
-		parent_spec->pool_id = pool_id;
-		parent_spec->image_id = image_id;
-		parent_spec->snap_id = snap_id;
-
-		/* TODO: support cloning across namespaces */
-		if (rbd_dev->spec->pool_ns) {
-			parent_spec->pool_ns = kstrdup(rbd_dev->spec->pool_ns,
-						       GFP_KERNEL);
-			if (!parent_spec->pool_ns) {
-				ret = -ENOMEM;
-				goto out_err;
-			}
+		parent_spec->pool_id = pii.pool_id;
+		if (pii.pool_ns && *pii.pool_ns) {
+			parent_spec->pool_ns = pii.pool_ns;
+			pii.pool_ns = NULL;
 		}
+		parent_spec->image_id = pii.image_id;
+		pii.image_id = NULL;
+		parent_spec->snap_id = pii.snap_id;
 
 		rbd_dev->parent_spec = parent_spec;
 		parent_spec = NULL;	/* rbd_dev now owns this */
-	} else {
-		kfree(image_id);
 	}
 
 	/*
 	 * We always update the parent overlap. If it's zero we issue
 	 * a warning, as we will proceed as if there was no parent.
 	 */
-	if (!overlap) {
+	if (!pii.overlap) {
 		if (parent_spec) {
 			/* refresh, careful to warn just once */
 			if (rbd_dev->parent_overlap)
@@ -4702,14 +4823,14 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
 			rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
 		}
 	}
-	rbd_dev->parent_overlap = overlap;
+	rbd_dev->parent_overlap = pii.overlap;
 
 out:
 	ret = 0;
 out_err:
-	kfree(reply_buf);
+	kfree(pii.pool_ns);
+	kfree(pii.image_id);
 	rbd_spec_put(parent_spec);
-
 	return ret;
 }
 
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index b55b245e8052..fd1e19f1a49f 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -84,6 +84,18 @@ MODULE_PARM_DESC(max_persistent_grants,
84 "Maximum number of grants to map persistently"); 84 "Maximum number of grants to map persistently");
85 85
86/* 86/*
87 * How long a persistent grant is allowed to remain allocated without being in
88 * use. The time is in seconds, 0 means indefinitely long.
89 */
90
91static unsigned int xen_blkif_pgrant_timeout = 60;
92module_param_named(persistent_grant_unused_seconds, xen_blkif_pgrant_timeout,
93 uint, 0644);
94MODULE_PARM_DESC(persistent_grant_unused_seconds,
95 "Time in seconds an unused persistent grant is allowed to "
96 "remain allocated. Default is 60, 0 means unlimited.");
97
98/*
87 * Maximum number of rings/queues blkback supports, allow as many queues as there 99 * Maximum number of rings/queues blkback supports, allow as many queues as there
88 * are CPUs if user has not specified a value. 100 * are CPUs if user has not specified a value.
89 */ 101 */
@@ -123,6 +135,13 @@ module_param(log_stats, int, 0644);
 /* Number of free pages to remove on each call to gnttab_free_pages */
 #define NUM_BATCH_FREE_PAGES 10
 
+static inline bool persistent_gnt_timeout(struct persistent_gnt *persistent_gnt)
+{
+	return xen_blkif_pgrant_timeout &&
+	       (jiffies - persistent_gnt->last_used >=
+		HZ * xen_blkif_pgrant_timeout);
+}
+
 static inline int get_free_page(struct xen_blkif_ring *ring, struct page **page)
 {
 	unsigned long flags;
@@ -236,8 +255,7 @@ static int add_persistent_gnt(struct xen_blkif_ring *ring,
 		}
 	}
 
-	bitmap_zero(persistent_gnt->flags, PERSISTENT_GNT_FLAGS_SIZE);
-	set_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
+	persistent_gnt->active = true;
 	/* Add new node and rebalance tree. */
 	rb_link_node(&(persistent_gnt->node), parent, new);
 	rb_insert_color(&(persistent_gnt->node), &ring->persistent_gnts);
@@ -261,11 +279,11 @@ static struct persistent_gnt *get_persistent_gnt(struct xen_blkif_ring *ring,
261 else if (gref > data->gnt) 279 else if (gref > data->gnt)
262 node = node->rb_right; 280 node = node->rb_right;
263 else { 281 else {
264 if(test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) { 282 if (data->active) {
265 pr_alert_ratelimited("requesting a grant already in use\n"); 283 pr_alert_ratelimited("requesting a grant already in use\n");
266 return NULL; 284 return NULL;
267 } 285 }
268 set_bit(PERSISTENT_GNT_ACTIVE, data->flags); 286 data->active = true;
269 atomic_inc(&ring->persistent_gnt_in_use); 287 atomic_inc(&ring->persistent_gnt_in_use);
270 return data; 288 return data;
271 } 289 }
@@ -276,10 +294,10 @@ static struct persistent_gnt *get_persistent_gnt(struct xen_blkif_ring *ring,
276static void put_persistent_gnt(struct xen_blkif_ring *ring, 294static void put_persistent_gnt(struct xen_blkif_ring *ring,
277 struct persistent_gnt *persistent_gnt) 295 struct persistent_gnt *persistent_gnt)
278{ 296{
279 if(!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags)) 297 if (!persistent_gnt->active)
280 pr_alert_ratelimited("freeing a grant already unused\n"); 298 pr_alert_ratelimited("freeing a grant already unused\n");
281 set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags); 299 persistent_gnt->last_used = jiffies;
282 clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags); 300 persistent_gnt->active = false;
283 atomic_dec(&ring->persistent_gnt_in_use); 301 atomic_dec(&ring->persistent_gnt_in_use);
284} 302}
285 303
@@ -371,26 +389,26 @@ static void purge_persistent_gnt(struct xen_blkif_ring *ring)
371 struct persistent_gnt *persistent_gnt; 389 struct persistent_gnt *persistent_gnt;
372 struct rb_node *n; 390 struct rb_node *n;
373 unsigned int num_clean, total; 391 unsigned int num_clean, total;
374 bool scan_used = false, clean_used = false; 392 bool scan_used = false;
375 struct rb_root *root; 393 struct rb_root *root;
376 394
377 if (ring->persistent_gnt_c < xen_blkif_max_pgrants ||
378 (ring->persistent_gnt_c == xen_blkif_max_pgrants &&
379 !ring->blkif->vbd.overflow_max_grants)) {
380 goto out;
381 }
382
383 if (work_busy(&ring->persistent_purge_work)) { 395 if (work_busy(&ring->persistent_purge_work)) {
384 pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n"); 396 pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n");
385 goto out; 397 goto out;
386 } 398 }
387 399
388 num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN; 400 if (ring->persistent_gnt_c < xen_blkif_max_pgrants ||
389 num_clean = ring->persistent_gnt_c - xen_blkif_max_pgrants + num_clean; 401 (ring->persistent_gnt_c == xen_blkif_max_pgrants &&
390 num_clean = min(ring->persistent_gnt_c, num_clean); 402 !ring->blkif->vbd.overflow_max_grants)) {
391 if ((num_clean == 0) || 403 num_clean = 0;
392 (num_clean > (ring->persistent_gnt_c - atomic_read(&ring->persistent_gnt_in_use)))) 404 } else {
393 goto out; 405 num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
406 num_clean = ring->persistent_gnt_c - xen_blkif_max_pgrants +
407 num_clean;
408 num_clean = min(ring->persistent_gnt_c, num_clean);
409 pr_debug("Going to purge at least %u persistent grants\n",
410 num_clean);
411 }
394 412
395 /* 413 /*
396 * At this point, we can assure that there will be no calls 414 * At this point, we can assure that there will be no calls
@@ -401,9 +419,7 @@ static void purge_persistent_gnt(struct xen_blkif_ring *ring)
401 * number of grants. 419 * number of grants.
402 */ 420 */
403 421
404 total = num_clean; 422 total = 0;
405
406 pr_debug("Going to purge %u persistent grants\n", num_clean);
407 423
408 BUG_ON(!list_empty(&ring->persistent_purge_list)); 424 BUG_ON(!list_empty(&ring->persistent_purge_list));
409 root = &ring->persistent_gnts; 425 root = &ring->persistent_gnts;
@@ -412,46 +428,37 @@ purge_list:
412 BUG_ON(persistent_gnt->handle == 428 BUG_ON(persistent_gnt->handle ==
413 BLKBACK_INVALID_HANDLE); 429 BLKBACK_INVALID_HANDLE);
414 430
415 if (clean_used) { 431 if (persistent_gnt->active)
416 clear_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
417 continue; 432 continue;
418 } 433 if (!scan_used && !persistent_gnt_timeout(persistent_gnt))
419
420 if (test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
421 continue; 434 continue;
422 if (!scan_used && 435 if (scan_used && total >= num_clean)
423 (test_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags)))
424 continue; 436 continue;
425 437
426 rb_erase(&persistent_gnt->node, root); 438 rb_erase(&persistent_gnt->node, root);
427 list_add(&persistent_gnt->remove_node, 439 list_add(&persistent_gnt->remove_node,
428 &ring->persistent_purge_list); 440 &ring->persistent_purge_list);
429 if (--num_clean == 0) 441 total++;
430 goto finished;
431 } 442 }
432 /* 443 /*
433 * If we get here it means we also need to start cleaning 444 * Check whether we also need to start cleaning
434 * grants that were used since last purge in order to cope 445 * grants that were used since last purge in order to cope
435 * with the requested num 446 * with the requested num
436 */ 447 */
437 if (!scan_used && !clean_used) { 448 if (!scan_used && total < num_clean) {
438 pr_debug("Still missing %u purged frames\n", num_clean); 449 pr_debug("Still missing %u purged frames\n", num_clean - total);
439 scan_used = true; 450 scan_used = true;
440 goto purge_list; 451 goto purge_list;
441 } 452 }
442finished:
443 if (!clean_used) {
444 pr_debug("Finished scanning for grants to clean, removing used flag\n");
445 clean_used = true;
446 goto purge_list;
447 }
448 453
449 ring->persistent_gnt_c -= (total - num_clean); 454 if (total) {
450 ring->blkif->vbd.overflow_max_grants = 0; 455 ring->persistent_gnt_c -= total;
456 ring->blkif->vbd.overflow_max_grants = 0;
451 457
452 /* We can defer this work */ 458 /* We can defer this work */
453 schedule_work(&ring->persistent_purge_work); 459 schedule_work(&ring->persistent_purge_work);
454 pr_debug("Purged %u/%u\n", (total - num_clean), total); 460 pr_debug("Purged %u/%u\n", num_clean, total);
461 }
455 462
456out: 463out:
457 return; 464 return;
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index ecb35fe8ca8d..1d3002d773f7 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -233,16 +233,6 @@ struct xen_vbd {
233 233
234struct backend_info; 234struct backend_info;
235 235
236/* Number of available flags */
237#define PERSISTENT_GNT_FLAGS_SIZE 2
238/* This persistent grant is currently in use */
239#define PERSISTENT_GNT_ACTIVE 0
240/*
241 * This persistent grant has been used, this flag is set when we remove the
242 * PERSISTENT_GNT_ACTIVE, to know that this grant has been used recently.
243 */
244#define PERSISTENT_GNT_WAS_ACTIVE 1
245
246/* Number of requests that we can fit in a ring */ 236/* Number of requests that we can fit in a ring */
247#define XEN_BLKIF_REQS_PER_PAGE 32 237#define XEN_BLKIF_REQS_PER_PAGE 32
248 238
@@ -250,7 +240,8 @@ struct persistent_gnt {
250 struct page *page; 240 struct page *page;
251 grant_ref_t gnt; 241 grant_ref_t gnt;
252 grant_handle_t handle; 242 grant_handle_t handle;
253 DECLARE_BITMAP(flags, PERSISTENT_GNT_FLAGS_SIZE); 243 unsigned long last_used;
244 bool active;
254 struct rb_node node; 245 struct rb_node node;
255 struct list_head remove_node; 246 struct list_head remove_node;
256}; 247};
@@ -278,7 +269,6 @@ struct xen_blkif_ring {
278 wait_queue_head_t pending_free_wq; 269 wait_queue_head_t pending_free_wq;
279 270
280 /* Tree to store persistent grants. */ 271 /* Tree to store persistent grants. */
281 spinlock_t pers_gnts_lock;
282 struct rb_root persistent_gnts; 272 struct rb_root persistent_gnts;
283 unsigned int persistent_gnt_c; 273 unsigned int persistent_gnt_c;
284 atomic_t persistent_gnt_in_use; 274 atomic_t persistent_gnt_in_use;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 8986adab9bf5..a71d817e900d 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -46,6 +46,7 @@
46#include <linux/scatterlist.h> 46#include <linux/scatterlist.h>
47#include <linux/bitmap.h> 47#include <linux/bitmap.h>
48#include <linux/list.h> 48#include <linux/list.h>
49#include <linux/workqueue.h>
49 50
50#include <xen/xen.h> 51#include <xen/xen.h>
51#include <xen/xenbus.h> 52#include <xen/xenbus.h>
@@ -121,6 +122,8 @@ static inline struct blkif_req *blkif_req(struct request *rq)
121 122
122static DEFINE_MUTEX(blkfront_mutex); 123static DEFINE_MUTEX(blkfront_mutex);
123static const struct block_device_operations xlvbd_block_fops; 124static const struct block_device_operations xlvbd_block_fops;
125static struct delayed_work blkfront_work;
126static LIST_HEAD(info_list);
124 127
125/* 128/*
126 * Maximum number of segments in indirect requests, the actual value used by 129 * Maximum number of segments in indirect requests, the actual value used by
@@ -216,6 +219,7 @@ struct blkfront_info
216 /* Save uncomplete reqs and bios for migration. */ 219 /* Save uncomplete reqs and bios for migration. */
217 struct list_head requests; 220 struct list_head requests;
218 struct bio_list bio_list; 221 struct bio_list bio_list;
222 struct list_head info_list;
219}; 223};
220 224
221static unsigned int nr_minors; 225static unsigned int nr_minors;
@@ -1759,6 +1763,12 @@ abort_transaction:
1759 return err; 1763 return err;
1760} 1764}
1761 1765
1766static void free_info(struct blkfront_info *info)
1767{
1768 list_del(&info->info_list);
1769 kfree(info);
1770}
1771
1762/* Common code used when first setting up, and when resuming. */ 1772/* Common code used when first setting up, and when resuming. */
1763static int talk_to_blkback(struct xenbus_device *dev, 1773static int talk_to_blkback(struct xenbus_device *dev,
1764 struct blkfront_info *info) 1774 struct blkfront_info *info)
@@ -1880,7 +1890,10 @@ again:
1880 destroy_blkring: 1890 destroy_blkring:
1881 blkif_free(info, 0); 1891 blkif_free(info, 0);
1882 1892
1883 kfree(info); 1893 mutex_lock(&blkfront_mutex);
1894 free_info(info);
1895 mutex_unlock(&blkfront_mutex);
1896
1884 dev_set_drvdata(&dev->dev, NULL); 1897 dev_set_drvdata(&dev->dev, NULL);
1885 1898
1886 return err; 1899 return err;
@@ -1991,6 +2004,10 @@ static int blkfront_probe(struct xenbus_device *dev,
1991 info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0); 2004 info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
1992 dev_set_drvdata(&dev->dev, info); 2005 dev_set_drvdata(&dev->dev, info);
1993 2006
2007 mutex_lock(&blkfront_mutex);
2008 list_add(&info->info_list, &info_list);
2009 mutex_unlock(&blkfront_mutex);
2010
1994 return 0; 2011 return 0;
1995} 2012}
1996 2013
@@ -2301,6 +2318,12 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
2301 if (indirect_segments <= BLKIF_MAX_SEGMENTS_PER_REQUEST) 2318 if (indirect_segments <= BLKIF_MAX_SEGMENTS_PER_REQUEST)
2302 indirect_segments = 0; 2319 indirect_segments = 0;
2303 info->max_indirect_segments = indirect_segments; 2320 info->max_indirect_segments = indirect_segments;
2321
2322 if (info->feature_persistent) {
2323 mutex_lock(&blkfront_mutex);
2324 schedule_delayed_work(&blkfront_work, HZ * 10);
2325 mutex_unlock(&blkfront_mutex);
2326 }
2304} 2327}
2305 2328
2306/* 2329/*
@@ -2482,7 +2505,9 @@ static int blkfront_remove(struct xenbus_device *xbdev)
2482 mutex_unlock(&info->mutex); 2505 mutex_unlock(&info->mutex);
2483 2506
2484 if (!bdev) { 2507 if (!bdev) {
2485 kfree(info); 2508 mutex_lock(&blkfront_mutex);
2509 free_info(info);
2510 mutex_unlock(&blkfront_mutex);
2486 return 0; 2511 return 0;
2487 } 2512 }
2488 2513
@@ -2502,7 +2527,9 @@ static int blkfront_remove(struct xenbus_device *xbdev)
2502 if (info && !bdev->bd_openers) { 2527 if (info && !bdev->bd_openers) {
2503 xlvbd_release_gendisk(info); 2528 xlvbd_release_gendisk(info);
2504 disk->private_data = NULL; 2529 disk->private_data = NULL;
2505 kfree(info); 2530 mutex_lock(&blkfront_mutex);
2531 free_info(info);
2532 mutex_unlock(&blkfront_mutex);
2506 } 2533 }
2507 2534
2508 mutex_unlock(&bdev->bd_mutex); 2535 mutex_unlock(&bdev->bd_mutex);
@@ -2585,7 +2612,7 @@ static void blkif_release(struct gendisk *disk, fmode_t mode)
2585 dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n"); 2612 dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
2586 xlvbd_release_gendisk(info); 2613 xlvbd_release_gendisk(info);
2587 disk->private_data = NULL; 2614 disk->private_data = NULL;
2588 kfree(info); 2615 free_info(info);
2589 } 2616 }
2590 2617
2591out: 2618out:
@@ -2618,6 +2645,61 @@ static struct xenbus_driver blkfront_driver = {
2618 .is_ready = blkfront_is_ready, 2645 .is_ready = blkfront_is_ready,
2619}; 2646};
2620 2647
2648static void purge_persistent_grants(struct blkfront_info *info)
2649{
2650 unsigned int i;
2651 unsigned long flags;
2652
2653 for (i = 0; i < info->nr_rings; i++) {
2654 struct blkfront_ring_info *rinfo = &info->rinfo[i];
2655 struct grant *gnt_list_entry, *tmp;
2656
2657 spin_lock_irqsave(&rinfo->ring_lock, flags);
2658
2659 if (rinfo->persistent_gnts_c == 0) {
2660 spin_unlock_irqrestore(&rinfo->ring_lock, flags);
2661 continue;
2662 }
2663
2664 list_for_each_entry_safe(gnt_list_entry, tmp, &rinfo->grants,
2665 node) {
2666 if (gnt_list_entry->gref == GRANT_INVALID_REF ||
2667 gnttab_query_foreign_access(gnt_list_entry->gref))
2668 continue;
2669
2670 list_del(&gnt_list_entry->node);
2671 gnttab_end_foreign_access(gnt_list_entry->gref, 0, 0UL);
2672 rinfo->persistent_gnts_c--;
2673 __free_page(gnt_list_entry->page);
2674 kfree(gnt_list_entry);
2675 }
2676
2677 spin_unlock_irqrestore(&rinfo->ring_lock, flags);
2678 }
2679}
2680
2681static void blkfront_delay_work(struct work_struct *work)
2682{
2683 struct blkfront_info *info;
2684 bool need_schedule_work = false;
2685
2686 mutex_lock(&blkfront_mutex);
2687
2688 list_for_each_entry(info, &info_list, info_list) {
2689 if (info->feature_persistent) {
2690 need_schedule_work = true;
2691 mutex_lock(&info->mutex);
2692 purge_persistent_grants(info);
2693 mutex_unlock(&info->mutex);
2694 }
2695 }
2696
2697 if (need_schedule_work)
2698 schedule_delayed_work(&blkfront_work, HZ * 10);
2699
2700 mutex_unlock(&blkfront_mutex);
2701}
2702
2621static int __init xlblk_init(void) 2703static int __init xlblk_init(void)
2622{ 2704{
2623 int ret; 2705 int ret;
@@ -2626,6 +2708,15 @@ static int __init xlblk_init(void)
2626 if (!xen_domain()) 2708 if (!xen_domain())
2627 return -ENODEV; 2709 return -ENODEV;
2628 2710
2711 if (!xen_has_pv_disk_devices())
2712 return -ENODEV;
2713
2714 if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
2715 pr_warn("xen_blk: can't get major %d with name %s\n",
2716 XENVBD_MAJOR, DEV_NAME);
2717 return -ENODEV;
2718 }
2719
2629 if (xen_blkif_max_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST) 2720 if (xen_blkif_max_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST)
2630 xen_blkif_max_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST; 2721 xen_blkif_max_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;
2631 2722
@@ -2641,14 +2732,7 @@ static int __init xlblk_init(void)
2641 xen_blkif_max_queues = nr_cpus; 2732 xen_blkif_max_queues = nr_cpus;
2642 } 2733 }
2643 2734
2644 if (!xen_has_pv_disk_devices()) 2735 INIT_DELAYED_WORK(&blkfront_work, blkfront_delay_work);
2645 return -ENODEV;
2646
2647 if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
2648 printk(KERN_WARNING "xen_blk: can't get major %d with name %s\n",
2649 XENVBD_MAJOR, DEV_NAME);
2650 return -ENODEV;
2651 }
2652 2736
2653 ret = xenbus_register_frontend(&blkfront_driver); 2737 ret = xenbus_register_frontend(&blkfront_driver);
2654 if (ret) { 2738 if (ret) {
@@ -2663,6 +2747,8 @@ module_init(xlblk_init);
2663 2747
2664static void __exit xlblk_exit(void) 2748static void __exit xlblk_exit(void)
2665{ 2749{
2750 cancel_delayed_work_sync(&blkfront_work);
2751
2666 xenbus_unregister_driver(&blkfront_driver); 2752 xenbus_unregister_driver(&blkfront_driver);
2667 unregister_blkdev(XENVBD_MAJOR, DEV_NAME); 2753 unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
2668 kfree(minors); 2754 kfree(minors);
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 2df11cc08a46..845b0314ce3a 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -200,6 +200,7 @@ config BT_HCIUART_RTL
200 depends on BT_HCIUART 200 depends on BT_HCIUART
201 depends on BT_HCIUART_SERDEV 201 depends on BT_HCIUART_SERDEV
202 depends on GPIOLIB 202 depends on GPIOLIB
203 depends on ACPI
203 select BT_HCIUART_3WIRE 204 select BT_HCIUART_3WIRE
204 select BT_RTL 205 select BT_RTL
205 help 206 help
diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c
index ed2a5c7cb77f..4593baff2bc9 100644
--- a/drivers/bluetooth/btmtkuart.c
+++ b/drivers/bluetooth/btmtkuart.c
@@ -144,8 +144,10 @@ static int mtk_setup_fw(struct hci_dev *hdev)
144 fw_size = fw->size; 144 fw_size = fw->size;
145 145
146 /* The size of patch header is 30 bytes, should be skip */ 146 /* The size of patch header is 30 bytes, should be skip */
147 if (fw_size < 30) 147 if (fw_size < 30) {
148 return -EINVAL; 148 err = -EINVAL;
149 goto free_fw;
150 }
149 151
150 fw_size -= 30; 152 fw_size -= 30;
151 fw_ptr += 30; 153 fw_ptr += 30;
@@ -172,8 +174,8 @@ static int mtk_setup_fw(struct hci_dev *hdev)
172 fw_ptr += dlen; 174 fw_ptr += dlen;
173 } 175 }
174 176
177free_fw:
175 release_firmware(fw); 178 release_firmware(fw);
176
177 return err; 179 return err;
178} 180}
179 181
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 963bb0309e25..ea6238ed5c0e 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -543,6 +543,8 @@ static void hci_uart_tty_close(struct tty_struct *tty)
543 } 543 }
544 clear_bit(HCI_UART_PROTO_SET, &hu->flags); 544 clear_bit(HCI_UART_PROTO_SET, &hu->flags);
545 545
546 percpu_free_rwsem(&hu->proto_lock);
547
546 kfree(hu); 548 kfree(hu);
547} 549}
548 550
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index c9bac9dc4637..e4fe954e63a9 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -498,32 +498,29 @@ static int sysc_check_registers(struct sysc *ddata)
498 498
499/** 499/**
500 * syc_ioremap - ioremap register space for the interconnect target module 500 * syc_ioremap - ioremap register space for the interconnect target module
501 * @ddata: deviec driver data 501 * @ddata: device driver data
502 * 502 *
503 * Note that the interconnect target module registers can be anywhere 503 * Note that the interconnect target module registers can be anywhere
504 * within the first child device address space. For example, SGX has 504 * within the interconnect target module range. For example, SGX has
505 * them at offset 0x1fc00 in the 32MB module address space. We just 505 * them at offset 0x1fc00 in the 32MB module address space. And cpsw
506 * what we need around the interconnect target module registers. 506 * has them at offset 0x1200 in the CPSW_WR child. Usually the
507 * the interconnect target module registers are at the beginning of
508 * the module range though.
507 */ 509 */
508static int sysc_ioremap(struct sysc *ddata) 510static int sysc_ioremap(struct sysc *ddata)
509{ 511{
510 u32 size = 0; 512 int size;
511
512 if (ddata->offsets[SYSC_SYSSTATUS] >= 0)
513 size = ddata->offsets[SYSC_SYSSTATUS];
514 else if (ddata->offsets[SYSC_SYSCONFIG] >= 0)
515 size = ddata->offsets[SYSC_SYSCONFIG];
516 else if (ddata->offsets[SYSC_REVISION] >= 0)
517 size = ddata->offsets[SYSC_REVISION];
518 else
519 return -EINVAL;
520 513
521 size &= 0xfff00; 514 size = max3(ddata->offsets[SYSC_REVISION],
522 size += SZ_256; 515 ddata->offsets[SYSC_SYSCONFIG],
516 ddata->offsets[SYSC_SYSSTATUS]);
517
518 if (size < 0 || (size + sizeof(u32)) > ddata->module_size)
519 return -EINVAL;
523 520
524 ddata->module_va = devm_ioremap(ddata->dev, 521 ddata->module_va = devm_ioremap(ddata->dev,
525 ddata->module_pa, 522 ddata->module_pa,
526 size); 523 size + sizeof(u32));
527 if (!ddata->module_va) 524 if (!ddata->module_va)
528 return -EIO; 525 return -EIO;
529 526
@@ -1224,10 +1221,10 @@ static int sysc_child_suspend_noirq(struct device *dev)
1224 if (!pm_runtime_status_suspended(dev)) { 1221 if (!pm_runtime_status_suspended(dev)) {
1225 error = pm_generic_runtime_suspend(dev); 1222 error = pm_generic_runtime_suspend(dev);
1226 if (error) { 1223 if (error) {
1227 dev_err(dev, "%s error at %i: %i\n", 1224 dev_warn(dev, "%s busy at %i: %i\n",
1228 __func__, __LINE__, error); 1225 __func__, __LINE__, error);
1229 1226
1230 return error; 1227 return 0;
1231 } 1228 }
1232 1229
1233 error = sysc_runtime_suspend(ddata->dev); 1230 error = sysc_runtime_suspend(ddata->dev);
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 113fc6edb2b0..a5d5a96479bf 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2546,7 +2546,7 @@ static int cdrom_ioctl_drive_status(struct cdrom_device_info *cdi,
2546 if (!CDROM_CAN(CDC_SELECT_DISC) || 2546 if (!CDROM_CAN(CDC_SELECT_DISC) ||
2547 (arg == CDSL_CURRENT || arg == CDSL_NONE)) 2547 (arg == CDSL_CURRENT || arg == CDSL_NONE))
2548 return cdi->ops->drive_status(cdi, CDSL_CURRENT); 2548 return cdi->ops->drive_status(cdi, CDSL_CURRENT);
2549 if (((int)arg >= cdi->capacity)) 2549 if (arg >= cdi->capacity)
2550 return -EINVAL; 2550 return -EINVAL;
2551 return cdrom_slot_status(cdi, arg); 2551 return cdrom_slot_status(cdi, arg);
2552} 2552}
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index ce277ee0a28a..40728491f37b 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -566,5 +566,5 @@ config RANDOM_TRUST_CPU
566 that CPU manufacturer (perhaps with the insistence or mandate 566 that CPU manufacturer (perhaps with the insistence or mandate
567 of a Nation State's intelligence or law enforcement agencies) 567 of a Nation State's intelligence or law enforcement agencies)
568 has not installed a hidden back door to compromise the CPU's 568 has not installed a hidden back door to compromise the CPU's
569 random number generation facilities. 569 random number generation facilities. This can also be configured
570 570 at boot with "random.trust_cpu=on/off".
diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
index a3397664f800..97d6856c9c0f 100644
--- a/drivers/char/ipmi/ipmi_bt_sm.c
+++ b/drivers/char/ipmi/ipmi_bt_sm.c
@@ -59,8 +59,6 @@ enum bt_states {
59 BT_STATE_RESET3, 59 BT_STATE_RESET3,
60 BT_STATE_RESTART, 60 BT_STATE_RESTART,
61 BT_STATE_PRINTME, 61 BT_STATE_PRINTME,
62 BT_STATE_CAPABILITIES_BEGIN,
63 BT_STATE_CAPABILITIES_END,
64 BT_STATE_LONG_BUSY /* BT doesn't get hosed :-) */ 62 BT_STATE_LONG_BUSY /* BT doesn't get hosed :-) */
65}; 63};
66 64
@@ -86,7 +84,6 @@ struct si_sm_data {
86 int error_retries; /* end of "common" fields */ 84 int error_retries; /* end of "common" fields */
87 int nonzero_status; /* hung BMCs stay all 0 */ 85 int nonzero_status; /* hung BMCs stay all 0 */
88 enum bt_states complete; /* to divert the state machine */ 86 enum bt_states complete; /* to divert the state machine */
89 int BT_CAP_outreqs;
90 long BT_CAP_req2rsp; 87 long BT_CAP_req2rsp;
91 int BT_CAP_retries; /* Recommended retries */ 88 int BT_CAP_retries; /* Recommended retries */
92}; 89};
@@ -137,8 +134,6 @@ static char *state2txt(unsigned char state)
137 case BT_STATE_RESET3: return("RESET3"); 134 case BT_STATE_RESET3: return("RESET3");
138 case BT_STATE_RESTART: return("RESTART"); 135 case BT_STATE_RESTART: return("RESTART");
139 case BT_STATE_LONG_BUSY: return("LONG_BUSY"); 136 case BT_STATE_LONG_BUSY: return("LONG_BUSY");
140 case BT_STATE_CAPABILITIES_BEGIN: return("CAP_BEGIN");
141 case BT_STATE_CAPABILITIES_END: return("CAP_END");
142 } 137 }
143 return("BAD STATE"); 138 return("BAD STATE");
144} 139}
@@ -185,7 +180,6 @@ static unsigned int bt_init_data(struct si_sm_data *bt, struct si_sm_io *io)
185 bt->complete = BT_STATE_IDLE; /* end here */ 180 bt->complete = BT_STATE_IDLE; /* end here */
186 bt->BT_CAP_req2rsp = BT_NORMAL_TIMEOUT * USEC_PER_SEC; 181 bt->BT_CAP_req2rsp = BT_NORMAL_TIMEOUT * USEC_PER_SEC;
187 bt->BT_CAP_retries = BT_NORMAL_RETRY_LIMIT; 182 bt->BT_CAP_retries = BT_NORMAL_RETRY_LIMIT;
188 /* BT_CAP_outreqs == zero is a flag to read BT Capabilities */
189 return 3; /* We claim 3 bytes of space; ought to check SPMI table */ 183 return 3; /* We claim 3 bytes of space; ought to check SPMI table */
190} 184}
191 185
@@ -451,7 +445,7 @@ static enum si_sm_result error_recovery(struct si_sm_data *bt,
451 445
452static enum si_sm_result bt_event(struct si_sm_data *bt, long time) 446static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
453{ 447{
454 unsigned char status, BT_CAP[8]; 448 unsigned char status;
455 static enum bt_states last_printed = BT_STATE_PRINTME; 449 static enum bt_states last_printed = BT_STATE_PRINTME;
456 int i; 450 int i;
457 451
@@ -504,12 +498,6 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
504 if (status & BT_H_BUSY) /* clear a leftover H_BUSY */ 498 if (status & BT_H_BUSY) /* clear a leftover H_BUSY */
505 BT_CONTROL(BT_H_BUSY); 499 BT_CONTROL(BT_H_BUSY);
506 500
507 bt->timeout = bt->BT_CAP_req2rsp;
508
509 /* Read BT capabilities if it hasn't been done yet */
510 if (!bt->BT_CAP_outreqs)
511 BT_STATE_CHANGE(BT_STATE_CAPABILITIES_BEGIN,
512 SI_SM_CALL_WITHOUT_DELAY);
513 BT_SI_SM_RETURN(SI_SM_IDLE); 501 BT_SI_SM_RETURN(SI_SM_IDLE);
514 502
515 case BT_STATE_XACTION_START: 503 case BT_STATE_XACTION_START:
@@ -614,37 +602,6 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
614 BT_STATE_CHANGE(BT_STATE_XACTION_START, 602 BT_STATE_CHANGE(BT_STATE_XACTION_START,
615 SI_SM_CALL_WITH_DELAY); 603 SI_SM_CALL_WITH_DELAY);
616 604
617 /*
618 * Get BT Capabilities, using timing of upper level state machine.
619 * Set outreqs to prevent infinite loop on timeout.
620 */
621 case BT_STATE_CAPABILITIES_BEGIN:
622 bt->BT_CAP_outreqs = 1;
623 {
624 unsigned char GetBT_CAP[] = { 0x18, 0x36 };
625 bt->state = BT_STATE_IDLE;
626 bt_start_transaction(bt, GetBT_CAP, sizeof(GetBT_CAP));
627 }
628 bt->complete = BT_STATE_CAPABILITIES_END;
629 BT_STATE_CHANGE(BT_STATE_XACTION_START,
630 SI_SM_CALL_WITH_DELAY);
631
632 case BT_STATE_CAPABILITIES_END:
633 i = bt_get_result(bt, BT_CAP, sizeof(BT_CAP));
634 bt_init_data(bt, bt->io);
635 if ((i == 8) && !BT_CAP[2]) {
636 bt->BT_CAP_outreqs = BT_CAP[3];
637 bt->BT_CAP_req2rsp = BT_CAP[6] * USEC_PER_SEC;
638 bt->BT_CAP_retries = BT_CAP[7];
639 } else
640 printk(KERN_WARNING "IPMI BT: using default values\n");
641 if (!bt->BT_CAP_outreqs)
642 bt->BT_CAP_outreqs = 1;
643 printk(KERN_WARNING "IPMI BT: req2rsp=%ld secs retries=%d\n",
644 bt->BT_CAP_req2rsp / USEC_PER_SEC, bt->BT_CAP_retries);
645 bt->timeout = bt->BT_CAP_req2rsp;
646 return SI_SM_CALL_WITHOUT_DELAY;
647
648 default: /* should never occur */ 605 default: /* should never occur */
649 return error_recovery(bt, 606 return error_recovery(bt,
650 status, 607 status,
@@ -655,6 +612,11 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
655 612
656static int bt_detect(struct si_sm_data *bt) 613static int bt_detect(struct si_sm_data *bt)
657{ 614{
615 unsigned char GetBT_CAP[] = { 0x18, 0x36 };
616 unsigned char BT_CAP[8];
617 enum si_sm_result smi_result;
618 int rv;
619
658 /* 620 /*
659 * It's impossible for the BT status and interrupt registers to be 621 * It's impossible for the BT status and interrupt registers to be
660 * all 1's, (assuming a properly functioning, self-initialized BMC) 622 * all 1's, (assuming a properly functioning, self-initialized BMC)
@@ -665,6 +627,48 @@ static int bt_detect(struct si_sm_data *bt)
665 if ((BT_STATUS == 0xFF) && (BT_INTMASK_R == 0xFF)) 627 if ((BT_STATUS == 0xFF) && (BT_INTMASK_R == 0xFF))
666 return 1; 628 return 1;
667 reset_flags(bt); 629 reset_flags(bt);
630
631 /*
632 * Try getting the BT capabilities here.
633 */
634 rv = bt_start_transaction(bt, GetBT_CAP, sizeof(GetBT_CAP));
635 if (rv) {
636 dev_warn(bt->io->dev,
637 "Can't start capabilities transaction: %d\n", rv);
638 goto out_no_bt_cap;
639 }
640
641 smi_result = SI_SM_CALL_WITHOUT_DELAY;
642 for (;;) {
643 if (smi_result == SI_SM_CALL_WITH_DELAY ||
644 smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
645 schedule_timeout_uninterruptible(1);
646 smi_result = bt_event(bt, jiffies_to_usecs(1));
647 } else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
648 smi_result = bt_event(bt, 0);
649 } else
650 break;
651 }
652
653 rv = bt_get_result(bt, BT_CAP, sizeof(BT_CAP));
654 bt_init_data(bt, bt->io);
655 if (rv < 8) {
656 dev_warn(bt->io->dev, "bt cap response too short: %d\n", rv);
657 goto out_no_bt_cap;
658 }
659
660 if (BT_CAP[2]) {
661 dev_warn(bt->io->dev, "Error fetching bt cap: %x\n", BT_CAP[2]);
662out_no_bt_cap:
663 dev_warn(bt->io->dev, "using default values\n");
664 } else {
665 bt->BT_CAP_req2rsp = BT_CAP[6] * USEC_PER_SEC;
666 bt->BT_CAP_retries = BT_CAP[7];
667 }
668
669 dev_info(bt->io->dev, "req2rsp=%ld secs retries=%d\n",
670 bt->BT_CAP_req2rsp / USEC_PER_SEC, bt->BT_CAP_retries);
671
668 return 0; 672 return 0;
669} 673}
670 674
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 51832b8a2c62..7fc9612070a1 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -3381,39 +3381,45 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
3381 3381
3382 rv = handlers->start_processing(send_info, intf); 3382 rv = handlers->start_processing(send_info, intf);
3383 if (rv) 3383 if (rv)
3384 goto out; 3384 goto out_err;
3385 3385
3386 rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i); 3386 rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i);
3387 if (rv) { 3387 if (rv) {
3388 dev_err(si_dev, "Unable to get the device id: %d\n", rv); 3388 dev_err(si_dev, "Unable to get the device id: %d\n", rv);
3389 goto out; 3389 goto out_err_started;
3390 } 3390 }
3391 3391
3392 mutex_lock(&intf->bmc_reg_mutex); 3392 mutex_lock(&intf->bmc_reg_mutex);
3393 rv = __scan_channels(intf, &id); 3393 rv = __scan_channels(intf, &id);
3394 mutex_unlock(&intf->bmc_reg_mutex); 3394 mutex_unlock(&intf->bmc_reg_mutex);
3395 if (rv)
3396 goto out_err_bmc_reg;
3395 3397
3396 out: 3398 /*
3397 if (rv) { 3399 * Keep memory order straight for RCU readers. Make
3398 ipmi_bmc_unregister(intf); 3400 * sure everything else is committed to memory before
3399 list_del_rcu(&intf->link); 3401 * setting intf_num to mark the interface valid.
3400 mutex_unlock(&ipmi_interfaces_mutex); 3402 */
3401 synchronize_srcu(&ipmi_interfaces_srcu); 3403 smp_wmb();
3402 cleanup_srcu_struct(&intf->users_srcu); 3404 intf->intf_num = i;
3403 kref_put(&intf->refcount, intf_free); 3405 mutex_unlock(&ipmi_interfaces_mutex);
3404 } else {
3405 /*
3406 * Keep memory order straight for RCU readers. Make
3407 * sure everything else is committed to memory before
3408 * setting intf_num to mark the interface valid.
3409 */
3410 smp_wmb();
3411 intf->intf_num = i;
3412 mutex_unlock(&ipmi_interfaces_mutex);
3413 3406
3414 /* After this point the interface is legal to use. */ 3407 /* After this point the interface is legal to use. */
3415 call_smi_watchers(i, intf->si_dev); 3408 call_smi_watchers(i, intf->si_dev);
3416 } 3409
3410 return 0;
3411
3412 out_err_bmc_reg:
3413 ipmi_bmc_unregister(intf);
3414 out_err_started:
3415 if (intf->handlers->shutdown)
3416 intf->handlers->shutdown(intf->send_info);
3417 out_err:
3418 list_del_rcu(&intf->link);
3419 mutex_unlock(&ipmi_interfaces_mutex);
3420 synchronize_srcu(&ipmi_interfaces_srcu);
3421 cleanup_srcu_struct(&intf->users_srcu);
3422 kref_put(&intf->refcount, intf_free);
3417 3423
3418 return rv; 3424 return rv;
3419} 3425}
@@ -3504,7 +3510,8 @@ void ipmi_unregister_smi(struct ipmi_smi *intf)
3504 } 3510 }
3505 srcu_read_unlock(&intf->users_srcu, index); 3511 srcu_read_unlock(&intf->users_srcu, index);
3506 3512
3507 intf->handlers->shutdown(intf->send_info); 3513 if (intf->handlers->shutdown)
3514 intf->handlers->shutdown(intf->send_info);
3508 3515
3509 cleanup_smi_msgs(intf); 3516 cleanup_smi_msgs(intf);
3510 3517
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 90ec010bffbd..5faa917df1b6 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -2083,18 +2083,9 @@ static int try_smi_init(struct smi_info *new_smi)
2083 si_to_str[new_smi->io.si_type]); 2083 si_to_str[new_smi->io.si_type]);
2084 2084
2085 WARN_ON(new_smi->io.dev->init_name != NULL); 2085 WARN_ON(new_smi->io.dev->init_name != NULL);
2086 kfree(init_name);
2087
2088 return 0;
2089
2090out_err:
2091 if (new_smi->intf) {
2092 ipmi_unregister_smi(new_smi->intf);
2093 new_smi->intf = NULL;
2094 }
2095 2086
2087 out_err:
2096 kfree(init_name); 2088 kfree(init_name);
2097
2098 return rv; 2089 return rv;
2099} 2090}
2100 2091
@@ -2227,6 +2218,8 @@ static void shutdown_smi(void *send_info)
2227 2218
2228 kfree(smi_info->si_sm); 2219 kfree(smi_info->si_sm);
2229 smi_info->si_sm = NULL; 2220 smi_info->si_sm = NULL;
2221
2222 smi_info->intf = NULL;
2230} 2223}
2231 2224
2232/* 2225/*
@@ -2240,10 +2233,8 @@ static void cleanup_one_si(struct smi_info *smi_info)
2240 2233
2241 list_del(&smi_info->link); 2234 list_del(&smi_info->link);
2242 2235
2243 if (smi_info->intf) { 2236 if (smi_info->intf)
2244 ipmi_unregister_smi(smi_info->intf); 2237 ipmi_unregister_smi(smi_info->intf);
2245 smi_info->intf = NULL;
2246 }
2247 2238
2248 if (smi_info->pdev) { 2239 if (smi_info->pdev) {
2249 if (smi_info->pdev_registered) 2240 if (smi_info->pdev_registered)
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 18e4650c233b..29e67a80fb20 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -181,6 +181,8 @@ struct ssif_addr_info {
181 struct device *dev; 181 struct device *dev;
182 struct i2c_client *client; 182 struct i2c_client *client;
183 183
184 struct i2c_client *added_client;
185
184 struct mutex clients_mutex; 186 struct mutex clients_mutex;
185 struct list_head clients; 187 struct list_head clients;
186 188
@@ -1214,18 +1216,11 @@ static void shutdown_ssif(void *send_info)
1214 complete(&ssif_info->wake_thread); 1216 complete(&ssif_info->wake_thread);
1215 kthread_stop(ssif_info->thread); 1217 kthread_stop(ssif_info->thread);
1216 } 1218 }
1217
1218 /*
1219 * No message can be outstanding now, we have removed the
1220 * upper layer and it permitted us to do so.
1221 */
1222 kfree(ssif_info);
1223} 1219}
1224 1220
1225static int ssif_remove(struct i2c_client *client) 1221static int ssif_remove(struct i2c_client *client)
1226{ 1222{
1227 struct ssif_info *ssif_info = i2c_get_clientdata(client); 1223 struct ssif_info *ssif_info = i2c_get_clientdata(client);
1228 struct ipmi_smi *intf;
1229 struct ssif_addr_info *addr_info; 1224 struct ssif_addr_info *addr_info;
1230 1225
1231 if (!ssif_info) 1226 if (!ssif_info)
@@ -1235,9 +1230,7 @@ static int ssif_remove(struct i2c_client *client)
1235 * After this point, we won't deliver anything asychronously 1230 * After this point, we won't deliver anything asychronously
1236 * to the message handler. We can unregister ourself. 1231 * to the message handler. We can unregister ourself.
1237 */ 1232 */
1238 intf = ssif_info->intf; 1233 ipmi_unregister_smi(ssif_info->intf);
1239 ssif_info->intf = NULL;
1240 ipmi_unregister_smi(intf);
1241 1234
1242 list_for_each_entry(addr_info, &ssif_infos, link) { 1235 list_for_each_entry(addr_info, &ssif_infos, link) {
1243 if (addr_info->client == client) { 1236 if (addr_info->client == client) {
@@ -1246,6 +1239,8 @@ static int ssif_remove(struct i2c_client *client)
1246 } 1239 }
1247 } 1240 }
1248 1241
1242 kfree(ssif_info);
1243
1249 return 0; 1244 return 0;
1250} 1245}
1251 1246
@@ -1648,15 +1643,9 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
1648 1643
1649 out: 1644 out:
1650 if (rv) { 1645 if (rv) {
1651 /* 1646 if (addr_info)
1652 * Note that if addr_info->client is assigned, we 1647 addr_info->client = NULL;
1653 * leave it. The i2c client hangs around even if we 1648
1654 * return a failure here, and the failure here is not
1655 * propagated back to the i2c code. This seems to be
1656 * design intent, strange as it may be. But if we
1657 * don't leave it, ssif_platform_remove will not remove
1658 * the client like it should.
1659 */
1660 dev_err(&client->dev, "Unable to start IPMI SSIF: %d\n", rv); 1649 dev_err(&client->dev, "Unable to start IPMI SSIF: %d\n", rv);
1661 kfree(ssif_info); 1650 kfree(ssif_info);
1662 } 1651 }
@@ -1676,7 +1665,8 @@ static int ssif_adapter_handler(struct device *adev, void *opaque)
1676 if (adev->type != &i2c_adapter_type) 1665 if (adev->type != &i2c_adapter_type)
1677 return 0; 1666 return 0;
1678 1667
1679 i2c_new_device(to_i2c_adapter(adev), &addr_info->binfo); 1668 addr_info->added_client = i2c_new_device(to_i2c_adapter(adev),
1669 &addr_info->binfo);
1680 1670
1681 if (!addr_info->adapter_name) 1671 if (!addr_info->adapter_name)
1682 return 1; /* Only try the first I2C adapter by default. */ 1672 return 1; /* Only try the first I2C adapter by default. */
@@ -1849,7 +1839,7 @@ static int ssif_platform_remove(struct platform_device *dev)
1849 return 0; 1839 return 0;
1850 1840
1851 mutex_lock(&ssif_infos_mutex); 1841 mutex_lock(&ssif_infos_mutex);
1852 i2c_unregister_device(addr_info->client); 1842 i2c_unregister_device(addr_info->added_client);
1853 1843
1854 list_del(&addr_info->link); 1844 list_del(&addr_info->link);
1855 kfree(addr_info); 1845 kfree(addr_info);
diff --git a/drivers/char/ipmi/kcs_bmc.c b/drivers/char/ipmi/kcs_bmc.c
index bb882ab161fe..e6124bd548df 100644
--- a/drivers/char/ipmi/kcs_bmc.c
+++ b/drivers/char/ipmi/kcs_bmc.c
@@ -16,6 +16,8 @@
16 16
17#include "kcs_bmc.h" 17#include "kcs_bmc.h"
18 18
19#define DEVICE_NAME "ipmi-kcs"
20
19#define KCS_MSG_BUFSIZ 1000 21#define KCS_MSG_BUFSIZ 1000
20 22
21#define KCS_ZERO_DATA 0 23#define KCS_ZERO_DATA 0
@@ -429,8 +431,6 @@ struct kcs_bmc *kcs_bmc_alloc(struct device *dev, int sizeof_priv, u32 channel)
429 if (!kcs_bmc) 431 if (!kcs_bmc)
430 return NULL; 432 return NULL;
431 433
432 dev_set_name(dev, "ipmi-kcs%u", channel);
433
434 spin_lock_init(&kcs_bmc->lock); 434 spin_lock_init(&kcs_bmc->lock);
435 kcs_bmc->channel = channel; 435 kcs_bmc->channel = channel;
436 436
@@ -444,7 +444,8 @@ struct kcs_bmc *kcs_bmc_alloc(struct device *dev, int sizeof_priv, u32 channel)
444 return NULL; 444 return NULL;
445 445
446 kcs_bmc->miscdev.minor = MISC_DYNAMIC_MINOR; 446 kcs_bmc->miscdev.minor = MISC_DYNAMIC_MINOR;
447 kcs_bmc->miscdev.name = dev_name(dev); 447 kcs_bmc->miscdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s%u",
448 DEVICE_NAME, channel);
448 kcs_bmc->miscdev.fops = &kcs_bmc_fops; 449 kcs_bmc->miscdev.fops = &kcs_bmc_fops;
449 450
450 return kcs_bmc; 451 return kcs_bmc;
diff --git a/drivers/char/random.c b/drivers/char/random.c
index bf5f99fc36f1..c75b6cdf0053 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -779,6 +779,13 @@ static struct crng_state **crng_node_pool __read_mostly;
779 779
780static void invalidate_batched_entropy(void); 780static void invalidate_batched_entropy(void);
781 781
782static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
783static int __init parse_trust_cpu(char *arg)
784{
785 return kstrtobool(arg, &trust_cpu);
786}
787early_param("random.trust_cpu", parse_trust_cpu);
788
782static void crng_initialize(struct crng_state *crng) 789static void crng_initialize(struct crng_state *crng)
783{ 790{
784 int i; 791 int i;
@@ -799,12 +806,10 @@ static void crng_initialize(struct crng_state *crng)
799 } 806 }
800 crng->state[i] ^= rv; 807 crng->state[i] ^= rv;
801 } 808 }
802#ifdef CONFIG_RANDOM_TRUST_CPU 809 if (trust_cpu && arch_init) {
803 if (arch_init) {
804 crng_init = 2; 810 crng_init = 2;
805 pr_notice("random: crng done (trusting CPU's manufacturer)\n"); 811 pr_notice("random: crng done (trusting CPU's manufacturer)\n");
806 } 812 }
807#endif
808 crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1; 813 crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
809} 814}
810 815
diff --git a/drivers/clk/clk-npcm7xx.c b/drivers/clk/clk-npcm7xx.c
index 740af90a9508..c5edf8f2fd19 100644
--- a/drivers/clk/clk-npcm7xx.c
+++ b/drivers/clk/clk-npcm7xx.c
@@ -558,8 +558,8 @@ static void __init npcm7xx_clk_init(struct device_node *clk_np)
558 if (!clk_base) 558 if (!clk_base)
559 goto npcm7xx_init_error; 559 goto npcm7xx_init_error;
560 560
561 npcm7xx_clk_data = kzalloc(sizeof(*npcm7xx_clk_data->hws) * 561 npcm7xx_clk_data = kzalloc(struct_size(npcm7xx_clk_data, hws,
562 NPCM7XX_NUM_CLOCKS + sizeof(npcm7xx_clk_data), GFP_KERNEL); 562 NPCM7XX_NUM_CLOCKS), GFP_KERNEL);
563 if (!npcm7xx_clk_data) 563 if (!npcm7xx_clk_data)
564 goto npcm7xx_init_np_err; 564 goto npcm7xx_init_np_err;
565 565
diff --git a/drivers/clk/x86/clk-pmc-atom.c b/drivers/clk/x86/clk-pmc-atom.c
index 08ef69945ffb..d977193842df 100644
--- a/drivers/clk/x86/clk-pmc-atom.c
+++ b/drivers/clk/x86/clk-pmc-atom.c
@@ -55,6 +55,7 @@ struct clk_plt_data {
55 u8 nparents; 55 u8 nparents;
56 struct clk_plt *clks[PMC_CLK_NUM]; 56 struct clk_plt *clks[PMC_CLK_NUM];
57 struct clk_lookup *mclk_lookup; 57 struct clk_lookup *mclk_lookup;
58 struct clk_lookup *ether_clk_lookup;
58}; 59};
59 60
60/* Return an index in parent table */ 61/* Return an index in parent table */
@@ -186,13 +187,6 @@ static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id,
186 pclk->reg = base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE; 187 pclk->reg = base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE;
187 spin_lock_init(&pclk->lock); 188 spin_lock_init(&pclk->lock);
188 189
189 /*
190 * If the clock was already enabled by the firmware mark it as critical
191 * to avoid it being gated by the clock framework if no driver owns it.
192 */
193 if (plt_clk_is_enabled(&pclk->hw))
194 init.flags |= CLK_IS_CRITICAL;
195
196 ret = devm_clk_hw_register(&pdev->dev, &pclk->hw); 190 ret = devm_clk_hw_register(&pdev->dev, &pclk->hw);
197 if (ret) { 191 if (ret) {
198 pclk = ERR_PTR(ret); 192 pclk = ERR_PTR(ret);
@@ -351,11 +345,20 @@ static int plt_clk_probe(struct platform_device *pdev)
351 goto err_unreg_clk_plt; 345 goto err_unreg_clk_plt;
352 } 346 }
353 347
348 data->ether_clk_lookup = clkdev_hw_create(&data->clks[4]->hw,
349 "ether_clk", NULL);
350 if (!data->ether_clk_lookup) {
351 err = -ENOMEM;
352 goto err_drop_mclk;
353 }
354
354 plt_clk_free_parent_names_loop(parent_names, data->nparents); 355 plt_clk_free_parent_names_loop(parent_names, data->nparents);
355 356
356 platform_set_drvdata(pdev, data); 357 platform_set_drvdata(pdev, data);
357 return 0; 358 return 0;
358 359
360err_drop_mclk:
361 clkdev_drop(data->mclk_lookup);
359err_unreg_clk_plt: 362err_unreg_clk_plt:
360 plt_clk_unregister_loop(data, i); 363 plt_clk_unregister_loop(data, i);
361 plt_clk_unregister_parents(data); 364 plt_clk_unregister_parents(data);
@@ -369,6 +372,7 @@ static int plt_clk_remove(struct platform_device *pdev)
369 372
370 data = platform_get_drvdata(pdev); 373 data = platform_get_drvdata(pdev);
371 374
375 clkdev_drop(data->ether_clk_lookup);
372 clkdev_drop(data->mclk_lookup); 376 clkdev_drop(data->mclk_lookup);
373 plt_clk_unregister_loop(data, PMC_CLK_NUM); 377 plt_clk_unregister_loop(data, PMC_CLK_NUM);
374 plt_clk_unregister_parents(data); 378 plt_clk_unregister_parents(data);
diff --git a/drivers/clk/x86/clk-st.c b/drivers/clk/x86/clk-st.c
index fb62f3938008..3a0996f2d556 100644
--- a/drivers/clk/x86/clk-st.c
+++ b/drivers/clk/x86/clk-st.c
@@ -46,7 +46,7 @@ static int st_clk_probe(struct platform_device *pdev)
46 clk_oscout1_parents, ARRAY_SIZE(clk_oscout1_parents), 46 clk_oscout1_parents, ARRAY_SIZE(clk_oscout1_parents),
47 0, st_data->base + CLKDRVSTR2, OSCOUT1CLK25MHZ, 3, 0, NULL); 47 0, st_data->base + CLKDRVSTR2, OSCOUT1CLK25MHZ, 3, 0, NULL);
48 48
49 clk_set_parent(hws[ST_CLK_MUX]->clk, hws[ST_CLK_25M]->clk); 49 clk_set_parent(hws[ST_CLK_MUX]->clk, hws[ST_CLK_48M]->clk);
50 50
51 hws[ST_CLK_GATE] = clk_hw_register_gate(NULL, "oscout1", "oscout1_mux", 51 hws[ST_CLK_GATE] = clk_hw_register_gate(NULL, "oscout1", "oscout1_mux",
52 0, st_data->base + MISCCLKCNTL1, OSCCLKENB, 52 0, st_data->base + MISCCLKCNTL1, OSCCLKENB,
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 110483f0e3fb..e26a40971b26 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -379,9 +379,20 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
379 if (idx == -1) 379 if (idx == -1)
380 idx = i; /* first enabled state */ 380 idx = i; /* first enabled state */
381 if (s->target_residency > data->predicted_us) { 381 if (s->target_residency > data->predicted_us) {
382 if (!tick_nohz_tick_stopped()) 382 if (data->predicted_us < TICK_USEC)
383 break; 383 break;
384 384
385 if (!tick_nohz_tick_stopped()) {
386 /*
387 * If the state selected so far is shallow,
388 * waking up early won't hurt, so retain the
389 * tick in that case and let the governor run
390 * again in the next iteration of the loop.
391 */
392 expected_interval = drv->states[idx].target_residency;
393 break;
394 }
395
385 /* 396 /*
386 * If the state selected so far is shallow and this 397 * If the state selected so far is shallow and this
387 * state's target residency matches the time till the 398 * state's target residency matches the time till the
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index 6e61cc93c2b0..d7aa7d7ff102 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -679,10 +679,8 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
679 int ret = 0; 679 int ret = 0;
680 680
681 if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) { 681 if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
682 crypto_ablkcipher_set_flags(ablkcipher,
683 CRYPTO_TFM_RES_BAD_KEY_LEN);
684 dev_err(jrdev, "key size mismatch\n"); 682 dev_err(jrdev, "key size mismatch\n");
685 return -EINVAL; 683 goto badkey;
686 } 684 }
687 685
688 ctx->cdata.keylen = keylen; 686 ctx->cdata.keylen = keylen;
@@ -715,7 +713,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
715 return ret; 713 return ret;
716badkey: 714badkey:
717 crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 715 crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
718 return 0; 716 return -EINVAL;
719} 717}
720 718
721/* 719/*
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index 578ea63a3109..f26d62e5533a 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -71,8 +71,8 @@ static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
71 dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE); 71 dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
72 dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE); 72 dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
73 dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE); 73 dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
74 dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE); 74 dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
75 dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE); 75 dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
76} 76}
77 77
78static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc, 78static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
@@ -90,8 +90,8 @@ static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
90 dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE); 90 dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
91 dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE); 91 dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
92 dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE); 92 dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
93 dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE); 93 dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
94 dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE); 94 dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
95} 95}
96 96
97/* RSA Job Completion handler */ 97/* RSA Job Completion handler */
@@ -417,13 +417,13 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
417 goto unmap_p; 417 goto unmap_p;
418 } 418 }
419 419
420 pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE); 420 pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
421 if (dma_mapping_error(dev, pdb->tmp1_dma)) { 421 if (dma_mapping_error(dev, pdb->tmp1_dma)) {
422 dev_err(dev, "Unable to map RSA tmp1 memory\n"); 422 dev_err(dev, "Unable to map RSA tmp1 memory\n");
423 goto unmap_q; 423 goto unmap_q;
424 } 424 }
425 425
426 pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE); 426 pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
427 if (dma_mapping_error(dev, pdb->tmp2_dma)) { 427 if (dma_mapping_error(dev, pdb->tmp2_dma)) {
428 dev_err(dev, "Unable to map RSA tmp2 memory\n"); 428 dev_err(dev, "Unable to map RSA tmp2 memory\n");
429 goto unmap_tmp1; 429 goto unmap_tmp1;
@@ -451,7 +451,7 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
451 return 0; 451 return 0;
452 452
453unmap_tmp1: 453unmap_tmp1:
454 dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE); 454 dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
455unmap_q: 455unmap_q:
456 dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE); 456 dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
457unmap_p: 457unmap_p:
@@ -504,13 +504,13 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
504 goto unmap_dq; 504 goto unmap_dq;
505 } 505 }
506 506
507 pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE); 507 pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
508 if (dma_mapping_error(dev, pdb->tmp1_dma)) { 508 if (dma_mapping_error(dev, pdb->tmp1_dma)) {
509 dev_err(dev, "Unable to map RSA tmp1 memory\n"); 509 dev_err(dev, "Unable to map RSA tmp1 memory\n");
510 goto unmap_qinv; 510 goto unmap_qinv;
511 } 511 }
512 512
513 pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE); 513 pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
514 if (dma_mapping_error(dev, pdb->tmp2_dma)) { 514 if (dma_mapping_error(dev, pdb->tmp2_dma)) {
515 dev_err(dev, "Unable to map RSA tmp2 memory\n"); 515 dev_err(dev, "Unable to map RSA tmp2 memory\n");
516 goto unmap_tmp1; 516 goto unmap_tmp1;
@@ -538,7 +538,7 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
538 return 0; 538 return 0;
539 539
540unmap_tmp1: 540unmap_tmp1:
541 dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE); 541 dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
542unmap_qinv: 542unmap_qinv:
543 dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE); 543 dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
544unmap_dq: 544unmap_dq:
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index f4f258075b89..acdd72016ffe 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -190,7 +190,8 @@ static void caam_jr_dequeue(unsigned long devarg)
190 BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0); 190 BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0);
191 191
192 /* Unmap just-run descriptor so we can post-process */ 192 /* Unmap just-run descriptor so we can post-process */
193 dma_unmap_single(dev, jrp->outring[hw_idx].desc, 193 dma_unmap_single(dev,
194 caam_dma_to_cpu(jrp->outring[hw_idx].desc),
194 jrp->entinfo[sw_idx].desc_size, 195 jrp->entinfo[sw_idx].desc_size,
195 DMA_TO_DEVICE); 196 DMA_TO_DEVICE);
196 197
diff --git a/drivers/crypto/cavium/nitrox/nitrox_dev.h b/drivers/crypto/cavium/nitrox/nitrox_dev.h
index 9a476bb6d4c7..af596455b420 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_dev.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_dev.h
@@ -35,6 +35,7 @@ struct nitrox_cmdq {
35 /* requests in backlog queues */ 35 /* requests in backlog queues */
36 atomic_t backlog_count; 36 atomic_t backlog_count;
37 37
38 int write_idx;
38 /* command size 32B/64B */ 39 /* command size 32B/64B */
39 u8 instr_size; 40 u8 instr_size;
40 u8 qno; 41 u8 qno;
@@ -87,7 +88,7 @@ struct nitrox_bh {
87 struct bh_data *slc; 88 struct bh_data *slc;
88}; 89};
89 90
90/* NITROX-5 driver state */ 91/* NITROX-V driver state */
91#define NITROX_UCODE_LOADED 0 92#define NITROX_UCODE_LOADED 0
92#define NITROX_READY 1 93#define NITROX_READY 1
93 94
diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c
index ebe267379ac9..4d31df07777f 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_lib.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c
@@ -36,6 +36,7 @@ static int cmdq_common_init(struct nitrox_cmdq *cmdq)
36 cmdq->head = PTR_ALIGN(cmdq->head_unaligned, PKT_IN_ALIGN); 36 cmdq->head = PTR_ALIGN(cmdq->head_unaligned, PKT_IN_ALIGN);
37 cmdq->dma = PTR_ALIGN(cmdq->dma_unaligned, PKT_IN_ALIGN); 37 cmdq->dma = PTR_ALIGN(cmdq->dma_unaligned, PKT_IN_ALIGN);
38 cmdq->qsize = (qsize + PKT_IN_ALIGN); 38 cmdq->qsize = (qsize + PKT_IN_ALIGN);
39 cmdq->write_idx = 0;
39 40
40 spin_lock_init(&cmdq->response_lock); 41 spin_lock_init(&cmdq->response_lock);
41 spin_lock_init(&cmdq->cmdq_lock); 42 spin_lock_init(&cmdq->cmdq_lock);
diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
index deaefd532aaa..4a362fc22f62 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
@@ -42,6 +42,16 @@
42 * Invalid flag options in AES-CCM IV. 42 * Invalid flag options in AES-CCM IV.
43 */ 43 */
44 44
45static inline int incr_index(int index, int count, int max)
46{
47 if ((index + count) >= max)
48 index = index + count - max;
49 else
50 index += count;
51
52 return index;
53}
54
45/** 55/**
46 * dma_free_sglist - unmap and free the sg lists. 56 * dma_free_sglist - unmap and free the sg lists.
47 * @ndev: N5 device 57 * @ndev: N5 device
@@ -426,30 +436,29 @@ static void post_se_instr(struct nitrox_softreq *sr,
426 struct nitrox_cmdq *cmdq) 436 struct nitrox_cmdq *cmdq)
427{ 437{
428 struct nitrox_device *ndev = sr->ndev; 438 struct nitrox_device *ndev = sr->ndev;
429 union nps_pkt_in_instr_baoff_dbell pkt_in_baoff_dbell; 439 int idx;
430 u64 offset;
431 u8 *ent; 440 u8 *ent;
432 441
433 spin_lock_bh(&cmdq->cmdq_lock); 442 spin_lock_bh(&cmdq->cmdq_lock);
434 443
435 /* get the next write offset */ 444 idx = cmdq->write_idx;
436 offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(cmdq->qno);
437 pkt_in_baoff_dbell.value = nitrox_read_csr(ndev, offset);
438 /* copy the instruction */ 445 /* copy the instruction */
439 ent = cmdq->head + pkt_in_baoff_dbell.s.aoff; 446 ent = cmdq->head + (idx * cmdq->instr_size);
440 memcpy(ent, &sr->instr, cmdq->instr_size); 447 memcpy(ent, &sr->instr, cmdq->instr_size);
441 /* flush the command queue updates */
442 dma_wmb();
443 448
444 sr->tstamp = jiffies;
445 atomic_set(&sr->status, REQ_POSTED); 449 atomic_set(&sr->status, REQ_POSTED);
446 response_list_add(sr, cmdq); 450 response_list_add(sr, cmdq);
451 sr->tstamp = jiffies;
452 /* flush the command queue updates */
453 dma_wmb();
447 454
448 /* Ring doorbell with count 1 */ 455 /* Ring doorbell with count 1 */
449 writeq(1, cmdq->dbell_csr_addr); 456 writeq(1, cmdq->dbell_csr_addr);
450 /* orders the doorbell rings */ 457 /* orders the doorbell rings */
451 mmiowb(); 458 mmiowb();
452 459
460 cmdq->write_idx = incr_index(idx, 1, ndev->qlen);
461
453 spin_unlock_bh(&cmdq->cmdq_lock); 462 spin_unlock_bh(&cmdq->cmdq_lock);
454} 463}
455 464
@@ -459,6 +468,9 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
459 struct nitrox_softreq *sr, *tmp; 468 struct nitrox_softreq *sr, *tmp;
460 int ret = 0; 469 int ret = 0;
461 470
471 if (!atomic_read(&cmdq->backlog_count))
472 return 0;
473
462 spin_lock_bh(&cmdq->backlog_lock); 474 spin_lock_bh(&cmdq->backlog_lock);
463 475
464 list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) { 476 list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) {
@@ -466,7 +478,7 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
466 478
467 /* submit until space available */ 479 /* submit until space available */
468 if (unlikely(cmdq_full(cmdq, ndev->qlen))) { 480 if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
469 ret = -EBUSY; 481 ret = -ENOSPC;
470 break; 482 break;
471 } 483 }
472 /* delete from backlog list */ 484 /* delete from backlog list */
@@ -491,23 +503,20 @@ static int nitrox_enqueue_request(struct nitrox_softreq *sr)
491{ 503{
492 struct nitrox_cmdq *cmdq = sr->cmdq; 504 struct nitrox_cmdq *cmdq = sr->cmdq;
493 struct nitrox_device *ndev = sr->ndev; 505 struct nitrox_device *ndev = sr->ndev;
494 int ret = -EBUSY; 506
507 /* try to post backlog requests */
508 post_backlog_cmds(cmdq);
495 509
496 if (unlikely(cmdq_full(cmdq, ndev->qlen))) { 510 if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
497 if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) 511 if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
498 return -EAGAIN; 512 return -ENOSPC;
499 513 /* add to backlog list */
500 backlog_list_add(sr, cmdq); 514 backlog_list_add(sr, cmdq);
501 } else { 515 return -EBUSY;
502 ret = post_backlog_cmds(cmdq);
503 if (ret) {
504 backlog_list_add(sr, cmdq);
505 return ret;
506 }
507 post_se_instr(sr, cmdq);
508 ret = -EINPROGRESS;
509 } 516 }
510 return ret; 517 post_se_instr(sr, cmdq);
518
519 return -EINPROGRESS;
511} 520}
512 521
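
After the rework, nitrox_enqueue_request() follows the usual Linux crypto queueing convention: drain any backlog first, then either post directly (-EINPROGRESS), park the request on the backlog (-EBUSY) when the caller set CRYPTO_TFM_REQ_MAY_BACKLOG, or reject it outright (-ENOSPC). A sketch of that control flow; queue_full(), backlog_add() and post_to_hw() are stand-ins for the driver's helpers:

```c
static int enqueue_request(struct request_ctx *req, struct cmd_queue *q)
{
	post_backlog_cmds(q);			/* drain queued work first */

	if (queue_full(q)) {
		if (!(req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -ENOSPC;		/* caller must retry later */
		backlog_add(req, q);
		return -EBUSY;			/* accepted, backlogged */
	}
	post_to_hw(req, q);
	return -EINPROGRESS;			/* submitted to hardware */
}
```
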
513/** 522/**
@@ -624,11 +633,9 @@ int nitrox_process_se_request(struct nitrox_device *ndev,
624 */ 633 */
625 sr->instr.fdata[0] = *((u64 *)&req->gph); 634 sr->instr.fdata[0] = *((u64 *)&req->gph);
626 sr->instr.fdata[1] = 0; 635 sr->instr.fdata[1] = 0;
627 /* flush the soft_req changes before posting the cmd */
628 wmb();
629 636
630 ret = nitrox_enqueue_request(sr); 637 ret = nitrox_enqueue_request(sr);
631 if (ret == -EAGAIN) 638 if (ret == -ENOSPC)
632 goto send_fail; 639 goto send_fail;
633 640
634 return ret; 641 return ret;
diff --git a/drivers/crypto/chelsio/chtls/chtls.h b/drivers/crypto/chelsio/chtls/chtls.h
index a53a0e6ba024..7725b6ee14ef 100644
--- a/drivers/crypto/chelsio/chtls/chtls.h
+++ b/drivers/crypto/chelsio/chtls/chtls.h
@@ -96,6 +96,10 @@ enum csk_flags {
96 CSK_CONN_INLINE, /* Connection on HW */ 96 CSK_CONN_INLINE, /* Connection on HW */
97}; 97};
98 98
99enum chtls_cdev_state {
100 CHTLS_CDEV_STATE_UP = 1
101};
102
99struct listen_ctx { 103struct listen_ctx {
100 struct sock *lsk; 104 struct sock *lsk;
101 struct chtls_dev *cdev; 105 struct chtls_dev *cdev;
@@ -146,6 +150,7 @@ struct chtls_dev {
146 unsigned int send_page_order; 150 unsigned int send_page_order;
147 int max_host_sndbuf; 151 int max_host_sndbuf;
148 struct key_map kmap; 152 struct key_map kmap;
153 unsigned int cdev_state;
149}; 154};
150 155
151struct chtls_hws { 156struct chtls_hws {
diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c
index 9b07f9165658..f59b044ebd25 100644
--- a/drivers/crypto/chelsio/chtls/chtls_main.c
+++ b/drivers/crypto/chelsio/chtls/chtls_main.c
@@ -160,6 +160,7 @@ static void chtls_register_dev(struct chtls_dev *cdev)
160 tlsdev->hash = chtls_create_hash; 160 tlsdev->hash = chtls_create_hash;
161 tlsdev->unhash = chtls_destroy_hash; 161 tlsdev->unhash = chtls_destroy_hash;
162 tls_register_device(&cdev->tlsdev); 162 tls_register_device(&cdev->tlsdev);
163 cdev->cdev_state = CHTLS_CDEV_STATE_UP;
163} 164}
164 165
165static void chtls_unregister_dev(struct chtls_dev *cdev) 166static void chtls_unregister_dev(struct chtls_dev *cdev)
@@ -281,8 +282,10 @@ static void chtls_free_all_uld(void)
281 struct chtls_dev *cdev, *tmp; 282 struct chtls_dev *cdev, *tmp;
282 283
283 mutex_lock(&cdev_mutex); 284 mutex_lock(&cdev_mutex);
284 list_for_each_entry_safe(cdev, tmp, &cdev_list, list) 285 list_for_each_entry_safe(cdev, tmp, &cdev_list, list) {
285 chtls_free_uld(cdev); 286 if (cdev->cdev_state == CHTLS_CDEV_STATE_UP)
287 chtls_free_uld(cdev);
288 }
286 mutex_unlock(&cdev_mutex); 289 mutex_unlock(&cdev_mutex);
287} 290}
288 291
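
The CHTLS_CDEV_STATE_UP flag makes chtls_free_all_uld() tear down only devices that completed chtls_register_dev(); without it, a device that failed mid-probe would be freed while half-initialized. The guard pattern reduced to its essentials (a sketch, all names illustrative):

```c
enum cdev_state { CDEV_STATE_UP = 1 };

struct cdev_ctx {
	unsigned int state;
};

void free_dev(struct cdev_ctx *cdev);	/* hypothetical teardown */

static void register_dev(struct cdev_ctx *cdev)
{
	/* ... all registration steps ... */
	cdev->state = CDEV_STATE_UP;	/* set last, only on success */
}

static void free_all(struct cdev_ctx *devs[], int n)
{
	for (int i = 0; i < n; i++)
		if (devs[i]->state == CDEV_STATE_UP)
			free_dev(devs[i]);
}
```
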
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
index 5285ece4f33a..b71895871be3 100644
--- a/drivers/crypto/vmx/aes_cbc.c
+++ b/drivers/crypto/vmx/aes_cbc.c
@@ -107,24 +107,23 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
107 ret = crypto_skcipher_encrypt(req); 107 ret = crypto_skcipher_encrypt(req);
108 skcipher_request_zero(req); 108 skcipher_request_zero(req);
109 } else { 109 } else {
110 preempt_disable();
111 pagefault_disable();
112 enable_kernel_vsx();
113
114 blkcipher_walk_init(&walk, dst, src, nbytes); 110 blkcipher_walk_init(&walk, dst, src, nbytes);
115 ret = blkcipher_walk_virt(desc, &walk); 111 ret = blkcipher_walk_virt(desc, &walk);
116 while ((nbytes = walk.nbytes)) { 112 while ((nbytes = walk.nbytes)) {
113 preempt_disable();
114 pagefault_disable();
115 enable_kernel_vsx();
117 aes_p8_cbc_encrypt(walk.src.virt.addr, 116 aes_p8_cbc_encrypt(walk.src.virt.addr,
118 walk.dst.virt.addr, 117 walk.dst.virt.addr,
119 nbytes & AES_BLOCK_MASK, 118 nbytes & AES_BLOCK_MASK,
120 &ctx->enc_key, walk.iv, 1); 119 &ctx->enc_key, walk.iv, 1);
120 disable_kernel_vsx();
121 pagefault_enable();
122 preempt_enable();
123
121 nbytes &= AES_BLOCK_SIZE - 1; 124 nbytes &= AES_BLOCK_SIZE - 1;
122 ret = blkcipher_walk_done(desc, &walk, nbytes); 125 ret = blkcipher_walk_done(desc, &walk, nbytes);
123 } 126 }
124
125 disable_kernel_vsx();
126 pagefault_enable();
127 preempt_enable();
128 } 127 }
129 128
130 return ret; 129 return ret;
@@ -147,24 +146,23 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
147 ret = crypto_skcipher_decrypt(req); 146 ret = crypto_skcipher_decrypt(req);
148 skcipher_request_zero(req); 147 skcipher_request_zero(req);
149 } else { 148 } else {
150 preempt_disable();
151 pagefault_disable();
152 enable_kernel_vsx();
153
154 blkcipher_walk_init(&walk, dst, src, nbytes); 149 blkcipher_walk_init(&walk, dst, src, nbytes);
155 ret = blkcipher_walk_virt(desc, &walk); 150 ret = blkcipher_walk_virt(desc, &walk);
156 while ((nbytes = walk.nbytes)) { 151 while ((nbytes = walk.nbytes)) {
152 preempt_disable();
153 pagefault_disable();
154 enable_kernel_vsx();
157 aes_p8_cbc_encrypt(walk.src.virt.addr, 155 aes_p8_cbc_encrypt(walk.src.virt.addr,
158 walk.dst.virt.addr, 156 walk.dst.virt.addr,
159 nbytes & AES_BLOCK_MASK, 157 nbytes & AES_BLOCK_MASK,
160 &ctx->dec_key, walk.iv, 0); 158 &ctx->dec_key, walk.iv, 0);
159 disable_kernel_vsx();
160 pagefault_enable();
161 preempt_enable();
162
161 nbytes &= AES_BLOCK_SIZE - 1; 163 nbytes &= AES_BLOCK_SIZE - 1;
162 ret = blkcipher_walk_done(desc, &walk, nbytes); 164 ret = blkcipher_walk_done(desc, &walk, nbytes);
163 } 165 }
164
165 disable_kernel_vsx();
166 pagefault_enable();
167 preempt_enable();
168 } 166 }
169 167
170 return ret; 168 return ret;
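
Both hunks shrink the enable_kernel_vsx() window so it brackets only the assembly call: blkcipher_walk_done() can fault pages in and sleep, which is illegal with preemption and page faults disabled. The loop in its final shape, lifted from the hunk above with the critical constraint annotated:

```c
while ((nbytes = walk.nbytes)) {
	/* Keep the no-preempt/no-pagefault window as small as
	 * possible: only the VSX assembly runs inside it. */
	preempt_disable();
	pagefault_disable();
	enable_kernel_vsx();

	aes_p8_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
			   nbytes & AES_BLOCK_MASK,
			   &ctx->enc_key, walk.iv, 1);

	disable_kernel_vsx();
	pagefault_enable();
	preempt_enable();

	nbytes &= AES_BLOCK_SIZE - 1;
	ret = blkcipher_walk_done(desc, &walk, nbytes); /* may sleep */
}
```
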
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
index 8bd9aff0f55f..e9954a7d4694 100644
--- a/drivers/crypto/vmx/aes_xts.c
+++ b/drivers/crypto/vmx/aes_xts.c
@@ -116,32 +116,39 @@ static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
116 ret = enc? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req); 116 ret = enc? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req);
117 skcipher_request_zero(req); 117 skcipher_request_zero(req);
118 } else { 118 } else {
119 blkcipher_walk_init(&walk, dst, src, nbytes);
120
121 ret = blkcipher_walk_virt(desc, &walk);
122
119 preempt_disable(); 123 preempt_disable();
120 pagefault_disable(); 124 pagefault_disable();
121 enable_kernel_vsx(); 125 enable_kernel_vsx();
122 126
123 blkcipher_walk_init(&walk, dst, src, nbytes);
124
125 ret = blkcipher_walk_virt(desc, &walk);
126 iv = walk.iv; 127 iv = walk.iv;
127 memset(tweak, 0, AES_BLOCK_SIZE); 128 memset(tweak, 0, AES_BLOCK_SIZE);
128 aes_p8_encrypt(iv, tweak, &ctx->tweak_key); 129 aes_p8_encrypt(iv, tweak, &ctx->tweak_key);
129 130
131 disable_kernel_vsx();
132 pagefault_enable();
133 preempt_enable();
134
130 while ((nbytes = walk.nbytes)) { 135 while ((nbytes = walk.nbytes)) {
136 preempt_disable();
137 pagefault_disable();
138 enable_kernel_vsx();
131 if (enc) 139 if (enc)
132 aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr, 140 aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
133 nbytes & AES_BLOCK_MASK, &ctx->enc_key, NULL, tweak); 141 nbytes & AES_BLOCK_MASK, &ctx->enc_key, NULL, tweak);
134 else 142 else
135 aes_p8_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr, 143 aes_p8_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
136 nbytes & AES_BLOCK_MASK, &ctx->dec_key, NULL, tweak); 144 nbytes & AES_BLOCK_MASK, &ctx->dec_key, NULL, tweak);
145 disable_kernel_vsx();
146 pagefault_enable();
147 preempt_enable();
137 148
138 nbytes &= AES_BLOCK_SIZE - 1; 149 nbytes &= AES_BLOCK_SIZE - 1;
139 ret = blkcipher_walk_done(desc, &walk, nbytes); 150 ret = blkcipher_walk_done(desc, &walk, nbytes);
140 } 151 }
141
142 disable_kernel_vsx();
143 pagefault_enable();
144 preempt_enable();
145 } 152 }
146 return ret; 153 return ret;
147} 154}
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index 6fd46083e629..bbe4d72ca105 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -392,7 +392,8 @@ static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf,
392{ 392{
393 struct file *filp = vmf->vma->vm_file; 393 struct file *filp = vmf->vma->vm_file;
394 unsigned long fault_size; 394 unsigned long fault_size;
395 int rc, id; 395 vm_fault_t rc = VM_FAULT_SIGBUS;
396 int id;
396 pfn_t pfn; 397 pfn_t pfn;
397 struct dev_dax *dev_dax = filp->private_data; 398 struct dev_dax *dev_dax = filp->private_data;
398 399
diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c
index b76cb17d879c..adfd316db1a8 100644
--- a/drivers/dma/mic_x100_dma.c
+++ b/drivers/dma/mic_x100_dma.c
@@ -639,7 +639,7 @@ static struct mic_dma_device *mic_dma_dev_reg(struct mbus_device *mbdev,
639 int ret; 639 int ret;
640 struct device *dev = &mbdev->dev; 640 struct device *dev = &mbdev->dev;
641 641
642 mic_dma_dev = kzalloc(sizeof(*mic_dma_dev), GFP_KERNEL); 642 mic_dma_dev = devm_kzalloc(dev, sizeof(*mic_dma_dev), GFP_KERNEL);
643 if (!mic_dma_dev) { 643 if (!mic_dma_dev) {
644 ret = -ENOMEM; 644 ret = -ENOMEM;
645 goto alloc_error; 645 goto alloc_error;
@@ -664,7 +664,6 @@ static struct mic_dma_device *mic_dma_dev_reg(struct mbus_device *mbdev,
664reg_error: 664reg_error:
665 mic_dma_uninit(mic_dma_dev); 665 mic_dma_uninit(mic_dma_dev);
666init_error: 666init_error:
667 kfree(mic_dma_dev);
668 mic_dma_dev = NULL; 667 mic_dma_dev = NULL;
669alloc_error: 668alloc_error:
670 dev_err(dev, "Error at %s %d ret=%d\n", __func__, __LINE__, ret); 669 dev_err(dev, "Error at %s %d ret=%d\n", __func__, __LINE__, ret);
@@ -674,7 +673,6 @@ alloc_error:
674static void mic_dma_dev_unreg(struct mic_dma_device *mic_dma_dev) 673static void mic_dma_dev_unreg(struct mic_dma_device *mic_dma_dev)
675{ 674{
676 mic_dma_uninit(mic_dma_dev); 675 mic_dma_uninit(mic_dma_dev);
677 kfree(mic_dma_dev);
678} 676}
679 677
680/* DEBUGFS CODE */ 678/* DEBUGFS CODE */
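
Switching to devm_kzalloc() ties the allocation's lifetime to the struct device, which is why both the error path and mic_dma_dev_unreg() can drop their kfree() calls. The general pattern, as a minimal sketch (my_priv is a placeholder type):

```c
#include <linux/device.h>
#include <linux/slab.h>

struct my_priv { int x; };	/* placeholder private data */

static int my_probe(struct device *dev)
{
	struct my_priv *priv;

	/* Freed automatically when `dev` is unbound/released; no
	 * matching kfree() in error paths or in remove(). */
	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	dev_set_drvdata(dev, priv);
	return 0;
}
```
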
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
index 721e6c57beae..64342944d917 100644
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -166,7 +166,13 @@ scmi_perf_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
166 le32_to_cpu(attr->sustained_freq_khz); 166 le32_to_cpu(attr->sustained_freq_khz);
167 dom_info->sustained_perf_level = 167 dom_info->sustained_perf_level =
168 le32_to_cpu(attr->sustained_perf_level); 168 le32_to_cpu(attr->sustained_perf_level);
169 dom_info->mult_factor = (dom_info->sustained_freq_khz * 1000) / 169 if (!dom_info->sustained_freq_khz ||
170 !dom_info->sustained_perf_level)
171 /* CPUFreq converts to kHz, hence default 1000 */
172 dom_info->mult_factor = 1000;
173 else
174 dom_info->mult_factor =
175 (dom_info->sustained_freq_khz * 1000) /
170 dom_info->sustained_perf_level; 176 dom_info->sustained_perf_level;
171 memcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE); 177 memcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
172 } 178 }
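
The SCMI change guards the divide: firmware reporting a zero sustained frequency or perf level would otherwise trigger a division by zero. The same logic as plain, runnable C:

```c
#include <stdint.h>
#include <stdio.h>

/* Mirrors the mult_factor computation above: fall back to 1000
 * (the kHz scale CPUFreq expects) when firmware reports zeros. */
static uint32_t mult_factor(uint32_t sustained_freq_khz,
			    uint32_t sustained_perf_level)
{
	if (!sustained_freq_khz || !sustained_perf_level)
		return 1000;
	return (sustained_freq_khz * 1000) / sustained_perf_level;
}

int main(void)
{
	printf("%u\n", mult_factor(2000000, 4000)); /* 500000 */
	printf("%u\n", mult_factor(0, 4000));       /* 1000 fallback */
	return 0;
}
```
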
diff --git a/drivers/fpga/dfl-fme-pr.c b/drivers/fpga/dfl-fme-pr.c
index fc9fd2d0482f..0b840531ef33 100644
--- a/drivers/fpga/dfl-fme-pr.c
+++ b/drivers/fpga/dfl-fme-pr.c
@@ -420,7 +420,7 @@ static int pr_mgmt_init(struct platform_device *pdev,
420 /* Create region for each port */ 420 /* Create region for each port */
421 fme_region = dfl_fme_create_region(pdata, mgr, 421 fme_region = dfl_fme_create_region(pdata, mgr,
422 fme_br->br, i); 422 fme_br->br, i);
423 if (!fme_region) { 423 if (IS_ERR(fme_region)) {
424 ret = PTR_ERR(fme_region); 424 ret = PTR_ERR(fme_region);
425 goto destroy_region; 425 goto destroy_region;
426 } 426 }
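
dfl_fme_create_region() returns an ERR_PTR() on failure, never NULL, so the old NULL test could never fire and PTR_ERR() was applied to a valid pointer. The canonical check for ERR_PTR-style APIs, as a self-contained sketch (widget_create() is hypothetical):

```c
#include <linux/err.h>
#include <linux/slab.h>

struct widget { int id; };

/* Hypothetical ERR_PTR-returning constructor, for illustration. */
static struct widget *widget_create(void)
{
	struct widget *w = kzalloc(sizeof(*w), GFP_KERNEL);

	if (!w)
		return ERR_PTR(-ENOMEM);
	return w;
}

static int widget_init(void)
{
	struct widget *w = widget_create();

	/* ERR_PTR() encodings are non-NULL, so a NULL test never
	 * fires; IS_ERR()/PTR_ERR() is the correct pair. */
	if (IS_ERR(w))
		return PTR_ERR(w);
	/* ... */
	return 0;
}
```
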
diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c
index 3530ccd17e04..da9781a2ef4a 100644
--- a/drivers/gpio/gpio-adp5588.c
+++ b/drivers/gpio/gpio-adp5588.c
@@ -41,6 +41,8 @@ struct adp5588_gpio {
41 uint8_t int_en[3]; 41 uint8_t int_en[3];
42 uint8_t irq_mask[3]; 42 uint8_t irq_mask[3];
43 uint8_t irq_stat[3]; 43 uint8_t irq_stat[3];
44 uint8_t int_input_en[3];
45 uint8_t int_lvl_cached[3];
44}; 46};
45 47
46static int adp5588_gpio_read(struct i2c_client *client, u8 reg) 48static int adp5588_gpio_read(struct i2c_client *client, u8 reg)
@@ -173,12 +175,28 @@ static void adp5588_irq_bus_sync_unlock(struct irq_data *d)
173 struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d); 175 struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d);
174 int i; 176 int i;
175 177
176 for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) 178 for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) {
179 if (dev->int_input_en[i]) {
180 mutex_lock(&dev->lock);
181 dev->dir[i] &= ~dev->int_input_en[i];
182 dev->int_input_en[i] = 0;
183 adp5588_gpio_write(dev->client, GPIO_DIR1 + i,
184 dev->dir[i]);
185 mutex_unlock(&dev->lock);
186 }
187
188 if (dev->int_lvl_cached[i] != dev->int_lvl[i]) {
189 dev->int_lvl_cached[i] = dev->int_lvl[i];
190 adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + i,
191 dev->int_lvl[i]);
192 }
193
177 if (dev->int_en[i] ^ dev->irq_mask[i]) { 194 if (dev->int_en[i] ^ dev->irq_mask[i]) {
178 dev->int_en[i] = dev->irq_mask[i]; 195 dev->int_en[i] = dev->irq_mask[i];
179 adp5588_gpio_write(dev->client, GPIO_INT_EN1 + i, 196 adp5588_gpio_write(dev->client, GPIO_INT_EN1 + i,
180 dev->int_en[i]); 197 dev->int_en[i]);
181 } 198 }
199 }
182 200
183 mutex_unlock(&dev->irq_lock); 201 mutex_unlock(&dev->irq_lock);
184} 202}
@@ -221,9 +239,7 @@ static int adp5588_irq_set_type(struct irq_data *d, unsigned int type)
221 else 239 else
222 return -EINVAL; 240 return -EINVAL;
223 241
224 adp5588_gpio_direction_input(&dev->gpio_chip, gpio); 242 dev->int_input_en[bank] |= bit;
225 adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + bank,
226 dev->int_lvl[bank]);
227 243
228 return 0; 244 return 0;
229} 245}
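
irq_set_type() runs with the irq_chip bus locked and cannot sleep, yet this controller sits on I2C, where every register write sleeps. The fix records the wanted changes in int_input_en[]/int_lvl_cached[] and flushes them from irq_bus_sync_unlock(), which may sleep. A sketch of that split; my_chip, MY_REG_DIR and my_reg_write() are hypothetical:

```c
#include <linux/irq.h>
#include <linux/mutex.h>
#include <linux/bits.h>

struct my_chip {
	struct mutex bus_lock;
	u32 pending_input_en;
};

static int my_irq_set_type(struct irq_data *d, unsigned int type)
{
	struct my_chip *chip = irq_data_get_irq_chip_data(d);

	/* Atomic context: cache the change, touch no I2C registers. */
	chip->pending_input_en |= BIT(irqd_to_hwirq(d));
	return 0;
}

static void my_irq_bus_sync_unlock(struct irq_data *d)
{
	struct my_chip *chip = irq_data_get_irq_chip_data(d);

	/* Process context: the deferred (sleeping) bus I/O runs here. */
	if (chip->pending_input_en) {
		my_reg_write(chip, MY_REG_DIR, chip->pending_input_en);
		chip->pending_input_en = 0;
	}
	mutex_unlock(&chip->bus_lock);
}
```
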
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index 28da700f5f52..044888fd96a1 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -728,6 +728,7 @@ static int dwapb_gpio_probe(struct platform_device *pdev)
728out_unregister: 728out_unregister:
729 dwapb_gpio_unregister(gpio); 729 dwapb_gpio_unregister(gpio);
730 dwapb_irq_teardown(gpio); 730 dwapb_irq_teardown(gpio);
731 clk_disable_unprepare(gpio->clk);
731 732
732 return err; 733 return err;
733} 734}
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index c48ed9d89ff5..8b9d7e42c600 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -25,7 +25,6 @@
25 25
26struct acpi_gpio_event { 26struct acpi_gpio_event {
27 struct list_head node; 27 struct list_head node;
28 struct list_head initial_sync_list;
29 acpi_handle handle; 28 acpi_handle handle;
30 unsigned int pin; 29 unsigned int pin;
31 unsigned int irq; 30 unsigned int irq;
@@ -49,10 +48,19 @@ struct acpi_gpio_chip {
49 struct mutex conn_lock; 48 struct mutex conn_lock;
50 struct gpio_chip *chip; 49 struct gpio_chip *chip;
51 struct list_head events; 50 struct list_head events;
51 struct list_head deferred_req_irqs_list_entry;
52}; 52};
53 53
54static LIST_HEAD(acpi_gpio_initial_sync_list); 54/*
55static DEFINE_MUTEX(acpi_gpio_initial_sync_list_lock); 55 * For gpiochips which call acpi_gpiochip_request_interrupts() before late_init
56 * (so builtin drivers) we register the ACPI GpioInt event handlers from a
57 * late_initcall_sync handler, so that other builtin drivers can register their
58 * OpRegions before the event handlers can run. This list contains gpiochips
59 * for which the acpi_gpiochip_request_interrupts() has been deferred.
60 */
61static DEFINE_MUTEX(acpi_gpio_deferred_req_irqs_lock);
62static LIST_HEAD(acpi_gpio_deferred_req_irqs_list);
63static bool acpi_gpio_deferred_req_irqs_done;
56 64
57static int acpi_gpiochip_find(struct gpio_chip *gc, void *data) 65static int acpi_gpiochip_find(struct gpio_chip *gc, void *data)
58{ 66{
@@ -89,21 +97,6 @@ static struct gpio_desc *acpi_get_gpiod(char *path, int pin)
89 return gpiochip_get_desc(chip, pin); 97 return gpiochip_get_desc(chip, pin);
90} 98}
91 99
92static void acpi_gpio_add_to_initial_sync_list(struct acpi_gpio_event *event)
93{
94 mutex_lock(&acpi_gpio_initial_sync_list_lock);
95 list_add(&event->initial_sync_list, &acpi_gpio_initial_sync_list);
96 mutex_unlock(&acpi_gpio_initial_sync_list_lock);
97}
98
99static void acpi_gpio_del_from_initial_sync_list(struct acpi_gpio_event *event)
100{
101 mutex_lock(&acpi_gpio_initial_sync_list_lock);
102 if (!list_empty(&event->initial_sync_list))
103 list_del_init(&event->initial_sync_list);
104 mutex_unlock(&acpi_gpio_initial_sync_list_lock);
105}
106
107static irqreturn_t acpi_gpio_irq_handler(int irq, void *data) 100static irqreturn_t acpi_gpio_irq_handler(int irq, void *data)
108{ 101{
109 struct acpi_gpio_event *event = data; 102 struct acpi_gpio_event *event = data;
@@ -186,7 +179,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
186 179
187 gpiod_direction_input(desc); 180 gpiod_direction_input(desc);
188 181
189 value = gpiod_get_value(desc); 182 value = gpiod_get_value_cansleep(desc);
190 183
191 ret = gpiochip_lock_as_irq(chip, pin); 184 ret = gpiochip_lock_as_irq(chip, pin);
192 if (ret) { 185 if (ret) {
@@ -229,7 +222,6 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
229 event->irq = irq; 222 event->irq = irq;
230 event->pin = pin; 223 event->pin = pin;
231 event->desc = desc; 224 event->desc = desc;
232 INIT_LIST_HEAD(&event->initial_sync_list);
233 225
234 ret = request_threaded_irq(event->irq, NULL, handler, irqflags, 226 ret = request_threaded_irq(event->irq, NULL, handler, irqflags,
235 "ACPI:Event", event); 227 "ACPI:Event", event);
@@ -251,10 +243,9 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
251 * may refer to OperationRegions from other (builtin) drivers which 243 * may refer to OperationRegions from other (builtin) drivers which
252 * may be probed after us. 244 * may be probed after us.
253 */ 245 */
254 if (handler == acpi_gpio_irq_handler && 246 if (((irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
255 (((irqflags & IRQF_TRIGGER_RISING) && value == 1) || 247 ((irqflags & IRQF_TRIGGER_FALLING) && value == 0))
256 ((irqflags & IRQF_TRIGGER_FALLING) && value == 0))) 248 handler(event->irq, event);
257 acpi_gpio_add_to_initial_sync_list(event);
258 249
259 return AE_OK; 250 return AE_OK;
260 251
@@ -283,6 +274,7 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
283 struct acpi_gpio_chip *acpi_gpio; 274 struct acpi_gpio_chip *acpi_gpio;
284 acpi_handle handle; 275 acpi_handle handle;
285 acpi_status status; 276 acpi_status status;
277 bool defer;
286 278
287 if (!chip->parent || !chip->to_irq) 279 if (!chip->parent || !chip->to_irq)
288 return; 280 return;
@@ -295,6 +287,16 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
295 if (ACPI_FAILURE(status)) 287 if (ACPI_FAILURE(status))
296 return; 288 return;
297 289
290 mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
291 defer = !acpi_gpio_deferred_req_irqs_done;
292 if (defer)
293 list_add(&acpi_gpio->deferred_req_irqs_list_entry,
294 &acpi_gpio_deferred_req_irqs_list);
295 mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
296
297 if (defer)
298 return;
299
298 acpi_walk_resources(handle, "_AEI", 300 acpi_walk_resources(handle, "_AEI",
299 acpi_gpiochip_request_interrupt, acpi_gpio); 301 acpi_gpiochip_request_interrupt, acpi_gpio);
300} 302}
@@ -325,11 +327,14 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
325 if (ACPI_FAILURE(status)) 327 if (ACPI_FAILURE(status))
326 return; 328 return;
327 329
330 mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
331 if (!list_empty(&acpi_gpio->deferred_req_irqs_list_entry))
332 list_del_init(&acpi_gpio->deferred_req_irqs_list_entry);
333 mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
334
328 list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) { 335 list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
329 struct gpio_desc *desc; 336 struct gpio_desc *desc;
330 337
331 acpi_gpio_del_from_initial_sync_list(event);
332
333 if (irqd_is_wakeup_set(irq_get_irq_data(event->irq))) 338 if (irqd_is_wakeup_set(irq_get_irq_data(event->irq)))
334 disable_irq_wake(event->irq); 339 disable_irq_wake(event->irq);
335 340
@@ -1052,6 +1057,7 @@ void acpi_gpiochip_add(struct gpio_chip *chip)
1052 1057
1053 acpi_gpio->chip = chip; 1058 acpi_gpio->chip = chip;
1054 INIT_LIST_HEAD(&acpi_gpio->events); 1059 INIT_LIST_HEAD(&acpi_gpio->events);
1060 INIT_LIST_HEAD(&acpi_gpio->deferred_req_irqs_list_entry);
1055 1061
1056 status = acpi_attach_data(handle, acpi_gpio_chip_dh, acpi_gpio); 1062 status = acpi_attach_data(handle, acpi_gpio_chip_dh, acpi_gpio);
1057 if (ACPI_FAILURE(status)) { 1063 if (ACPI_FAILURE(status)) {
@@ -1198,20 +1204,28 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id)
1198 return con_id == NULL; 1204 return con_id == NULL;
1199} 1205}
1200 1206
1201/* Sync the initial state of handlers after all builtin drivers have probed */ 1207/* Run deferred acpi_gpiochip_request_interrupts() */
1202static int acpi_gpio_initial_sync(void) 1208static int acpi_gpio_handle_deferred_request_interrupts(void)
1203{ 1209{
1204 struct acpi_gpio_event *event, *ep; 1210 struct acpi_gpio_chip *acpi_gpio, *tmp;
1211
1212 mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
1213 list_for_each_entry_safe(acpi_gpio, tmp,
1214 &acpi_gpio_deferred_req_irqs_list,
1215 deferred_req_irqs_list_entry) {
1216 acpi_handle handle;
1205 1217
1206 mutex_lock(&acpi_gpio_initial_sync_list_lock); 1218 handle = ACPI_HANDLE(acpi_gpio->chip->parent);
1207 list_for_each_entry_safe(event, ep, &acpi_gpio_initial_sync_list, 1219 acpi_walk_resources(handle, "_AEI",
1208 initial_sync_list) { 1220 acpi_gpiochip_request_interrupt, acpi_gpio);
1209 acpi_evaluate_object(event->handle, NULL, NULL, NULL); 1221
1210 list_del_init(&event->initial_sync_list); 1222 list_del_init(&acpi_gpio->deferred_req_irqs_list_entry);
1211 } 1223 }
1212 mutex_unlock(&acpi_gpio_initial_sync_list_lock); 1224
1225 acpi_gpio_deferred_req_irqs_done = true;
1226 mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
1213 1227
1214 return 0; 1228 return 0;
1215} 1229}
1216/* We must use _sync so that this runs after the first deferred_probe run */ 1230/* We must use _sync so that this runs after the first deferred_probe run */
1217late_initcall_sync(acpi_gpio_initial_sync); 1231late_initcall_sync(acpi_gpio_handle_deferred_request_interrupts);
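
gpiochips that call acpi_gpiochip_request_interrupts() before late_init now park themselves on acpi_gpio_deferred_req_irqs_list instead of registering _AEI event handlers immediately; a late_initcall_sync walks the list once all builtin drivers (and their OpRegions) are in place. The defer-then-flush shape, as a sketch with a placeholder work function:

```c
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/init.h>

static DEFINE_MUTEX(deferred_lock);
static LIST_HEAD(deferred_list);
static bool deferred_done;

struct client { struct list_head entry; };

static void do_work(struct client *c) { /* hypothetical real work */ }

void client_request(struct client *c)
{
	bool defer;

	mutex_lock(&deferred_lock);
	defer = !deferred_done;
	if (defer)
		list_add(&c->entry, &deferred_list);
	mutex_unlock(&deferred_lock);

	if (!defer)
		do_work(c);	/* late enough: run immediately */
}

static int __init flush_deferred(void)
{
	struct client *c, *tmp;

	mutex_lock(&deferred_lock);
	list_for_each_entry_safe(c, tmp, &deferred_list, entry) {
		do_work(c);
		list_del_init(&c->entry);
	}
	deferred_done = true;	/* later callers run inline */
	mutex_unlock(&deferred_lock);
	return 0;
}
late_initcall_sync(flush_deferred);
```
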
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index a4f1157d6aa0..d4e7a09598fa 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -31,6 +31,7 @@ static int of_gpiochip_match_node_and_xlate(struct gpio_chip *chip, void *data)
31 struct of_phandle_args *gpiospec = data; 31 struct of_phandle_args *gpiospec = data;
32 32
33 return chip->gpiodev->dev.of_node == gpiospec->np && 33 return chip->gpiodev->dev.of_node == gpiospec->np &&
34 chip->of_xlate &&
34 chip->of_xlate(chip, gpiospec, NULL) >= 0; 35 chip->of_xlate(chip, gpiospec, NULL) >= 0;
35} 36}
36 37
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 502b94fb116a..b31d121a876b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -39,6 +39,7 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
39{ 39{
40 struct drm_gem_object *gobj; 40 struct drm_gem_object *gobj;
41 unsigned long size; 41 unsigned long size;
42 int r;
42 43
43 gobj = drm_gem_object_lookup(p->filp, data->handle); 44 gobj = drm_gem_object_lookup(p->filp, data->handle);
44 if (gobj == NULL) 45 if (gobj == NULL)
@@ -50,20 +51,26 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
50 p->uf_entry.tv.shared = true; 51 p->uf_entry.tv.shared = true;
51 p->uf_entry.user_pages = NULL; 52 p->uf_entry.user_pages = NULL;
52 53
53 size = amdgpu_bo_size(p->uf_entry.robj);
54 if (size != PAGE_SIZE || (data->offset + 8) > size)
55 return -EINVAL;
56
57 *offset = data->offset;
58
59 drm_gem_object_put_unlocked(gobj); 54 drm_gem_object_put_unlocked(gobj);
60 55
56 size = amdgpu_bo_size(p->uf_entry.robj);
57 if (size != PAGE_SIZE || (data->offset + 8) > size) {
58 r = -EINVAL;
59 goto error_unref;
60 }
61
61 if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) { 62 if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) {
62 amdgpu_bo_unref(&p->uf_entry.robj); 63 r = -EINVAL;
63 return -EINVAL; 64 goto error_unref;
64 } 65 }
65 66
67 *offset = data->offset;
68
66 return 0; 69 return 0;
70
71error_unref:
72 amdgpu_bo_unref(&p->uf_entry.robj);
73 return r;
67} 74}
68 75
69static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p, 76static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
@@ -1012,13 +1019,9 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
1012 if (r) 1019 if (r)
1013 return r; 1020 return r;
1014 1021
1015 if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE) { 1022 if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
1016 parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT; 1023 parser->job->preamble_status |=
1017 if (!parser->ctx->preamble_presented) { 1024 AMDGPU_PREAMBLE_IB_PRESENT;
1018 parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
1019 parser->ctx->preamble_presented = true;
1020 }
1021 }
1022 1025
1023 if (parser->ring && parser->ring != ring) 1026 if (parser->ring && parser->ring != ring)
1024 return -EINVAL; 1027 return -EINVAL;
@@ -1207,26 +1210,24 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
1207 1210
1208 int r; 1211 int r;
1209 1212
1213 job = p->job;
1214 p->job = NULL;
1215
1216 r = drm_sched_job_init(&job->base, entity, p->filp);
1217 if (r)
1218 goto error_unlock;
1219
1220 /* No memory allocation is allowed while holding the mn lock */
1210 amdgpu_mn_lock(p->mn); 1221 amdgpu_mn_lock(p->mn);
1211 amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { 1222 amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
1212 struct amdgpu_bo *bo = e->robj; 1223 struct amdgpu_bo *bo = e->robj;
1213 1224
1214 if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) { 1225 if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
1215 amdgpu_mn_unlock(p->mn); 1226 r = -ERESTARTSYS;
1216 return -ERESTARTSYS; 1227 goto error_abort;
1217 } 1228 }
1218 } 1229 }
1219 1230
1220 job = p->job;
1221 p->job = NULL;
1222
1223 r = drm_sched_job_init(&job->base, entity, p->filp);
1224 if (r) {
1225 amdgpu_job_free(job);
1226 amdgpu_mn_unlock(p->mn);
1227 return r;
1228 }
1229
1230 job->owner = p->filp; 1231 job->owner = p->filp;
1231 p->fence = dma_fence_get(&job->base.s_fence->finished); 1232 p->fence = dma_fence_get(&job->base.s_fence->finished);
1232 1233
@@ -1241,6 +1242,12 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
1241 1242
1242 amdgpu_cs_post_dependencies(p); 1243 amdgpu_cs_post_dependencies(p);
1243 1244
1245 if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
1246 !p->ctx->preamble_presented) {
1247 job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
1248 p->ctx->preamble_presented = true;
1249 }
1250
1244 cs->out.handle = seq; 1251 cs->out.handle = seq;
1245 job->uf_sequence = seq; 1252 job->uf_sequence = seq;
1246 1253
@@ -1258,6 +1265,15 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
1258 amdgpu_mn_unlock(p->mn); 1265 amdgpu_mn_unlock(p->mn);
1259 1266
1260 return 0; 1267 return 0;
1268
1269error_abort:
1270 dma_fence_put(&job->base.s_fence->finished);
1271 job->base.s_fence = NULL;
1272 amdgpu_mn_unlock(p->mn);
1273
1274error_unlock:
1275 amdgpu_job_free(job);
1276 return r;
1261} 1277}
1262 1278
1263int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) 1279int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
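
The submit path now runs drm_sched_job_init() before taking the MMU-notifier lock, honoring the hunk's comment that no memory allocation is allowed while holding it (allocation can recurse into reclaim), and adds unwind labels so every failure releases in reverse order. A sketch of the ordering; all names are stand-ins for the driver's helpers:

```c
static int submit(struct parser *p, struct job *job)
{
	int r;

	r = sched_job_init(job);	/* may allocate: must precede lock */
	if (r)
		goto error_free;

	mn_lock(p);			/* no allocations from here on */

	if (userptr_invalidated(p)) {
		r = -ERESTARTSYS;
		goto error_unlock;
	}

	/* ... take fence reference, push job to the scheduler ... */

	mn_unlock(p);
	return 0;

error_unlock:
	mn_unlock(p);
error_free:
	sched_job_cleanup(job);		/* reverse of sched_job_init() */
	return r;
}
```
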
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 8ab5ccbc14ac..39bf2ce548c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2063,6 +2063,7 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
2063 static enum amd_ip_block_type ip_order[] = { 2063 static enum amd_ip_block_type ip_order[] = {
2064 AMD_IP_BLOCK_TYPE_GMC, 2064 AMD_IP_BLOCK_TYPE_GMC,
2065 AMD_IP_BLOCK_TYPE_COMMON, 2065 AMD_IP_BLOCK_TYPE_COMMON,
2066 AMD_IP_BLOCK_TYPE_PSP,
2066 AMD_IP_BLOCK_TYPE_IH, 2067 AMD_IP_BLOCK_TYPE_IH,
2067 }; 2068 };
2068 2069
@@ -2093,7 +2094,6 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
2093 2094
2094 static enum amd_ip_block_type ip_order[] = { 2095 static enum amd_ip_block_type ip_order[] = {
2095 AMD_IP_BLOCK_TYPE_SMC, 2096 AMD_IP_BLOCK_TYPE_SMC,
2096 AMD_IP_BLOCK_TYPE_PSP,
2097 AMD_IP_BLOCK_TYPE_DCE, 2097 AMD_IP_BLOCK_TYPE_DCE,
2098 AMD_IP_BLOCK_TYPE_GFX, 2098 AMD_IP_BLOCK_TYPE_GFX,
2099 AMD_IP_BLOCK_TYPE_SDMA, 2099 AMD_IP_BLOCK_TYPE_SDMA,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 5518e623fed2..51b5e977ca88 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -164,8 +164,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
164 return r; 164 return r;
165 } 165 }
166 166
167 need_ctx_switch = ring->current_ctx != fence_ctx;
167 if (ring->funcs->emit_pipeline_sync && job && 168 if (ring->funcs->emit_pipeline_sync && job &&
168 ((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) || 169 ((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) ||
170 (amdgpu_sriov_vf(adev) && need_ctx_switch) ||
169 amdgpu_vm_need_pipeline_sync(ring, job))) { 171 amdgpu_vm_need_pipeline_sync(ring, job))) {
170 need_pipe_sync = true; 172 need_pipe_sync = true;
171 dma_fence_put(tmp); 173 dma_fence_put(tmp);
@@ -196,7 +198,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
196 } 198 }
197 199
198 skip_preamble = ring->current_ctx == fence_ctx; 200 skip_preamble = ring->current_ctx == fence_ctx;
199 need_ctx_switch = ring->current_ctx != fence_ctx;
200 if (job && ring->funcs->emit_cntxcntl) { 201 if (job && ring->funcs->emit_cntxcntl) {
201 if (need_ctx_switch) 202 if (need_ctx_switch)
202 status |= AMDGPU_HAVE_CTX_SWITCH; 203 status |= AMDGPU_HAVE_CTX_SWITCH;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 8f98629fbe59..7b4e657a95c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -1932,14 +1932,6 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
1932 amdgpu_fence_wait_empty(ring); 1932 amdgpu_fence_wait_empty(ring);
1933 } 1933 }
1934 1934
1935 mutex_lock(&adev->pm.mutex);
1936 /* update battery/ac status */
1937 if (power_supply_is_system_supplied() > 0)
1938 adev->pm.ac_power = true;
1939 else
1940 adev->pm.ac_power = false;
1941 mutex_unlock(&adev->pm.mutex);
1942
1943 if (adev->powerplay.pp_funcs->dispatch_tasks) { 1935 if (adev->powerplay.pp_funcs->dispatch_tasks) {
1944 if (!amdgpu_device_has_dc_support(adev)) { 1936 if (!amdgpu_device_has_dc_support(adev)) {
1945 mutex_lock(&adev->pm.mutex); 1937 mutex_lock(&adev->pm.mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index ece0ac703e27..b17771dd5ce7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -172,6 +172,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
172 * is validated on next vm use to avoid fault. 172 * is validated on next vm use to avoid fault.
173 * */ 173 * */
174 list_move_tail(&base->vm_status, &vm->evicted); 174 list_move_tail(&base->vm_status, &vm->evicted);
175 base->moved = true;
175} 176}
176 177
177/** 178/**
@@ -369,7 +370,6 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
369 uint64_t addr; 370 uint64_t addr;
370 int r; 371 int r;
371 372
372 addr = amdgpu_bo_gpu_offset(bo);
373 entries = amdgpu_bo_size(bo) / 8; 373 entries = amdgpu_bo_size(bo) / 8;
374 374
375 if (pte_support_ats) { 375 if (pte_support_ats) {
@@ -401,6 +401,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
401 if (r) 401 if (r)
402 goto error; 402 goto error;
403 403
404 addr = amdgpu_bo_gpu_offset(bo);
404 if (ats_entries) { 405 if (ats_entries) {
405 uint64_t ats_value; 406 uint64_t ats_value;
406 407
@@ -2483,28 +2484,52 @@ static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
2483 * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size 2484 * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
2484 * 2485 *
2485 * @adev: amdgpu_device pointer 2486 * @adev: amdgpu_device pointer
2486 * @vm_size: the default vm size if it's set auto 2487 * @min_vm_size: the minimum vm size in GB if it's set auto
2487 * @fragment_size_default: Default PTE fragment size 2488 * @fragment_size_default: Default PTE fragment size
2488 * @max_level: max VMPT level 2489 * @max_level: max VMPT level
2489 * @max_bits: max address space size in bits 2490 * @max_bits: max address space size in bits
2490 * 2491 *
2491 */ 2492 */
2492void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size, 2493void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
2493 uint32_t fragment_size_default, unsigned max_level, 2494 uint32_t fragment_size_default, unsigned max_level,
2494 unsigned max_bits) 2495 unsigned max_bits)
2495{ 2496{
2497 unsigned int max_size = 1 << (max_bits - 30);
2498 unsigned int vm_size;
2496 uint64_t tmp; 2499 uint64_t tmp;
2497 2500
2498 /* adjust vm size first */ 2501 /* adjust vm size first */
2499 if (amdgpu_vm_size != -1) { 2502 if (amdgpu_vm_size != -1) {
2500 unsigned max_size = 1 << (max_bits - 30);
2501
2502 vm_size = amdgpu_vm_size; 2503 vm_size = amdgpu_vm_size;
2503 if (vm_size > max_size) { 2504 if (vm_size > max_size) {
2504 dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n", 2505 dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
2505 amdgpu_vm_size, max_size); 2506 amdgpu_vm_size, max_size);
2506 vm_size = max_size; 2507 vm_size = max_size;
2507 } 2508 }
2509 } else {
2510 struct sysinfo si;
2511 unsigned int phys_ram_gb;
2512
2513 /* Optimal VM size depends on the amount of physical
2514 * RAM available. Underlying requirements and
2515 * assumptions:
2516 *
2517 * - Need to map system memory and VRAM from all GPUs
2518 * - VRAM from other GPUs not known here
2519 * - Assume VRAM <= system memory
2520 * - On GFX8 and older, VM space can be segmented for
2521 * different MTYPEs
2522 * - Need to allow room for fragmentation, guard pages etc.
2523 *
2524 * This adds up to a rough guess of system memory x3.
2525 * Round up to power of two to maximize the available
2526 * VM size with the given page table size.
2527 */
2528 si_meminfo(&si);
2529 phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
2530 (1 << 30) - 1) >> 30;
2531 vm_size = roundup_pow_of_two(
2532 min(max(phys_ram_gb * 3, min_vm_size), max_size));
2508 } 2533 }
2509 2534
2510 adev->vm_manager.max_pfn = (uint64_t)vm_size << 18; 2535 adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
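
amdgpu_vm_adjust_size() now derives the default VM size from installed RAM: roughly three times physical memory (system memory, an assumed-equal amount of VRAM, plus slack for fragmentation and guard pages), floored at the minimum, clamped to the address-space maximum, and rounded up to a power of two. A runnable demo of the arithmetic:

```c
#include <stdint.h>
#include <stdio.h>

/* Round up to the next power of two (32-bit); mirrors
 * roundup_pow_of_two() for the values used here. */
static uint32_t roundup_pow2(uint32_t v)
{
	v--;
	v |= v >> 1; v |= v >> 2; v |= v >> 4;
	v |= v >> 8; v |= v >> 16;
	return v + 1;
}

static uint32_t pick_vm_size(uint32_t phys_ram_gb,
			     uint32_t min_vm_size, unsigned max_bits)
{
	uint32_t max_size = 1u << (max_bits - 30);	/* GB */
	uint32_t want = phys_ram_gb * 3;		/* RAM + VRAM + slack */

	if (want < min_vm_size)
		want = min_vm_size;
	if (want > max_size)
		want = max_size;
	return roundup_pow2(want);
}

int main(void)
{
	/* e.g. 16 GB RAM, 32 GB minimum, 48-bit address space */
	printf("%u GB\n", pick_vm_size(16, 32, 48));	/* -> 64 */
	return 0;
}
```
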
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 67a15d439ac0..9fa9df0c5e7f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -321,7 +321,7 @@ struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
321void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket); 321void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket);
322void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, 322void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
323 struct amdgpu_bo_va *bo_va); 323 struct amdgpu_bo_va *bo_va);
324void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size, 324void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
325 uint32_t fragment_size_default, unsigned max_level, 325 uint32_t fragment_size_default, unsigned max_level,
326 unsigned max_bits); 326 unsigned max_bits);
327int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); 327int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 5cd45210113f..5a9534a82d40 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -5664,6 +5664,11 @@ static int gfx_v8_0_set_powergating_state(void *handle,
5664 if (amdgpu_sriov_vf(adev)) 5664 if (amdgpu_sriov_vf(adev))
5665 return 0; 5665 return 0;
5666 5666
5667 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG |
5668 AMD_PG_SUPPORT_RLC_SMU_HS |
5669 AMD_PG_SUPPORT_CP |
5670 AMD_PG_SUPPORT_GFX_DMG))
5671 adev->gfx.rlc.funcs->enter_safe_mode(adev);
5667 switch (adev->asic_type) { 5672 switch (adev->asic_type) {
5668 case CHIP_CARRIZO: 5673 case CHIP_CARRIZO:
5669 case CHIP_STONEY: 5674 case CHIP_STONEY:
@@ -5713,7 +5718,11 @@ static int gfx_v8_0_set_powergating_state(void *handle,
5713 default: 5718 default:
5714 break; 5719 break;
5715 } 5720 }
5716 5721 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG |
5722 AMD_PG_SUPPORT_RLC_SMU_HS |
5723 AMD_PG_SUPPORT_CP |
5724 AMD_PG_SUPPORT_GFX_DMG))
5725 adev->gfx.rlc.funcs->exit_safe_mode(adev);
5717 return 0; 5726 return 0;
5718} 5727}
5719 5728
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 75317f283c69..ad151fefa41f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -632,12 +632,6 @@ static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
632 amdgpu_gart_table_vram_unpin(adev); 632 amdgpu_gart_table_vram_unpin(adev);
633} 633}
634 634
635static void gmc_v6_0_gart_fini(struct amdgpu_device *adev)
636{
637 amdgpu_gart_table_vram_free(adev);
638 amdgpu_gart_fini(adev);
639}
640
641static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev, 635static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
642 u32 status, u32 addr, u32 mc_client) 636 u32 status, u32 addr, u32 mc_client)
643{ 637{
@@ -935,8 +929,9 @@ static int gmc_v6_0_sw_fini(void *handle)
935 929
936 amdgpu_gem_force_release(adev); 930 amdgpu_gem_force_release(adev);
937 amdgpu_vm_manager_fini(adev); 931 amdgpu_vm_manager_fini(adev);
938 gmc_v6_0_gart_fini(adev); 932 amdgpu_gart_table_vram_free(adev);
939 amdgpu_bo_fini(adev); 933 amdgpu_bo_fini(adev);
934 amdgpu_gart_fini(adev);
940 release_firmware(adev->gmc.fw); 935 release_firmware(adev->gmc.fw);
941 adev->gmc.fw = NULL; 936 adev->gmc.fw = NULL;
942 937
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 36dc367c4b45..f8d8a3a73e42 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -747,19 +747,6 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
747} 747}
748 748
749/** 749/**
750 * gmc_v7_0_gart_fini - vm fini callback
751 *
752 * @adev: amdgpu_device pointer
753 *
754 * Tears down the driver GART/VM setup (CIK).
755 */
756static void gmc_v7_0_gart_fini(struct amdgpu_device *adev)
757{
758 amdgpu_gart_table_vram_free(adev);
759 amdgpu_gart_fini(adev);
760}
761
762/**
763 * gmc_v7_0_vm_decode_fault - print human readable fault info 750 * gmc_v7_0_vm_decode_fault - print human readable fault info
764 * 751 *
765 * @adev: amdgpu_device pointer 752 * @adev: amdgpu_device pointer
@@ -1095,8 +1082,9 @@ static int gmc_v7_0_sw_fini(void *handle)
1095 amdgpu_gem_force_release(adev); 1082 amdgpu_gem_force_release(adev);
1096 amdgpu_vm_manager_fini(adev); 1083 amdgpu_vm_manager_fini(adev);
1097 kfree(adev->gmc.vm_fault_info); 1084 kfree(adev->gmc.vm_fault_info);
1098 gmc_v7_0_gart_fini(adev); 1085 amdgpu_gart_table_vram_free(adev);
1099 amdgpu_bo_fini(adev); 1086 amdgpu_bo_fini(adev);
1087 amdgpu_gart_fini(adev);
1100 release_firmware(adev->gmc.fw); 1088 release_firmware(adev->gmc.fw);
1101 adev->gmc.fw = NULL; 1089 adev->gmc.fw = NULL;
1102 1090
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 70fc97b59b4f..9333109b210d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -969,19 +969,6 @@ static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
969} 969}
970 970
971/** 971/**
972 * gmc_v8_0_gart_fini - vm fini callback
973 *
974 * @adev: amdgpu_device pointer
975 *
976 * Tears down the driver GART/VM setup (CIK).
977 */
978static void gmc_v8_0_gart_fini(struct amdgpu_device *adev)
979{
980 amdgpu_gart_table_vram_free(adev);
981 amdgpu_gart_fini(adev);
982}
983
984/**
985 * gmc_v8_0_vm_decode_fault - print human readable fault info 972 * gmc_v8_0_vm_decode_fault - print human readable fault info
986 * 973 *
987 * @adev: amdgpu_device pointer 974 * @adev: amdgpu_device pointer
@@ -1199,8 +1186,9 @@ static int gmc_v8_0_sw_fini(void *handle)
1199 amdgpu_gem_force_release(adev); 1186 amdgpu_gem_force_release(adev);
1200 amdgpu_vm_manager_fini(adev); 1187 amdgpu_vm_manager_fini(adev);
1201 kfree(adev->gmc.vm_fault_info); 1188 kfree(adev->gmc.vm_fault_info);
1202 gmc_v8_0_gart_fini(adev); 1189 amdgpu_gart_table_vram_free(adev);
1203 amdgpu_bo_fini(adev); 1190 amdgpu_bo_fini(adev);
1191 amdgpu_gart_fini(adev);
1204 release_firmware(adev->gmc.fw); 1192 release_firmware(adev->gmc.fw);
1205 adev->gmc.fw = NULL; 1193 adev->gmc.fw = NULL;
1206 1194
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 399a5db27649..72f8018fa2a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -942,26 +942,12 @@ static int gmc_v9_0_sw_init(void *handle)
942 return 0; 942 return 0;
943} 943}
944 944
945/**
946 * gmc_v9_0_gart_fini - vm fini callback
947 *
948 * @adev: amdgpu_device pointer
949 *
950 * Tears down the driver GART/VM setup (CIK).
951 */
952static void gmc_v9_0_gart_fini(struct amdgpu_device *adev)
953{
954 amdgpu_gart_table_vram_free(adev);
955 amdgpu_gart_fini(adev);
956}
957
958static int gmc_v9_0_sw_fini(void *handle) 945static int gmc_v9_0_sw_fini(void *handle)
959{ 946{
960 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 947 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
961 948
962 amdgpu_gem_force_release(adev); 949 amdgpu_gem_force_release(adev);
963 amdgpu_vm_manager_fini(adev); 950 amdgpu_vm_manager_fini(adev);
964 gmc_v9_0_gart_fini(adev);
965 951
966 /* 952 /*
967 * TODO: 953 * TODO:
@@ -974,7 +960,9 @@ static int gmc_v9_0_sw_fini(void *handle)
974 */ 960 */
975 amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL); 961 amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
976 962
963 amdgpu_gart_table_vram_free(adev);
977 amdgpu_bo_fini(adev); 964 amdgpu_bo_fini(adev);
965 amdgpu_gart_fini(adev);
978 966
979 return 0; 967 return 0;
980} 968}
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index 3f57f6463dc8..cb79a93c2eb7 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -65,8 +65,6 @@ static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
65 int min_temp, int max_temp); 65 int min_temp, int max_temp);
66static int kv_init_fps_limits(struct amdgpu_device *adev); 66static int kv_init_fps_limits(struct amdgpu_device *adev);
67 67
68static void kv_dpm_powergate_uvd(void *handle, bool gate);
69static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate);
70static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate); 68static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate);
71static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate); 69static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate);
72 70
@@ -1354,8 +1352,6 @@ static int kv_dpm_enable(struct amdgpu_device *adev)
1354 return ret; 1352 return ret;
1355 } 1353 }
1356 1354
1357 kv_update_current_ps(adev, adev->pm.dpm.boot_ps);
1358
1359 if (adev->irq.installed && 1355 if (adev->irq.installed &&
1360 amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) { 1356 amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) {
1361 ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX); 1357 ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX);
@@ -1374,6 +1370,8 @@ static int kv_dpm_enable(struct amdgpu_device *adev)
1374 1370
1375static void kv_dpm_disable(struct amdgpu_device *adev) 1371static void kv_dpm_disable(struct amdgpu_device *adev)
1376{ 1372{
1373 struct kv_power_info *pi = kv_get_pi(adev);
1374
1377 amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, 1375 amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
1378 AMDGPU_THERMAL_IRQ_LOW_TO_HIGH); 1376 AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
1379 amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, 1377 amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
@@ -1387,8 +1385,10 @@ static void kv_dpm_disable(struct amdgpu_device *adev)
1387 /* powerup blocks */ 1385 /* powerup blocks */
1388 kv_dpm_powergate_acp(adev, false); 1386 kv_dpm_powergate_acp(adev, false);
1389 kv_dpm_powergate_samu(adev, false); 1387 kv_dpm_powergate_samu(adev, false);
1390 kv_dpm_powergate_vce(adev, false); 1388 if (pi->caps_vce_pg) /* power on the VCE block */
1391 kv_dpm_powergate_uvd(adev, false); 1389 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
1390 if (pi->caps_uvd_pg) /* power on the UVD block */
1391 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON);
1392 1392
1393 kv_enable_smc_cac(adev, false); 1393 kv_enable_smc_cac(adev, false);
1394 kv_enable_didt(adev, false); 1394 kv_enable_didt(adev, false);
@@ -1551,7 +1551,6 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev,
1551 int ret; 1551 int ret;
1552 1552
1553 if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) { 1553 if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) {
1554 kv_dpm_powergate_vce(adev, false);
1555 if (pi->caps_stable_p_state) 1554 if (pi->caps_stable_p_state)
1556 pi->vce_boot_level = table->count - 1; 1555 pi->vce_boot_level = table->count - 1;
1557 else 1556 else
@@ -1573,7 +1572,6 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev,
1573 kv_enable_vce_dpm(adev, true); 1572 kv_enable_vce_dpm(adev, true);
1574 } else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) { 1573 } else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) {
1575 kv_enable_vce_dpm(adev, false); 1574 kv_enable_vce_dpm(adev, false);
1576 kv_dpm_powergate_vce(adev, true);
1577 } 1575 }
1578 1576
1579 return 0; 1577 return 0;
@@ -1702,24 +1700,32 @@ static void kv_dpm_powergate_uvd(void *handle, bool gate)
1702 } 1700 }
1703} 1701}
1704 1702
1705static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate) 1703static void kv_dpm_powergate_vce(void *handle, bool gate)
1706{ 1704{
1705 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1707 struct kv_power_info *pi = kv_get_pi(adev); 1706 struct kv_power_info *pi = kv_get_pi(adev);
1708 1707 int ret;
1709 if (pi->vce_power_gated == gate)
1710 return;
1711 1708
1712 pi->vce_power_gated = gate; 1709 pi->vce_power_gated = gate;
1713 1710
1714 if (!pi->caps_vce_pg) 1711 if (gate) {
1715 return; 1712 /* stop the VCE block */
1716 1713 ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
1717 if (gate) 1714 AMD_PG_STATE_GATE);
1718 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF); 1715 kv_enable_vce_dpm(adev, false);
1719 else 1716 if (pi->caps_vce_pg) /* power off the VCE block */
1720 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); 1717 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF);
1718 } else {
1719 if (pi->caps_vce_pg) /* power on the VCE block */
1720 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
1721 kv_enable_vce_dpm(adev, true);
1722 /* re-init the VCE block */
1723 ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
1724 AMD_PG_STATE_UNGATE);
1725 }
1721} 1726}
1722 1727
1728
1723static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate) 1729static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate)
1724{ 1730{
1725 struct kv_power_info *pi = kv_get_pi(adev); 1731 struct kv_power_info *pi = kv_get_pi(adev);
@@ -3061,7 +3067,7 @@ static int kv_dpm_hw_init(void *handle)
3061 else 3067 else
3062 adev->pm.dpm_enabled = true; 3068 adev->pm.dpm_enabled = true;
3063 mutex_unlock(&adev->pm.mutex); 3069 mutex_unlock(&adev->pm.mutex);
3064 3070 amdgpu_pm_compute_clocks(adev);
3065 return ret; 3071 return ret;
3066} 3072}
3067 3073
@@ -3313,6 +3319,9 @@ static int kv_set_powergating_by_smu(void *handle,
3313 case AMD_IP_BLOCK_TYPE_UVD: 3319 case AMD_IP_BLOCK_TYPE_UVD:
3314 kv_dpm_powergate_uvd(handle, gate); 3320 kv_dpm_powergate_uvd(handle, gate);
3315 break; 3321 break;
3322 case AMD_IP_BLOCK_TYPE_VCE:
3323 kv_dpm_powergate_vce(handle, gate);
3324 break;
3316 default: 3325 default:
3317 break; 3326 break;
3318 } 3327 }
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index e7ca4623cfb9..7c3b634d8d5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -70,6 +70,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
70 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0100, 0x00000100), 70 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0100, 0x00000100),
71 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000), 71 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
72 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0), 72 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
73 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000),
73 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07), 74 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
74 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100), 75 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
75 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_IB_CNTL, 0x800f0100, 0x00000100), 76 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_IB_CNTL, 0x800f0100, 0x00000100),
@@ -81,7 +82,8 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
81 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000), 82 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
82 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_IB_CNTL, 0x800f0100, 0x00000100), 83 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_IB_CNTL, 0x800f0100, 0x00000100),
83 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000), 84 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
84 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0) 85 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0),
86 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_WATERMK, 0xfc000000, 0x00000000)
85}; 87};
86 88
87static const struct soc15_reg_golden golden_settings_sdma_vg10[] = { 89static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
@@ -109,7 +111,8 @@ static const struct soc15_reg_golden golden_settings_sdma_4_1[] =
109 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), 111 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
110 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100), 112 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100),
111 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), 113 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
112 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0) 114 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
115 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000)
113}; 116};
114 117
115static const struct soc15_reg_golden golden_settings_sdma_4_2[] = 118static const struct soc15_reg_golden golden_settings_sdma_4_2[] =
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index db327b412562..1de96995e690 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -6887,7 +6887,6 @@ static int si_dpm_enable(struct amdgpu_device *adev)
6887 6887
6888 si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true); 6888 si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
6889 si_thermal_start_thermal_controller(adev); 6889 si_thermal_start_thermal_controller(adev);
6890 ni_update_current_ps(adev, boot_ps);
6891 6890
6892 return 0; 6891 return 0;
6893} 6892}
@@ -7763,7 +7762,7 @@ static int si_dpm_hw_init(void *handle)
7763 else 7762 else
7764 adev->pm.dpm_enabled = true; 7763 adev->pm.dpm_enabled = true;
7765 mutex_unlock(&adev->pm.mutex); 7764 mutex_unlock(&adev->pm.mutex);
7766 7765 amdgpu_pm_compute_clocks(adev);
7767 return ret; 7766 return ret;
7768} 7767}
7769 7768
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
index fbe878ae1e8c..4ba0003a9d32 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -480,12 +480,20 @@ void pp_rv_set_display_requirement(struct pp_smu *pp,
480{ 480{
481 struct dc_context *ctx = pp->ctx; 481 struct dc_context *ctx = pp->ctx;
482 struct amdgpu_device *adev = ctx->driver_context; 482 struct amdgpu_device *adev = ctx->driver_context;
483 void *pp_handle = adev->powerplay.pp_handle;
483 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 484 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
485 struct pp_display_clock_request clock = {0};
484 486
485 if (!pp_funcs || !pp_funcs->display_configuration_changed) 487 if (!pp_funcs || !pp_funcs->display_clock_voltage_request)
486 return; 488 return;
487 489
488 amdgpu_dpm_display_configuration_changed(adev); 490 clock.clock_type = amd_pp_dcf_clock;
491 clock.clock_freq_in_khz = req->hard_min_dcefclk_khz;
492 pp_funcs->display_clock_voltage_request(pp_handle, &clock);
493
494 clock.clock_type = amd_pp_f_clock;
495 clock.clock_freq_in_khz = req->hard_min_fclk_khz;
496 pp_funcs->display_clock_voltage_request(pp_handle, &clock);
489} 497}
490 498
491void pp_rv_set_wm_ranges(struct pp_smu *pp, 499void pp_rv_set_wm_ranges(struct pp_smu *pp,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 567867915d32..37eaf72ace54 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -754,8 +754,12 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
754 * fail-safe mode 754 * fail-safe mode
755 */ 755 */
756 if (dc_is_hdmi_signal(link->connector_signal) || 756 if (dc_is_hdmi_signal(link->connector_signal) ||
757 dc_is_dvi_signal(link->connector_signal)) 757 dc_is_dvi_signal(link->connector_signal)) {
758 if (prev_sink != NULL)
759 dc_sink_release(prev_sink);
760
758 return false; 761 return false;
762 }
759 default: 763 default:
760 break; 764 break;
761 } 765 }
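
The dc_link fix plugs a reference leak: the early return for HDMI/DVI fail-safe mode skipped the dc_sink_release(prev_sink) every other exit performs. The general rule is that each early return after a reference is taken needs its own release; a compact illustration with hypothetical refcount helpers:

```c
struct sink;				/* opaque, refcounted */
void sink_put(struct sink *s);		/* hypothetical release */
bool fail_safe_mode(void);		/* hypothetical predicate */

static bool detect(struct sink *prev_sink)
{
	if (fail_safe_mode()) {
		if (prev_sink)
			sink_put(prev_sink);	/* the release the fix adds */
		return false;
	}

	/* ... normal path drops prev_sink on its own exits ... */
	if (prev_sink)
		sink_put(prev_sink);
	return true;
}
```
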
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 6e3f56684f4e..51ed99a37803 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -170,20 +170,22 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
170 unsigned int tiling_mode = 0; 170 unsigned int tiling_mode = 0;
171 unsigned int stride = 0; 171 unsigned int stride = 0;
172 172
173 switch (info->drm_format_mod << 10) { 173 switch (info->drm_format_mod) {
174 case PLANE_CTL_TILED_LINEAR: 174 case DRM_FORMAT_MOD_LINEAR:
175 tiling_mode = I915_TILING_NONE; 175 tiling_mode = I915_TILING_NONE;
176 break; 176 break;
177 case PLANE_CTL_TILED_X: 177 case I915_FORMAT_MOD_X_TILED:
178 tiling_mode = I915_TILING_X; 178 tiling_mode = I915_TILING_X;
179 stride = info->stride; 179 stride = info->stride;
180 break; 180 break;
181 case PLANE_CTL_TILED_Y: 181 case I915_FORMAT_MOD_Y_TILED:
182 case I915_FORMAT_MOD_Yf_TILED:
182 tiling_mode = I915_TILING_Y; 183 tiling_mode = I915_TILING_Y;
183 stride = info->stride; 184 stride = info->stride;
184 break; 185 break;
185 default: 186 default:
186 gvt_dbg_core("not supported tiling mode\n"); 187 gvt_dbg_core("invalid drm_format_mod %llx for tiling\n",
188 info->drm_format_mod);
187 } 189 }
188 obj->tiling_and_stride = tiling_mode | stride; 190 obj->tiling_and_stride = tiling_mode | stride;
189 } else { 191 } else {
@@ -222,9 +224,26 @@ static int vgpu_get_plane_info(struct drm_device *dev,
222 info->height = p.height; 224 info->height = p.height;
223 info->stride = p.stride; 225 info->stride = p.stride;
224 info->drm_format = p.drm_format; 226 info->drm_format = p.drm_format;
225 info->drm_format_mod = p.tiled; 227
228 switch (p.tiled) {
229 case PLANE_CTL_TILED_LINEAR:
230 info->drm_format_mod = DRM_FORMAT_MOD_LINEAR;
231 break;
232 case PLANE_CTL_TILED_X:
233 info->drm_format_mod = I915_FORMAT_MOD_X_TILED;
234 break;
235 case PLANE_CTL_TILED_Y:
236 info->drm_format_mod = I915_FORMAT_MOD_Y_TILED;
237 break;
238 case PLANE_CTL_TILED_YF:
239 info->drm_format_mod = I915_FORMAT_MOD_Yf_TILED;
240 break;
241 default:
242 gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled);
243 }
244
226 info->size = (((p.stride * p.height * p.bpp) / 8) + 245 info->size = (((p.stride * p.height * p.bpp) / 8) +
227 (PAGE_SIZE - 1)) >> PAGE_SHIFT; 246 (PAGE_SIZE - 1)) >> PAGE_SHIFT;
228 } else if (plane_id == DRM_PLANE_TYPE_CURSOR) { 247 } else if (plane_id == DRM_PLANE_TYPE_CURSOR) {
229 ret = intel_vgpu_decode_cursor_plane(vgpu, &c); 248 ret = intel_vgpu_decode_cursor_plane(vgpu, &c);
230 if (ret) 249 if (ret)
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index face664be3e8..481896fb712a 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -220,8 +220,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
220 if (IS_SKYLAKE(dev_priv) 220 if (IS_SKYLAKE(dev_priv)
221 || IS_KABYLAKE(dev_priv) 221 || IS_KABYLAKE(dev_priv)
222 || IS_BROXTON(dev_priv)) { 222 || IS_BROXTON(dev_priv)) {
223 plane->tiled = (val & PLANE_CTL_TILED_MASK) >> 223 plane->tiled = val & PLANE_CTL_TILED_MASK;
224 _PLANE_CTL_TILED_SHIFT;
225 fmt = skl_format_to_drm( 224 fmt = skl_format_to_drm(
226 val & PLANE_CTL_FORMAT_MASK, 225 val & PLANE_CTL_FORMAT_MASK,
227 val & PLANE_CTL_ORDER_RGBX, 226 val & PLANE_CTL_ORDER_RGBX,
@@ -260,7 +259,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
260 return -EINVAL; 259 return -EINVAL;
261 } 260 }
262 261
263 plane->stride = intel_vgpu_get_stride(vgpu, pipe, (plane->tiled << 10), 262 plane->stride = intel_vgpu_get_stride(vgpu, pipe, plane->tiled,
264 (IS_SKYLAKE(dev_priv) 263 (IS_SKYLAKE(dev_priv)
265 || IS_KABYLAKE(dev_priv) 264 || IS_KABYLAKE(dev_priv)
266 || IS_BROXTON(dev_priv)) ? 265 || IS_BROXTON(dev_priv)) ?
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.h b/drivers/gpu/drm/i915/gvt/fb_decoder.h
index cb055f3c81a2..60c155085029 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.h
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.h
@@ -101,7 +101,7 @@ struct intel_gvt;
101/* color space conversion and gamma correction are not included */ 101/* color space conversion and gamma correction are not included */
102struct intel_vgpu_primary_plane_format { 102struct intel_vgpu_primary_plane_format {
103 u8 enabled; /* plane is enabled */ 103 u8 enabled; /* plane is enabled */
104 u8 tiled; /* X-tiled */ 104 u32 tiled; /* tiling mode: linear, X-tiled, Y tiled, etc */
105 u8 bpp; /* bits per pixel */ 105 u8 bpp; /* bits per pixel */
106 u32 hw_format; /* format field in the PRI_CTL register */ 106 u32 hw_format; /* format field in the PRI_CTL register */
107 u32 drm_format; /* format in DRM definition */ 107 u32 drm_format; /* format in DRM definition */
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 7a58ca555197..72afa518edd9 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1296,6 +1296,19 @@ static int power_well_ctl_mmio_write(struct intel_vgpu *vgpu,
1296 return 0; 1296 return 0;
1297} 1297}
1298 1298
1299static int gen9_dbuf_ctl_mmio_write(struct intel_vgpu *vgpu,
1300 unsigned int offset, void *p_data, unsigned int bytes)
1301{
1302 write_vreg(vgpu, offset, p_data, bytes);
1303
1304 if (vgpu_vreg(vgpu, offset) & DBUF_POWER_REQUEST)
1305 vgpu_vreg(vgpu, offset) |= DBUF_POWER_STATE;
1306 else
1307 vgpu_vreg(vgpu, offset) &= ~DBUF_POWER_STATE;
1308
1309 return 0;
1310}
1311
1299static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu, 1312static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu,
1300 unsigned int offset, void *p_data, unsigned int bytes) 1313 unsigned int offset, void *p_data, unsigned int bytes)
1301{ 1314{
@@ -1525,9 +1538,15 @@ static int bxt_phy_ctl_family_write(struct intel_vgpu *vgpu,
1525 u32 v = *(u32 *)p_data; 1538 u32 v = *(u32 *)p_data;
1526 u32 data = v & COMMON_RESET_DIS ? BXT_PHY_LANE_ENABLED : 0; 1539 u32 data = v & COMMON_RESET_DIS ? BXT_PHY_LANE_ENABLED : 0;
1527 1540
1528 vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data; 1541 switch (offset) {
1529 vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data; 1542 case _PHY_CTL_FAMILY_EDP:
1530 vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data; 1543 vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data;
1544 break;
1545 case _PHY_CTL_FAMILY_DDI:
1546 vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data;
1547 vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data;
1548 break;
1549 }
1531 1550
1532 vgpu_vreg(vgpu, offset) = v; 1551 vgpu_vreg(vgpu, offset) = v;
1533 1552
@@ -2812,6 +2831,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
2812 MMIO_DH(HSW_PWR_WELL_CTL_DRIVER(SKL_DISP_PW_MISC_IO), D_SKL_PLUS, NULL, 2831 MMIO_DH(HSW_PWR_WELL_CTL_DRIVER(SKL_DISP_PW_MISC_IO), D_SKL_PLUS, NULL,
2813 skl_power_well_ctl_write); 2832 skl_power_well_ctl_write);
2814 2833
2834 MMIO_DH(DBUF_CTL, D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write);
2835
2815 MMIO_D(_MMIO(0xa210), D_SKL_PLUS); 2836 MMIO_D(_MMIO(0xa210), D_SKL_PLUS);
2816 MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS); 2837 MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
2817 MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS); 2838 MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
@@ -2987,8 +3008,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
2987 NULL, gen9_trtte_write); 3008 NULL, gen9_trtte_write);
2988 MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write); 3009 MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write);
2989 3010
2990 MMIO_D(_MMIO(0x45008), D_SKL_PLUS);
2991
2992 MMIO_D(_MMIO(0x46430), D_SKL_PLUS); 3011 MMIO_D(_MMIO(0x46430), D_SKL_PLUS);
2993 3012
2994 MMIO_D(_MMIO(0x46520), D_SKL_PLUS); 3013 MMIO_D(_MMIO(0x46520), D_SKL_PLUS);
@@ -3025,7 +3044,9 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
3025 MMIO_D(_MMIO(0x44500), D_SKL_PLUS); 3044 MMIO_D(_MMIO(0x44500), D_SKL_PLUS);
3026 MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); 3045 MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
3027 MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, 3046 MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
3028 NULL, NULL); 3047 NULL, NULL);
3048 MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
3049 NULL, NULL);
3029 3050
3030 MMIO_D(_MMIO(0x4ab8), D_KBL); 3051 MMIO_D(_MMIO(0x4ab8), D_KBL);
3031 MMIO_D(_MMIO(0x2248), D_KBL | D_SKL); 3052 MMIO_D(_MMIO(0x2248), D_KBL | D_SKL);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index a45f46d8537f..c7afee37b2b8 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -32,6 +32,7 @@
32#include <linux/device.h> 32#include <linux/device.h>
33#include <linux/mm.h> 33#include <linux/mm.h>
34#include <linux/mmu_context.h> 34#include <linux/mmu_context.h>
35#include <linux/sched/mm.h>
35#include <linux/types.h> 36#include <linux/types.h>
36#include <linux/list.h> 37#include <linux/list.h>
37#include <linux/rbtree.h> 38#include <linux/rbtree.h>
@@ -1792,16 +1793,21 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
1792 info = (struct kvmgt_guest_info *)handle; 1793 info = (struct kvmgt_guest_info *)handle;
1793 kvm = info->kvm; 1794 kvm = info->kvm;
1794 1795
1795 if (kthread) 1796 if (kthread) {
1797 if (!mmget_not_zero(kvm->mm))
1798 return -EFAULT;
1796 use_mm(kvm->mm); 1799 use_mm(kvm->mm);
1800 }
1797 1801
1798 idx = srcu_read_lock(&kvm->srcu); 1802 idx = srcu_read_lock(&kvm->srcu);
1799 ret = write ? kvm_write_guest(kvm, gpa, buf, len) : 1803 ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
1800 kvm_read_guest(kvm, gpa, buf, len); 1804 kvm_read_guest(kvm, gpa, buf, len);
1801 srcu_read_unlock(&kvm->srcu, idx); 1805 srcu_read_unlock(&kvm->srcu, idx);
1802 1806
1803 if (kthread) 1807 if (kthread) {
1804 unuse_mm(kvm->mm); 1808 unuse_mm(kvm->mm);
1809 mmput(kvm->mm);
1810 }
1805 1811
1806 return ret; 1812 return ret;
1807} 1813}
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 42e1e6bdcc2c..e872f4847fbe 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -562,11 +562,9 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
562 * performace for batch mmio read/write, so we need 562 * performace for batch mmio read/write, so we need
563 * handle forcewake mannually. 563 * handle forcewake mannually.
564 */ 564 */
565 intel_runtime_pm_get(dev_priv);
566 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 565 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
567 switch_mmio(pre, next, ring_id); 566 switch_mmio(pre, next, ring_id);
568 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 567 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
569 intel_runtime_pm_put(dev_priv);
570} 568}
571 569
572/** 570/**
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index fa75a2eead90..b0d3a43ccd03 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -42,8 +42,6 @@
42#define DEVICE_TYPE_EFP3 0x20 42#define DEVICE_TYPE_EFP3 0x20
43#define DEVICE_TYPE_EFP4 0x10 43#define DEVICE_TYPE_EFP4 0x10
44 44
45#define DEV_SIZE 38
46
47struct opregion_header { 45struct opregion_header {
48 u8 signature[16]; 46 u8 signature[16];
49 u32 size; 47 u32 size;
@@ -63,6 +61,10 @@ struct bdb_data_header {
63 u16 size; /* data size */ 61 u16 size; /* data size */
64} __packed; 62} __packed;
65 63
64/* For supporting windows guest with opregion, here hardcode the emulated
65 * bdb header version as '186', and the corresponding child_device_config
66 * length should be '33' but not '38'.
67 */
66struct efp_child_device_config { 68struct efp_child_device_config {
67 u16 handle; 69 u16 handle;
68 u16 device_type; 70 u16 device_type;
@@ -109,12 +111,6 @@ struct efp_child_device_config {
109 u8 mipi_bridge_type; /* 171 */ 111 u8 mipi_bridge_type; /* 171 */
110 u16 device_class_ext; 112 u16 device_class_ext;
111 u8 dvo_function; 113 u8 dvo_function;
112 u8 dp_usb_type_c:1; /* 195 */
113 u8 skip6:7;
114 u8 dp_usb_type_c_2x_gpio_index; /* 195 */
115 u16 dp_usb_type_c_2x_gpio_pin; /* 195 */
116 u8 iboost_dp:4; /* 196 */
117 u8 iboost_hdmi:4; /* 196 */
118} __packed; 114} __packed;
119 115
120struct vbt { 116struct vbt {
@@ -155,7 +151,7 @@ static void virt_vbt_generation(struct vbt *v)
155 v->header.bdb_offset = offsetof(struct vbt, bdb_header); 151 v->header.bdb_offset = offsetof(struct vbt, bdb_header);
156 152
157 strcpy(&v->bdb_header.signature[0], "BIOS_DATA_BLOCK"); 153 strcpy(&v->bdb_header.signature[0], "BIOS_DATA_BLOCK");
158 v->bdb_header.version = 186; /* child_dev_size = 38 */ 154 v->bdb_header.version = 186; /* child_dev_size = 33 */
159 v->bdb_header.header_size = sizeof(v->bdb_header); 155 v->bdb_header.header_size = sizeof(v->bdb_header);
160 156
161 v->bdb_header.bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header) 157 v->bdb_header.bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header)
@@ -169,11 +165,13 @@ static void virt_vbt_generation(struct vbt *v)
169 165
170 /* child device */ 166 /* child device */
171 num_child = 4; /* each port has one child */ 167 num_child = 4; /* each port has one child */
168 v->general_definitions.child_dev_size =
169 sizeof(struct efp_child_device_config);
172 v->general_definitions_header.id = BDB_GENERAL_DEFINITIONS; 170 v->general_definitions_header.id = BDB_GENERAL_DEFINITIONS;
173 /* size will include child devices */ 171 /* size will include child devices */
174 v->general_definitions_header.size = 172 v->general_definitions_header.size =
175 sizeof(struct bdb_general_definitions) + num_child * DEV_SIZE; 173 sizeof(struct bdb_general_definitions) +
176 v->general_definitions.child_dev_size = DEV_SIZE; 174 num_child * v->general_definitions.child_dev_size;
177 175
178 /* portA */ 176 /* portA */
179 v->child0.handle = DEVICE_TYPE_EFP1; 177 v->child0.handle = DEVICE_TYPE_EFP1;
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 09d7bb72b4ff..c32e7d5e8629 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -47,11 +47,15 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
47 return false; 47 return false;
48} 48}
49 49
50/* We give 2 seconds higher prio for vGPU during start */
51#define GVT_SCHED_VGPU_PRI_TIME 2
52
50struct vgpu_sched_data { 53struct vgpu_sched_data {
51 struct list_head lru_list; 54 struct list_head lru_list;
52 struct intel_vgpu *vgpu; 55 struct intel_vgpu *vgpu;
53 bool active; 56 bool active;
54 57 bool pri_sched;
58 ktime_t pri_time;
55 ktime_t sched_in_time; 59 ktime_t sched_in_time;
56 ktime_t sched_time; 60 ktime_t sched_time;
57 ktime_t left_ts; 61 ktime_t left_ts;
@@ -183,6 +187,14 @@ static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data)
183 if (!vgpu_has_pending_workload(vgpu_data->vgpu)) 187 if (!vgpu_has_pending_workload(vgpu_data->vgpu))
184 continue; 188 continue;
185 189
190 if (vgpu_data->pri_sched) {
191 if (ktime_before(ktime_get(), vgpu_data->pri_time)) {
192 vgpu = vgpu_data->vgpu;
193 break;
194 } else
195 vgpu_data->pri_sched = false;
196 }
197
186 /* Return the vGPU only if it has time slice left */ 198 /* Return the vGPU only if it has time slice left */
187 if (vgpu_data->left_ts > 0) { 199 if (vgpu_data->left_ts > 0) {
188 vgpu = vgpu_data->vgpu; 200 vgpu = vgpu_data->vgpu;
@@ -202,6 +214,7 @@ static void tbs_sched_func(struct gvt_sched_data *sched_data)
202 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; 214 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
203 struct vgpu_sched_data *vgpu_data; 215 struct vgpu_sched_data *vgpu_data;
204 struct intel_vgpu *vgpu = NULL; 216 struct intel_vgpu *vgpu = NULL;
217
205 /* no active vgpu or has already had a target */ 218 /* no active vgpu or has already had a target */
206 if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu) 219 if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu)
207 goto out; 220 goto out;
@@ -209,12 +222,13 @@ static void tbs_sched_func(struct gvt_sched_data *sched_data)
209 vgpu = find_busy_vgpu(sched_data); 222 vgpu = find_busy_vgpu(sched_data);
210 if (vgpu) { 223 if (vgpu) {
211 scheduler->next_vgpu = vgpu; 224 scheduler->next_vgpu = vgpu;
212
213 /* Move the last used vGPU to the tail of lru_list */
214 vgpu_data = vgpu->sched_data; 225 vgpu_data = vgpu->sched_data;
215 list_del_init(&vgpu_data->lru_list); 226 if (!vgpu_data->pri_sched) {
216 list_add_tail(&vgpu_data->lru_list, 227 /* Move the last used vGPU to the tail of lru_list */
217 &sched_data->lru_runq_head); 228 list_del_init(&vgpu_data->lru_list);
229 list_add_tail(&vgpu_data->lru_list,
230 &sched_data->lru_runq_head);
231 }
218 } else { 232 } else {
219 scheduler->next_vgpu = gvt->idle_vgpu; 233 scheduler->next_vgpu = gvt->idle_vgpu;
220 } 234 }
@@ -328,11 +342,17 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
328{ 342{
329 struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data; 343 struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
330 struct vgpu_sched_data *vgpu_data = vgpu->sched_data; 344 struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
345 ktime_t now;
331 346
332 if (!list_empty(&vgpu_data->lru_list)) 347 if (!list_empty(&vgpu_data->lru_list))
333 return; 348 return;
334 349
335 list_add_tail(&vgpu_data->lru_list, &sched_data->lru_runq_head); 350 now = ktime_get();
351 vgpu_data->pri_time = ktime_add(now,
352 ktime_set(GVT_SCHED_VGPU_PRI_TIME, 0));
353 vgpu_data->pri_sched = true;
354
355 list_add(&vgpu_data->lru_list, &sched_data->lru_runq_head);
336 356
337 if (!hrtimer_active(&sched_data->timer)) 357 if (!hrtimer_active(&sched_data->timer))
338 hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(), 358 hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(),
@@ -426,6 +446,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
426 &vgpu->gvt->scheduler; 446 &vgpu->gvt->scheduler;
427 int ring_id; 447 int ring_id;
428 struct vgpu_sched_data *vgpu_data = vgpu->sched_data; 448 struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
449 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
429 450
430 if (!vgpu_data->active) 451 if (!vgpu_data->active)
431 return; 452 return;
@@ -444,6 +465,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
444 scheduler->current_vgpu = NULL; 465 scheduler->current_vgpu = NULL;
445 } 466 }
446 467
468 intel_runtime_pm_get(dev_priv);
447 spin_lock_bh(&scheduler->mmio_context_lock); 469 spin_lock_bh(&scheduler->mmio_context_lock);
448 for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) { 470 for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
449 if (scheduler->engine_owner[ring_id] == vgpu) { 471 if (scheduler->engine_owner[ring_id] == vgpu) {
@@ -452,5 +474,6 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
452 } 474 }
453 } 475 }
454 spin_unlock_bh(&scheduler->mmio_context_lock); 476 spin_unlock_bh(&scheduler->mmio_context_lock);
477 intel_runtime_pm_put(dev_priv);
455 mutex_unlock(&vgpu->gvt->sched_lock); 478 mutex_unlock(&vgpu->gvt->sched_lock);
456} 479}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 08ec7446282e..9e63cd47b60f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -10422,7 +10422,7 @@ enum skl_power_gate {
10422 _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \ 10422 _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \
10423 _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC) 10423 _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC)
10424#define ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 10424#define ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
10425 _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \ 10425 _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB, \
10426 _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC) 10426 _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC)
10427#define DSC_INITIAL_DEC_DELAY(dec_delay) ((dec_delay) << 16) 10427#define DSC_INITIAL_DEC_DELAY(dec_delay) ((dec_delay) << 16)
10428#define DSC_INITIAL_XMIT_DELAY(xmit_delay) ((xmit_delay) << 0) 10428#define DSC_INITIAL_XMIT_DELAY(xmit_delay) ((xmit_delay) << 0)
@@ -10437,7 +10437,7 @@ enum skl_power_gate {
10437 _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB, \ 10437 _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB, \
10438 _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC) 10438 _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC)
10439#define ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 10439#define ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
10440 _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC, \ 10440 _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB, \
10441 _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC) 10441 _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC)
10442#define DSC_SCALE_DEC_INTINT(scale_dec) ((scale_dec) << 16) 10442#define DSC_SCALE_DEC_INTINT(scale_dec) ((scale_dec) << 16)
10443#define DSC_SCALE_INC_INT(scale_inc) ((scale_inc) << 0) 10443#define DSC_SCALE_INC_INT(scale_inc) ((scale_inc) << 0)
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 11d834f94220..98358b4b36de 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -199,7 +199,6 @@ vma_create(struct drm_i915_gem_object *obj,
199 vma->flags |= I915_VMA_GGTT; 199 vma->flags |= I915_VMA_GGTT;
200 list_add(&vma->obj_link, &obj->vma_list); 200 list_add(&vma->obj_link, &obj->vma_list);
201 } else { 201 } else {
202 i915_ppgtt_get(i915_vm_to_ppgtt(vm));
203 list_add_tail(&vma->obj_link, &obj->vma_list); 202 list_add_tail(&vma->obj_link, &obj->vma_list);
204 } 203 }
205 204
@@ -807,9 +806,6 @@ static void __i915_vma_destroy(struct i915_vma *vma)
807 if (vma->obj) 806 if (vma->obj)
808 rb_erase(&vma->obj_node, &vma->obj->vma_tree); 807 rb_erase(&vma->obj_node, &vma->obj->vma_tree);
809 808
810 if (!i915_vma_is_ggtt(vma))
811 i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
812
813 rbtree_postorder_for_each_entry_safe(iter, n, &vma->active, node) { 809 rbtree_postorder_for_each_entry_safe(iter, n, &vma->active, node) {
814 GEM_BUG_ON(i915_gem_active_isset(&iter->base)); 810 GEM_BUG_ON(i915_gem_active_isset(&iter->base));
815 kfree(iter); 811 kfree(iter);
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index b725835b47ef..769f3f586661 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -962,9 +962,6 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv)
962{ 962{
963 int ret; 963 int ret;
964 964
965 if (INTEL_INFO(dev_priv)->num_pipes == 0)
966 return;
967
968 ret = component_add(dev_priv->drm.dev, &i915_audio_component_bind_ops); 965 ret = component_add(dev_priv->drm.dev, &i915_audio_component_bind_ops);
969 if (ret < 0) { 966 if (ret < 0) {
970 DRM_ERROR("failed to add audio component (%d)\n", ret); 967 DRM_ERROR("failed to add audio component (%d)\n", ret);
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 8761513f3532..c9af34861d9e 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -2708,7 +2708,8 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
2708 if (port != PORT_A || INTEL_GEN(dev_priv) >= 9) 2708 if (port != PORT_A || INTEL_GEN(dev_priv) >= 9)
2709 intel_dp_stop_link_train(intel_dp); 2709 intel_dp_stop_link_train(intel_dp);
2710 2710
2711 intel_ddi_enable_pipe_clock(crtc_state); 2711 if (!is_mst)
2712 intel_ddi_enable_pipe_clock(crtc_state);
2712} 2713}
2713 2714
2714static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, 2715static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
@@ -2810,14 +2811,14 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
2810 bool is_mst = intel_crtc_has_type(old_crtc_state, 2811 bool is_mst = intel_crtc_has_type(old_crtc_state,
2811 INTEL_OUTPUT_DP_MST); 2812 INTEL_OUTPUT_DP_MST);
2812 2813
2813 intel_ddi_disable_pipe_clock(old_crtc_state); 2814 if (!is_mst) {
2814 2815 intel_ddi_disable_pipe_clock(old_crtc_state);
2815 /* 2816 /*
2816 * Power down sink before disabling the port, otherwise we end 2817 * Power down sink before disabling the port, otherwise we end
2817 * up getting interrupts from the sink on detecting link loss. 2818 * up getting interrupts from the sink on detecting link loss.
2818 */ 2819 */
2819 if (!is_mst)
2820 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); 2820 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
2821 }
2821 2822
2822 intel_disable_ddi_buf(encoder); 2823 intel_disable_ddi_buf(encoder);
2823 2824
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index ed3fa1c8a983..d2951096bca0 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2988,6 +2988,7 @@ static int skl_check_main_surface(const struct intel_crtc_state *crtc_state,
2988 int w = drm_rect_width(&plane_state->base.src) >> 16; 2988 int w = drm_rect_width(&plane_state->base.src) >> 16;
2989 int h = drm_rect_height(&plane_state->base.src) >> 16; 2989 int h = drm_rect_height(&plane_state->base.src) >> 16;
2990 int dst_x = plane_state->base.dst.x1; 2990 int dst_x = plane_state->base.dst.x1;
2991 int dst_w = drm_rect_width(&plane_state->base.dst);
2991 int pipe_src_w = crtc_state->pipe_src_w; 2992 int pipe_src_w = crtc_state->pipe_src_w;
2992 int max_width = skl_max_plane_width(fb, 0, rotation); 2993 int max_width = skl_max_plane_width(fb, 0, rotation);
2993 int max_height = 4096; 2994 int max_height = 4096;
@@ -3009,10 +3010,10 @@ static int skl_check_main_surface(const struct intel_crtc_state *crtc_state,
3009 * screen may cause FIFO underflow and display corruption. 3010 * screen may cause FIFO underflow and display corruption.
3010 */ 3011 */
3011 if ((IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) && 3012 if ((IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
3012 (dst_x + w < 4 || dst_x > pipe_src_w - 4)) { 3013 (dst_x + dst_w < 4 || dst_x > pipe_src_w - 4)) {
3013 DRM_DEBUG_KMS("requested plane X %s position %d invalid (valid range %d-%d)\n", 3014 DRM_DEBUG_KMS("requested plane X %s position %d invalid (valid range %d-%d)\n",
3014 dst_x + w < 4 ? "end" : "start", 3015 dst_x + dst_w < 4 ? "end" : "start",
3015 dst_x + w < 4 ? dst_x + w : dst_x, 3016 dst_x + dst_w < 4 ? dst_x + dst_w : dst_x,
3016 4, pipe_src_w - 4); 3017 4, pipe_src_w - 4);
3017 return -ERANGE; 3018 return -ERANGE;
3018 } 3019 }
@@ -5078,10 +5079,14 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
5078 mutex_lock(&dev_priv->pcu_lock); 5079 mutex_lock(&dev_priv->pcu_lock);
5079 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0)); 5080 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
5080 mutex_unlock(&dev_priv->pcu_lock); 5081 mutex_unlock(&dev_priv->pcu_lock);
5081 /* wait for pcode to finish disabling IPS, which may take up to 42ms */ 5082 /*
5083 * Wait for PCODE to finish disabling IPS. The BSpec specified
5084 * 42ms timeout value leads to occasional timeouts so use 100ms
5085 * instead.
5086 */
5082 if (intel_wait_for_register(dev_priv, 5087 if (intel_wait_for_register(dev_priv,
5083 IPS_CTL, IPS_ENABLE, 0, 5088 IPS_CTL, IPS_ENABLE, 0,
5084 42)) 5089 100))
5085 DRM_ERROR("Timed out waiting for IPS disable\n"); 5090 DRM_ERROR("Timed out waiting for IPS disable\n");
5086 } else { 5091 } else {
5087 I915_WRITE(IPS_CTL, 0); 5092 I915_WRITE(IPS_CTL, 0);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index cd0f649b57a5..1193202766a2 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -4160,18 +4160,6 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
4160 return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count); 4160 return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
4161} 4161}
4162 4162
4163/*
4164 * If display is now connected check links status,
4165 * there has been known issues of link loss triggering
4166 * long pulse.
4167 *
4168 * Some sinks (eg. ASUS PB287Q) seem to perform some
4169 * weird HPD ping pong during modesets. So we can apparently
4170 * end up with HPD going low during a modeset, and then
4171 * going back up soon after. And once that happens we must
4172 * retrain the link to get a picture. That's in case no
4173 * userspace component reacted to intermittent HPD dip.
4174 */
4175int intel_dp_retrain_link(struct intel_encoder *encoder, 4163int intel_dp_retrain_link(struct intel_encoder *encoder,
4176 struct drm_modeset_acquire_ctx *ctx) 4164 struct drm_modeset_acquire_ctx *ctx)
4177{ 4165{
@@ -4661,7 +4649,8 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
4661} 4649}
4662 4650
4663static int 4651static int
4664intel_dp_long_pulse(struct intel_connector *connector) 4652intel_dp_long_pulse(struct intel_connector *connector,
4653 struct drm_modeset_acquire_ctx *ctx)
4665{ 4654{
4666 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 4655 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
4667 struct intel_dp *intel_dp = intel_attached_dp(&connector->base); 4656 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
@@ -4720,6 +4709,22 @@ intel_dp_long_pulse(struct intel_connector *connector)
4720 */ 4709 */
4721 status = connector_status_disconnected; 4710 status = connector_status_disconnected;
4722 goto out; 4711 goto out;
4712 } else {
4713 /*
4714 * If display is now connected check links status,
4715 * there has been known issues of link loss triggering
4716 * long pulse.
4717 *
4718 * Some sinks (eg. ASUS PB287Q) seem to perform some
4719 * weird HPD ping pong during modesets. So we can apparently
4720 * end up with HPD going low during a modeset, and then
4721 * going back up soon after. And once that happens we must
4722 * retrain the link to get a picture. That's in case no
4723 * userspace component reacted to intermittent HPD dip.
4724 */
4725 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
4726
4727 intel_dp_retrain_link(encoder, ctx);
4723 } 4728 }
4724 4729
4725 /* 4730 /*
@@ -4781,7 +4786,7 @@ intel_dp_detect(struct drm_connector *connector,
4781 return ret; 4786 return ret;
4782 } 4787 }
4783 4788
4784 status = intel_dp_long_pulse(intel_dp->attached_connector); 4789 status = intel_dp_long_pulse(intel_dp->attached_connector, ctx);
4785 } 4790 }
4786 4791
4787 intel_dp->detect_done = false; 4792 intel_dp->detect_done = false;
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 7e3e01607643..4ecd65375603 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -166,6 +166,8 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
166 struct intel_connector *connector = 166 struct intel_connector *connector =
167 to_intel_connector(old_conn_state->connector); 167 to_intel_connector(old_conn_state->connector);
168 168
169 intel_ddi_disable_pipe_clock(old_crtc_state);
170
169 /* this can fail */ 171 /* this can fail */
170 drm_dp_check_act_status(&intel_dp->mst_mgr); 172 drm_dp_check_act_status(&intel_dp->mst_mgr);
171 /* and this can also fail */ 173 /* and this can also fail */
@@ -252,6 +254,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
252 I915_WRITE(DP_TP_STATUS(port), temp); 254 I915_WRITE(DP_TP_STATUS(port), temp);
253 255
254 ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr); 256 ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
257
258 intel_ddi_enable_pipe_clock(pipe_config);
255} 259}
256 260
257static void intel_mst_enable_dp(struct intel_encoder *encoder, 261static void intel_mst_enable_dp(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index a9076402dcb0..192972a7d287 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -943,8 +943,12 @@ static int intel_hdmi_hdcp_write(struct intel_digital_port *intel_dig_port,
943 943
944 ret = i2c_transfer(adapter, &msg, 1); 944 ret = i2c_transfer(adapter, &msg, 1);
945 if (ret == 1) 945 if (ret == 1)
946 return 0; 946 ret = 0;
947 return ret >= 0 ? -EIO : ret; 947 else if (ret >= 0)
948 ret = -EIO;
949
950 kfree(write_buf);
951 return ret;
948} 952}
949 953
950static 954static
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c
index 5dae16ccd9f1..3e085c5f2b81 100644
--- a/drivers/gpu/drm/i915/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/intel_lspcon.c
@@ -74,7 +74,7 @@ static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon,
74 DRM_DEBUG_KMS("Waiting for LSPCON mode %s to settle\n", 74 DRM_DEBUG_KMS("Waiting for LSPCON mode %s to settle\n",
75 lspcon_mode_name(mode)); 75 lspcon_mode_name(mode));
76 76
77 wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, 100); 77 wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, 400);
78 if (current_mode != mode) 78 if (current_mode != mode)
79 DRM_ERROR("LSPCON mode hasn't settled\n"); 79 DRM_ERROR("LSPCON mode hasn't settled\n");
80 80
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index c2f10d899329..443dfaefd7a6 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -181,8 +181,9 @@ struct intel_overlay {
181 u32 brightness, contrast, saturation; 181 u32 brightness, contrast, saturation;
182 u32 old_xscale, old_yscale; 182 u32 old_xscale, old_yscale;
183 /* register access */ 183 /* register access */
184 u32 flip_addr;
185 struct drm_i915_gem_object *reg_bo; 184 struct drm_i915_gem_object *reg_bo;
185 struct overlay_registers __iomem *regs;
186 u32 flip_addr;
186 /* flip handling */ 187 /* flip handling */
187 struct i915_gem_active last_flip; 188 struct i915_gem_active last_flip;
188}; 189};
@@ -210,29 +211,6 @@ static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv,
210 PCI_DEVFN(0, 0), I830_CLOCK_GATE, val); 211 PCI_DEVFN(0, 0), I830_CLOCK_GATE, val);
211} 212}
212 213
213static struct overlay_registers __iomem *
214intel_overlay_map_regs(struct intel_overlay *overlay)
215{
216 struct drm_i915_private *dev_priv = overlay->i915;
217 struct overlay_registers __iomem *regs;
218
219 if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
220 regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
221 else
222 regs = io_mapping_map_wc(&dev_priv->ggtt.iomap,
223 overlay->flip_addr,
224 PAGE_SIZE);
225
226 return regs;
227}
228
229static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
230 struct overlay_registers __iomem *regs)
231{
232 if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915))
233 io_mapping_unmap(regs);
234}
235
236static void intel_overlay_submit_request(struct intel_overlay *overlay, 214static void intel_overlay_submit_request(struct intel_overlay *overlay,
237 struct i915_request *rq, 215 struct i915_request *rq,
238 i915_gem_retire_fn retire) 216 i915_gem_retire_fn retire)
@@ -784,13 +762,13 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
784 struct drm_i915_gem_object *new_bo, 762 struct drm_i915_gem_object *new_bo,
785 struct put_image_params *params) 763 struct put_image_params *params)
786{ 764{
787 int ret, tmp_width; 765 struct overlay_registers __iomem *regs = overlay->regs;
788 struct overlay_registers __iomem *regs;
789 bool scale_changed = false;
790 struct drm_i915_private *dev_priv = overlay->i915; 766 struct drm_i915_private *dev_priv = overlay->i915;
791 u32 swidth, swidthsw, sheight, ostride; 767 u32 swidth, swidthsw, sheight, ostride;
792 enum pipe pipe = overlay->crtc->pipe; 768 enum pipe pipe = overlay->crtc->pipe;
769 bool scale_changed = false;
793 struct i915_vma *vma; 770 struct i915_vma *vma;
771 int ret, tmp_width;
794 772
795 lockdep_assert_held(&dev_priv->drm.struct_mutex); 773 lockdep_assert_held(&dev_priv->drm.struct_mutex);
796 WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex)); 774 WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
@@ -815,30 +793,19 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
815 793
816 if (!overlay->active) { 794 if (!overlay->active) {
817 u32 oconfig; 795 u32 oconfig;
818 regs = intel_overlay_map_regs(overlay); 796
819 if (!regs) {
820 ret = -ENOMEM;
821 goto out_unpin;
822 }
823 oconfig = OCONF_CC_OUT_8BIT; 797 oconfig = OCONF_CC_OUT_8BIT;
824 if (IS_GEN4(dev_priv)) 798 if (IS_GEN4(dev_priv))
825 oconfig |= OCONF_CSC_MODE_BT709; 799 oconfig |= OCONF_CSC_MODE_BT709;
826 oconfig |= pipe == 0 ? 800 oconfig |= pipe == 0 ?
827 OCONF_PIPE_A : OCONF_PIPE_B; 801 OCONF_PIPE_A : OCONF_PIPE_B;
828 iowrite32(oconfig, &regs->OCONFIG); 802 iowrite32(oconfig, &regs->OCONFIG);
829 intel_overlay_unmap_regs(overlay, regs);
830 803
831 ret = intel_overlay_on(overlay); 804 ret = intel_overlay_on(overlay);
832 if (ret != 0) 805 if (ret != 0)
833 goto out_unpin; 806 goto out_unpin;
834 } 807 }
835 808
836 regs = intel_overlay_map_regs(overlay);
837 if (!regs) {
838 ret = -ENOMEM;
839 goto out_unpin;
840 }
841
842 iowrite32((params->dst_y << 16) | params->dst_x, &regs->DWINPOS); 809 iowrite32((params->dst_y << 16) | params->dst_x, &regs->DWINPOS);
843 iowrite32((params->dst_h << 16) | params->dst_w, &regs->DWINSZ); 810 iowrite32((params->dst_h << 16) | params->dst_w, &regs->DWINSZ);
844 811
@@ -882,8 +849,6 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
882 849
883 iowrite32(overlay_cmd_reg(params), &regs->OCMD); 850 iowrite32(overlay_cmd_reg(params), &regs->OCMD);
884 851
885 intel_overlay_unmap_regs(overlay, regs);
886
887 ret = intel_overlay_continue(overlay, vma, scale_changed); 852 ret = intel_overlay_continue(overlay, vma, scale_changed);
888 if (ret) 853 if (ret)
889 goto out_unpin; 854 goto out_unpin;
@@ -901,7 +866,6 @@ out_pin_section:
901int intel_overlay_switch_off(struct intel_overlay *overlay) 866int intel_overlay_switch_off(struct intel_overlay *overlay)
902{ 867{
903 struct drm_i915_private *dev_priv = overlay->i915; 868 struct drm_i915_private *dev_priv = overlay->i915;
904 struct overlay_registers __iomem *regs;
905 int ret; 869 int ret;
906 870
907 lockdep_assert_held(&dev_priv->drm.struct_mutex); 871 lockdep_assert_held(&dev_priv->drm.struct_mutex);
@@ -918,9 +882,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
918 if (ret != 0) 882 if (ret != 0)
919 return ret; 883 return ret;
920 884
921 regs = intel_overlay_map_regs(overlay); 885 iowrite32(0, &overlay->regs->OCMD);
922 iowrite32(0, &regs->OCMD);
923 intel_overlay_unmap_regs(overlay, regs);
924 886
925 return intel_overlay_off(overlay); 887 return intel_overlay_off(overlay);
926} 888}
@@ -1305,7 +1267,6 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
1305 struct drm_intel_overlay_attrs *attrs = data; 1267 struct drm_intel_overlay_attrs *attrs = data;
1306 struct drm_i915_private *dev_priv = to_i915(dev); 1268 struct drm_i915_private *dev_priv = to_i915(dev);
1307 struct intel_overlay *overlay; 1269 struct intel_overlay *overlay;
1308 struct overlay_registers __iomem *regs;
1309 int ret; 1270 int ret;
1310 1271
1311 overlay = dev_priv->overlay; 1272 overlay = dev_priv->overlay;
@@ -1345,15 +1306,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
1345 overlay->contrast = attrs->contrast; 1306 overlay->contrast = attrs->contrast;
1346 overlay->saturation = attrs->saturation; 1307 overlay->saturation = attrs->saturation;
1347 1308
1348 regs = intel_overlay_map_regs(overlay); 1309 update_reg_attrs(overlay, overlay->regs);
1349 if (!regs) {
1350 ret = -ENOMEM;
1351 goto out_unlock;
1352 }
1353
1354 update_reg_attrs(overlay, regs);
1355
1356 intel_overlay_unmap_regs(overlay, regs);
1357 1310
1358 if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) { 1311 if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
1359 if (IS_GEN2(dev_priv)) 1312 if (IS_GEN2(dev_priv))
@@ -1386,12 +1339,47 @@ out_unlock:
1386 return ret; 1339 return ret;
1387} 1340}
1388 1341
1342static int get_registers(struct intel_overlay *overlay, bool use_phys)
1343{
1344 struct drm_i915_gem_object *obj;
1345 struct i915_vma *vma;
1346 int err;
1347
1348 obj = i915_gem_object_create_stolen(overlay->i915, PAGE_SIZE);
1349 if (obj == NULL)
1350 obj = i915_gem_object_create_internal(overlay->i915, PAGE_SIZE);
1351 if (IS_ERR(obj))
1352 return PTR_ERR(obj);
1353
1354 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
1355 if (IS_ERR(vma)) {
1356 err = PTR_ERR(vma);
1357 goto err_put_bo;
1358 }
1359
1360 if (use_phys)
1361 overlay->flip_addr = sg_dma_address(obj->mm.pages->sgl);
1362 else
1363 overlay->flip_addr = i915_ggtt_offset(vma);
1364 overlay->regs = i915_vma_pin_iomap(vma);
1365 i915_vma_unpin(vma);
1366
1367 if (IS_ERR(overlay->regs)) {
1368 err = PTR_ERR(overlay->regs);
1369 goto err_put_bo;
1370 }
1371
1372 overlay->reg_bo = obj;
1373 return 0;
1374
1375err_put_bo:
1376 i915_gem_object_put(obj);
1377 return err;
1378}
1379
1389void intel_setup_overlay(struct drm_i915_private *dev_priv) 1380void intel_setup_overlay(struct drm_i915_private *dev_priv)
1390{ 1381{
1391 struct intel_overlay *overlay; 1382 struct intel_overlay *overlay;
1392 struct drm_i915_gem_object *reg_bo;
1393 struct overlay_registers __iomem *regs;
1394 struct i915_vma *vma = NULL;
1395 int ret; 1383 int ret;
1396 1384
1397 if (!HAS_OVERLAY(dev_priv)) 1385 if (!HAS_OVERLAY(dev_priv))
@@ -1401,46 +1389,8 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
1401 if (!overlay) 1389 if (!overlay)
1402 return; 1390 return;
1403 1391
1404 mutex_lock(&dev_priv->drm.struct_mutex);
1405 if (WARN_ON(dev_priv->overlay))
1406 goto out_free;
1407
1408 overlay->i915 = dev_priv; 1392 overlay->i915 = dev_priv;
1409 1393
1410 reg_bo = NULL;
1411 if (!OVERLAY_NEEDS_PHYSICAL(dev_priv))
1412 reg_bo = i915_gem_object_create_stolen(dev_priv, PAGE_SIZE);
1413 if (reg_bo == NULL)
1414 reg_bo = i915_gem_object_create(dev_priv, PAGE_SIZE);
1415 if (IS_ERR(reg_bo))
1416 goto out_free;
1417 overlay->reg_bo = reg_bo;
1418
1419 if (OVERLAY_NEEDS_PHYSICAL(dev_priv)) {
1420 ret = i915_gem_object_attach_phys(reg_bo, PAGE_SIZE);
1421 if (ret) {
1422 DRM_ERROR("failed to attach phys overlay regs\n");
1423 goto out_free_bo;
1424 }
1425 overlay->flip_addr = reg_bo->phys_handle->busaddr;
1426 } else {
1427 vma = i915_gem_object_ggtt_pin(reg_bo, NULL,
1428 0, PAGE_SIZE, PIN_MAPPABLE);
1429 if (IS_ERR(vma)) {
1430 DRM_ERROR("failed to pin overlay register bo\n");
1431 ret = PTR_ERR(vma);
1432 goto out_free_bo;
1433 }
1434 overlay->flip_addr = i915_ggtt_offset(vma);
1435
1436 ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
1437 if (ret) {
1438 DRM_ERROR("failed to move overlay register bo into the GTT\n");
1439 goto out_unpin_bo;
1440 }
1441 }
1442
1443 /* init all values */
1444 overlay->color_key = 0x0101fe; 1394 overlay->color_key = 0x0101fe;
1445 overlay->color_key_enabled = true; 1395 overlay->color_key_enabled = true;
1446 overlay->brightness = -19; 1396 overlay->brightness = -19;
@@ -1449,44 +1399,51 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
1449 1399
1450 init_request_active(&overlay->last_flip, NULL); 1400 init_request_active(&overlay->last_flip, NULL);
1451 1401
1452 regs = intel_overlay_map_regs(overlay); 1402 mutex_lock(&dev_priv->drm.struct_mutex);
1453 if (!regs) 1403
1454 goto out_unpin_bo; 1404 ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(dev_priv));
1405 if (ret)
1406 goto out_free;
1407
1408 ret = i915_gem_object_set_to_gtt_domain(overlay->reg_bo, true);
1409 if (ret)
1410 goto out_reg_bo;
1455 1411
1456 memset_io(regs, 0, sizeof(struct overlay_registers)); 1412 mutex_unlock(&dev_priv->drm.struct_mutex);
1457 update_polyphase_filter(regs);
1458 update_reg_attrs(overlay, regs);
1459 1413
1460 intel_overlay_unmap_regs(overlay, regs); 1414 memset_io(overlay->regs, 0, sizeof(struct overlay_registers));
1415 update_polyphase_filter(overlay->regs);
1416 update_reg_attrs(overlay, overlay->regs);
1461 1417
1462 dev_priv->overlay = overlay; 1418 dev_priv->overlay = overlay;
1463 mutex_unlock(&dev_priv->drm.struct_mutex); 1419 DRM_INFO("Initialized overlay support.\n");
1464 DRM_INFO("initialized overlay support\n");
1465 return; 1420 return;
1466 1421
1467out_unpin_bo: 1422out_reg_bo:
1468 if (vma) 1423 i915_gem_object_put(overlay->reg_bo);
1469 i915_vma_unpin(vma);
1470out_free_bo:
1471 i915_gem_object_put(reg_bo);
1472out_free: 1424out_free:
1473 mutex_unlock(&dev_priv->drm.struct_mutex); 1425 mutex_unlock(&dev_priv->drm.struct_mutex);
1474 kfree(overlay); 1426 kfree(overlay);
1475 return;
1476} 1427}
1477 1428
1478void intel_cleanup_overlay(struct drm_i915_private *dev_priv) 1429void intel_cleanup_overlay(struct drm_i915_private *dev_priv)
1479{ 1430{
1480 if (!dev_priv->overlay) 1431 struct intel_overlay *overlay;
1432
1433 overlay = fetch_and_zero(&dev_priv->overlay);
1434 if (!overlay)
1481 return; 1435 return;
1482 1436
1483 /* The bo's should be free'd by the generic code already. 1437 /*
1438 * The bo's should be free'd by the generic code already.
1484 * Furthermore modesetting teardown happens beforehand so the 1439 * Furthermore modesetting teardown happens beforehand so the
1485 * hardware should be off already */ 1440 * hardware should be off already.
1486 WARN_ON(dev_priv->overlay->active); 1441 */
1442 WARN_ON(overlay->active);
1443
1444 i915_gem_object_put(overlay->reg_bo);
1487 1445
1488 i915_gem_object_put(dev_priv->overlay->reg_bo); 1446 kfree(overlay);
1489 kfree(dev_priv->overlay);
1490} 1447}
1491 1448
1492#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) 1449#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
@@ -1498,37 +1455,11 @@ struct intel_overlay_error_state {
1498 u32 isr; 1455 u32 isr;
1499}; 1456};
1500 1457
1501static struct overlay_registers __iomem *
1502intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
1503{
1504 struct drm_i915_private *dev_priv = overlay->i915;
1505 struct overlay_registers __iomem *regs;
1506
1507 if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
1508 /* Cast to make sparse happy, but it's wc memory anyway, so
1509 * equivalent to the wc io mapping on X86. */
1510 regs = (struct overlay_registers __iomem *)
1511 overlay->reg_bo->phys_handle->vaddr;
1512 else
1513 regs = io_mapping_map_atomic_wc(&dev_priv->ggtt.iomap,
1514 overlay->flip_addr);
1515
1516 return regs;
1517}
1518
1519static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
1520 struct overlay_registers __iomem *regs)
1521{
1522 if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915))
1523 io_mapping_unmap_atomic(regs);
1524}
1525
1526struct intel_overlay_error_state * 1458struct intel_overlay_error_state *
1527intel_overlay_capture_error_state(struct drm_i915_private *dev_priv) 1459intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
1528{ 1460{
1529 struct intel_overlay *overlay = dev_priv->overlay; 1461 struct intel_overlay *overlay = dev_priv->overlay;
1530 struct intel_overlay_error_state *error; 1462 struct intel_overlay_error_state *error;
1531 struct overlay_registers __iomem *regs;
1532 1463
1533 if (!overlay || !overlay->active) 1464 if (!overlay || !overlay->active)
1534 return NULL; 1465 return NULL;
@@ -1541,18 +1472,9 @@ intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
1541 error->isr = I915_READ(ISR); 1472 error->isr = I915_READ(ISR);
1542 error->base = overlay->flip_addr; 1473 error->base = overlay->flip_addr;
1543 1474
1544 regs = intel_overlay_map_regs_atomic(overlay); 1475 memcpy_fromio(&error->regs, overlay->regs, sizeof(error->regs));
1545 if (!regs)
1546 goto err;
1547
1548 memcpy_fromio(&error->regs, regs, sizeof(struct overlay_registers));
1549 intel_overlay_unmap_regs_atomic(overlay, regs);
1550 1476
1551 return error; 1477 return error;
1552
1553err:
1554 kfree(error);
1555 return NULL;
1556} 1478}
1557 1479
1558void 1480void
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index 978782a77629..28d191192945 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -132,6 +132,11 @@ static void mtk_ovl_config(struct mtk_ddp_comp *comp, unsigned int w,
132 writel(0x0, comp->regs + DISP_REG_OVL_RST); 132 writel(0x0, comp->regs + DISP_REG_OVL_RST);
133} 133}
134 134
135static unsigned int mtk_ovl_layer_nr(struct mtk_ddp_comp *comp)
136{
137 return 4;
138}
139
135static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx) 140static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx)
136{ 141{
137 unsigned int reg; 142 unsigned int reg;
@@ -157,6 +162,11 @@ static void mtk_ovl_layer_off(struct mtk_ddp_comp *comp, unsigned int idx)
157 162
158static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt) 163static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt)
159{ 164{
165 /* The return value in switch "MEM_MODE_INPUT_FORMAT_XXX"
166 * is defined in mediatek HW data sheet.
167 * The alphabet order in XXX is no relation to data
168 * arrangement in memory.
169 */
160 switch (fmt) { 170 switch (fmt) {
161 default: 171 default:
162 case DRM_FORMAT_RGB565: 172 case DRM_FORMAT_RGB565:
@@ -221,6 +231,7 @@ static const struct mtk_ddp_comp_funcs mtk_disp_ovl_funcs = {
221 .stop = mtk_ovl_stop, 231 .stop = mtk_ovl_stop,
222 .enable_vblank = mtk_ovl_enable_vblank, 232 .enable_vblank = mtk_ovl_enable_vblank,
223 .disable_vblank = mtk_ovl_disable_vblank, 233 .disable_vblank = mtk_ovl_disable_vblank,
234 .layer_nr = mtk_ovl_layer_nr,
224 .layer_on = mtk_ovl_layer_on, 235 .layer_on = mtk_ovl_layer_on,
225 .layer_off = mtk_ovl_layer_off, 236 .layer_off = mtk_ovl_layer_off,
226 .layer_config = mtk_ovl_layer_config, 237 .layer_config = mtk_ovl_layer_config,
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
index 585943c81e1f..b0a5cffe345a 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
@@ -31,14 +31,31 @@
31#define RDMA_REG_UPDATE_INT BIT(0) 31#define RDMA_REG_UPDATE_INT BIT(0)
32#define DISP_REG_RDMA_GLOBAL_CON 0x0010 32#define DISP_REG_RDMA_GLOBAL_CON 0x0010
33#define RDMA_ENGINE_EN BIT(0) 33#define RDMA_ENGINE_EN BIT(0)
34#define RDMA_MODE_MEMORY BIT(1)
34#define DISP_REG_RDMA_SIZE_CON_0 0x0014 35#define DISP_REG_RDMA_SIZE_CON_0 0x0014
36#define RDMA_MATRIX_ENABLE BIT(17)
37#define RDMA_MATRIX_INT_MTX_SEL GENMASK(23, 20)
38#define RDMA_MATRIX_INT_MTX_BT601_to_RGB (6 << 20)
35#define DISP_REG_RDMA_SIZE_CON_1 0x0018 39#define DISP_REG_RDMA_SIZE_CON_1 0x0018
36#define DISP_REG_RDMA_TARGET_LINE 0x001c 40#define DISP_REG_RDMA_TARGET_LINE 0x001c
41#define DISP_RDMA_MEM_CON 0x0024
42#define MEM_MODE_INPUT_FORMAT_RGB565 (0x000 << 4)
43#define MEM_MODE_INPUT_FORMAT_RGB888 (0x001 << 4)
44#define MEM_MODE_INPUT_FORMAT_RGBA8888 (0x002 << 4)
45#define MEM_MODE_INPUT_FORMAT_ARGB8888 (0x003 << 4)
46#define MEM_MODE_INPUT_FORMAT_UYVY (0x004 << 4)
47#define MEM_MODE_INPUT_FORMAT_YUYV (0x005 << 4)
48#define MEM_MODE_INPUT_SWAP BIT(8)
49#define DISP_RDMA_MEM_SRC_PITCH 0x002c
50#define DISP_RDMA_MEM_GMC_SETTING_0 0x0030
37#define DISP_REG_RDMA_FIFO_CON 0x0040 51#define DISP_REG_RDMA_FIFO_CON 0x0040
38#define RDMA_FIFO_UNDERFLOW_EN BIT(31) 52#define RDMA_FIFO_UNDERFLOW_EN BIT(31)
39#define RDMA_FIFO_PSEUDO_SIZE(bytes) (((bytes) / 16) << 16) 53#define RDMA_FIFO_PSEUDO_SIZE(bytes) (((bytes) / 16) << 16)
40#define RDMA_OUTPUT_VALID_FIFO_THRESHOLD(bytes) ((bytes) / 16) 54#define RDMA_OUTPUT_VALID_FIFO_THRESHOLD(bytes) ((bytes) / 16)
41#define RDMA_FIFO_SIZE(rdma) ((rdma)->data->fifo_size) 55#define RDMA_FIFO_SIZE(rdma) ((rdma)->data->fifo_size)
56#define DISP_RDMA_MEM_START_ADDR 0x0f00
57
58#define RDMA_MEM_GMC 0x40402020
42 59
43struct mtk_disp_rdma_data { 60struct mtk_disp_rdma_data {
44 unsigned int fifo_size; 61 unsigned int fifo_size;
@@ -138,12 +155,87 @@ static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width,
138 writel(reg, comp->regs + DISP_REG_RDMA_FIFO_CON); 155 writel(reg, comp->regs + DISP_REG_RDMA_FIFO_CON);
139} 156}
140 157
158static unsigned int rdma_fmt_convert(struct mtk_disp_rdma *rdma,
159 unsigned int fmt)
160{
161 /* The return value in switch "MEM_MODE_INPUT_FORMAT_XXX"
162 * is defined in mediatek HW data sheet.
163 * The alphabet order in XXX is no relation to data
164 * arrangement in memory.
165 */
166 switch (fmt) {
167 default:
168 case DRM_FORMAT_RGB565:
169 return MEM_MODE_INPUT_FORMAT_RGB565;
170 case DRM_FORMAT_BGR565:
171 return MEM_MODE_INPUT_FORMAT_RGB565 | MEM_MODE_INPUT_SWAP;
172 case DRM_FORMAT_RGB888:
173 return MEM_MODE_INPUT_FORMAT_RGB888;
174 case DRM_FORMAT_BGR888:
175 return MEM_MODE_INPUT_FORMAT_RGB888 | MEM_MODE_INPUT_SWAP;
176 case DRM_FORMAT_RGBX8888:
177 case DRM_FORMAT_RGBA8888:
178 return MEM_MODE_INPUT_FORMAT_ARGB8888;
179 case DRM_FORMAT_BGRX8888:
180 case DRM_FORMAT_BGRA8888:
181 return MEM_MODE_INPUT_FORMAT_ARGB8888 | MEM_MODE_INPUT_SWAP;
182 case DRM_FORMAT_XRGB8888:
183 case DRM_FORMAT_ARGB8888:
184 return MEM_MODE_INPUT_FORMAT_RGBA8888;
185 case DRM_FORMAT_XBGR8888:
186 case DRM_FORMAT_ABGR8888:
187 return MEM_MODE_INPUT_FORMAT_RGBA8888 | MEM_MODE_INPUT_SWAP;
188 case DRM_FORMAT_UYVY:
189 return MEM_MODE_INPUT_FORMAT_UYVY;
190 case DRM_FORMAT_YUYV:
191 return MEM_MODE_INPUT_FORMAT_YUYV;
192 }
193}
194
195static unsigned int mtk_rdma_layer_nr(struct mtk_ddp_comp *comp)
196{
197 return 1;
198}
199
200static void mtk_rdma_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
201 struct mtk_plane_state *state)
202{
203 struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
204 struct mtk_plane_pending_state *pending = &state->pending;
205 unsigned int addr = pending->addr;
206 unsigned int pitch = pending->pitch & 0xffff;
207 unsigned int fmt = pending->format;
208 unsigned int con;
209
210 con = rdma_fmt_convert(rdma, fmt);
211 writel_relaxed(con, comp->regs + DISP_RDMA_MEM_CON);
212
213 if (fmt == DRM_FORMAT_UYVY || fmt == DRM_FORMAT_YUYV) {
214 rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0,
215 RDMA_MATRIX_ENABLE, RDMA_MATRIX_ENABLE);
216 rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0,
217 RDMA_MATRIX_INT_MTX_SEL,
218 RDMA_MATRIX_INT_MTX_BT601_to_RGB);
219 } else {
220 rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0,
221 RDMA_MATRIX_ENABLE, 0);
222 }
223
224 writel_relaxed(addr, comp->regs + DISP_RDMA_MEM_START_ADDR);
225 writel_relaxed(pitch, comp->regs + DISP_RDMA_MEM_SRC_PITCH);
226 writel(RDMA_MEM_GMC, comp->regs + DISP_RDMA_MEM_GMC_SETTING_0);
227 rdma_update_bits(comp, DISP_REG_RDMA_GLOBAL_CON,
228 RDMA_MODE_MEMORY, RDMA_MODE_MEMORY);
229}
230
141static const struct mtk_ddp_comp_funcs mtk_disp_rdma_funcs = { 231static const struct mtk_ddp_comp_funcs mtk_disp_rdma_funcs = {
142 .config = mtk_rdma_config, 232 .config = mtk_rdma_config,
143 .start = mtk_rdma_start, 233 .start = mtk_rdma_start,
144 .stop = mtk_rdma_stop, 234 .stop = mtk_rdma_stop,
145 .enable_vblank = mtk_rdma_enable_vblank, 235 .enable_vblank = mtk_rdma_enable_vblank,
146 .disable_vblank = mtk_rdma_disable_vblank, 236 .disable_vblank = mtk_rdma_disable_vblank,
237 .layer_nr = mtk_rdma_layer_nr,
238 .layer_config = mtk_rdma_layer_config,
147}; 239};
148 240
149static int mtk_disp_rdma_bind(struct device *dev, struct device *master, 241static int mtk_disp_rdma_bind(struct device *dev, struct device *master,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 2d6aa150a9ff..0b976dfd04df 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -45,7 +45,8 @@ struct mtk_drm_crtc {
45 bool pending_needs_vblank; 45 bool pending_needs_vblank;
46 struct drm_pending_vblank_event *event; 46 struct drm_pending_vblank_event *event;
47 47
48 struct drm_plane planes[OVL_LAYER_NR]; 48 struct drm_plane *planes;
49 unsigned int layer_nr;
49 bool pending_planes; 50 bool pending_planes;
50 51
51 void __iomem *config_regs; 52 void __iomem *config_regs;
@@ -171,9 +172,9 @@ static void mtk_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
171static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc) 172static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc)
172{ 173{
173 struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); 174 struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
174 struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; 175 struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
175 176
176 mtk_ddp_comp_enable_vblank(ovl, &mtk_crtc->base); 177 mtk_ddp_comp_enable_vblank(comp, &mtk_crtc->base);
177 178
178 return 0; 179 return 0;
179} 180}
@@ -181,9 +182,9 @@ static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc)
181static void mtk_drm_crtc_disable_vblank(struct drm_crtc *crtc) 182static void mtk_drm_crtc_disable_vblank(struct drm_crtc *crtc)
182{ 183{
183 struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); 184 struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
184 struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; 185 struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
185 186
186 mtk_ddp_comp_disable_vblank(ovl); 187 mtk_ddp_comp_disable_vblank(comp);
187} 188}
188 189
189static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc) 190static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc)
@@ -286,7 +287,7 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
286 } 287 }
287 288
288 /* Initially configure all planes */ 289 /* Initially configure all planes */
289 for (i = 0; i < OVL_LAYER_NR; i++) { 290 for (i = 0; i < mtk_crtc->layer_nr; i++) {
290 struct drm_plane *plane = &mtk_crtc->planes[i]; 291 struct drm_plane *plane = &mtk_crtc->planes[i];
291 struct mtk_plane_state *plane_state; 292 struct mtk_plane_state *plane_state;
292 293
@@ -334,7 +335,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
334{ 335{
335 struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); 336 struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
336 struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state); 337 struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state);
337 struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; 338 struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
338 unsigned int i; 339 unsigned int i;
339 340
340 /* 341 /*
@@ -343,7 +344,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
343 * queue update module registers on vblank. 344 * queue update module registers on vblank.
344 */ 345 */
345 if (state->pending_config) { 346 if (state->pending_config) {
346 mtk_ddp_comp_config(ovl, state->pending_width, 347 mtk_ddp_comp_config(comp, state->pending_width,
347 state->pending_height, 348 state->pending_height,
348 state->pending_vrefresh, 0); 349 state->pending_vrefresh, 0);
349 350
@@ -351,14 +352,14 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
351 } 352 }
352 353
353 if (mtk_crtc->pending_planes) { 354 if (mtk_crtc->pending_planes) {
354 for (i = 0; i < OVL_LAYER_NR; i++) { 355 for (i = 0; i < mtk_crtc->layer_nr; i++) {
355 struct drm_plane *plane = &mtk_crtc->planes[i]; 356 struct drm_plane *plane = &mtk_crtc->planes[i];
356 struct mtk_plane_state *plane_state; 357 struct mtk_plane_state *plane_state;
357 358
358 plane_state = to_mtk_plane_state(plane->state); 359 plane_state = to_mtk_plane_state(plane->state);
359 360
360 if (plane_state->pending.config) { 361 if (plane_state->pending.config) {
361 mtk_ddp_comp_layer_config(ovl, i, plane_state); 362 mtk_ddp_comp_layer_config(comp, i, plane_state);
362 plane_state->pending.config = false; 363 plane_state->pending.config = false;
363 } 364 }
364 } 365 }
@@ -370,12 +371,12 @@ static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
370 struct drm_crtc_state *old_state) 371 struct drm_crtc_state *old_state)
371{ 372{
372 struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); 373 struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
373 struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; 374 struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
374 int ret; 375 int ret;
375 376
376 DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id); 377 DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id);
377 378
378 ret = mtk_smi_larb_get(ovl->larb_dev); 379 ret = mtk_smi_larb_get(comp->larb_dev);
379 if (ret) { 380 if (ret) {
380 DRM_ERROR("Failed to get larb: %d\n", ret); 381 DRM_ERROR("Failed to get larb: %d\n", ret);
381 return; 382 return;
@@ -383,7 +384,7 @@ static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
383 384
384 ret = mtk_crtc_ddp_hw_init(mtk_crtc); 385 ret = mtk_crtc_ddp_hw_init(mtk_crtc);
385 if (ret) { 386 if (ret) {
386 mtk_smi_larb_put(ovl->larb_dev); 387 mtk_smi_larb_put(comp->larb_dev);
387 return; 388 return;
388 } 389 }
389 390
@@ -395,7 +396,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
395 struct drm_crtc_state *old_state) 396 struct drm_crtc_state *old_state)
396{ 397{
397 struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); 398 struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
398 struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; 399 struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
399 int i; 400 int i;
400 401
401 DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id); 402 DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id);
@@ -403,7 +404,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
403 return; 404 return;
404 405
405 /* Set all pending plane state to disabled */ 406 /* Set all pending plane state to disabled */
406 for (i = 0; i < OVL_LAYER_NR; i++) { 407 for (i = 0; i < mtk_crtc->layer_nr; i++) {
407 struct drm_plane *plane = &mtk_crtc->planes[i]; 408 struct drm_plane *plane = &mtk_crtc->planes[i];
408 struct mtk_plane_state *plane_state; 409 struct mtk_plane_state *plane_state;
409 410
@@ -418,7 +419,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
418 419
419 drm_crtc_vblank_off(crtc); 420 drm_crtc_vblank_off(crtc);
420 mtk_crtc_ddp_hw_fini(mtk_crtc); 421 mtk_crtc_ddp_hw_fini(mtk_crtc);
421 mtk_smi_larb_put(ovl->larb_dev); 422 mtk_smi_larb_put(comp->larb_dev);
422 423
423 mtk_crtc->enabled = false; 424 mtk_crtc->enabled = false;
424} 425}
@@ -450,7 +451,7 @@ static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc,
450 451
451 if (mtk_crtc->event) 452 if (mtk_crtc->event)
452 mtk_crtc->pending_needs_vblank = true; 453 mtk_crtc->pending_needs_vblank = true;
453 for (i = 0; i < OVL_LAYER_NR; i++) { 454 for (i = 0; i < mtk_crtc->layer_nr; i++) {
454 struct drm_plane *plane = &mtk_crtc->planes[i]; 455 struct drm_plane *plane = &mtk_crtc->planes[i];
455 struct mtk_plane_state *plane_state; 456 struct mtk_plane_state *plane_state;
456 457
@@ -516,7 +517,7 @@ err_cleanup_crtc:
516 return ret; 517 return ret;
517} 518}
518 519
519void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *ovl) 520void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp)
520{ 521{
521 struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); 522 struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
522 struct mtk_drm_private *priv = crtc->dev->dev_private; 523 struct mtk_drm_private *priv = crtc->dev->dev_private;
@@ -598,7 +599,12 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
598 mtk_crtc->ddp_comp[i] = comp; 599 mtk_crtc->ddp_comp[i] = comp;
599 } 600 }
600 601
601 for (zpos = 0; zpos < OVL_LAYER_NR; zpos++) { 602 mtk_crtc->layer_nr = mtk_ddp_comp_layer_nr(mtk_crtc->ddp_comp[0]);
603 mtk_crtc->planes = devm_kzalloc(dev, mtk_crtc->layer_nr *
604 sizeof(struct drm_plane),
605 GFP_KERNEL);
606
607 for (zpos = 0; zpos < mtk_crtc->layer_nr; zpos++) {
602 type = (zpos == 0) ? DRM_PLANE_TYPE_PRIMARY : 608 type = (zpos == 0) ? DRM_PLANE_TYPE_PRIMARY :
603 (zpos == 1) ? DRM_PLANE_TYPE_CURSOR : 609 (zpos == 1) ? DRM_PLANE_TYPE_CURSOR :
604 DRM_PLANE_TYPE_OVERLAY; 610 DRM_PLANE_TYPE_OVERLAY;
@@ -609,7 +615,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
609 } 615 }
610 616
611 ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, &mtk_crtc->planes[0], 617 ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, &mtk_crtc->planes[0],
612 &mtk_crtc->planes[1], pipe); 618 mtk_crtc->layer_nr > 1 ? &mtk_crtc->planes[1] :
619 NULL, pipe);
613 if (ret < 0) 620 if (ret < 0)
614 goto unprepare; 621 goto unprepare;
615 drm_mode_crtc_set_gamma_size(&mtk_crtc->base, MTK_LUT_SIZE); 622 drm_mode_crtc_set_gamma_size(&mtk_crtc->base, MTK_LUT_SIZE);
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
index 9d9410c67ae9..091adb2087eb 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
@@ -18,13 +18,12 @@
18#include "mtk_drm_ddp_comp.h" 18#include "mtk_drm_ddp_comp.h"
19#include "mtk_drm_plane.h" 19#include "mtk_drm_plane.h"
20 20
21#define OVL_LAYER_NR 4
22#define MTK_LUT_SIZE 512 21#define MTK_LUT_SIZE 512
23#define MTK_MAX_BPC 10 22#define MTK_MAX_BPC 10
24#define MTK_MIN_BPC 3 23#define MTK_MIN_BPC 3
25 24
26void mtk_drm_crtc_commit(struct drm_crtc *crtc); 25void mtk_drm_crtc_commit(struct drm_crtc *crtc);
27void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *ovl); 26void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp);
28int mtk_drm_crtc_create(struct drm_device *drm_dev, 27int mtk_drm_crtc_create(struct drm_device *drm_dev,
29 const enum mtk_ddp_comp_id *path, 28 const enum mtk_ddp_comp_id *path,
30 unsigned int path_len); 29 unsigned int path_len);
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
index 87e4191c250e..546b3e3b300b 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
@@ -106,6 +106,8 @@
106#define OVL1_MOUT_EN_COLOR1 0x1 106#define OVL1_MOUT_EN_COLOR1 0x1
107#define GAMMA_MOUT_EN_RDMA1 0x1 107#define GAMMA_MOUT_EN_RDMA1 0x1
108#define RDMA0_SOUT_DPI0 0x2 108#define RDMA0_SOUT_DPI0 0x2
109#define RDMA0_SOUT_DPI1 0x3
110#define RDMA0_SOUT_DSI1 0x1
109#define RDMA0_SOUT_DSI2 0x4 111#define RDMA0_SOUT_DSI2 0x4
110#define RDMA0_SOUT_DSI3 0x5 112#define RDMA0_SOUT_DSI3 0x5
111#define RDMA1_SOUT_DPI0 0x2 113#define RDMA1_SOUT_DPI0 0x2
@@ -122,6 +124,8 @@
122#define DPI0_SEL_IN_RDMA2 0x3 124#define DPI0_SEL_IN_RDMA2 0x3
123#define DPI1_SEL_IN_RDMA1 (0x1 << 8) 125#define DPI1_SEL_IN_RDMA1 (0x1 << 8)
124#define DPI1_SEL_IN_RDMA2 (0x3 << 8) 126#define DPI1_SEL_IN_RDMA2 (0x3 << 8)
127#define DSI0_SEL_IN_RDMA1 0x1
128#define DSI0_SEL_IN_RDMA2 0x4
125#define DSI1_SEL_IN_RDMA1 0x1 129#define DSI1_SEL_IN_RDMA1 0x1
126#define DSI1_SEL_IN_RDMA2 0x4 130#define DSI1_SEL_IN_RDMA2 0x4
127#define DSI2_SEL_IN_RDMA1 (0x1 << 16) 131#define DSI2_SEL_IN_RDMA1 (0x1 << 16)
@@ -224,6 +228,12 @@ static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur,
224 } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI0) { 228 } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI0) {
225 *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; 229 *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
226 value = RDMA0_SOUT_DPI0; 230 value = RDMA0_SOUT_DPI0;
231 } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI1) {
232 *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
233 value = RDMA0_SOUT_DPI1;
234 } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI1) {
235 *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
236 value = RDMA0_SOUT_DSI1;
227 } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI2) { 237 } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI2) {
228 *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; 238 *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
229 value = RDMA0_SOUT_DSI2; 239 value = RDMA0_SOUT_DSI2;
@@ -282,6 +292,9 @@ static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur,
282 } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) { 292 } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) {
283 *addr = DISP_REG_CONFIG_DPI_SEL_IN; 293 *addr = DISP_REG_CONFIG_DPI_SEL_IN;
284 value = DPI1_SEL_IN_RDMA1; 294 value = DPI1_SEL_IN_RDMA1;
295 } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI0) {
296 *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
297 value = DSI0_SEL_IN_RDMA1;
285 } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) { 298 } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) {
286 *addr = DISP_REG_CONFIG_DSIO_SEL_IN; 299 *addr = DISP_REG_CONFIG_DSIO_SEL_IN;
287 value = DSI1_SEL_IN_RDMA1; 300 value = DSI1_SEL_IN_RDMA1;
@@ -297,8 +310,11 @@ static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur,
297 } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) { 310 } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) {
298 *addr = DISP_REG_CONFIG_DPI_SEL_IN; 311 *addr = DISP_REG_CONFIG_DPI_SEL_IN;
299 value = DPI1_SEL_IN_RDMA2; 312 value = DPI1_SEL_IN_RDMA2;
300 } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) { 313 } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI0) {
301 *addr = DISP_REG_CONFIG_DSIE_SEL_IN; 314 *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
315 value = DSI0_SEL_IN_RDMA2;
316 } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) {
317 *addr = DISP_REG_CONFIG_DSIO_SEL_IN;
302 value = DSI1_SEL_IN_RDMA2; 318 value = DSI1_SEL_IN_RDMA2;
303 } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) { 319 } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) {
304 *addr = DISP_REG_CONFIG_DSIE_SEL_IN; 320 *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
index 7413ffeb3c9d..8399229e6ad2 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
@@ -78,6 +78,7 @@ struct mtk_ddp_comp_funcs {
78 void (*stop)(struct mtk_ddp_comp *comp); 78 void (*stop)(struct mtk_ddp_comp *comp);
79 void (*enable_vblank)(struct mtk_ddp_comp *comp, struct drm_crtc *crtc); 79 void (*enable_vblank)(struct mtk_ddp_comp *comp, struct drm_crtc *crtc);
80 void (*disable_vblank)(struct mtk_ddp_comp *comp); 80 void (*disable_vblank)(struct mtk_ddp_comp *comp);
81 unsigned int (*layer_nr)(struct mtk_ddp_comp *comp);
81 void (*layer_on)(struct mtk_ddp_comp *comp, unsigned int idx); 82 void (*layer_on)(struct mtk_ddp_comp *comp, unsigned int idx);
82 void (*layer_off)(struct mtk_ddp_comp *comp, unsigned int idx); 83 void (*layer_off)(struct mtk_ddp_comp *comp, unsigned int idx);
83 void (*layer_config)(struct mtk_ddp_comp *comp, unsigned int idx, 84 void (*layer_config)(struct mtk_ddp_comp *comp, unsigned int idx,
@@ -128,6 +129,14 @@ static inline void mtk_ddp_comp_disable_vblank(struct mtk_ddp_comp *comp)
128 comp->funcs->disable_vblank(comp); 129 comp->funcs->disable_vblank(comp);
129} 130}
130 131
132static inline unsigned int mtk_ddp_comp_layer_nr(struct mtk_ddp_comp *comp)
133{
134 if (comp->funcs && comp->funcs->layer_nr)
135 return comp->funcs->layer_nr(comp);
136
137 return 0;
138}
139
131static inline void mtk_ddp_comp_layer_on(struct mtk_ddp_comp *comp, 140static inline void mtk_ddp_comp_layer_on(struct mtk_ddp_comp *comp,
132 unsigned int idx) 141 unsigned int idx)
133{ 142{
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 39721119713b..47ec604289b7 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -381,7 +381,7 @@ static int mtk_drm_bind(struct device *dev)
381err_deinit: 381err_deinit:
382 mtk_drm_kms_deinit(drm); 382 mtk_drm_kms_deinit(drm);
383err_free: 383err_free:
384 drm_dev_unref(drm); 384 drm_dev_put(drm);
385 return ret; 385 return ret;
386} 386}
387 387
@@ -390,7 +390,7 @@ static void mtk_drm_unbind(struct device *dev)
390 struct mtk_drm_private *private = dev_get_drvdata(dev); 390 struct mtk_drm_private *private = dev_get_drvdata(dev);
391 391
392 drm_dev_unregister(private->drm); 392 drm_dev_unregister(private->drm);
393 drm_dev_unref(private->drm); 393 drm_dev_put(private->drm);
394 private->drm = NULL; 394 private->drm = NULL;
395} 395}
396 396
@@ -564,7 +564,7 @@ static int mtk_drm_remove(struct platform_device *pdev)
564 564
565 drm_dev_unregister(drm); 565 drm_dev_unregister(drm);
566 mtk_drm_kms_deinit(drm); 566 mtk_drm_kms_deinit(drm);
567 drm_dev_unref(drm); 567 drm_dev_put(drm);
568 568
569 component_master_del(&pdev->dev, &mtk_drm_ops); 569 component_master_del(&pdev->dev, &mtk_drm_ops);
570 pm_runtime_disable(&pdev->dev); 570 pm_runtime_disable(&pdev->dev);
@@ -580,29 +580,24 @@ static int mtk_drm_sys_suspend(struct device *dev)
580{ 580{
581 struct mtk_drm_private *private = dev_get_drvdata(dev); 581 struct mtk_drm_private *private = dev_get_drvdata(dev);
582 struct drm_device *drm = private->drm; 582 struct drm_device *drm = private->drm;
583 int ret;
583 584
584 drm_kms_helper_poll_disable(drm); 585 ret = drm_mode_config_helper_suspend(drm);
585
586 private->suspend_state = drm_atomic_helper_suspend(drm);
587 if (IS_ERR(private->suspend_state)) {
588 drm_kms_helper_poll_enable(drm);
589 return PTR_ERR(private->suspend_state);
590 }
591
592 DRM_DEBUG_DRIVER("mtk_drm_sys_suspend\n"); 586 DRM_DEBUG_DRIVER("mtk_drm_sys_suspend\n");
593 return 0; 587
588 return ret;
594} 589}
595 590
596static int mtk_drm_sys_resume(struct device *dev) 591static int mtk_drm_sys_resume(struct device *dev)
597{ 592{
598 struct mtk_drm_private *private = dev_get_drvdata(dev); 593 struct mtk_drm_private *private = dev_get_drvdata(dev);
599 struct drm_device *drm = private->drm; 594 struct drm_device *drm = private->drm;
595 int ret;
600 596
601 drm_atomic_helper_resume(drm, private->suspend_state); 597 ret = drm_mode_config_helper_resume(drm);
602 drm_kms_helper_poll_enable(drm);
603
604 DRM_DEBUG_DRIVER("mtk_drm_sys_resume\n"); 598 DRM_DEBUG_DRIVER("mtk_drm_sys_resume\n");
605 return 0; 599
600 return ret;
606} 601}
607#endif 602#endif
608 603
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 8412119bd940..5691dfa1db6f 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -1123,17 +1123,21 @@ nv50_mstm_enable(struct nv50_mstm *mstm, u8 dpcd, int state)
1123 int ret; 1123 int ret;
1124 1124
1125 if (dpcd >= 0x12) { 1125 if (dpcd >= 0x12) {
1126 ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CTRL, &dpcd); 1126 /* Even if we're enabling MST, start with disabling the
1127 * branching unit to clear any sink-side MST topology state
1128 * that wasn't set by us
1129 */
1130 ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, 0);
1127 if (ret < 0) 1131 if (ret < 0)
1128 return ret; 1132 return ret;
1129 1133
1130 dpcd &= ~DP_MST_EN; 1134 if (state) {
1131 if (state) 1135 /* Now, start initializing */
1132 dpcd |= DP_MST_EN; 1136 ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL,
1133 1137 DP_MST_EN);
1134 ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, dpcd); 1138 if (ret < 0)
1135 if (ret < 0) 1139 return ret;
1136 return ret; 1140 }
1137 } 1141 }
1138 1142
1139 return nvif_mthd(disp, 0, &args, sizeof(args)); 1143 return nvif_mthd(disp, 0, &args, sizeof(args));
@@ -1142,31 +1146,58 @@ nv50_mstm_enable(struct nv50_mstm *mstm, u8 dpcd, int state)
1142int 1146int
1143nv50_mstm_detect(struct nv50_mstm *mstm, u8 dpcd[8], int allow) 1147nv50_mstm_detect(struct nv50_mstm *mstm, u8 dpcd[8], int allow)
1144{ 1148{
1145 int ret, state = 0; 1149 struct drm_dp_aux *aux;
1150 int ret;
1151 bool old_state, new_state;
1152 u8 mstm_ctrl;
1146 1153
1147 if (!mstm) 1154 if (!mstm)
1148 return 0; 1155 return 0;
1149 1156
1150 if (dpcd[0] >= 0x12) { 1157 mutex_lock(&mstm->mgr.lock);
1151 ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CAP, &dpcd[1]); 1158
1159 old_state = mstm->mgr.mst_state;
1160 new_state = old_state;
1161 aux = mstm->mgr.aux;
1162
1163 if (old_state) {
1164 /* Just check that the MST hub is still as we expect it */
1165 ret = drm_dp_dpcd_readb(aux, DP_MSTM_CTRL, &mstm_ctrl);
1166 if (ret < 0 || !(mstm_ctrl & DP_MST_EN)) {
1167 DRM_DEBUG_KMS("Hub gone, disabling MST topology\n");
1168 new_state = false;
1169 }
1170 } else if (dpcd[0] >= 0x12) {
1171 ret = drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &dpcd[1]);
1152 if (ret < 0) 1172 if (ret < 0)
1153 return ret; 1173 goto probe_error;
1154 1174
1155 if (!(dpcd[1] & DP_MST_CAP)) 1175 if (!(dpcd[1] & DP_MST_CAP))
1156 dpcd[0] = 0x11; 1176 dpcd[0] = 0x11;
1157 else 1177 else
1158 state = allow; 1178 new_state = allow;
1179 }
1180
1181 if (new_state == old_state) {
1182 mutex_unlock(&mstm->mgr.lock);
1183 return new_state;
1159 } 1184 }
1160 1185
1161 ret = nv50_mstm_enable(mstm, dpcd[0], state); 1186 ret = nv50_mstm_enable(mstm, dpcd[0], new_state);
1162 if (ret) 1187 if (ret)
1163 return ret; 1188 goto probe_error;
1189
1190 mutex_unlock(&mstm->mgr.lock);
1164 1191
1165 ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, state); 1192 ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, new_state);
1166 if (ret) 1193 if (ret)
1167 return nv50_mstm_enable(mstm, dpcd[0], 0); 1194 return nv50_mstm_enable(mstm, dpcd[0], 0);
1168 1195
1169 return mstm->mgr.mst_state; 1196 return new_state;
1197
1198probe_error:
1199 mutex_unlock(&mstm->mgr.lock);
1200 return ret;
1170} 1201}
1171 1202
1172static void 1203static void
@@ -2074,7 +2105,7 @@ nv50_disp_atomic_state_alloc(struct drm_device *dev)
2074static const struct drm_mode_config_funcs 2105static const struct drm_mode_config_funcs
2075nv50_disp_func = { 2106nv50_disp_func = {
2076 .fb_create = nouveau_user_framebuffer_create, 2107 .fb_create = nouveau_user_framebuffer_create,
2077 .output_poll_changed = drm_fb_helper_output_poll_changed, 2108 .output_poll_changed = nouveau_fbcon_output_poll_changed,
2078 .atomic_check = nv50_disp_atomic_check, 2109 .atomic_check = nv50_disp_atomic_check,
2079 .atomic_commit = nv50_disp_atomic_commit, 2110 .atomic_commit = nv50_disp_atomic_commit,
2080 .atomic_state_alloc = nv50_disp_atomic_state_alloc, 2111 .atomic_state_alloc = nv50_disp_atomic_state_alloc,
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 51932c72334e..247f72cc4d10 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -409,59 +409,45 @@ static struct nouveau_encoder *
409nouveau_connector_ddc_detect(struct drm_connector *connector) 409nouveau_connector_ddc_detect(struct drm_connector *connector)
410{ 410{
411 struct drm_device *dev = connector->dev; 411 struct drm_device *dev = connector->dev;
412 struct nouveau_connector *nv_connector = nouveau_connector(connector); 412 struct nouveau_encoder *nv_encoder = NULL, *found = NULL;
413 struct nouveau_drm *drm = nouveau_drm(dev);
414 struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device);
415 struct nouveau_encoder *nv_encoder = NULL;
416 struct drm_encoder *encoder; 413 struct drm_encoder *encoder;
417 int i, panel = -ENODEV; 414 int i, ret;
418 415 bool switcheroo_ddc = false;
419 /* eDP panels need powering on by us (if the VBIOS doesn't default it
420 * to on) before doing any AUX channel transactions. LVDS panel power
421 * is handled by the SOR itself, and not required for LVDS DDC.
422 */
423 if (nv_connector->type == DCB_CONNECTOR_eDP) {
424 panel = nvkm_gpio_get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff);
425 if (panel == 0) {
426 nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
427 msleep(300);
428 }
429 }
430 416
431 drm_connector_for_each_possible_encoder(connector, encoder, i) { 417 drm_connector_for_each_possible_encoder(connector, encoder, i) {
432 nv_encoder = nouveau_encoder(encoder); 418 nv_encoder = nouveau_encoder(encoder);
433 419
434 if (nv_encoder->dcb->type == DCB_OUTPUT_DP) { 420 switch (nv_encoder->dcb->type) {
435 int ret = nouveau_dp_detect(nv_encoder); 421 case DCB_OUTPUT_DP:
422 ret = nouveau_dp_detect(nv_encoder);
436 if (ret == NOUVEAU_DP_MST) 423 if (ret == NOUVEAU_DP_MST)
437 return NULL; 424 return NULL;
438 if (ret == NOUVEAU_DP_SST) 425 else if (ret == NOUVEAU_DP_SST)
439 break; 426 found = nv_encoder;
440 } else 427
441 if ((vga_switcheroo_handler_flags() & 428 break;
442 VGA_SWITCHEROO_CAN_SWITCH_DDC) && 429 case DCB_OUTPUT_LVDS:
443 nv_encoder->dcb->type == DCB_OUTPUT_LVDS && 430 switcheroo_ddc = !!(vga_switcheroo_handler_flags() &
444 nv_encoder->i2c) { 431 VGA_SWITCHEROO_CAN_SWITCH_DDC);
445 int ret; 432 /* fall-through */
446 vga_switcheroo_lock_ddc(dev->pdev); 433 default:
447 ret = nvkm_probe_i2c(nv_encoder->i2c, 0x50); 434 if (!nv_encoder->i2c)
448 vga_switcheroo_unlock_ddc(dev->pdev);
449 if (ret)
450 break; 435 break;
451 } else 436
452 if (nv_encoder->i2c) { 437 if (switcheroo_ddc)
438 vga_switcheroo_lock_ddc(dev->pdev);
453 if (nvkm_probe_i2c(nv_encoder->i2c, 0x50)) 439 if (nvkm_probe_i2c(nv_encoder->i2c, 0x50))
454 break; 440 found = nv_encoder;
441 if (switcheroo_ddc)
442 vga_switcheroo_unlock_ddc(dev->pdev);
443
444 break;
455 } 445 }
446 if (found)
447 break;
456 } 448 }
457 449
458 /* eDP panel not detected, restore panel power GPIO to previous 450 return found;
459 * state to avoid confusing the SOR for other output types.
460 */
461 if (!nv_encoder && panel == 0)
462 nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, panel);
463
464 return nv_encoder;
465} 451}
466 452
467static struct nouveau_encoder * 453static struct nouveau_encoder *
@@ -555,12 +541,16 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
555 nv_connector->edid = NULL; 541 nv_connector->edid = NULL;
556 } 542 }
557 543
558 /* Outputs are only polled while runtime active, so acquiring a 544 /* Outputs are only polled while runtime active, so resuming the
559 * runtime PM ref here is unnecessary (and would deadlock upon 545 * device here is unnecessary (and would deadlock upon runtime suspend
560 * runtime suspend because it waits for polling to finish). 546 * because it waits for polling to finish). We do however, want to
547 * prevent the autosuspend timer from elapsing during this operation
548 * if possible.
561 */ 549 */
562 if (!drm_kms_helper_is_poll_worker()) { 550 if (drm_kms_helper_is_poll_worker()) {
563 ret = pm_runtime_get_sync(connector->dev->dev); 551 pm_runtime_get_noresume(dev->dev);
552 } else {
553 ret = pm_runtime_get_sync(dev->dev);
564 if (ret < 0 && ret != -EACCES) 554 if (ret < 0 && ret != -EACCES)
565 return conn_status; 555 return conn_status;
566 } 556 }
@@ -638,10 +628,8 @@ detect_analog:
638 628
639 out: 629 out:
640 630
641 if (!drm_kms_helper_is_poll_worker()) { 631 pm_runtime_mark_last_busy(dev->dev);
642 pm_runtime_mark_last_busy(connector->dev->dev); 632 pm_runtime_put_autosuspend(dev->dev);
643 pm_runtime_put_autosuspend(connector->dev->dev);
644 }
645 633
646 return conn_status; 634 return conn_status;
647} 635}
@@ -1105,6 +1093,26 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
1105 const struct nvif_notify_conn_rep_v0 *rep = notify->data; 1093 const struct nvif_notify_conn_rep_v0 *rep = notify->data;
1106 const char *name = connector->name; 1094 const char *name = connector->name;
1107 struct nouveau_encoder *nv_encoder; 1095 struct nouveau_encoder *nv_encoder;
1096 int ret;
1097
1098 ret = pm_runtime_get(drm->dev->dev);
1099 if (ret == 0) {
1100 /* We can't block here if there's a pending PM request
1101 * running, as we'll deadlock nouveau_display_fini() when it
1102 * calls nvif_put() on our nvif_notify struct. So, simply
1103 * defer the hotplug event until the device finishes resuming
1104 */
1105 NV_DEBUG(drm, "Deferring HPD on %s until runtime resume\n",
1106 name);
1107 schedule_work(&drm->hpd_work);
1108
1109 pm_runtime_put_noidle(drm->dev->dev);
1110 return NVIF_NOTIFY_KEEP;
1111 } else if (ret != 1 && ret != -EACCES) {
1112 NV_WARN(drm, "HPD on %s dropped due to RPM failure: %d\n",
1113 name, ret);
1114 return NVIF_NOTIFY_DROP;
1115 }
1108 1116
1109 if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) { 1117 if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) {
1110 NV_DEBUG(drm, "service %s\n", name); 1118 NV_DEBUG(drm, "service %s\n", name);
@@ -1122,6 +1130,8 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
1122 drm_helper_hpd_irq_event(connector->dev); 1130 drm_helper_hpd_irq_event(connector->dev);
1123 } 1131 }
1124 1132
1133 pm_runtime_mark_last_busy(drm->dev->dev);
1134 pm_runtime_put_autosuspend(drm->dev->dev);
1125 return NVIF_NOTIFY_KEEP; 1135 return NVIF_NOTIFY_KEEP;
1126} 1136}
1127 1137
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 139368b31916..540c0cbbfcee 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -293,7 +293,7 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
293 293
294static const struct drm_mode_config_funcs nouveau_mode_config_funcs = { 294static const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
295 .fb_create = nouveau_user_framebuffer_create, 295 .fb_create = nouveau_user_framebuffer_create,
296 .output_poll_changed = drm_fb_helper_output_poll_changed, 296 .output_poll_changed = nouveau_fbcon_output_poll_changed,
297}; 297};
298 298
299 299
@@ -355,8 +355,6 @@ nouveau_display_hpd_work(struct work_struct *work)
355 pm_runtime_get_sync(drm->dev->dev); 355 pm_runtime_get_sync(drm->dev->dev);
356 356
357 drm_helper_hpd_irq_event(drm->dev); 357 drm_helper_hpd_irq_event(drm->dev);
358 /* enable polling for external displays */
359 drm_kms_helper_poll_enable(drm->dev);
360 358
361 pm_runtime_mark_last_busy(drm->dev->dev); 359 pm_runtime_mark_last_busy(drm->dev->dev);
362 pm_runtime_put_sync(drm->dev->dev); 360 pm_runtime_put_sync(drm->dev->dev);
@@ -379,15 +377,29 @@ nouveau_display_acpi_ntfy(struct notifier_block *nb, unsigned long val,
379{ 377{
380 struct nouveau_drm *drm = container_of(nb, typeof(*drm), acpi_nb); 378 struct nouveau_drm *drm = container_of(nb, typeof(*drm), acpi_nb);
381 struct acpi_bus_event *info = data; 379 struct acpi_bus_event *info = data;
380 int ret;
382 381
383 if (!strcmp(info->device_class, ACPI_VIDEO_CLASS)) { 382 if (!strcmp(info->device_class, ACPI_VIDEO_CLASS)) {
384 if (info->type == ACPI_VIDEO_NOTIFY_PROBE) { 383 if (info->type == ACPI_VIDEO_NOTIFY_PROBE) {
385 /* 384 ret = pm_runtime_get(drm->dev->dev);
386 * This may be the only indication we receive of a 385 if (ret == 1 || ret == -EACCES) {
387 * connector hotplug on a runtime suspended GPU, 386 /* If the GPU is already awake, or in a state
388 * schedule hpd_work to check. 387 * where we can't wake it up, it can handle
389 */ 388 * it's own hotplug events.
390 schedule_work(&drm->hpd_work); 389 */
390 pm_runtime_put_autosuspend(drm->dev->dev);
391 } else if (ret == 0) {
392 /* This may be the only indication we receive
393 * of a connector hotplug on a runtime
394 * suspended GPU, schedule hpd_work to check.
395 */
396 NV_DEBUG(drm, "ACPI requested connector reprobe\n");
397 schedule_work(&drm->hpd_work);
398 pm_runtime_put_noidle(drm->dev->dev);
399 } else {
400 NV_WARN(drm, "Dropped ACPI reprobe event due to RPM error: %d\n",
401 ret);
402 }
391 403
392 /* acpi-video should not generate keypresses for this */ 404 /* acpi-video should not generate keypresses for this */
393 return NOTIFY_BAD; 405 return NOTIFY_BAD;
@@ -411,6 +423,11 @@ nouveau_display_init(struct drm_device *dev)
411 if (ret) 423 if (ret)
412 return ret; 424 return ret;
413 425
426 /* enable connector detection and polling for connectors without HPD
427 * support
428 */
429 drm_kms_helper_poll_enable(dev);
430
414 /* enable hotplug interrupts */ 431 /* enable hotplug interrupts */
415 drm_connector_list_iter_begin(dev, &conn_iter); 432 drm_connector_list_iter_begin(dev, &conn_iter);
416 nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) { 433 nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
@@ -425,7 +442,7 @@ nouveau_display_init(struct drm_device *dev)
425} 442}
426 443
427void 444void
428nouveau_display_fini(struct drm_device *dev, bool suspend) 445nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime)
429{ 446{
430 struct nouveau_display *disp = nouveau_display(dev); 447 struct nouveau_display *disp = nouveau_display(dev);
431 struct nouveau_drm *drm = nouveau_drm(dev); 448 struct nouveau_drm *drm = nouveau_drm(dev);
@@ -450,6 +467,9 @@ nouveau_display_fini(struct drm_device *dev, bool suspend)
450 } 467 }
451 drm_connector_list_iter_end(&conn_iter); 468 drm_connector_list_iter_end(&conn_iter);
452 469
470 if (!runtime)
471 cancel_work_sync(&drm->hpd_work);
472
453 drm_kms_helper_poll_disable(dev); 473 drm_kms_helper_poll_disable(dev);
454 disp->fini(dev); 474 disp->fini(dev);
455} 475}
@@ -618,11 +638,11 @@ nouveau_display_suspend(struct drm_device *dev, bool runtime)
618 } 638 }
619 } 639 }
620 640
621 nouveau_display_fini(dev, true); 641 nouveau_display_fini(dev, true, runtime);
622 return 0; 642 return 0;
623 } 643 }
624 644
625 nouveau_display_fini(dev, true); 645 nouveau_display_fini(dev, true, runtime);
626 646
627 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 647 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
628 struct nouveau_framebuffer *nouveau_fb; 648 struct nouveau_framebuffer *nouveau_fb;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 54aa7c3fa42d..ff92b54ce448 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -62,7 +62,7 @@ nouveau_display(struct drm_device *dev)
62int nouveau_display_create(struct drm_device *dev); 62int nouveau_display_create(struct drm_device *dev);
63void nouveau_display_destroy(struct drm_device *dev); 63void nouveau_display_destroy(struct drm_device *dev);
64int nouveau_display_init(struct drm_device *dev); 64int nouveau_display_init(struct drm_device *dev);
65void nouveau_display_fini(struct drm_device *dev, bool suspend); 65void nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime);
66int nouveau_display_suspend(struct drm_device *dev, bool runtime); 66int nouveau_display_suspend(struct drm_device *dev, bool runtime);
67void nouveau_display_resume(struct drm_device *dev, bool runtime); 67void nouveau_display_resume(struct drm_device *dev, bool runtime);
68int nouveau_display_vblank_enable(struct drm_device *, unsigned int); 68int nouveau_display_vblank_enable(struct drm_device *, unsigned int);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index c7ec86d6c3c9..74d2283f2c28 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -230,7 +230,7 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
230 mutex_unlock(&drm->master.lock); 230 mutex_unlock(&drm->master.lock);
231 } 231 }
232 if (ret) { 232 if (ret) {
233 NV_ERROR(drm, "Client allocation failed: %d\n", ret); 233 NV_PRINTK(err, cli, "Client allocation failed: %d\n", ret);
234 goto done; 234 goto done;
235 } 235 }
236 236
@@ -240,37 +240,37 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
240 }, sizeof(struct nv_device_v0), 240 }, sizeof(struct nv_device_v0),
241 &cli->device); 241 &cli->device);
242 if (ret) { 242 if (ret) {
243 NV_ERROR(drm, "Device allocation failed: %d\n", ret); 243 NV_PRINTK(err, cli, "Device allocation failed: %d\n", ret);
244 goto done; 244 goto done;
245 } 245 }
246 246
247 ret = nvif_mclass(&cli->device.object, mmus); 247 ret = nvif_mclass(&cli->device.object, mmus);
248 if (ret < 0) { 248 if (ret < 0) {
249 NV_ERROR(drm, "No supported MMU class\n"); 249 NV_PRINTK(err, cli, "No supported MMU class\n");
250 goto done; 250 goto done;
251 } 251 }
252 252
253 ret = nvif_mmu_init(&cli->device.object, mmus[ret].oclass, &cli->mmu); 253 ret = nvif_mmu_init(&cli->device.object, mmus[ret].oclass, &cli->mmu);
254 if (ret) { 254 if (ret) {
255 NV_ERROR(drm, "MMU allocation failed: %d\n", ret); 255 NV_PRINTK(err, cli, "MMU allocation failed: %d\n", ret);
256 goto done; 256 goto done;
257 } 257 }
258 258
259 ret = nvif_mclass(&cli->mmu.object, vmms); 259 ret = nvif_mclass(&cli->mmu.object, vmms);
260 if (ret < 0) { 260 if (ret < 0) {
261 NV_ERROR(drm, "No supported VMM class\n"); 261 NV_PRINTK(err, cli, "No supported VMM class\n");
262 goto done; 262 goto done;
263 } 263 }
264 264
265 ret = nouveau_vmm_init(cli, vmms[ret].oclass, &cli->vmm); 265 ret = nouveau_vmm_init(cli, vmms[ret].oclass, &cli->vmm);
266 if (ret) { 266 if (ret) {
267 NV_ERROR(drm, "VMM allocation failed: %d\n", ret); 267 NV_PRINTK(err, cli, "VMM allocation failed: %d\n", ret);
268 goto done; 268 goto done;
269 } 269 }
270 270
271 ret = nvif_mclass(&cli->mmu.object, mems); 271 ret = nvif_mclass(&cli->mmu.object, mems);
272 if (ret < 0) { 272 if (ret < 0) {
273 NV_ERROR(drm, "No supported MEM class\n"); 273 NV_PRINTK(err, cli, "No supported MEM class\n");
274 goto done; 274 goto done;
275 } 275 }
276 276
@@ -592,10 +592,8 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
592 pm_runtime_allow(dev->dev); 592 pm_runtime_allow(dev->dev);
593 pm_runtime_mark_last_busy(dev->dev); 593 pm_runtime_mark_last_busy(dev->dev);
594 pm_runtime_put(dev->dev); 594 pm_runtime_put(dev->dev);
595 } else {
596 /* enable polling for external displays */
597 drm_kms_helper_poll_enable(dev);
598 } 595 }
596
599 return 0; 597 return 0;
600 598
601fail_dispinit: 599fail_dispinit:
@@ -629,7 +627,7 @@ nouveau_drm_unload(struct drm_device *dev)
629 nouveau_debugfs_fini(drm); 627 nouveau_debugfs_fini(drm);
630 628
631 if (dev->mode_config.num_crtc) 629 if (dev->mode_config.num_crtc)
632 nouveau_display_fini(dev, false); 630 nouveau_display_fini(dev, false, false);
633 nouveau_display_destroy(dev); 631 nouveau_display_destroy(dev);
634 632
635 nouveau_bios_takedown(dev); 633 nouveau_bios_takedown(dev);
@@ -835,7 +833,6 @@ nouveau_pmops_runtime_suspend(struct device *dev)
835 return -EBUSY; 833 return -EBUSY;
836 } 834 }
837 835
838 drm_kms_helper_poll_disable(drm_dev);
839 nouveau_switcheroo_optimus_dsm(); 836 nouveau_switcheroo_optimus_dsm();
840 ret = nouveau_do_suspend(drm_dev, true); 837 ret = nouveau_do_suspend(drm_dev, true);
841 pci_save_state(pdev); 838 pci_save_state(pdev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 844498c4267c..0f64c0a1d4b3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -466,6 +466,7 @@ nouveau_fbcon_set_suspend_work(struct work_struct *work)
466 console_unlock(); 466 console_unlock();
467 467
468 if (state == FBINFO_STATE_RUNNING) { 468 if (state == FBINFO_STATE_RUNNING) {
469 nouveau_fbcon_hotplug_resume(drm->fbcon);
469 pm_runtime_mark_last_busy(drm->dev->dev); 470 pm_runtime_mark_last_busy(drm->dev->dev);
470 pm_runtime_put_sync(drm->dev->dev); 471 pm_runtime_put_sync(drm->dev->dev);
471 } 472 }
@@ -487,6 +488,61 @@ nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
487 schedule_work(&drm->fbcon_work); 488 schedule_work(&drm->fbcon_work);
488} 489}
489 490
491void
492nouveau_fbcon_output_poll_changed(struct drm_device *dev)
493{
494 struct nouveau_drm *drm = nouveau_drm(dev);
495 struct nouveau_fbdev *fbcon = drm->fbcon;
496 int ret;
497
498 if (!fbcon)
499 return;
500
501 mutex_lock(&fbcon->hotplug_lock);
502
503 ret = pm_runtime_get(dev->dev);
504 if (ret == 1 || ret == -EACCES) {
505 drm_fb_helper_hotplug_event(&fbcon->helper);
506
507 pm_runtime_mark_last_busy(dev->dev);
508 pm_runtime_put_autosuspend(dev->dev);
509 } else if (ret == 0) {
510 /* If the GPU was already in the process of suspending before
511 * this event happened, then we can't block here as we'll
512 * deadlock the runtime pmops since they wait for us to
513 * finish. So, just defer this event for when we runtime
514 * resume again. It will be handled by fbcon_work.
515 */
516 NV_DEBUG(drm, "fbcon HPD event deferred until runtime resume\n");
517 fbcon->hotplug_waiting = true;
518 pm_runtime_put_noidle(drm->dev->dev);
519 } else {
520 DRM_WARN("fbcon HPD event lost due to RPM failure: %d\n",
521 ret);
522 }
523
524 mutex_unlock(&fbcon->hotplug_lock);
525}
526
527void
528nouveau_fbcon_hotplug_resume(struct nouveau_fbdev *fbcon)
529{
530 struct nouveau_drm *drm;
531
532 if (!fbcon)
533 return;
534 drm = nouveau_drm(fbcon->helper.dev);
535
536 mutex_lock(&fbcon->hotplug_lock);
537 if (fbcon->hotplug_waiting) {
538 fbcon->hotplug_waiting = false;
539
540 NV_DEBUG(drm, "Handling deferred fbcon HPD events\n");
541 drm_fb_helper_hotplug_event(&fbcon->helper);
542 }
543 mutex_unlock(&fbcon->hotplug_lock);
544}
545
490int 546int
491nouveau_fbcon_init(struct drm_device *dev) 547nouveau_fbcon_init(struct drm_device *dev)
492{ 548{
@@ -505,6 +561,7 @@ nouveau_fbcon_init(struct drm_device *dev)
505 561
506 drm->fbcon = fbcon; 562 drm->fbcon = fbcon;
507 INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work); 563 INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work);
564 mutex_init(&fbcon->hotplug_lock);
508 565
509 drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs); 566 drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);
510 567
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index a6f192ea3fa6..db9d52047ef8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -41,6 +41,9 @@ struct nouveau_fbdev {
41 struct nvif_object gdi; 41 struct nvif_object gdi;
42 struct nvif_object blit; 42 struct nvif_object blit;
43 struct nvif_object twod; 43 struct nvif_object twod;
44
45 struct mutex hotplug_lock;
46 bool hotplug_waiting;
44}; 47};
45 48
46void nouveau_fbcon_restore(void); 49void nouveau_fbcon_restore(void);
@@ -68,6 +71,8 @@ void nouveau_fbcon_set_suspend(struct drm_device *dev, int state);
68void nouveau_fbcon_accel_save_disable(struct drm_device *dev); 71void nouveau_fbcon_accel_save_disable(struct drm_device *dev);
69void nouveau_fbcon_accel_restore(struct drm_device *dev); 72void nouveau_fbcon_accel_restore(struct drm_device *dev);
70 73
74void nouveau_fbcon_output_poll_changed(struct drm_device *dev);
75void nouveau_fbcon_hotplug_resume(struct nouveau_fbdev *fbcon);
71extern int nouveau_nofbaccel; 76extern int nouveau_nofbaccel;
72 77
73#endif /* __NV50_FBCON_H__ */ 78#endif /* __NV50_FBCON_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index 3da5a4305aa4..8f1ce4833230 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -46,12 +46,10 @@ nouveau_switcheroo_set_state(struct pci_dev *pdev,
46 pr_err("VGA switcheroo: switched nouveau on\n"); 46 pr_err("VGA switcheroo: switched nouveau on\n");
47 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 47 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
48 nouveau_pmops_resume(&pdev->dev); 48 nouveau_pmops_resume(&pdev->dev);
49 drm_kms_helper_poll_enable(dev);
50 dev->switch_power_state = DRM_SWITCH_POWER_ON; 49 dev->switch_power_state = DRM_SWITCH_POWER_ON;
51 } else { 50 } else {
52 pr_err("VGA switcheroo: switched nouveau off\n"); 51 pr_err("VGA switcheroo: switched nouveau off\n");
53 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 52 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
54 drm_kms_helper_poll_disable(dev);
55 nouveau_switcheroo_optimus_dsm(); 53 nouveau_switcheroo_optimus_dsm();
56 nouveau_pmops_suspend(&pdev->dev); 54 nouveau_pmops_suspend(&pdev->dev);
57 dev->switch_power_state = DRM_SWITCH_POWER_OFF; 55 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
index 32fa94a9773f..cbd33e87b799 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
@@ -275,6 +275,7 @@ nvkm_disp_oneinit(struct nvkm_engine *engine)
275 struct nvkm_outp *outp, *outt, *pair; 275 struct nvkm_outp *outp, *outt, *pair;
276 struct nvkm_conn *conn; 276 struct nvkm_conn *conn;
277 struct nvkm_head *head; 277 struct nvkm_head *head;
278 struct nvkm_ior *ior;
278 struct nvbios_connE connE; 279 struct nvbios_connE connE;
279 struct dcb_output dcbE; 280 struct dcb_output dcbE;
280 u8 hpd = 0, ver, hdr; 281 u8 hpd = 0, ver, hdr;
@@ -399,6 +400,19 @@ nvkm_disp_oneinit(struct nvkm_engine *engine)
399 return ret; 400 return ret;
400 } 401 }
401 402
403 /* Enforce identity-mapped SOR assignment for panels, which have
404 * certain bits (ie. backlight controls) wired to a specific SOR.
405 */
406 list_for_each_entry(outp, &disp->outp, head) {
407 if (outp->conn->info.type == DCB_CONNECTOR_LVDS ||
408 outp->conn->info.type == DCB_CONNECTOR_eDP) {
409 ior = nvkm_ior_find(disp, SOR, ffs(outp->info.or) - 1);
410 if (!WARN_ON(!ior))
411 ior->identity = true;
412 outp->identity = true;
413 }
414 }
415
402 i = 0; 416 i = 0;
403 list_for_each_entry(head, &disp->head, head) 417 list_for_each_entry(head, &disp->head, head)
404 i = max(i, head->id + 1); 418 i = max(i, head->id + 1);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
index 7c5bed29ffef..5f301e632599 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
@@ -28,6 +28,7 @@
28 28
29#include <subdev/bios.h> 29#include <subdev/bios.h>
30#include <subdev/bios/init.h> 30#include <subdev/bios/init.h>
31#include <subdev/gpio.h>
31#include <subdev/i2c.h> 32#include <subdev/i2c.h>
32 33
33#include <nvif/event.h> 34#include <nvif/event.h>
@@ -412,14 +413,10 @@ nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps)
412} 413}
413 414
414static void 415static void
415nvkm_dp_release(struct nvkm_outp *outp, struct nvkm_ior *ior) 416nvkm_dp_disable(struct nvkm_outp *outp, struct nvkm_ior *ior)
416{ 417{
417 struct nvkm_dp *dp = nvkm_dp(outp); 418 struct nvkm_dp *dp = nvkm_dp(outp);
418 419
419 /* Prevent link from being retrained if sink sends an IRQ. */
420 atomic_set(&dp->lt.done, 0);
421 ior->dp.nr = 0;
422
423 /* Execute DisableLT script from DP Info Table. */ 420 /* Execute DisableLT script from DP Info Table. */
424 nvbios_init(&ior->disp->engine.subdev, dp->info.script[4], 421 nvbios_init(&ior->disp->engine.subdev, dp->info.script[4],
425 init.outp = &dp->outp.info; 422 init.outp = &dp->outp.info;
@@ -428,6 +425,16 @@ nvkm_dp_release(struct nvkm_outp *outp, struct nvkm_ior *ior)
428 ); 425 );
429} 426}
430 427
428static void
429nvkm_dp_release(struct nvkm_outp *outp)
430{
431 struct nvkm_dp *dp = nvkm_dp(outp);
432
433 /* Prevent link from being retrained if sink sends an IRQ. */
434 atomic_set(&dp->lt.done, 0);
435 dp->outp.ior->dp.nr = 0;
436}
437
431static int 438static int
432nvkm_dp_acquire(struct nvkm_outp *outp) 439nvkm_dp_acquire(struct nvkm_outp *outp)
433{ 440{
@@ -491,7 +498,7 @@ done:
491 return ret; 498 return ret;
492} 499}
493 500
494static void 501static bool
495nvkm_dp_enable(struct nvkm_dp *dp, bool enable) 502nvkm_dp_enable(struct nvkm_dp *dp, bool enable)
496{ 503{
497 struct nvkm_i2c_aux *aux = dp->aux; 504 struct nvkm_i2c_aux *aux = dp->aux;
@@ -505,7 +512,7 @@ nvkm_dp_enable(struct nvkm_dp *dp, bool enable)
505 512
506 if (!nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, dp->dpcd, 513 if (!nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, dp->dpcd,
507 sizeof(dp->dpcd))) 514 sizeof(dp->dpcd)))
508 return; 515 return true;
509 } 516 }
510 517
511 if (dp->present) { 518 if (dp->present) {
@@ -515,6 +522,7 @@ nvkm_dp_enable(struct nvkm_dp *dp, bool enable)
515 } 522 }
516 523
517 atomic_set(&dp->lt.done, 0); 524 atomic_set(&dp->lt.done, 0);
525 return false;
518} 526}
519 527
520static int 528static int
@@ -555,9 +563,38 @@ nvkm_dp_fini(struct nvkm_outp *outp)
555static void 563static void
556nvkm_dp_init(struct nvkm_outp *outp) 564nvkm_dp_init(struct nvkm_outp *outp)
557{ 565{
566 struct nvkm_gpio *gpio = outp->disp->engine.subdev.device->gpio;
558 struct nvkm_dp *dp = nvkm_dp(outp); 567 struct nvkm_dp *dp = nvkm_dp(outp);
568
559 nvkm_notify_put(&dp->outp.conn->hpd); 569 nvkm_notify_put(&dp->outp.conn->hpd);
560 nvkm_dp_enable(dp, true); 570
571 /* eDP panels need powering on by us (if the VBIOS doesn't default it
572 * to on) before doing any AUX channel transactions. LVDS panel power
573 * is handled by the SOR itself, and not required for LVDS DDC.
574 */
575 if (dp->outp.conn->info.type == DCB_CONNECTOR_eDP) {
576 int power = nvkm_gpio_get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff);
577 if (power == 0)
578 nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
579
580 /* We delay here unconditionally, even if already powered,
581 * because some laptop panels having a significant resume
582 * delay before the panel begins responding.
583 *
584 * This is likely a bit of a hack, but no better idea for
585 * handling this at the moment.
586 */
587 msleep(300);
588
589 /* If the eDP panel can't be detected, we need to restore
590 * the panel power GPIO to avoid breaking another output.
591 */
592 if (!nvkm_dp_enable(dp, true) && power == 0)
593 nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 0);
594 } else {
595 nvkm_dp_enable(dp, true);
596 }
597
561 nvkm_notify_get(&dp->hpd); 598 nvkm_notify_get(&dp->hpd);
562} 599}
563 600
@@ -576,6 +613,7 @@ nvkm_dp_func = {
576 .fini = nvkm_dp_fini, 613 .fini = nvkm_dp_fini,
577 .acquire = nvkm_dp_acquire, 614 .acquire = nvkm_dp_acquire,
578 .release = nvkm_dp_release, 615 .release = nvkm_dp_release,
616 .disable = nvkm_dp_disable,
579}; 617};
580 618
581static int 619static int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
index e0b4e0c5704e..19911211a12a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
@@ -16,6 +16,7 @@ struct nvkm_ior {
16 char name[8]; 16 char name[8];
17 17
18 struct list_head head; 18 struct list_head head;
19 bool identity;
19 20
20 struct nvkm_ior_state { 21 struct nvkm_ior_state {
21 struct nvkm_outp *outp; 22 struct nvkm_outp *outp;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
index f89c7b977aa5..def005dd5fda 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
@@ -501,11 +501,11 @@ nv50_disp_super_2_0(struct nv50_disp *disp, struct nvkm_head *head)
501 nv50_disp_super_ied_off(head, ior, 2); 501 nv50_disp_super_ied_off(head, ior, 2);
502 502
503 /* If we're shutting down the OR's only active head, execute 503 /* If we're shutting down the OR's only active head, execute
504 * the output path's release function. 504 * the output path's disable function.
505 */ 505 */
506 if (ior->arm.head == (1 << head->id)) { 506 if (ior->arm.head == (1 << head->id)) {
507 if ((outp = ior->arm.outp) && outp->func->release) 507 if ((outp = ior->arm.outp) && outp->func->disable)
508 outp->func->release(outp, ior); 508 outp->func->disable(outp, ior);
509 } 509 }
510} 510}
511 511
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
index be9e7f8c3b23..c62030c96fba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
@@ -93,6 +93,8 @@ nvkm_outp_release(struct nvkm_outp *outp, u8 user)
93 if (ior) { 93 if (ior) {
94 outp->acquired &= ~user; 94 outp->acquired &= ~user;
95 if (!outp->acquired) { 95 if (!outp->acquired) {
96 if (outp->func->release && outp->ior)
97 outp->func->release(outp);
96 outp->ior->asy.outp = NULL; 98 outp->ior->asy.outp = NULL;
97 outp->ior = NULL; 99 outp->ior = NULL;
98 } 100 }
@@ -127,17 +129,26 @@ nvkm_outp_acquire(struct nvkm_outp *outp, u8 user)
127 if (proto == UNKNOWN) 129 if (proto == UNKNOWN)
128 return -ENOSYS; 130 return -ENOSYS;
129 131
132 /* Deal with panels requiring identity-mapped SOR assignment. */
133 if (outp->identity) {
134 ior = nvkm_ior_find(outp->disp, SOR, ffs(outp->info.or) - 1);
135 if (WARN_ON(!ior))
136 return -ENOSPC;
137 return nvkm_outp_acquire_ior(outp, user, ior);
138 }
139
130 /* First preference is to reuse the OR that is currently armed 140 /* First preference is to reuse the OR that is currently armed
131 * on HW, if any, in order to prevent unnecessary switching. 141 * on HW, if any, in order to prevent unnecessary switching.
132 */ 142 */
133 list_for_each_entry(ior, &outp->disp->ior, head) { 143 list_for_each_entry(ior, &outp->disp->ior, head) {
134 if (!ior->asy.outp && ior->arm.outp == outp) 144 if (!ior->identity && !ior->asy.outp && ior->arm.outp == outp)
135 return nvkm_outp_acquire_ior(outp, user, ior); 145 return nvkm_outp_acquire_ior(outp, user, ior);
136 } 146 }
137 147
138 /* Failing that, a completely unused OR is the next best thing. */ 148 /* Failing that, a completely unused OR is the next best thing. */
139 list_for_each_entry(ior, &outp->disp->ior, head) { 149 list_for_each_entry(ior, &outp->disp->ior, head) {
140 if (!ior->asy.outp && ior->type == type && !ior->arm.outp && 150 if (!ior->identity &&
151 !ior->asy.outp && ior->type == type && !ior->arm.outp &&
141 (ior->func->route.set || ior->id == __ffs(outp->info.or))) 152 (ior->func->route.set || ior->id == __ffs(outp->info.or)))
142 return nvkm_outp_acquire_ior(outp, user, ior); 153 return nvkm_outp_acquire_ior(outp, user, ior);
143 } 154 }
@@ -146,7 +157,7 @@ nvkm_outp_acquire(struct nvkm_outp *outp, u8 user)
146 * but will be released during the next modeset. 157 * but will be released during the next modeset.
147 */ 158 */
148 list_for_each_entry(ior, &outp->disp->ior, head) { 159 list_for_each_entry(ior, &outp->disp->ior, head) {
149 if (!ior->asy.outp && ior->type == type && 160 if (!ior->identity && !ior->asy.outp && ior->type == type &&
150 (ior->func->route.set || ior->id == __ffs(outp->info.or))) 161 (ior->func->route.set || ior->id == __ffs(outp->info.or)))
151 return nvkm_outp_acquire_ior(outp, user, ior); 162 return nvkm_outp_acquire_ior(outp, user, ior);
152 } 163 }
@@ -245,7 +256,6 @@ nvkm_outp_ctor(const struct nvkm_outp_func *func, struct nvkm_disp *disp,
245 outp->index = index; 256 outp->index = index;
246 outp->info = *dcbE; 257 outp->info = *dcbE;
247 outp->i2c = nvkm_i2c_bus_find(i2c, dcbE->i2c_index); 258 outp->i2c = nvkm_i2c_bus_find(i2c, dcbE->i2c_index);
248 outp->or = ffs(outp->info.or) - 1;
249 259
250 OUTP_DBG(outp, "type %02x loc %d or %d link %d con %x " 260 OUTP_DBG(outp, "type %02x loc %d or %d link %d con %x "
251 "edid %x bus %d head %x", 261 "edid %x bus %d head %x",
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
index ea84d7d5741a..6c8aa5cfed9d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
@@ -13,10 +13,10 @@ struct nvkm_outp {
13 struct dcb_output info; 13 struct dcb_output info;
14 14
15 struct nvkm_i2c_bus *i2c; 15 struct nvkm_i2c_bus *i2c;
16 int or;
17 16
18 struct list_head head; 17 struct list_head head;
19 struct nvkm_conn *conn; 18 struct nvkm_conn *conn;
19 bool identity;
20 20
21 /* Assembly state. */ 21 /* Assembly state. */
22#define NVKM_OUTP_PRIV 1 22#define NVKM_OUTP_PRIV 1
@@ -41,7 +41,8 @@ struct nvkm_outp_func {
41 void (*init)(struct nvkm_outp *); 41 void (*init)(struct nvkm_outp *);
42 void (*fini)(struct nvkm_outp *); 42 void (*fini)(struct nvkm_outp *);
43 int (*acquire)(struct nvkm_outp *); 43 int (*acquire)(struct nvkm_outp *);
44 void (*release)(struct nvkm_outp *, struct nvkm_ior *); 44 void (*release)(struct nvkm_outp *);
45 void (*disable)(struct nvkm_outp *, struct nvkm_ior *);
45}; 46};
46 47
47#define OUTP_MSG(o,l,f,a...) do { \ 48#define OUTP_MSG(o,l,f,a...) do { \
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
index b80618e35491..17235e940ca9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
@@ -86,10 +86,8 @@ pmu_load(struct nv50_devinit *init, u8 type, bool post,
86 struct nvkm_bios *bios = subdev->device->bios; 86 struct nvkm_bios *bios = subdev->device->bios;
87 struct nvbios_pmuR pmu; 87 struct nvbios_pmuR pmu;
88 88
89 if (!nvbios_pmuRm(bios, type, &pmu)) { 89 if (!nvbios_pmuRm(bios, type, &pmu))
90 nvkm_error(subdev, "VBIOS PMU fuc %02x not found\n", type);
91 return -EINVAL; 90 return -EINVAL;
92 }
93 91
94 if (!post) 92 if (!post)
95 return 0; 93 return 0;
@@ -124,29 +122,30 @@ gm200_devinit_post(struct nvkm_devinit *base, bool post)
 		return -EINVAL;
 	}
 
+	/* Upload DEVINIT application from VBIOS onto PMU. */
 	ret = pmu_load(init, 0x04, post, &exec, &args);
-	if (ret)
+	if (ret) {
+		nvkm_error(subdev, "VBIOS PMU/DEVINIT not found\n");
 		return ret;
+	}
 
-	/* upload first chunk of init data */
+	/* Upload tables required by opcodes in boot scripts. */
 	if (post) {
-		// devinit tables
 		u32 pmu = pmu_args(init, args + 0x08, 0x08);
 		u32 img = nvbios_rd16(bios, bit_I.offset + 0x14);
 		u32 len = nvbios_rd16(bios, bit_I.offset + 0x16);
 		pmu_data(init, pmu, img, len);
 	}
 
-	/* upload second chunk of init data */
+	/* Upload boot scripts. */
 	if (post) {
-		// devinit boot scripts
 		u32 pmu = pmu_args(init, args + 0x08, 0x10);
 		u32 img = nvbios_rd16(bios, bit_I.offset + 0x18);
 		u32 len = nvbios_rd16(bios, bit_I.offset + 0x1a);
 		pmu_data(init, pmu, img, len);
 	}
 
-	/* execute init tables */
+	/* Execute DEVINIT. */
 	if (post) {
 		nvkm_wr32(device, 0x10a040, 0x00005000);
 		pmu_exec(init, exec);
@@ -157,8 +156,11 @@ gm200_devinit_post(struct nvkm_devinit *base, bool post)
 		return -ETIMEDOUT;
 	}
 
-	/* load and execute some other ucode image (bios therm?) */
-	return pmu_load(init, 0x01, post, NULL, NULL);
+	/* Optional: Execute PRE_OS application on PMU, which should at
+	 * least take care of fans until a full PMU has been loaded.
+	 */
+	pmu_load(init, 0x01, post, NULL, NULL);
+	return 0;
 }
 
 static const struct nvkm_devinit_func
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
index de269eb482dd..7459def78d50 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -1423,7 +1423,7 @@ nvkm_vmm_get(struct nvkm_vmm *vmm, u8 page, u64 size, struct nvkm_vma **pvma)
 void
 nvkm_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
 {
-	if (vmm->func->part && inst) {
+	if (inst && vmm->func->part) {
 		mutex_lock(&vmm->mutex);
 		vmm->func->part(vmm, inst);
 		mutex_unlock(&vmm->mutex);
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 25b7bd56ae11..1cb41992aaa1 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -335,7 +335,8 @@ static int apple_input_mapping(struct hid_device *hdev, struct hid_input *hi,
 		struct hid_field *field, struct hid_usage *usage,
 		unsigned long **bit, int *max)
 {
-	if (usage->hid == (HID_UP_CUSTOM | 0x0003)) {
+	if (usage->hid == (HID_UP_CUSTOM | 0x0003) ||
+	    usage->hid == (HID_UP_MSVENDOR | 0x0003)) {
 		/* The fn key on Apple USB keyboards */
 		set_bit(EV_REP, hi->input->evbit);
 		hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_FN);
@@ -472,6 +473,12 @@ static const struct hid_device_id apple_devices[] = {
 		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI),
 		.driver_data = APPLE_HAS_FN },
+	{ HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI),
+		.driver_data = APPLE_HAS_FN },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI),
+		.driver_data = APPLE_HAS_FN },
+	{ HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI),
+		.driver_data = APPLE_HAS_FN },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI),
 		.driver_data = APPLE_HAS_FN },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO),
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 3da354af7a0a..44564f61e9cc 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1000,7 +1000,7 @@ int hid_open_report(struct hid_device *device)
 	parser = vzalloc(sizeof(struct hid_parser));
 	if (!parser) {
 		ret = -ENOMEM;
-		goto err;
+		goto alloc_err;
 	}
 
 	parser->device = device;
@@ -1039,6 +1039,7 @@ int hid_open_report(struct hid_device *device)
 		hid_err(device, "unbalanced delimiter at end of report description\n");
 		goto err;
 	}
+	kfree(parser->collection_stack);
 	vfree(parser);
 	device->status |= HID_STAT_PARSED;
 	return 0;
@@ -1047,6 +1048,8 @@ int hid_open_report(struct hid_device *device)
 
 	hid_err(device, "item fetching failed at offset %d\n", (int)(end - start));
 err:
+	kfree(parser->collection_stack);
+alloc_err:
 	vfree(parser);
 	hid_close_report(device);
 	return ret;
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 79bdf0c7e351..5146ee029db4 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -88,6 +88,7 @@
 #define USB_DEVICE_ID_ANTON_TOUCH_PAD	0x3101
 
 #define USB_VENDOR_ID_APPLE		0x05ac
+#define BT_VENDOR_ID_APPLE		0x004c
 #define USB_DEVICE_ID_APPLE_MIGHTYMOUSE	0x0304
 #define USB_DEVICE_ID_APPLE_MAGICMOUSE	0x030d
 #define USB_DEVICE_ID_APPLE_MAGICTRACKPAD	0x030e
@@ -157,6 +158,7 @@
 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO	0x0256
 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS	0x0257
 #define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI	0x0267
+#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI	0x026c
 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI	0x0290
 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO	0x0291
 #define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS	0x0292
@@ -528,9 +530,6 @@
 #define I2C_VENDOR_ID_HANTICK		0x0911
 #define I2C_PRODUCT_ID_HANTICK_5288	0x5288
 
-#define I2C_VENDOR_ID_RAYD		0x2386
-#define I2C_PRODUCT_ID_RAYD_3118	0x3118
-
 #define USB_VENDOR_ID_HANWANG		0x0b57
 #define USB_DEVICE_ID_HANWANG_TABLET_FIRST	0x5000
 #define USB_DEVICE_ID_HANWANG_TABLET_LAST	0x8fff
@@ -950,6 +949,7 @@
 #define USB_DEVICE_ID_SAITEK_RUMBLEPAD	0xff17
 #define USB_DEVICE_ID_SAITEK_PS1000	0x0621
 #define USB_DEVICE_ID_SAITEK_RAT7_OLD	0x0ccb
+#define USB_DEVICE_ID_SAITEK_RAT7_CONTAGION	0x0ccd
 #define USB_DEVICE_ID_SAITEK_RAT7	0x0cd7
 #define USB_DEVICE_ID_SAITEK_RAT9	0x0cfa
 #define USB_DEVICE_ID_SAITEK_MMO7	0x0cd0
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 4e94ea3e280a..a481eaf39e88 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -1582,6 +1582,7 @@ static struct hid_input *hidinput_allocate(struct hid_device *hid,
 	input_dev->dev.parent = &hid->dev;
 
 	hidinput->input = input_dev;
+	hidinput->application = application;
 	list_add_tail(&hidinput->list, &hid->inputs);
 
 	INIT_LIST_HEAD(&hidinput->reports);
@@ -1677,8 +1678,7 @@ static struct hid_input *hidinput_match_application(struct hid_report *report)
 	struct hid_input *hidinput;
 
 	list_for_each_entry(hidinput, &hid->inputs, list) {
-		if (hidinput->report &&
-		    hidinput->report->application == report->application)
+		if (hidinput->application == report->application)
 			return hidinput;
 	}
 
@@ -1815,6 +1815,7 @@ void hidinput_disconnect(struct hid_device *hid)
 			input_unregister_device(hidinput->input);
 		else
 			input_free_device(hidinput->input);
+		kfree(hidinput->name);
 		kfree(hidinput);
 	}
 
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 40fbb7c52723..da954f3f4da7 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1375,7 +1375,8 @@ static bool mt_need_to_apply_feature(struct hid_device *hdev,
 				     struct hid_usage *usage,
 				     enum latency_mode latency,
 				     bool surface_switch,
-				     bool button_switch)
+				     bool button_switch,
+				     bool *inputmode_found)
 {
 	struct mt_device *td = hid_get_drvdata(hdev);
 	struct mt_class *cls = &td->mtclass;
@@ -1387,6 +1388,14 @@ static bool mt_need_to_apply_feature(struct hid_device *hdev,
 
 	switch (usage->hid) {
 	case HID_DG_INPUTMODE:
+		/*
+		 * Some elan panels wrongly declare 2 input mode features,
+		 * and silently ignore when we set the value in the second
+		 * field. Skip the second feature and hope for the best.
+		 */
+		if (*inputmode_found)
+			return false;
+
 		if (cls->quirks & MT_QUIRK_FORCE_GET_FEATURE) {
 			report_len = hid_report_len(report);
 			buf = hid_alloc_report_buf(report, GFP_KERNEL);
@@ -1402,6 +1411,7 @@ static bool mt_need_to_apply_feature(struct hid_device *hdev,
 		}
 
 		field->value[index] = td->inputmode_value;
+		*inputmode_found = true;
 		return true;
 
 	case HID_DG_CONTACTMAX:
@@ -1439,6 +1449,7 @@ static void mt_set_modes(struct hid_device *hdev, enum latency_mode latency,
 	struct hid_usage *usage;
 	int i, j;
 	bool update_report;
+	bool inputmode_found = false;
 
 	rep_enum = &hdev->report_enum[HID_FEATURE_REPORT];
 	list_for_each_entry(rep, &rep_enum->report_list, list) {
@@ -1457,7 +1468,8 @@ static void mt_set_modes(struct hid_device *hdev, enum latency_mode latency,
 						 usage,
 						 latency,
 						 surface_switch,
-						 button_switch))
+						 button_switch,
+						 &inputmode_found))
 				update_report = true;
 		}
 	}
@@ -1685,6 +1697,9 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
 	 */
 	hdev->quirks |= HID_QUIRK_INPUT_PER_APP;
 
+	if (id->group != HID_GROUP_MULTITOUCH_WIN_8)
+		hdev->quirks |= HID_QUIRK_MULTI_INPUT;
+
 	timer_setup(&td->release_timer, mt_expired_timeout, 0);
 
 	ret = hid_parse(hdev);
diff --git a/drivers/hid/hid-saitek.c b/drivers/hid/hid-saitek.c
index 39e642686ff0..683861f324e3 100644
--- a/drivers/hid/hid-saitek.c
+++ b/drivers/hid/hid-saitek.c
@@ -183,6 +183,8 @@ static const struct hid_device_id saitek_devices[] = {
 		.driver_data = SAITEK_RELEASE_MODE_RAT7 },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7),
 		.driver_data = SAITEK_RELEASE_MODE_RAT7 },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7_CONTAGION),
+		.driver_data = SAITEK_RELEASE_MODE_RAT7 },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT9),
 		.driver_data = SAITEK_RELEASE_MODE_RAT7 },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9),
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 50af72baa5ca..2b63487057c2 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -579,6 +579,28 @@ void sensor_hub_device_close(struct hid_sensor_hub_device *hsdev)
 }
 EXPORT_SYMBOL_GPL(sensor_hub_device_close);
 
+static __u8 *sensor_hub_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+		unsigned int *rsize)
+{
+	/*
+	 * Checks if the report descriptor of Thinkpad Helix 2 has a logical
+	 * minimum for magnetic flux axis greater than the maximum.
+	 */
+	if (hdev->product == USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA &&
+		*rsize == 2558 && rdesc[913] == 0x17 && rdesc[914] == 0x40 &&
+		rdesc[915] == 0x81 && rdesc[916] == 0x08 &&
+		rdesc[917] == 0x00 && rdesc[918] == 0x27 &&
+		rdesc[921] == 0x07 && rdesc[922] == 0x00) {
+		/* Sets negative logical minimum for mag x, y and z */
+		rdesc[914] = rdesc[935] = rdesc[956] = 0xc0;
+		rdesc[915] = rdesc[936] = rdesc[957] = 0x7e;
+		rdesc[916] = rdesc[937] = rdesc[958] = 0xf7;
+		rdesc[917] = rdesc[938] = rdesc[959] = 0xff;
+	}
+
+	return rdesc;
+}
+
 static int sensor_hub_probe(struct hid_device *hdev,
 			    const struct hid_device_id *id)
 {
@@ -743,6 +765,7 @@ static struct hid_driver sensor_hub_driver = {
 	.probe = sensor_hub_probe,
 	.remove = sensor_hub_remove,
 	.raw_event = sensor_hub_raw_event,
+	.report_fixup = sensor_hub_report_fixup,
 #ifdef CONFIG_PM
 	.suspend = sensor_hub_suspend,
 	.resume = sensor_hub_resume,
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index 2ce194a84868..f3076659361a 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -170,8 +170,6 @@ static const struct i2c_hid_quirks {
 		I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
 	{ I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
 		I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
-	{ I2C_VENDOR_ID_RAYD, I2C_PRODUCT_ID_RAYD_3118,
-		I2C_HID_QUIRK_RESEND_REPORT_DESCR },
 	{ USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS10FB_TOUCH,
 		I2C_HID_QUIRK_RESEND_REPORT_DESCR },
 	{ 0, 0 }
@@ -1235,11 +1233,16 @@ static int i2c_hid_resume(struct device *dev)
 	pm_runtime_enable(dev);
 
 	enable_irq(client->irq);
-	ret = i2c_hid_hwreset(client);
+
+	/* Instead of resetting device, simply powers the device on. This
+	 * solves "incomplete reports" on Raydium devices 2386:3118 and
+	 * 2386:4B33
+	 */
+	ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
 	if (ret)
 		return ret;
 
-	/* RAYDIUM device (2386:3118) need to re-send report descr cmd
+	/* Some devices need to re-send report descr cmd
 	 * after resume, after this it will be back normal.
 	 * otherwise it issues too many incomplete reports.
 	 */
diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
index 97869b7410eb..da133716bed0 100644
--- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
@@ -29,6 +29,7 @@
 #define CNL_Ax_DEVICE_ID	0x9DFC
 #define GLK_Ax_DEVICE_ID	0x31A2
 #define CNL_H_DEVICE_ID		0xA37C
+#define SPT_H_DEVICE_ID		0xA135
 
 #define REVISION_ID_CHT_A0	0x6
 #define REVISION_ID_CHT_Ax_SI	0x0
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index 050f9872f5c0..a1125a5c7965 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -38,6 +38,7 @@ static const struct pci_device_id ish_pci_tbl[] = {
 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_Ax_DEVICE_ID)},
 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, GLK_Ax_DEVICE_ID)},
 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_H_DEVICE_ID)},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)},
 	{0, }
 };
 MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index b1b548a21f91..c71cc857b649 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -1291,6 +1291,9 @@ static ssize_t vmbus_chan_attr_show(struct kobject *kobj,
 	if (!attribute->show)
 		return -EIO;
 
+	if (chan->state != CHANNEL_OPENED_STATE)
+		return -EINVAL;
+
 	return attribute->show(chan, buf);
 }
 
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index 90837f7c7d0f..f4c7516eb989 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -302,14 +302,18 @@ static inline u16 volt2reg(int channel, long volt, u8 bypass_attn)
 	return clamp_val(reg, 0, 1023) & (0xff << 2);
 }
 
-static u16 adt7475_read_word(struct i2c_client *client, int reg)
+static int adt7475_read_word(struct i2c_client *client, int reg)
 {
-	u16 val;
+	int val1, val2;
 
-	val = i2c_smbus_read_byte_data(client, reg);
-	val |= (i2c_smbus_read_byte_data(client, reg + 1) << 8);
+	val1 = i2c_smbus_read_byte_data(client, reg);
+	if (val1 < 0)
+		return val1;
+	val2 = i2c_smbus_read_byte_data(client, reg + 1);
+	if (val2 < 0)
+		return val2;
 
-	return val;
+	return val1 | (val2 << 8);
 }
 
 static void adt7475_write_word(struct i2c_client *client, int reg, u16 val)
@@ -962,13 +966,14 @@ static ssize_t show_pwmfreq(struct device *dev, struct device_attribute *attr,
 {
 	struct adt7475_data *data = adt7475_update_device(dev);
 	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
-	int i = clamp_val(data->range[sattr->index] & 0xf, 0,
-			  ARRAY_SIZE(pwmfreq_table) - 1);
+	int idx;
 
 	if (IS_ERR(data))
 		return PTR_ERR(data);
+	idx = clamp_val(data->range[sattr->index] & 0xf, 0,
+			ARRAY_SIZE(pwmfreq_table) - 1);
 
-	return sprintf(buf, "%d\n", pwmfreq_table[i]);
+	return sprintf(buf, "%d\n", pwmfreq_table[idx]);
 }
 
 static ssize_t set_pwmfreq(struct device *dev, struct device_attribute *attr,
@@ -1004,6 +1009,10 @@ static ssize_t pwm_use_point2_pwm_at_crit_show(struct device *dev,
 					    char *buf)
 {
 	struct adt7475_data *data = adt7475_update_device(dev);
+
+	if (IS_ERR(data))
+		return PTR_ERR(data);
+
 	return sprintf(buf, "%d\n", !!(data->config4 & CONFIG4_MAXDUTY));
 }
 
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index e9e6aeabbf84..71d3445ba869 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -17,7 +17,7 @@
  * Bi-directional Current/Power Monitor with I2C Interface
  * Datasheet: http://www.ti.com/product/ina230
  *
- * Copyright (C) 2012 Lothar Felten <l-felten@ti.com>
+ * Copyright (C) 2012 Lothar Felten <lothar.felten@gmail.com>
  * Thanks to Jan Volkering
  *
  * This program is free software; you can redistribute it and/or modify
@@ -329,6 +329,15 @@ static int ina2xx_set_shunt(struct ina2xx_data *data, long val)
 	return 0;
 }
 
+static ssize_t ina2xx_show_shunt(struct device *dev,
+				 struct device_attribute *da,
+				 char *buf)
+{
+	struct ina2xx_data *data = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%li\n", data->rshunt);
+}
+
 static ssize_t ina2xx_store_shunt(struct device *dev,
 				  struct device_attribute *da,
 				  const char *buf, size_t count)
@@ -403,7 +412,7 @@ static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina2xx_show_value, NULL,
 
 /* shunt resistance */
 static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR,
-			  ina2xx_show_value, ina2xx_store_shunt,
+			  ina2xx_show_shunt, ina2xx_store_shunt,
 			  INA2XX_CALIBRATION);
 
 /* update interval (ina226 only) */
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index c6bd61e4695a..944f5b63aecd 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -63,6 +63,7 @@
 #include <linux/bitops.h>
 #include <linux/dmi.h>
 #include <linux/io.h>
+#include <linux/nospec.h>
 #include "lm75.h"
 
 #define USE_ALTERNATE
@@ -2689,6 +2690,7 @@ store_pwm_weight_temp_sel(struct device *dev, struct device_attribute *attr,
 		return err;
 	if (val > NUM_TEMP)
 		return -EINVAL;
+	val = array_index_nospec(val, NUM_TEMP + 1);
 	if (val && (!(data->have_temp & BIT(val - 1)) ||
 		    !data->temp_src[val - 1]))
 		return -EINVAL;
diff --git a/drivers/hwmon/raspberrypi-hwmon.c b/drivers/hwmon/raspberrypi-hwmon.c
index fb4e4a6bb1f6..be5ba4690895 100644
--- a/drivers/hwmon/raspberrypi-hwmon.c
+++ b/drivers/hwmon/raspberrypi-hwmon.c
@@ -164,3 +164,4 @@ module_platform_driver(rpi_hwmon_driver);
 MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>");
 MODULE_DESCRIPTION("Raspberry Pi voltage sensor driver");
 MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:raspberrypi-hwmon");
diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c
index 6ec65adaba49..c33dcfb87993 100644
--- a/drivers/i2c/algos/i2c-algo-bit.c
+++ b/drivers/i2c/algos/i2c-algo-bit.c
@@ -110,8 +110,8 @@ static int sclhi(struct i2c_algo_bit_data *adap)
 	}
 #ifdef DEBUG
 	if (jiffies != start && i2c_debug >= 3)
-		pr_debug("i2c-algo-bit: needed %ld jiffies for SCL to go "
-			 "high\n", jiffies - start);
+		pr_debug("i2c-algo-bit: needed %ld jiffies for SCL to go high\n",
+			 jiffies - start);
 #endif
 
 done:
@@ -171,8 +171,9 @@ static int i2c_outb(struct i2c_adapter *i2c_adap, unsigned char c)
 		setsda(adap, sb);
 		udelay((adap->udelay + 1) / 2);
 		if (sclhi(adap) < 0) { /* timed out */
-			bit_dbg(1, &i2c_adap->dev, "i2c_outb: 0x%02x, "
-				"timeout at bit #%d\n", (int)c, i);
+			bit_dbg(1, &i2c_adap->dev,
+				"i2c_outb: 0x%02x, timeout at bit #%d\n",
+				(int)c, i);
 			return -ETIMEDOUT;
 		}
 		/* FIXME do arbitration here:
@@ -185,8 +186,8 @@ static int i2c_outb(struct i2c_adapter *i2c_adap, unsigned char c)
 	}
 	sdahi(adap);
 	if (sclhi(adap) < 0) { /* timeout */
-		bit_dbg(1, &i2c_adap->dev, "i2c_outb: 0x%02x, "
-			"timeout at ack\n", (int)c);
+		bit_dbg(1, &i2c_adap->dev,
+			"i2c_outb: 0x%02x, timeout at ack\n", (int)c);
 		return -ETIMEDOUT;
 	}
 
@@ -215,8 +216,9 @@ static int i2c_inb(struct i2c_adapter *i2c_adap)
 	sdahi(adap);
 	for (i = 0; i < 8; i++) {
 		if (sclhi(adap) < 0) { /* timeout */
-			bit_dbg(1, &i2c_adap->dev, "i2c_inb: timeout at bit "
-				"#%d\n", 7 - i);
+			bit_dbg(1, &i2c_adap->dev,
+				"i2c_inb: timeout at bit #%d\n",
+				7 - i);
 			return -ETIMEDOUT;
 		}
 		indata *= 2;
@@ -265,8 +267,9 @@ static int test_bus(struct i2c_adapter *i2c_adap)
 		goto bailout;
 	}
 	if (!scl) {
-		printk(KERN_WARNING "%s: SCL unexpected low "
-		       "while pulling SDA low!\n", name);
+		printk(KERN_WARNING
+		       "%s: SCL unexpected low while pulling SDA low!\n",
+		       name);
 		goto bailout;
 	}
 
@@ -278,8 +281,9 @@ static int test_bus(struct i2c_adapter *i2c_adap)
 		goto bailout;
 	}
 	if (!scl) {
-		printk(KERN_WARNING "%s: SCL unexpected low "
-		       "while pulling SDA high!\n", name);
+		printk(KERN_WARNING
+		       "%s: SCL unexpected low while pulling SDA high!\n",
+		       name);
 		goto bailout;
 	}
 
@@ -291,8 +295,9 @@ static int test_bus(struct i2c_adapter *i2c_adap)
 		goto bailout;
 	}
 	if (!sda) {
-		printk(KERN_WARNING "%s: SDA unexpected low "
-		       "while pulling SCL low!\n", name);
+		printk(KERN_WARNING
+		       "%s: SDA unexpected low while pulling SCL low!\n",
+		       name);
 		goto bailout;
 	}
 
@@ -304,8 +309,9 @@ static int test_bus(struct i2c_adapter *i2c_adap)
 		goto bailout;
 	}
 	if (!sda) {
-		printk(KERN_WARNING "%s: SDA unexpected low "
-		       "while pulling SCL high!\n", name);
+		printk(KERN_WARNING
+		       "%s: SDA unexpected low while pulling SCL high!\n",
+		       name);
 		goto bailout;
 	}
 
@@ -352,8 +358,8 @@ static int try_address(struct i2c_adapter *i2c_adap,
 		i2c_start(adap);
 	}
 	if (i && ret)
-		bit_dbg(1, &i2c_adap->dev, "Used %d tries to %s client at "
-			"0x%02x: %s\n", i + 1,
+		bit_dbg(1, &i2c_adap->dev,
+			"Used %d tries to %s client at 0x%02x: %s\n", i + 1,
 			addr & 1 ? "read from" : "write to", addr >> 1,
 			ret == 1 ? "success" : "failed, timeout?");
 	return ret;
@@ -442,8 +448,9 @@ static int readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
 		if (inval <= 0 || inval > I2C_SMBUS_BLOCK_MAX) {
 			if (!(flags & I2C_M_NO_RD_ACK))
 				acknak(i2c_adap, 0);
-			dev_err(&i2c_adap->dev, "readbytes: invalid "
-				"block length (%d)\n", inval);
+			dev_err(&i2c_adap->dev,
+				"readbytes: invalid block length (%d)\n",
+				inval);
 			return -EPROTO;
 		}
 		/* The original count value accounts for the extra
@@ -506,8 +513,8 @@ static int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
 			return -ENXIO;
 		}
 		if (flags & I2C_M_RD) {
-			bit_dbg(3, &i2c_adap->dev, "emitting repeated "
-				"start condition\n");
+			bit_dbg(3, &i2c_adap->dev,
+				"emitting repeated start condition\n");
 			i2c_repstart(adap);
 			/* okay, now switch into reading mode */
 			addr |= 0x01;
@@ -564,8 +571,8 @@ static int bit_xfer(struct i2c_adapter *i2c_adap,
 		}
 		ret = bit_doAddress(i2c_adap, pmsg);
 		if ((ret != 0) && !nak_ok) {
-			bit_dbg(1, &i2c_adap->dev, "NAK from "
-				"device addr 0x%02x msg #%d\n",
+			bit_dbg(1, &i2c_adap->dev,
+				"NAK from device addr 0x%02x msg #%d\n",
 				msgs[i].addr, i);
 			goto bailout;
 		}
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index e18442b9973a..94d94b4a9a0d 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -708,7 +708,6 @@ int i2c_dw_probe(struct dw_i2c_dev *dev)
 	i2c_set_adapdata(adap, dev);
 
 	if (dev->pm_disabled) {
-		dev_pm_syscore_device(dev->dev, true);
 		irq_flags = IRQF_NO_SUSPEND;
 	} else {
 		irq_flags = IRQF_SHARED | IRQF_COND_SUSPEND;
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 1a8d2da5b000..b5750fd85125 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -434,6 +434,9 @@ static int dw_i2c_plat_suspend(struct device *dev)
 {
 	struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
 
+	if (i_dev->pm_disabled)
+		return 0;
+
 	i_dev->disable(i_dev);
 	i2c_dw_prepare_clk(i_dev, false);
 
@@ -444,7 +447,9 @@ static int dw_i2c_plat_resume(struct device *dev)
 {
 	struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
 
-	i2c_dw_prepare_clk(i_dev, true);
+	if (!i_dev->pm_disabled)
+		i2c_dw_prepare_clk(i_dev, true);
+
 	i_dev->init(i_dev);
 
 	return 0;
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 941c223f6491..c91e145ef5a5 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -140,6 +140,7 @@
 
 #define SBREG_BAR		0x10
 #define SBREG_SMBCTRL		0xc6000c
+#define SBREG_SMBCTRL_DNV	0xcf000c
 
 /* Host status bits for SMBPCISTS */
 #define SMBPCISTS_INTS		BIT(3)
@@ -1399,7 +1400,11 @@ static void i801_add_tco(struct i801_priv *priv)
 	spin_unlock(&p2sb_spinlock);
 
 	res = &tco_res[ICH_RES_MEM_OFF];
-	res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL;
+	if (pci_dev->device == PCI_DEVICE_ID_INTEL_DNV_SMBUS)
+		res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL_DNV;
+	else
+		res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL;
+
 	res->end = res->start + 3;
 	res->flags = IORESOURCE_MEM;
 
@@ -1415,6 +1420,13 @@ static void i801_add_tco(struct i801_priv *priv)
 }
 
 #ifdef CONFIG_ACPI
+static bool i801_acpi_is_smbus_ioport(const struct i801_priv *priv,
+				      acpi_physical_address address)
+{
+	return address >= priv->smba &&
+	       address <= pci_resource_end(priv->pci_dev, SMBBAR);
+}
+
 static acpi_status
 i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits,
 		     u64 *value, void *handler_context, void *region_context)
@@ -1430,7 +1442,7 @@ i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits,
 	 */
 	mutex_lock(&priv->acpi_lock);
 
-	if (!priv->acpi_reserved) {
+	if (!priv->acpi_reserved && i801_acpi_is_smbus_ioport(priv, address)) {
 		priv->acpi_reserved = true;
 
 		dev_warn(&pdev->dev, "BIOS is accessing SMBus registers\n");
diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
index 6d975f5221ca..06c4c767af32 100644
--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
+++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
@@ -538,7 +538,6 @@ static const struct i2c_algorithm lpi2c_imx_algo = {
 
 static const struct of_device_id lpi2c_imx_of_match[] = {
 	{ .compatible = "fsl,imx7ulp-lpi2c" },
-	{ .compatible = "fsl,imx8dv-lpi2c" },
 	{ },
 };
 MODULE_DEVICE_TABLE(of, lpi2c_imx_of_match);
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 439e8778f849..818cab14e87c 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -507,8 +507,6 @@ static void sh_mobile_i2c_dma_callback(void *data)
 	pd->pos = pd->msg->len;
 	pd->stop_after_dma = true;
 
-	i2c_release_dma_safe_msg_buf(pd->msg, pd->dma_buf);
-
 	iic_set_clr(pd, ICIC, 0, ICIC_TDMAE | ICIC_RDMAE);
 }
 
@@ -602,8 +600,8 @@ static void sh_mobile_i2c_xfer_dma(struct sh_mobile_i2c_data *pd)
 	dma_async_issue_pending(chan);
 }
 
-static int start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg,
-		    bool do_init)
+static void start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg,
+		     bool do_init)
 {
 	if (do_init) {
 		/* Initialize channel registers */
@@ -627,7 +625,6 @@ static int start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg,
 
 	/* Enable all interrupts to begin with */
 	iic_wr(pd, ICIC, ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE);
-	return 0;
 }
 
 static int poll_dte(struct sh_mobile_i2c_data *pd)
@@ -698,9 +695,7 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter,
 		pd->send_stop = i == num - 1 || msg->flags & I2C_M_STOP;
 		pd->stop_after_dma = false;
 
-		err = start_ch(pd, msg, do_start);
-		if (err)
-			break;
+		start_ch(pd, msg, do_start);
 
 		if (do_start)
 			i2c_op(pd, OP_START, 0);
@@ -709,6 +704,10 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter,
 		timeout = wait_event_timeout(pd->wait,
 					     pd->sr & (ICSR_TACK | SW_DONE),
 					     adapter->timeout);
+
+		/* 'stop_after_dma' tells if DMA transfer was complete */
+		i2c_put_dma_safe_msg_buf(pd->dma_buf, pd->msg, pd->stop_after_dma);
+
 		if (!timeout) {
 			dev_err(pd->dev, "Transfer request timed out\n");
 			if (pd->dma_direction != DMA_NONE)
diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c
index 9918bdd81619..a403e8579b65 100644
--- a/drivers/i2c/busses/i2c-uniphier-f.c
+++ b/drivers/i2c/busses/i2c-uniphier-f.c
@@ -401,11 +401,8 @@ static int uniphier_fi2c_master_xfer(struct i2c_adapter *adap,
 		return ret;
 
 	for (msg = msgs; msg < emsg; msg++) {
-		/* If next message is read, skip the stop condition */
-		bool stop = !(msg + 1 < emsg && msg[1].flags & I2C_M_RD);
-		/* but, force it if I2C_M_STOP is set */
-		if (msg->flags & I2C_M_STOP)
-			stop = true;
+		/* Emit STOP if it is the last message or I2C_M_STOP is set. */
+		bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP);
 
 		ret = uniphier_fi2c_master_xfer_one(adap, msg, stop);
 		if (ret)
diff --git a/drivers/i2c/busses/i2c-uniphier.c b/drivers/i2c/busses/i2c-uniphier.c
index bb181b088291..454f914ae66d 100644
--- a/drivers/i2c/busses/i2c-uniphier.c
+++ b/drivers/i2c/busses/i2c-uniphier.c
@@ -248,11 +248,8 @@ static int uniphier_i2c_master_xfer(struct i2c_adapter *adap,
 		return ret;
 
 	for (msg = msgs; msg < emsg; msg++) {
-		/* If next message is read, skip the stop condition */
-		bool stop = !(msg + 1 < emsg && msg[1].flags & I2C_M_RD);
-		/* but, force it if I2C_M_STOP is set */
-		if (msg->flags & I2C_M_STOP)
-			stop = true;
+		/* Emit STOP if it is the last message or I2C_M_STOP is set. */
+		bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP);
 
 		ret = uniphier_i2c_master_xfer_one(adap, msg, stop);
 		if (ret)
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 9a71e50d21f1..0c51c0ffdda9 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -532,6 +532,7 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
 {
 	u8 rx_watermark;
 	struct i2c_msg *msg = i2c->rx_msg = i2c->tx_msg;
+	unsigned long flags;
 
 	/* Clear and enable Rx full interrupt. */
 	xiic_irq_clr_en(i2c, XIIC_INTR_RX_FULL_MASK | XIIC_INTR_TX_ERROR_MASK);
@@ -547,6 +548,7 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
 		rx_watermark = IIC_RX_FIFO_DEPTH;
 	xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, rx_watermark - 1);
 
+	local_irq_save(flags);
 	if (!(msg->flags & I2C_M_NOSTART))
 		/* write the address */
 		xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
@@ -556,6 +558,8 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
 
 	xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
 		msg->len | ((i2c->nmsgs == 1) ? XIIC_TX_DYN_STOP_MASK : 0));
+	local_irq_restore(flags);
+
 	if (i2c->nmsgs == 1)
 		/* very last, enable bus not busy as well */
 		xiic_irq_clr_en(i2c, XIIC_INTR_BNB_MASK);
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index f15737763608..9ee9a15e7134 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -2293,21 +2293,22 @@ u8 *i2c_get_dma_safe_msg_buf(struct i2c_msg *msg, unsigned int threshold)
 EXPORT_SYMBOL_GPL(i2c_get_dma_safe_msg_buf);
 
 /**
- * i2c_release_dma_safe_msg_buf - release DMA safe buffer and sync with i2c_msg
- * @msg: the message to be synced with
+ * i2c_put_dma_safe_msg_buf - release DMA safe buffer and sync with i2c_msg
  * @buf: the buffer obtained from i2c_get_dma_safe_msg_buf(). May be NULL.
+ * @msg: the message which the buffer corresponds to
+ * @xferred: bool saying if the message was transferred
  */
-void i2c_release_dma_safe_msg_buf(struct i2c_msg *msg, u8 *buf)
+void i2c_put_dma_safe_msg_buf(u8 *buf, struct i2c_msg *msg, bool xferred)
 {
 	if (!buf || buf == msg->buf)
 		return;
 
-	if (msg->flags & I2C_M_RD)
+	if (xferred && msg->flags & I2C_M_RD)
 		memcpy(msg->buf, buf, msg->len);
 
 	kfree(buf);
 }
-EXPORT_SYMBOL_GPL(i2c_release_dma_safe_msg_buf);
+EXPORT_SYMBOL_GPL(i2c_put_dma_safe_msg_buf);
 
 MODULE_AUTHOR("Simon G. Vogl <simon@tk.uni-linz.ac.at>");
 MODULE_DESCRIPTION("I2C-Bus main module");
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
index 7589f2ad1dae..631360b14ca7 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
@@ -187,12 +187,15 @@ static int st_lsm6dsx_set_fifo_odr(struct st_lsm6dsx_sensor *sensor,
 
 int st_lsm6dsx_update_watermark(struct st_lsm6dsx_sensor *sensor, u16 watermark)
 {
-	u16 fifo_watermark = ~0, cur_watermark, sip = 0, fifo_th_mask;
+	u16 fifo_watermark = ~0, cur_watermark, fifo_th_mask;
 	struct st_lsm6dsx_hw *hw = sensor->hw;
 	struct st_lsm6dsx_sensor *cur_sensor;
 	int i, err, data;
 	__le16 wdata;
 
+	if (!hw->sip)
+		return 0;
+
 	for (i = 0; i < ST_LSM6DSX_ID_MAX; i++) {
 		cur_sensor = iio_priv(hw->iio_devs[i]);
 
@@ -203,14 +206,10 @@ int st_lsm6dsx_update_watermark(struct st_lsm6dsx_sensor *sensor, u16 watermark)
 			      : cur_sensor->watermark;
 
 		fifo_watermark = min_t(u16, fifo_watermark, cur_watermark);
-		sip += cur_sensor->sip;
 	}
 
-	if (!sip)
-		return 0;
-
-	fifo_watermark = max_t(u16, fifo_watermark, sip);
-	fifo_watermark = (fifo_watermark / sip) * sip;
+	fifo_watermark = max_t(u16, fifo_watermark, hw->sip);
+	fifo_watermark = (fifo_watermark / hw->sip) * hw->sip;
 	fifo_watermark = fifo_watermark * hw->settings->fifo_ops.th_wl;
 
 	err = regmap_read(hw->regmap, hw->settings->fifo_ops.fifo_th.addr + 1,
diff --git a/drivers/iio/temperature/maxim_thermocouple.c b/drivers/iio/temperature/maxim_thermocouple.c
index 54e383231d1e..c31b9633f32d 100644
--- a/drivers/iio/temperature/maxim_thermocouple.c
+++ b/drivers/iio/temperature/maxim_thermocouple.c
@@ -258,7 +258,6 @@ static int maxim_thermocouple_remove(struct spi_device *spi)
 static const struct spi_device_id maxim_thermocouple_id[] = {
 	{"max6675", MAX6675},
 	{"max31855", MAX31855},
-	{"max31856", MAX31855},
 	{},
 };
 MODULE_DEVICE_TABLE(spi, maxim_thermocouple_id);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index f72677291b69..a36c94930c31 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -724,6 +724,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
 	dgid = (union ib_gid *) &addr->sib_addr;
 	pkey = ntohs(addr->sib_pkey);
 
+	mutex_lock(&lock);
 	list_for_each_entry(cur_dev, &dev_list, list) {
 		for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
 			if (!rdma_cap_af_ib(cur_dev->device, p))
@@ -750,18 +751,19 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
 				cma_dev = cur_dev;
 				sgid = gid;
 				id_priv->id.port_num = p;
+				goto found;
 			}
 		}
 	}
-
-	if (!cma_dev)
-		return -ENODEV;
+	mutex_unlock(&lock);
+	return -ENODEV;
 
 found:
 	cma_attach_to_dev(id_priv, cma_dev);
-	addr = (struct sockaddr_ib *) cma_src_addr(id_priv);
-	memcpy(&addr->sib_addr, &sgid, sizeof sgid);
+	mutex_unlock(&lock);
+	addr = (struct sockaddr_ib *)cma_src_addr(id_priv);
+	memcpy(&addr->sib_addr, &sgid, sizeof(sgid));
 	cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr);
 	return 0;
 }
diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
index 6eb64c6f0802..c4118bcd5103 100644
--- a/drivers/infiniband/core/rdma_core.c
+++ b/drivers/infiniband/core/rdma_core.c
@@ -882,6 +882,8 @@ static int __uverbs_cleanup_ufile(struct ib_uverbs_file *ufile,
 		WARN_ON(uverbs_try_lock_object(obj, UVERBS_LOOKUP_WRITE));
 		if (!uverbs_destroy_uobject(obj, reason))
 			ret = 0;
+		else
+			atomic_set(&obj->usecnt, 0);
 	}
 	return ret;
 }
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index ec8fb289621f..5f437d1570fb 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -124,6 +124,8 @@ static DEFINE_MUTEX(mut);
 static DEFINE_IDR(ctx_idr);
 static DEFINE_IDR(multicast_idr);
 
+static const struct file_operations ucma_fops;
+
 static inline struct ucma_context *_ucma_find_context(int id,
 						      struct ucma_file *file)
 {
@@ -1581,6 +1583,10 @@ static ssize_t ucma_migrate_id(struct ucma_file *new_file,
 	f = fdget(cmd.fd);
 	if (!f.file)
 		return -ENOENT;
+	if (f.file->f_op != &ucma_fops) {
+		ret = -EINVAL;
+		goto file_put;
+	}
 
 	/* Validate current fd and prevent destruction of id. */
 	ctx = ucma_get_ctx(f.file->private_data, cmd.id);
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 823beca448e1..6d974e2363df 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -1050,7 +1050,7 @@ static void ib_uverbs_add_one(struct ib_device *device)
 	uverbs_dev->num_comp_vectors = device->num_comp_vectors;
 
 	if (ib_uverbs_create_uapi(device, uverbs_dev))
-		goto err;
+		goto err_uapi;
 
 	cdev_init(&uverbs_dev->cdev, NULL);
 	uverbs_dev->cdev.owner = THIS_MODULE;
@@ -1077,11 +1077,10 @@ static void ib_uverbs_add_one(struct ib_device *device)
 
 err_class:
 	device_destroy(uverbs_class, uverbs_dev->cdev.dev);
-
 err_cdev:
 	cdev_del(&uverbs_dev->cdev);
+err_uapi:
 	clear_bit(devnum, dev_map);
-
 err:
 	if (atomic_dec_and_test(&uverbs_dev->refcount))
 		ib_uverbs_comp_dev(uverbs_dev);
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index bbfb86eb2d24..bc2b9e038439 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -833,6 +833,8 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
833 "Failed to destroy Shadow QP"); 833 "Failed to destroy Shadow QP");
834 return rc; 834 return rc;
835 } 835 }
836 bnxt_qplib_free_qp_res(&rdev->qplib_res,
837 &rdev->qp1_sqp->qplib_qp);
836 mutex_lock(&rdev->qp_lock); 838 mutex_lock(&rdev->qp_lock);
837 list_del(&rdev->qp1_sqp->list); 839 list_del(&rdev->qp1_sqp->list);
838 atomic_dec(&rdev->qp_count); 840 atomic_dec(&rdev->qp_count);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index e426b990c1dd..6ad0d46ab879 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -196,7 +196,7 @@ static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
 				      struct bnxt_qplib_qp *qp)
 {
 	struct bnxt_qplib_q *rq = &qp->rq;
-	struct bnxt_qplib_q *sq = &qp->rq;
+	struct bnxt_qplib_q *sq = &qp->sq;
 	int rc = 0;
 
 	if (qp->sq_hdr_buf_size && sq->hwq.max_elements) {
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index b3203afa3b1d..347fe18b1a41 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1685,6 +1685,12 @@ static void flush_qp(struct c4iw_qp *qhp)
 	schp = to_c4iw_cq(qhp->ibqp.send_cq);
 
 	if (qhp->ibqp.uobject) {
+
+		/* for user qps, qhp->wq.flushed is protected by qhp->mutex */
+		if (qhp->wq.flushed)
+			return;
+
+		qhp->wq.flushed = 1;
 		t4_set_wq_in_error(&qhp->wq, 0);
 		t4_set_cq_in_error(&rchp->cq);
 		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index eec83757d55f..6c967dde58e7 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -893,14 +893,11 @@ static int trigger_sbr(struct hfi1_devdata *dd)
 	}
 
 	/*
-	 * A secondary bus reset (SBR) issues a hot reset to our device.
-	 * The following routine does a 1s wait after the reset is dropped
-	 * per PCI Trhfa (recovery time). PCIe 3.0 section 6.6.1 -
-	 * Conventional Reset, paragraph 3, line 35 also says that a 1s
-	 * delay after a reset is required. Per spec requirements,
-	 * the link is either working or not after that point.
+	 * This is an end around to do an SBR during probe time. A new API needs
+	 * to be implemented to have cleaner interface but this fixes the
+	 * current brokenness
 	 */
-	return pci_reset_bus(dev);
+	return pci_bridge_secondary_bus_reset(dev->bus->self);
 }
 
 /*
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index ca0f1ee26091..0bbeaaae47e0 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -517,9 +517,11 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
 	props->page_size_cap	   = dev->dev->caps.page_size_cap;
 	props->max_qp		   = dev->dev->quotas.qp;
 	props->max_qp_wr	   = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
-	props->max_send_sge	   = dev->dev->caps.max_sq_sg;
-	props->max_recv_sge	   = dev->dev->caps.max_rq_sg;
-	props->max_sge_rd	   = MLX4_MAX_SGE_RD;
+	props->max_send_sge =
+		min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);
+	props->max_recv_sge =
+		min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);
+	props->max_sge_rd = MLX4_MAX_SGE_RD;
 	props->max_cq		   = dev->dev->quotas.cq;
 	props->max_cqe		   = dev->dev->caps.max_cqes;
 	props->max_mr		   = dev->dev->quotas.mpt;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index ea01b8dd2be6..3d5424f335cb 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1027,12 +1027,14 @@ static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id,
 
 	skb_queue_head_init(&skqueue);
 
+	netif_tx_lock_bh(p->dev);
 	spin_lock_irq(&priv->lock);
 	set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
 	if (p->neigh)
 		while ((skb = __skb_dequeue(&p->neigh->queue)))
 			__skb_queue_tail(&skqueue, skb);
 	spin_unlock_irq(&priv->lock);
+	netif_tx_unlock_bh(p->dev);
 
 	while ((skb = __skb_dequeue(&skqueue))) {
 		skb->dev = p->dev;
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 316a57530f6d..c2df341ff6fa 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1439,6 +1439,7 @@ static struct irq_chip its_irq_chip = {
1439 * The consequence of the above is that the allocation cost is low, but 1439 * The consequence of the above is that the allocation cost is low, but
1440 * freeing is expensive. We assume that freeing rarely occurs. 1440 * freeing is expensive. We assume that freeing rarely occurs.
1441 */ 1441 */
1442#define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */
1442 1443
1443static DEFINE_MUTEX(lpi_range_lock); 1444static DEFINE_MUTEX(lpi_range_lock);
1444static LIST_HEAD(lpi_range_list); 1445static LIST_HEAD(lpi_range_list);
@@ -1625,7 +1626,8 @@ static int __init its_alloc_lpi_tables(void)
1625{ 1626{
1626 phys_addr_t paddr; 1627 phys_addr_t paddr;
1627 1628
1628 lpi_id_bits = GICD_TYPER_ID_BITS(gic_rdists->gicd_typer); 1629 lpi_id_bits = min_t(u32, GICD_TYPER_ID_BITS(gic_rdists->gicd_typer),
1630 ITS_MAX_LPI_NRBITS);
1629 gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT); 1631 gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT);
1630 if (!gic_rdists->prop_page) { 1632 if (!gic_rdists->prop_page) {
1631 pr_err("Failed to allocate PROPBASE\n"); 1633 pr_err("Failed to allocate PROPBASE\n");
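GICD_TYPER can advertise more LPI ID bits than the driver sizes its tables for; the patch caps lpi_id_bits at 16 (64K LPIs) so the property table allocation matches what is actually supported. A standalone sketch of the clamp and the resulting table size, assuming illustrative values:

#include <stdio.h>

#define MAX_LPI_NRBITS 16	/* 64K LPIs, mirroring the driver's cap */

static unsigned min_u(unsigned a, unsigned b) { return a < b ? a : b; }

int main(void)
{
	unsigned hw_id_bits = 24;	/* what the hardware claims */
	unsigned id_bits = min_u(hw_id_bits, MAX_LPI_NRBITS);

	/* The table must hold one entry per possible LPI number. */
	unsigned long nr_lpis = 1UL << id_bits;

	printf("clamped %u -> %u id bits, prop table: %lu entries\n",
	       hw_id_bits, id_bits, nr_lpis);
	return 0;
}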
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index f266c81f396f..0481223b1deb 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -332,7 +332,7 @@ static int crypt_iv_essiv_init(struct crypt_config *cc)
332 int err; 332 int err;
333 333
334 desc->tfm = essiv->hash_tfm; 334 desc->tfm = essiv->hash_tfm;
335 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; 335 desc->flags = 0;
336 336
337 err = crypto_shash_digest(desc, cc->key, cc->key_size, essiv->salt); 337 err = crypto_shash_digest(desc, cc->key, cc->key_size, essiv->salt);
338 shash_desc_zero(desc); 338 shash_desc_zero(desc);
@@ -606,7 +606,7 @@ static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
606 int i, r; 606 int i, r;
607 607
608 desc->tfm = lmk->hash_tfm; 608 desc->tfm = lmk->hash_tfm;
609 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; 609 desc->flags = 0;
610 610
611 r = crypto_shash_init(desc); 611 r = crypto_shash_init(desc);
612 if (r) 612 if (r)
@@ -768,7 +768,7 @@ static int crypt_iv_tcw_whitening(struct crypt_config *cc,
768 768
769 /* calculate crc32 for every 32bit part and xor it */ 769 /* calculate crc32 for every 32bit part and xor it */
770 desc->tfm = tcw->crc32_tfm; 770 desc->tfm = tcw->crc32_tfm;
771 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; 771 desc->flags = 0;
772 for (i = 0; i < 4; i++) { 772 for (i = 0; i < 4; i++) {
773 r = crypto_shash_init(desc); 773 r = crypto_shash_init(desc);
774 if (r) 774 if (r)
@@ -1251,7 +1251,7 @@ static void crypt_alloc_req_skcipher(struct crypt_config *cc,
1251 * requests if driver request queue is full. 1251 * requests if driver request queue is full.
1252 */ 1252 */
1253 skcipher_request_set_callback(ctx->r.req, 1253 skcipher_request_set_callback(ctx->r.req,
1254 CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, 1254 CRYPTO_TFM_REQ_MAY_BACKLOG,
1255 kcryptd_async_done, dmreq_of_req(cc, ctx->r.req)); 1255 kcryptd_async_done, dmreq_of_req(cc, ctx->r.req));
1256} 1256}
1257 1257
@@ -1268,7 +1268,7 @@ static void crypt_alloc_req_aead(struct crypt_config *cc,
1268 * requests if driver request queue is full. 1268 * requests if driver request queue is full.
1269 */ 1269 */
1270 aead_request_set_callback(ctx->r.req_aead, 1270 aead_request_set_callback(ctx->r.req_aead,
1271 CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, 1271 CRYPTO_TFM_REQ_MAY_BACKLOG,
1272 kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead)); 1272 kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead));
1273} 1273}
1274 1274
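This hunk and the following dm-integrity hunks drop CRYPTO_TFM_REQ_MAY_SLEEP, telling the crypto layer it may no longer sleep on behalf of these requests; that matters when the callers can run in atomic context. A generic sketch of the flag contract, with names invented for illustration rather than the real crypto API:

/*
 * A helper only performs a blocking operation when the caller passed
 * MAY_SLEEP. Callers that may run in atomic context simply omit the
 * flag, as the hunks above now do.
 */
#include <stdio.h>
#include <stdlib.h>

#define REQ_MAY_BACKLOG (1u << 0)
#define REQ_MAY_SLEEP   (1u << 1)

static void *get_buffer(unsigned flags, size_t len)
{
	if (flags & REQ_MAY_SLEEP)
		return malloc(len);	/* blocking allocation is fine */

	/* atomic caller: must use a non-blocking path or fail fast */
	return NULL;
}

int main(void)
{
	void *a = get_buffer(REQ_MAY_BACKLOG | REQ_MAY_SLEEP, 64);
	void *b = get_buffer(REQ_MAY_BACKLOG, 64);	/* atomic-safe */

	printf("sleeping caller: %s, atomic caller: %s\n",
	       a ? "allocated" : "failed", b ? "allocated" : "failed");
	free(a);
	return 0;
}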
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 378878599466..89ccb64342de 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -532,7 +532,7 @@ static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result
532 unsigned j, size; 532 unsigned j, size;
533 533
534 desc->tfm = ic->journal_mac; 534 desc->tfm = ic->journal_mac;
535 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; 535 desc->flags = 0;
536 536
537 r = crypto_shash_init(desc); 537 r = crypto_shash_init(desc);
538 if (unlikely(r)) { 538 if (unlikely(r)) {
@@ -676,7 +676,7 @@ static void complete_journal_encrypt(struct crypto_async_request *req, int err)
676static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp) 676static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp)
677{ 677{
678 int r; 678 int r;
679 skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, 679 skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
680 complete_journal_encrypt, comp); 680 complete_journal_encrypt, comp);
681 if (likely(encrypt)) 681 if (likely(encrypt))
682 r = crypto_skcipher_encrypt(req); 682 r = crypto_skcipher_encrypt(req);
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index cae689de75fd..5ba067fa0c72 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2010-2011 Neil Brown 2 * Copyright (C) 2010-2011 Neil Brown
3 * Copyright (C) 2010-2017 Red Hat, Inc. All rights reserved. 3 * Copyright (C) 2010-2018 Red Hat, Inc. All rights reserved.
4 * 4 *
5 * This file is released under the GPL. 5 * This file is released under the GPL.
6 */ 6 */
@@ -29,9 +29,6 @@
29 */ 29 */
30#define MIN_RAID456_JOURNAL_SPACE (4*2048) 30#define MIN_RAID456_JOURNAL_SPACE (4*2048)
31 31
32/* Global list of all raid sets */
33static LIST_HEAD(raid_sets);
34
35static bool devices_handle_discard_safely = false; 32static bool devices_handle_discard_safely = false;
36 33
37/* 34/*
@@ -227,7 +224,6 @@ struct rs_layout {
227 224
228struct raid_set { 225struct raid_set {
229 struct dm_target *ti; 226 struct dm_target *ti;
230 struct list_head list;
231 227
232 uint32_t stripe_cache_entries; 228 uint32_t stripe_cache_entries;
233 unsigned long ctr_flags; 229 unsigned long ctr_flags;
@@ -273,19 +269,6 @@ static void rs_config_restore(struct raid_set *rs, struct rs_layout *l)
273 mddev->new_chunk_sectors = l->new_chunk_sectors; 269 mddev->new_chunk_sectors = l->new_chunk_sectors;
274} 270}
275 271
276/* Find any raid_set in active slot for @rs on global list */
277static struct raid_set *rs_find_active(struct raid_set *rs)
278{
279 struct raid_set *r;
280 struct mapped_device *md = dm_table_get_md(rs->ti->table);
281
282 list_for_each_entry(r, &raid_sets, list)
283 if (r != rs && dm_table_get_md(r->ti->table) == md)
284 return r;
285
286 return NULL;
287}
288
289/* raid10 algorithms (i.e. formats) */ 272/* raid10 algorithms (i.e. formats) */
290#define ALGORITHM_RAID10_DEFAULT 0 273#define ALGORITHM_RAID10_DEFAULT 0
291#define ALGORITHM_RAID10_NEAR 1 274#define ALGORITHM_RAID10_NEAR 1
@@ -764,7 +747,6 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r
764 747
765 mddev_init(&rs->md); 748 mddev_init(&rs->md);
766 749
767 INIT_LIST_HEAD(&rs->list);
768 rs->raid_disks = raid_devs; 750 rs->raid_disks = raid_devs;
769 rs->delta_disks = 0; 751 rs->delta_disks = 0;
770 752
@@ -782,9 +764,6 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r
782 for (i = 0; i < raid_devs; i++) 764 for (i = 0; i < raid_devs; i++)
783 md_rdev_init(&rs->dev[i].rdev); 765 md_rdev_init(&rs->dev[i].rdev);
784 766
785 /* Add @rs to global list. */
786 list_add(&rs->list, &raid_sets);
787
788 /* 767 /*
789 * Remaining items to be initialized by further RAID params: 768 * Remaining items to be initialized by further RAID params:
790 * rs->md.persistent 769 * rs->md.persistent
@@ -797,7 +776,7 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r
797 return rs; 776 return rs;
798} 777}
799 778
800/* Free all @rs allocations and remove it from global list. */ 779/* Free all @rs allocations */
801static void raid_set_free(struct raid_set *rs) 780static void raid_set_free(struct raid_set *rs)
802{ 781{
803 int i; 782 int i;
@@ -815,8 +794,6 @@ static void raid_set_free(struct raid_set *rs)
815 dm_put_device(rs->ti, rs->dev[i].data_dev); 794 dm_put_device(rs->ti, rs->dev[i].data_dev);
816 } 795 }
817 796
818 list_del(&rs->list);
819
820 kfree(rs); 797 kfree(rs);
821} 798}
822 799
@@ -2649,7 +2626,7 @@ static int rs_adjust_data_offsets(struct raid_set *rs)
2649 return 0; 2626 return 0;
2650 } 2627 }
2651 2628
2652 /* HM FIXME: get InSync raid_dev? */ 2629 /* HM FIXME: get In_Sync raid_dev? */
2653 rdev = &rs->dev[0].rdev; 2630 rdev = &rs->dev[0].rdev;
2654 2631
2655 if (rs->delta_disks < 0) { 2632 if (rs->delta_disks < 0) {
@@ -3149,6 +3126,11 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
3149 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); 3126 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
3150 rs_set_new(rs); 3127 rs_set_new(rs);
3151 } else if (rs_is_recovering(rs)) { 3128 } else if (rs_is_recovering(rs)) {
3129 /* Rebuild particular devices */
3130 if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) {
3131 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
3132 rs_setup_recovery(rs, MaxSector);
3133 }
3152 /* A recovering raid set may be resized */ 3134 /* A recovering raid set may be resized */
3153 ; /* skip setup rs */ 3135 ; /* skip setup rs */
3154 } else if (rs_is_reshaping(rs)) { 3136 } else if (rs_is_reshaping(rs)) {
@@ -3242,6 +3224,8 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
3242 /* Start raid set read-only and assumed clean to change in raid_resume() */ 3224 /* Start raid set read-only and assumed clean to change in raid_resume() */
3243 rs->md.ro = 1; 3225 rs->md.ro = 1;
3244 rs->md.in_sync = 1; 3226 rs->md.in_sync = 1;
3227
3228 /* Keep array frozen */
3245 set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery); 3229 set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
3246 3230
3247 /* Has to be held on running the array */ 3231 /* Has to be held on running the array */
@@ -3265,7 +3249,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
3265 rs->callbacks.congested_fn = raid_is_congested; 3249 rs->callbacks.congested_fn = raid_is_congested;
3266 dm_table_add_target_callbacks(ti->table, &rs->callbacks); 3250 dm_table_add_target_callbacks(ti->table, &rs->callbacks);
3267 3251
3268 /* If raid4/5/6 journal mode explictely requested (only possible with journal dev) -> set it */ 3252 /* If raid4/5/6 journal mode explicitly requested (only possible with journal dev) -> set it */
3269 if (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags)) { 3253 if (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags)) {
3270 r = r5c_journal_mode_set(&rs->md, rs->journal_dev.mode); 3254 r = r5c_journal_mode_set(&rs->md, rs->journal_dev.mode);
3271 if (r) { 3255 if (r) {
@@ -3350,32 +3334,53 @@ static int raid_map(struct dm_target *ti, struct bio *bio)
3350 return DM_MAPIO_SUBMITTED; 3334 return DM_MAPIO_SUBMITTED;
3351} 3335}
3352 3336
3353/* Return string describing the current sync action of @mddev */ 3337/* Return sync state string for @state */
3354static const char *decipher_sync_action(struct mddev *mddev, unsigned long recovery) 3338enum sync_state { st_frozen, st_reshape, st_resync, st_check, st_repair, st_recover, st_idle };
3339static const char *sync_str(enum sync_state state)
3340{
3341 /* Has to be in above sync_state order! */
3342 static const char *sync_strs[] = {
3343 "frozen",
3344 "reshape",
3345 "resync",
3346 "check",
3347 "repair",
3348 "recover",
3349 "idle"
3350 };
3351
3352 return __within_range(state, 0, ARRAY_SIZE(sync_strs) - 1) ? sync_strs[state] : "undef";
3353};
3354
3355/* Return enum sync_state for @mddev derived from @recovery flags */
3356static const enum sync_state decipher_sync_action(struct mddev *mddev, unsigned long recovery)
3355{ 3357{
3356 if (test_bit(MD_RECOVERY_FROZEN, &recovery)) 3358 if (test_bit(MD_RECOVERY_FROZEN, &recovery))
3357 return "frozen"; 3359 return st_frozen;
3358 3360
3359 /* The MD sync thread can be done with io but still be running */ 3361 /* The MD sync thread can be done with io or be interrupted but still be running */
3360 if (!test_bit(MD_RECOVERY_DONE, &recovery) && 3362 if (!test_bit(MD_RECOVERY_DONE, &recovery) &&
3361 (test_bit(MD_RECOVERY_RUNNING, &recovery) || 3363 (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
3362 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery)))) { 3364 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery)))) {
3363 if (test_bit(MD_RECOVERY_RESHAPE, &recovery)) 3365 if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
3364 return "reshape"; 3366 return st_reshape;
3365 3367
3366 if (test_bit(MD_RECOVERY_SYNC, &recovery)) { 3368 if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
3367 if (!test_bit(MD_RECOVERY_REQUESTED, &recovery)) 3369 if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
3368 return "resync"; 3370 return st_resync;
3369 else if (test_bit(MD_RECOVERY_CHECK, &recovery)) 3371 if (test_bit(MD_RECOVERY_CHECK, &recovery))
3370 return "check"; 3372 return st_check;
3371 return "repair"; 3373 return st_repair;
3372 } 3374 }
3373 3375
3374 if (test_bit(MD_RECOVERY_RECOVER, &recovery)) 3376 if (test_bit(MD_RECOVERY_RECOVER, &recovery))
3375 return "recover"; 3377 return st_recover;
3378
3379 if (mddev->reshape_position != MaxSector)
3380 return st_reshape;
3376 } 3381 }
3377 3382
3378 return "idle"; 3383 return st_idle;
3379} 3384}
3380 3385
3381/* 3386/*
@@ -3409,6 +3414,7 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
3409 sector_t resync_max_sectors) 3414 sector_t resync_max_sectors)
3410{ 3415{
3411 sector_t r; 3416 sector_t r;
3417 enum sync_state state;
3412 struct mddev *mddev = &rs->md; 3418 struct mddev *mddev = &rs->md;
3413 3419
3414 clear_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags); 3420 clear_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
@@ -3419,20 +3425,14 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
3419 set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags); 3425 set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
3420 3426
3421 } else { 3427 } else {
3422 if (!test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags) && 3428 state = decipher_sync_action(mddev, recovery);
3423 !test_bit(MD_RECOVERY_INTR, &recovery) && 3429
3424 (test_bit(MD_RECOVERY_NEEDED, &recovery) || 3430 if (state == st_idle && !test_bit(MD_RECOVERY_INTR, &recovery))
3425 test_bit(MD_RECOVERY_RESHAPE, &recovery) ||
3426 test_bit(MD_RECOVERY_RUNNING, &recovery)))
3427 r = mddev->curr_resync_completed;
3428 else
3429 r = mddev->recovery_cp; 3431 r = mddev->recovery_cp;
3432 else
3433 r = mddev->curr_resync_completed;
3430 3434
3431 if (r >= resync_max_sectors && 3435 if (state == st_idle && r >= resync_max_sectors) {
3432 (!test_bit(MD_RECOVERY_REQUESTED, &recovery) ||
3433 (!test_bit(MD_RECOVERY_FROZEN, &recovery) &&
3434 !test_bit(MD_RECOVERY_NEEDED, &recovery) &&
3435 !test_bit(MD_RECOVERY_RUNNING, &recovery)))) {
3436 /* 3436 /*
3437 * Sync complete. 3437 * Sync complete.
3438 */ 3438 */
@@ -3440,24 +3440,20 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
3440 if (test_bit(MD_RECOVERY_RECOVER, &recovery)) 3440 if (test_bit(MD_RECOVERY_RECOVER, &recovery))
3441 set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags); 3441 set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
3442 3442
3443 } else if (test_bit(MD_RECOVERY_RECOVER, &recovery)) { 3443 } else if (state == st_recover)
3444 /* 3444 /*
3445 * In case we are recovering, the array is not in sync 3445 * In case we are recovering, the array is not in sync
3446 * and health chars should show the recovering legs. 3446 * and health chars should show the recovering legs.
3447 */ 3447 */
3448 ; 3448 ;
3449 3449 else if (state == st_resync)
3450 } else if (test_bit(MD_RECOVERY_SYNC, &recovery) &&
3451 !test_bit(MD_RECOVERY_REQUESTED, &recovery)) {
3452 /* 3450 /*
3453 * If "resync" is occurring, the raid set 3451 * If "resync" is occurring, the raid set
3454 * is or may be out of sync hence the health 3452 * is or may be out of sync hence the health
3455 * characters shall be 'a'. 3453 * characters shall be 'a'.
3456 */ 3454 */
3457 set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags); 3455 set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
3458 3456 else if (state == st_reshape)
3459 } else if (test_bit(MD_RECOVERY_RESHAPE, &recovery) &&
3460 !test_bit(MD_RECOVERY_REQUESTED, &recovery)) {
3461 /* 3457 /*
3462 * If "reshape" is occurring, the raid set 3458 * If "reshape" is occurring, the raid set
3463 * is or may be out of sync hence the health 3459 * is or may be out of sync hence the health
@@ -3465,7 +3461,7 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
3465 */ 3461 */
3466 set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags); 3462 set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
3467 3463
3468 } else if (test_bit(MD_RECOVERY_REQUESTED, &recovery)) { 3464 else if (state == st_check || state == st_repair)
3469 /* 3465 /*
3470 * If "check" or "repair" is occurring, the raid set has 3466 * If "check" or "repair" is occurring, the raid set has
3471 * undergone an initial sync and the health characters 3467 * undergone an initial sync and the health characters
@@ -3473,12 +3469,12 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
3473 */ 3469 */
3474 set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags); 3470 set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
3475 3471
3476 } else { 3472 else {
3477 struct md_rdev *rdev; 3473 struct md_rdev *rdev;
3478 3474
3479 /* 3475 /*
3480 * We are idle and recovery is needed, prevent 'A' chars race 3476 * We are idle and recovery is needed, prevent 'A' chars race
3481 * caused by components still set to in-sync by constrcuctor. 3477 * caused by components still set to in-sync by constructor.
3482 */ 3478 */
3483 if (test_bit(MD_RECOVERY_NEEDED, &recovery)) 3479 if (test_bit(MD_RECOVERY_NEEDED, &recovery))
3484 set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags); 3480 set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
@@ -3542,7 +3538,7 @@ static void raid_status(struct dm_target *ti, status_type_t type,
3542 progress = rs_get_progress(rs, recovery, resync_max_sectors); 3538 progress = rs_get_progress(rs, recovery, resync_max_sectors);
3543 resync_mismatches = (mddev->last_sync_action && !strcasecmp(mddev->last_sync_action, "check")) ? 3539 resync_mismatches = (mddev->last_sync_action && !strcasecmp(mddev->last_sync_action, "check")) ?
3544 atomic64_read(&mddev->resync_mismatches) : 0; 3540 atomic64_read(&mddev->resync_mismatches) : 0;
3545 sync_action = decipher_sync_action(&rs->md, recovery); 3541 sync_action = sync_str(decipher_sync_action(&rs->md, recovery));
3546 3542
3547 /* HM FIXME: do we want another state char for raid0? It shows 'D'/'A'/'-' now */ 3543 /* HM FIXME: do we want another state char for raid0? It shows 'D'/'A'/'-' now */
3548 for (i = 0; i < rs->raid_disks; i++) 3544 for (i = 0; i < rs->raid_disks; i++)
@@ -3892,14 +3888,13 @@ static int rs_start_reshape(struct raid_set *rs)
3892 struct mddev *mddev = &rs->md; 3888 struct mddev *mddev = &rs->md;
3893 struct md_personality *pers = mddev->pers; 3889 struct md_personality *pers = mddev->pers;
3894 3890
3891 /* Don't allow the sync thread to work until the table gets reloaded. */
3892 set_bit(MD_RECOVERY_WAIT, &mddev->recovery);
3893
3895 r = rs_setup_reshape(rs); 3894 r = rs_setup_reshape(rs);
3896 if (r) 3895 if (r)
3897 return r; 3896 return r;
3898 3897
3899 /* Need to be resumed to be able to start reshape, recovery is frozen until raid_resume() though */
3900 if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags))
3901 mddev_resume(mddev);
3902
3903 /* 3898 /*
3904 * Check any reshape constraints enforced by the personality 3899 * Check any reshape constraints enforced by the personality
3905 * 3900 *
@@ -3923,10 +3918,6 @@ static int rs_start_reshape(struct raid_set *rs)
3923 } 3918 }
3924 } 3919 }
3925 3920
3926 /* Suspend because a resume will happen in raid_resume() */
3927 set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags);
3928 mddev_suspend(mddev);
3929
3930 /* 3921 /*
3931 * Now reshape got set up, update superblocks to 3922 * Now reshape got set up, update superblocks to
3932 * reflect the fact so that a table reload will 3923 * reflect the fact so that a table reload will
@@ -3947,29 +3938,6 @@ static int raid_preresume(struct dm_target *ti)
3947 if (test_and_set_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags)) 3938 if (test_and_set_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags))
3948 return 0; 3939 return 0;
3949 3940
3950 if (!test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) {
3951 struct raid_set *rs_active = rs_find_active(rs);
3952
3953 if (rs_active) {
3954 /*
3955 * In case no rebuilds have been requested
3956 * and an active table slot exists, copy
3957 * current resynchonization completed and
3958 * reshape position pointers across from
3959 * suspended raid set in the active slot.
3960 *
3961 * This resumes the new mapping at current
3962 * offsets to continue recover/reshape without
3963 * necessarily redoing a raid set partially or
3964 * causing data corruption in case of a reshape.
3965 */
3966 if (rs_active->md.curr_resync_completed != MaxSector)
3967 mddev->curr_resync_completed = rs_active->md.curr_resync_completed;
3968 if (rs_active->md.reshape_position != MaxSector)
3969 mddev->reshape_position = rs_active->md.reshape_position;
3970 }
3971 }
3972
3973 /* 3941 /*
3974 * The superblocks need to be updated on disk if the 3942 * The superblocks need to be updated on disk if the
3975 * array is new or new devices got added (thus zeroed 3943 * array is new or new devices got added (thus zeroed
@@ -4046,7 +4014,7 @@ static void raid_resume(struct dm_target *ti)
4046 4014
4047static struct target_type raid_target = { 4015static struct target_type raid_target = {
4048 .name = "raid", 4016 .name = "raid",
4049 .version = {1, 13, 2}, 4017 .version = {1, 14, 0},
4050 .module = THIS_MODULE, 4018 .module = THIS_MODULE,
4051 .ctr = raid_ctr, 4019 .ctr = raid_ctr,
4052 .dtr = raid_dtr, 4020 .dtr = raid_dtr,
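The dm-raid refactor above splits "which state are we in" from "how do we print it": decipher_sync_action() now returns an enum and sync_str() maps it through a string table whose order must match the enum. A self-contained sketch of that pattern, including the bounds check that guards the table lookup:

#include <stdio.h>

/* Order must match the string table below, as in the dm-raid hunk. */
enum sync_state { st_frozen, st_reshape, st_resync, st_check,
		  st_repair, st_recover, st_idle };

static const char *sync_str(enum sync_state state)
{
	static const char *sync_strs[] = {
		"frozen", "reshape", "resync", "check",
		"repair", "recover", "idle",
	};
	unsigned n = sizeof(sync_strs) / sizeof(sync_strs[0]);

	/* Bounds-check so a bogus enum value can't index off the table. */
	return ((unsigned)state < n) ? sync_strs[state] : "undef";
}

int main(void)
{
	enum sync_state s = st_recover;	/* pretend decipher returned this */

	printf("status: %s\n", sync_str(s));
	printf("bogus:  %s\n", sync_str((enum sync_state)99));
	return 0;
}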
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 72142021b5c9..74f6770c70b1 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -189,6 +189,12 @@ struct dm_pool_metadata {
189 sector_t data_block_size; 189 sector_t data_block_size;
190 190
191 /* 191 /*
192 * We reserve a section of the metadata for commit overhead.
193 * All reported space does *not* include this.
194 */
195 dm_block_t metadata_reserve;
196
197 /*
192 * Set if a transaction has to be aborted but the attempt to roll back 198 * Set if a transaction has to be aborted but the attempt to roll back
193 * to the previous (good) transaction failed. The only pool metadata 199 * to the previous (good) transaction failed. The only pool metadata
194 * operation possible in this state is the closing of the device. 200 * operation possible in this state is the closing of the device.
@@ -816,6 +822,22 @@ static int __commit_transaction(struct dm_pool_metadata *pmd)
816 return dm_tm_commit(pmd->tm, sblock); 822 return dm_tm_commit(pmd->tm, sblock);
817} 823}
818 824
825static void __set_metadata_reserve(struct dm_pool_metadata *pmd)
826{
827 int r;
828 dm_block_t total;
829 dm_block_t max_blocks = 4096; /* 16M */
830
831 r = dm_sm_get_nr_blocks(pmd->metadata_sm, &total);
832 if (r) {
833 DMERR("could not get size of metadata device");
834 pmd->metadata_reserve = max_blocks;
835 } else {
836 sector_div(total, 10);
837 pmd->metadata_reserve = min(max_blocks, total);
838 }
839}
840
819struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev, 841struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
820 sector_t data_block_size, 842 sector_t data_block_size,
821 bool format_device) 843 bool format_device)
@@ -849,6 +871,8 @@ struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
849 return ERR_PTR(r); 871 return ERR_PTR(r);
850 } 872 }
851 873
874 __set_metadata_reserve(pmd);
875
852 return pmd; 876 return pmd;
853} 877}
854 878
@@ -1820,6 +1844,13 @@ int dm_pool_get_free_metadata_block_count(struct dm_pool_metadata *pmd,
1820 down_read(&pmd->root_lock); 1844 down_read(&pmd->root_lock);
1821 if (!pmd->fail_io) 1845 if (!pmd->fail_io)
1822 r = dm_sm_get_nr_free(pmd->metadata_sm, result); 1846 r = dm_sm_get_nr_free(pmd->metadata_sm, result);
1847
1848 if (!r) {
1849 if (*result < pmd->metadata_reserve)
1850 *result = 0;
1851 else
1852 *result -= pmd->metadata_reserve;
1853 }
1823 up_read(&pmd->root_lock); 1854 up_read(&pmd->root_lock);
1824 1855
1825 return r; 1856 return r;
@@ -1932,8 +1963,11 @@ int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_cou
1932 int r = -EINVAL; 1963 int r = -EINVAL;
1933 1964
1934 down_write(&pmd->root_lock); 1965 down_write(&pmd->root_lock);
1935 if (!pmd->fail_io) 1966 if (!pmd->fail_io) {
1936 r = __resize_space_map(pmd->metadata_sm, new_count); 1967 r = __resize_space_map(pmd->metadata_sm, new_count);
1968 if (!r)
1969 __set_metadata_reserve(pmd);
1970 }
1937 up_write(&pmd->root_lock); 1971 up_write(&pmd->root_lock);
1938 1972
1939 return r; 1973 return r;
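__set_metadata_reserve() above holds back min(4096 blocks, 10% of the metadata device) and the free-count query subtracts the reserve, so a commit always has headroom even when the pool believes it is out of space. A sketch of just that arithmetic, with illustrative block counts:

#include <stdio.h>

typedef unsigned long long dm_block_t;

static dm_block_t metadata_reserve;

/* Reserve the smaller of 4096 blocks (16M at 4K) or 10% of the device. */
static void set_metadata_reserve(dm_block_t total)
{
	dm_block_t max_blocks = 4096;
	dm_block_t tenth = total / 10;

	metadata_reserve = tenth < max_blocks ? tenth : max_blocks;
}

/* Reported free space never dips into the reserve. */
static dm_block_t reported_free(dm_block_t actual_free)
{
	return actual_free < metadata_reserve ?
			0 : actual_free - metadata_reserve;
}

int main(void)
{
	set_metadata_reserve(20000);	/* small device: reserve 2000 */
	printf("reserve=%llu, 2500 free reported as %llu\n",
	       metadata_reserve, reported_free(2500));

	set_metadata_reserve(1000000);	/* big device: capped at 4096 */
	printf("reserve=%llu, 2500 free reported as %llu\n",
	       metadata_reserve, reported_free(2500));
	return 0;
}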
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 7bd60a150f8f..aaf1ad481ee8 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -200,7 +200,13 @@ struct dm_thin_new_mapping;
200enum pool_mode { 200enum pool_mode {
201 PM_WRITE, /* metadata may be changed */ 201 PM_WRITE, /* metadata may be changed */
202 PM_OUT_OF_DATA_SPACE, /* metadata may be changed, though data may not be allocated */ 202 PM_OUT_OF_DATA_SPACE, /* metadata may be changed, though data may not be allocated */
203
204 /*
205 * Like READ_ONLY, except may switch back to WRITE on metadata resize. Reported as READ_ONLY.
206 */
207 PM_OUT_OF_METADATA_SPACE,
203 PM_READ_ONLY, /* metadata may not be changed */ 208 PM_READ_ONLY, /* metadata may not be changed */
209
204 PM_FAIL, /* all I/O fails */ 210 PM_FAIL, /* all I/O fails */
205}; 211};
206 212
@@ -1371,7 +1377,35 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
1371 1377
1372static void requeue_bios(struct pool *pool); 1378static void requeue_bios(struct pool *pool);
1373 1379
1374static void check_for_space(struct pool *pool) 1380static bool is_read_only_pool_mode(enum pool_mode mode)
1381{
1382 return (mode == PM_OUT_OF_METADATA_SPACE || mode == PM_READ_ONLY);
1383}
1384
1385static bool is_read_only(struct pool *pool)
1386{
1387 return is_read_only_pool_mode(get_pool_mode(pool));
1388}
1389
1390static void check_for_metadata_space(struct pool *pool)
1391{
1392 int r;
1393 const char *ooms_reason = NULL;
1394 dm_block_t nr_free;
1395
1396 r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free);
1397 if (r)
1398 ooms_reason = "Could not get free metadata blocks";
1399 else if (!nr_free)
1400 ooms_reason = "No free metadata blocks";
1401
1402 if (ooms_reason && !is_read_only(pool)) {
1403 DMERR("%s", ooms_reason);
1404 set_pool_mode(pool, PM_OUT_OF_METADATA_SPACE);
1405 }
1406}
1407
1408static void check_for_data_space(struct pool *pool)
1375{ 1409{
1376 int r; 1410 int r;
1377 dm_block_t nr_free; 1411 dm_block_t nr_free;
@@ -1397,14 +1431,16 @@ static int commit(struct pool *pool)
1397{ 1431{
1398 int r; 1432 int r;
1399 1433
1400 if (get_pool_mode(pool) >= PM_READ_ONLY) 1434 if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE)
1401 return -EINVAL; 1435 return -EINVAL;
1402 1436
1403 r = dm_pool_commit_metadata(pool->pmd); 1437 r = dm_pool_commit_metadata(pool->pmd);
1404 if (r) 1438 if (r)
1405 metadata_operation_failed(pool, "dm_pool_commit_metadata", r); 1439 metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
1406 else 1440 else {
1407 check_for_space(pool); 1441 check_for_metadata_space(pool);
1442 check_for_data_space(pool);
1443 }
1408 1444
1409 return r; 1445 return r;
1410} 1446}
@@ -1470,6 +1506,19 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
1470 return r; 1506 return r;
1471 } 1507 }
1472 1508
1509 r = dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks);
1510 if (r) {
1511 metadata_operation_failed(pool, "dm_pool_get_free_metadata_block_count", r);
1512 return r;
1513 }
1514
1515 if (!free_blocks) {
1516 /* Let's commit before we use up the metadata reserve. */
1517 r = commit(pool);
1518 if (r)
1519 return r;
1520 }
1521
1473 return 0; 1522 return 0;
1474} 1523}
1475 1524
@@ -1501,6 +1550,7 @@ static blk_status_t should_error_unserviceable_bio(struct pool *pool)
1501 case PM_OUT_OF_DATA_SPACE: 1550 case PM_OUT_OF_DATA_SPACE:
1502 return pool->pf.error_if_no_space ? BLK_STS_NOSPC : 0; 1551 return pool->pf.error_if_no_space ? BLK_STS_NOSPC : 0;
1503 1552
1553 case PM_OUT_OF_METADATA_SPACE:
1504 case PM_READ_ONLY: 1554 case PM_READ_ONLY:
1505 case PM_FAIL: 1555 case PM_FAIL:
1506 return BLK_STS_IOERR; 1556 return BLK_STS_IOERR;
@@ -2464,8 +2514,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
2464 error_retry_list(pool); 2514 error_retry_list(pool);
2465 break; 2515 break;
2466 2516
2517 case PM_OUT_OF_METADATA_SPACE:
2467 case PM_READ_ONLY: 2518 case PM_READ_ONLY:
2468 if (old_mode != new_mode) 2519 if (!is_read_only_pool_mode(old_mode))
2469 notify_of_pool_mode_change(pool, "read-only"); 2520 notify_of_pool_mode_change(pool, "read-only");
2470 dm_pool_metadata_read_only(pool->pmd); 2521 dm_pool_metadata_read_only(pool->pmd);
2471 pool->process_bio = process_bio_read_only; 2522 pool->process_bio = process_bio_read_only;
@@ -3403,6 +3454,10 @@ static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
3403 DMINFO("%s: growing the metadata device from %llu to %llu blocks", 3454 DMINFO("%s: growing the metadata device from %llu to %llu blocks",
3404 dm_device_name(pool->pool_md), 3455 dm_device_name(pool->pool_md),
3405 sb_metadata_dev_size, metadata_dev_size); 3456 sb_metadata_dev_size, metadata_dev_size);
3457
3458 if (get_pool_mode(pool) == PM_OUT_OF_METADATA_SPACE)
3459 set_pool_mode(pool, PM_WRITE);
3460
3406 r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size); 3461 r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
3407 if (r) { 3462 if (r) {
3408 metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r); 3463 metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r);
@@ -3707,7 +3762,7 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv,
3707 struct pool_c *pt = ti->private; 3762 struct pool_c *pt = ti->private;
3708 struct pool *pool = pt->pool; 3763 struct pool *pool = pt->pool;
3709 3764
3710 if (get_pool_mode(pool) >= PM_READ_ONLY) { 3765 if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE) {
3711 DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode", 3766 DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode",
3712 dm_device_name(pool->pool_md)); 3767 dm_device_name(pool->pool_md));
3713 return -EOPNOTSUPP; 3768 return -EOPNOTSUPP;
@@ -3781,6 +3836,7 @@ static void pool_status(struct dm_target *ti, status_type_t type,
3781 dm_block_t nr_blocks_data; 3836 dm_block_t nr_blocks_data;
3782 dm_block_t nr_blocks_metadata; 3837 dm_block_t nr_blocks_metadata;
3783 dm_block_t held_root; 3838 dm_block_t held_root;
3839 enum pool_mode mode;
3784 char buf[BDEVNAME_SIZE]; 3840 char buf[BDEVNAME_SIZE];
3785 char buf2[BDEVNAME_SIZE]; 3841 char buf2[BDEVNAME_SIZE];
3786 struct pool_c *pt = ti->private; 3842 struct pool_c *pt = ti->private;
@@ -3851,9 +3907,10 @@ static void pool_status(struct dm_target *ti, status_type_t type,
3851 else 3907 else
3852 DMEMIT("- "); 3908 DMEMIT("- ");
3853 3909
3854 if (pool->pf.mode == PM_OUT_OF_DATA_SPACE) 3910 mode = get_pool_mode(pool);
3911 if (mode == PM_OUT_OF_DATA_SPACE)
3855 DMEMIT("out_of_data_space "); 3912 DMEMIT("out_of_data_space ");
3856 else if (pool->pf.mode == PM_READ_ONLY) 3913 else if (is_read_only_pool_mode(mode))
3857 DMEMIT("ro "); 3914 DMEMIT("ro ");
3858 else 3915 else
3859 DMEMIT("rw "); 3916 DMEMIT("rw ");
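The new PM_OUT_OF_METADATA_SPACE sits between PM_OUT_OF_DATA_SPACE and PM_READ_ONLY in the enum, so every ordered comparison such as "mode >= PM_READ_ONLY" in the file had to be re-audited; helpers like is_read_only_pool_mode() make the intent explicit. A short sketch of the ordering pitfall, using the same mode names:

#include <stdio.h>
#include <stdbool.h>

/* Severity-ordered, as in dm-thin: comparisons rely on this order. */
enum pool_mode { PM_WRITE, PM_OUT_OF_DATA_SPACE,
		 PM_OUT_OF_METADATA_SPACE, PM_READ_ONLY, PM_FAIL };

static bool is_read_only_pool_mode(enum pool_mode mode)
{
	return mode == PM_OUT_OF_METADATA_SPACE || mode == PM_READ_ONLY;
}

static bool commit_allowed(enum pool_mode mode)
{
	/*
	 * The old test was `mode >= PM_READ_ONLY`; after inserting the
	 * new state it must start one value earlier, or commits would
	 * still be attempted while out of metadata space.
	 */
	return mode < PM_OUT_OF_METADATA_SPACE;
}

int main(void)
{
	enum pool_mode m = PM_OUT_OF_METADATA_SPACE;

	printf("reported ro: %d, commit allowed: %d\n",
	       is_read_only_pool_mode(m), commit_allowed(m));
	return 0;
}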
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 12decdbd722d..fc65f0dedf7f 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -99,10 +99,26 @@ static int verity_hash_update(struct dm_verity *v, struct ahash_request *req,
99{ 99{
100 struct scatterlist sg; 100 struct scatterlist sg;
101 101
102 sg_init_one(&sg, data, len); 102 if (likely(!is_vmalloc_addr(data))) {
103 ahash_request_set_crypt(req, &sg, NULL, len); 103 sg_init_one(&sg, data, len);
104 104 ahash_request_set_crypt(req, &sg, NULL, len);
105 return crypto_wait_req(crypto_ahash_update(req), wait); 105 return crypto_wait_req(crypto_ahash_update(req), wait);
106 } else {
107 do {
108 int r;
109 size_t this_step = min_t(size_t, len, PAGE_SIZE - offset_in_page(data));
110 flush_kernel_vmap_range((void *)data, this_step);
111 sg_init_table(&sg, 1);
112 sg_set_page(&sg, vmalloc_to_page(data), this_step, offset_in_page(data));
113 ahash_request_set_crypt(req, &sg, NULL, this_step);
114 r = crypto_wait_req(crypto_ahash_update(req), wait);
115 if (unlikely(r))
116 return r;
117 data += this_step;
118 len -= this_step;
119 } while (len);
120 return 0;
121 }
106} 122}
107 123
108/* 124/*
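vmalloc memory is only virtually contiguous, so it cannot be described with a single scatterlist entry; the dm-verity hunk hashes it in page-bounded chunks, each at most PAGE_SIZE - offset_in_page(data) bytes long. A userspace sketch of the step computation, with hash_chunk() standing in for one crypto update:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL
#define offset_in_page(p) ((uintptr_t)(p) & (PAGE_SIZE - 1))

/* Stand-in for one crypto update over a physically contiguous chunk. */
static void hash_chunk(const char *data, size_t len)
{
	printf("update: %zu bytes (page offset %lu)\n",
	       len, (unsigned long)offset_in_page(data));
}

/* Walk a virtually contiguous buffer without crossing page boundaries. */
static void hash_buffer(const char *data, size_t len)
{
	while (len) {
		size_t this_step = PAGE_SIZE - offset_in_page(data);

		if (this_step > len)
			this_step = len;
		hash_chunk(data, this_step);
		data += this_step;
		len -= this_step;
	}
}

int main(void)
{
	static char buf[3 * PAGE_SIZE];

	/* Start unaligned to show the short first chunk. */
	hash_buffer(buf + 100, 2 * PAGE_SIZE);
	return 0;
}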
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 94329e03001e..0b2af6e74fc3 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -1276,18 +1276,18 @@ static int resync_info_update(struct mddev *mddev, sector_t lo, sector_t hi)
1276static int resync_finish(struct mddev *mddev) 1276static int resync_finish(struct mddev *mddev)
1277{ 1277{
1278 struct md_cluster_info *cinfo = mddev->cluster_info; 1278 struct md_cluster_info *cinfo = mddev->cluster_info;
1279 int ret = 0;
1279 1280
1280 clear_bit(MD_RESYNCING_REMOTE, &mddev->recovery); 1281 clear_bit(MD_RESYNCING_REMOTE, &mddev->recovery);
1281 dlm_unlock_sync(cinfo->resync_lockres);
1282 1282
1283 /* 1283 /*
1284 * If resync thread is interrupted so we can't say resync is finished, 1284 * If resync thread is interrupted so we can't say resync is finished,
1285 * another node will launch resync thread to continue. 1285 * another node will launch resync thread to continue.
1286 */ 1286 */
1287 if (test_bit(MD_CLOSING, &mddev->flags)) 1287 if (!test_bit(MD_CLOSING, &mddev->flags))
1288 return 0; 1288 ret = resync_info_update(mddev, 0, 0);
1289 else 1289 dlm_unlock_sync(cinfo->resync_lockres);
1290 return resync_info_update(mddev, 0, 0); 1290 return ret;
1291} 1291}
1292 1292
1293static int area_resyncing(struct mddev *mddev, int direction, 1293static int area_resyncing(struct mddev *mddev, int direction,
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 981898049491..d6f7978b4449 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -4529,11 +4529,12 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
4529 allow_barrier(conf); 4529 allow_barrier(conf);
4530 } 4530 }
4531 4531
4532 raise_barrier(conf, 0);
4532read_more: 4533read_more:
4533 /* Now schedule reads for blocks from sector_nr to last */ 4534 /* Now schedule reads for blocks from sector_nr to last */
4534 r10_bio = raid10_alloc_init_r10buf(conf); 4535 r10_bio = raid10_alloc_init_r10buf(conf);
4535 r10_bio->state = 0; 4536 r10_bio->state = 0;
4536 raise_barrier(conf, sectors_done != 0); 4537 raise_barrier(conf, 1);
4537 atomic_set(&r10_bio->remaining, 0); 4538 atomic_set(&r10_bio->remaining, 0);
4538 r10_bio->mddev = mddev; 4539 r10_bio->mddev = mddev;
4539 r10_bio->sector = sector_nr; 4540 r10_bio->sector = sector_nr;
@@ -4629,6 +4630,8 @@ read_more:
4629 if (sector_nr <= last) 4630 if (sector_nr <= last)
4630 goto read_more; 4631 goto read_more;
4631 4632
4633 lower_barrier(conf);
4634
4632 /* Now that we have done the whole section we can 4635 /* Now that we have done the whole section we can
4633 * update reshape_progress 4636 * update reshape_progress
4634 */ 4637 */
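The raid10 hunk takes one outer barrier before the read_more loop, a nested reference per pass (raise_barrier(conf, 1)), and drops the outer one after the loop; the per-pass reference is released when that batch's I/O completes, outside this hunk. A counting sketch of keeping such raise/lower pairs balanced, with a plain counter standing in for the conf barrier:

/*
 * The counter must return to zero: one raise held across the whole
 * window, one raise per batch, one lower per completed batch, and a
 * final lower matching the initial raise.
 */
#include <stdio.h>
#include <assert.h>

static int barrier;

static void raise_barrier(void) { barrier++; }
static void lower_barrier(void) { assert(barrier > 0); barrier--; }

int main(void)
{
	raise_barrier();		/* held across the reshape window */

	for (int batch = 0; batch < 3; batch++) {
		raise_barrier();	/* per-batch reference */
		/* ... schedule reads/writes for this batch ... */
		lower_barrier();	/* dropped when the batch completes */
	}

	lower_barrier();		/* matches the initial raise */
	printf("barrier balanced: %d\n", barrier);
	assert(barrier == 0);
	return 0;
}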
diff --git a/drivers/md/raid5-log.h b/drivers/md/raid5-log.h
index a001808a2b77..bfb811407061 100644
--- a/drivers/md/raid5-log.h
+++ b/drivers/md/raid5-log.h
@@ -46,6 +46,11 @@ extern int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add);
46extern void ppl_quiesce(struct r5conf *conf, int quiesce); 46extern void ppl_quiesce(struct r5conf *conf, int quiesce);
47extern int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio); 47extern int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio);
48 48
49static inline bool raid5_has_log(struct r5conf *conf)
50{
51 return test_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
52}
53
49static inline bool raid5_has_ppl(struct r5conf *conf) 54static inline bool raid5_has_ppl(struct r5conf *conf)
50{ 55{
51 return test_bit(MD_HAS_PPL, &conf->mddev->flags); 56 return test_bit(MD_HAS_PPL, &conf->mddev->flags);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 4ce0d7502fad..e4e98f47865d 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -733,7 +733,7 @@ static bool stripe_can_batch(struct stripe_head *sh)
733{ 733{
734 struct r5conf *conf = sh->raid_conf; 734 struct r5conf *conf = sh->raid_conf;
735 735
736 if (conf->log || raid5_has_ppl(conf)) 736 if (raid5_has_log(conf) || raid5_has_ppl(conf))
737 return false; 737 return false;
738 return test_bit(STRIPE_BATCH_READY, &sh->state) && 738 return test_bit(STRIPE_BATCH_READY, &sh->state) &&
739 !test_bit(STRIPE_BITMAP_PENDING, &sh->state) && 739 !test_bit(STRIPE_BITMAP_PENDING, &sh->state) &&
@@ -7737,7 +7737,7 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors)
7737 sector_t newsize; 7737 sector_t newsize;
7738 struct r5conf *conf = mddev->private; 7738 struct r5conf *conf = mddev->private;
7739 7739
7740 if (conf->log || raid5_has_ppl(conf)) 7740 if (raid5_has_log(conf) || raid5_has_ppl(conf))
7741 return -EINVAL; 7741 return -EINVAL;
7742 sectors &= ~((sector_t)conf->chunk_sectors - 1); 7742 sectors &= ~((sector_t)conf->chunk_sectors - 1);
7743 newsize = raid5_size(mddev, sectors, mddev->raid_disks); 7743 newsize = raid5_size(mddev, sectors, mddev->raid_disks);
@@ -7788,7 +7788,7 @@ static int check_reshape(struct mddev *mddev)
7788{ 7788{
7789 struct r5conf *conf = mddev->private; 7789 struct r5conf *conf = mddev->private;
7790 7790
7791 if (conf->log || raid5_has_ppl(conf)) 7791 if (raid5_has_log(conf) || raid5_has_ppl(conf))
7792 return -EINVAL; 7792 return -EINVAL;
7793 if (mddev->delta_disks == 0 && 7793 if (mddev->delta_disks == 0 &&
7794 mddev->new_layout == mddev->layout && 7794 mddev->new_layout == mddev->layout &&
diff --git a/drivers/memory/ti-aemif.c b/drivers/memory/ti-aemif.c
index 31112f622b88..475e5b3790ed 100644
--- a/drivers/memory/ti-aemif.c
+++ b/drivers/memory/ti-aemif.c
@@ -411,7 +411,7 @@ static int aemif_probe(struct platform_device *pdev)
411 if (ret < 0) 411 if (ret < 0)
412 goto error; 412 goto error;
413 } 413 }
414 } else { 414 } else if (pdata) {
415 for (i = 0; i < pdata->num_sub_devices; i++) { 415 for (i = 0; i < pdata->num_sub_devices; i++) {
416 pdata->sub_devices[i].dev.parent = dev; 416 pdata->sub_devices[i].dev.parent = dev;
417 ret = platform_device_register(&pdata->sub_devices[i]); 417 ret = platform_device_register(&pdata->sub_devices[i]);
diff --git a/drivers/misc/hmc6352.c b/drivers/misc/hmc6352.c
index eeb7eef62174..38f90e179927 100644
--- a/drivers/misc/hmc6352.c
+++ b/drivers/misc/hmc6352.c
@@ -27,6 +27,7 @@
27#include <linux/err.h> 27#include <linux/err.h>
28#include <linux/delay.h> 28#include <linux/delay.h>
29#include <linux/sysfs.h> 29#include <linux/sysfs.h>
30#include <linux/nospec.h>
30 31
31static DEFINE_MUTEX(compass_mutex); 32static DEFINE_MUTEX(compass_mutex);
32 33
@@ -50,6 +51,7 @@ static int compass_store(struct device *dev, const char *buf, size_t count,
50 return ret; 51 return ret;
51 if (val >= strlen(map)) 52 if (val >= strlen(map))
52 return -EINVAL; 53 return -EINVAL;
54 val = array_index_nospec(val, strlen(map));
53 mutex_lock(&compass_mutex); 55 mutex_lock(&compass_mutex);
54 ret = compass_command(c, map[val]); 56 ret = compass_command(c, map[val]);
55 mutex_unlock(&compass_mutex); 57 mutex_unlock(&compass_mutex);
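After the bounds check, val is re-clamped with array_index_nospec() so a speculatively out-of-range value can never index map[]. Below is a simplified branchless-mask version of the idea; the kernel's real macro is hardened further against compiler code generation, so treat this as a sketch of the technique only:

#include <stdio.h>
#include <string.h>

/*
 * Produce an all-ones mask when idx < size and zero otherwise, then
 * AND it into the index so even a mispredicted bounds check cannot
 * yield an out-of-range load.
 */
static size_t index_nospec(size_t idx, size_t size)
{
	size_t mask = 0UL - (size_t)(idx < size);

	return idx & mask;
}

int main(void)
{
	const char map[] = "POWS";	/* like the compass command table */
	size_t val = 2;

	if (val >= strlen(map))
		return 1;
	val = index_nospec(val, strlen(map));
	printf("command: %c\n", map[val]);
	return 0;
}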
diff --git a/drivers/misc/ibmvmc.c b/drivers/misc/ibmvmc.c
index 8f82bb9d11e2..b8aaa684c397 100644
--- a/drivers/misc/ibmvmc.c
+++ b/drivers/misc/ibmvmc.c
@@ -2131,7 +2131,7 @@ static int ibmvmc_init_crq_queue(struct crq_server_adapter *adapter)
2131 retrc = plpar_hcall_norets(H_REG_CRQ, 2131 retrc = plpar_hcall_norets(H_REG_CRQ,
2132 vdev->unit_address, 2132 vdev->unit_address,
2133 queue->msg_token, PAGE_SIZE); 2133 queue->msg_token, PAGE_SIZE);
2134 retrc = rc; 2134 rc = retrc;
2135 2135
2136 if (rc == H_RESOURCE) 2136 if (rc == H_RESOURCE)
2137 rc = ibmvmc_reset_crq_queue(adapter); 2137 rc = ibmvmc_reset_crq_queue(adapter);
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 7bba62a72921..fc3872fe7b25 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -521,17 +521,15 @@ int mei_cldev_enable(struct mei_cl_device *cldev)
521 521
522 cl = cldev->cl; 522 cl = cldev->cl;
523 523
524 mutex_lock(&bus->device_lock);
524 if (cl->state == MEI_FILE_UNINITIALIZED) { 525 if (cl->state == MEI_FILE_UNINITIALIZED) {
525 mutex_lock(&bus->device_lock);
526 ret = mei_cl_link(cl); 526 ret = mei_cl_link(cl);
527 mutex_unlock(&bus->device_lock);
528 if (ret) 527 if (ret)
529 return ret; 528 goto out;
530 /* update pointers */ 529 /* update pointers */
531 cl->cldev = cldev; 530 cl->cldev = cldev;
532 } 531 }
533 532
534 mutex_lock(&bus->device_lock);
535 if (mei_cl_is_connected(cl)) { 533 if (mei_cl_is_connected(cl)) {
536 ret = 0; 534 ret = 0;
537 goto out; 535 goto out;
@@ -616,9 +614,8 @@ int mei_cldev_disable(struct mei_cl_device *cldev)
616 if (err < 0) 614 if (err < 0)
617 dev_err(bus->dev, "Could not disconnect from the ME client\n"); 615 dev_err(bus->dev, "Could not disconnect from the ME client\n");
618 616
619out:
620 mei_cl_bus_module_put(cldev); 617 mei_cl_bus_module_put(cldev);
621 618out:
622 /* Flush queues and remove any pending read */ 619 /* Flush queues and remove any pending read */
623 mei_cl_flush_queues(cl, NULL); 620 mei_cl_flush_queues(cl, NULL);
624 mei_cl_unlink(cl); 621 mei_cl_unlink(cl);
@@ -876,12 +873,13 @@ static void mei_cl_bus_dev_release(struct device *dev)
876 873
877 mei_me_cl_put(cldev->me_cl); 874 mei_me_cl_put(cldev->me_cl);
878 mei_dev_bus_put(cldev->bus); 875 mei_dev_bus_put(cldev->bus);
876 mei_cl_unlink(cldev->cl);
879 kfree(cldev->cl); 877 kfree(cldev->cl);
880 kfree(cldev); 878 kfree(cldev);
881} 879}
882 880
883static const struct device_type mei_cl_device_type = { 881static const struct device_type mei_cl_device_type = {
884 .release = mei_cl_bus_dev_release, 882 .release = mei_cl_bus_dev_release,
885}; 883};
886 884
887/** 885/**
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 4ab6251d418e..ebdcf0b450e2 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -1767,7 +1767,7 @@ out:
1767 } 1767 }
1768 } 1768 }
1769 1769
1770 rets = buf->size; 1770 rets = len;
1771err: 1771err:
1772 cl_dbg(dev, cl, "rpm: autosuspend\n"); 1772 cl_dbg(dev, cl, "rpm: autosuspend\n");
1773 pm_runtime_mark_last_busy(dev->dev); 1773 pm_runtime_mark_last_busy(dev->dev);
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index 09e233d4c0de..e56f3e72d57a 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -1161,15 +1161,18 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
1161 1161
1162 props_res = (struct hbm_props_response *)mei_msg; 1162 props_res = (struct hbm_props_response *)mei_msg;
1163 1163
1164 if (props_res->status) { 1164 if (props_res->status == MEI_HBMS_CLIENT_NOT_FOUND) {
1165 dev_dbg(dev->dev, "hbm: properties response: %d CLIENT_NOT_FOUND\n",
1166 props_res->me_addr);
1167 } else if (props_res->status) {
1165 dev_err(dev->dev, "hbm: properties response: wrong status = %d %s\n", 1168 dev_err(dev->dev, "hbm: properties response: wrong status = %d %s\n",
1166 props_res->status, 1169 props_res->status,
1167 mei_hbm_status_str(props_res->status)); 1170 mei_hbm_status_str(props_res->status));
1168 return -EPROTO; 1171 return -EPROTO;
1172 } else {
1173 mei_hbm_me_cl_add(dev, props_res);
1169 } 1174 }
1170 1175
1171 mei_hbm_me_cl_add(dev, props_res);
1172
1173 /* request property for the next client */ 1176 /* request property for the next client */
1174 if (mei_hbm_prop_req(dev, props_res->me_addr + 1)) 1177 if (mei_hbm_prop_req(dev, props_res->me_addr + 1))
1175 return -EIO; 1178 return -EIO;
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 648eb6743ed5..6edffeed9953 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -238,10 +238,6 @@ static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req,
238 mmc_exit_request(mq->queue, req); 238 mmc_exit_request(mq->queue, req);
239} 239}
240 240
241/*
242 * We use BLK_MQ_F_BLOCKING and have only 1 hardware queue, which means requests
243 * will not be dispatched in parallel.
244 */
245static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx, 241static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
246 const struct blk_mq_queue_data *bd) 242 const struct blk_mq_queue_data *bd)
247{ 243{
@@ -264,7 +260,7 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
264 260
265 spin_lock_irq(q->queue_lock); 261 spin_lock_irq(q->queue_lock);
266 262
267 if (mq->recovery_needed) { 263 if (mq->recovery_needed || mq->busy) {
268 spin_unlock_irq(q->queue_lock); 264 spin_unlock_irq(q->queue_lock);
269 return BLK_STS_RESOURCE; 265 return BLK_STS_RESOURCE;
270 } 266 }
@@ -291,6 +287,9 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
291 break; 287 break;
292 } 288 }
293 289
290 /* Parallel dispatch of requests is not supported at the moment */
291 mq->busy = true;
292
294 mq->in_flight[issue_type] += 1; 293 mq->in_flight[issue_type] += 1;
295 get_card = (mmc_tot_in_flight(mq) == 1); 294 get_card = (mmc_tot_in_flight(mq) == 1);
296 cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1); 295 cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1);
@@ -333,9 +332,12 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
333 mq->in_flight[issue_type] -= 1; 332 mq->in_flight[issue_type] -= 1;
334 if (mmc_tot_in_flight(mq) == 0) 333 if (mmc_tot_in_flight(mq) == 0)
335 put_card = true; 334 put_card = true;
335 mq->busy = false;
336 spin_unlock_irq(q->queue_lock); 336 spin_unlock_irq(q->queue_lock);
337 if (put_card) 337 if (put_card)
338 mmc_put_card(card, &mq->ctx); 338 mmc_put_card(card, &mq->ctx);
339 } else {
340 WRITE_ONCE(mq->busy, false);
339 } 341 }
340 342
341 return ret; 343 return ret;
diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h
index 17e59d50b496..9bf3c9245075 100644
--- a/drivers/mmc/core/queue.h
+++ b/drivers/mmc/core/queue.h
@@ -81,6 +81,7 @@ struct mmc_queue {
81 unsigned int cqe_busy; 81 unsigned int cqe_busy;
82#define MMC_CQE_DCMD_BUSY BIT(0) 82#define MMC_CQE_DCMD_BUSY BIT(0)
83#define MMC_CQE_QUEUE_FULL BIT(1) 83#define MMC_CQE_QUEUE_FULL BIT(1)
84 bool busy;
84 bool use_cqe; 85 bool use_cqe;
85 bool recovery_needed; 86 bool recovery_needed;
86 bool in_recovery; 87 bool in_recovery;
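The removed comment assumed a single BLK_MQ_F_BLOCKING hardware queue meant dispatches were serialized; the mq->busy flag instead makes an overlapping dispatch back off with BLK_STS_RESOURCE and retry. A generic pthread sketch of that admission check, with invented names:

#include <pthread.h>
#include <stdio.h>
#include <stdbool.h>

static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
static bool busy;

/* Returns 0 on success, -1 for "resource busy, retry later". */
static int queue_rq(int req)
{
	pthread_mutex_lock(&q_lock);
	if (busy) {
		pthread_mutex_unlock(&q_lock);
		return -1;	/* like returning BLK_STS_RESOURCE */
	}
	busy = true;		/* only one dispatch in flight at a time */
	pthread_mutex_unlock(&q_lock);

	/* ... issue the request to the hardware ... */
	printf("issued request %d\n", req);

	pthread_mutex_lock(&q_lock);
	busy = false;
	pthread_mutex_unlock(&q_lock);
	return 0;
}

int main(void)
{
	queue_rq(1);
	queue_rq(2);
	return 0;
}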
diff --git a/drivers/mmc/host/android-goldfish.c b/drivers/mmc/host/android-goldfish.c
index 294de177632c..61e4e2a213c9 100644
--- a/drivers/mmc/host/android-goldfish.c
+++ b/drivers/mmc/host/android-goldfish.c
@@ -217,7 +217,7 @@ static void goldfish_mmc_xfer_done(struct goldfish_mmc_host *host,
217 * We don't really have DMA, so we need 217 * We don't really have DMA, so we need
218 * to copy from our platform driver buffer 218 * to copy from our platform driver buffer
219 */ 219 */
220 sg_copy_to_buffer(data->sg, 1, host->virt_base, 220 sg_copy_from_buffer(data->sg, 1, host->virt_base,
221 data->sg->length); 221 data->sg->length);
222 } 222 }
223 host->data->bytes_xfered += data->sg->length; 223 host->data->bytes_xfered += data->sg->length;
@@ -393,7 +393,7 @@ static void goldfish_mmc_prepare_data(struct goldfish_mmc_host *host,
393 * We don't really have DMA, so we need to copy to our 393 * We don't really have DMA, so we need to copy to our
394 * platform driver buffer 394 * platform driver buffer
395 */ 395 */
396 sg_copy_from_buffer(data->sg, 1, host->virt_base, 396 sg_copy_to_buffer(data->sg, 1, host->virt_base,
397 data->sg->length); 397 data->sg->length);
398 } 398 }
399} 399}
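The goldfish (and following atmel-mci) fixes are pure direction swaps: sg_copy_from_buffer() fills the request's sg pages from a linear buffer, sg_copy_to_buffer() drains them into one, so a PIO read completion must use the former. A tiny sketch with direction-named memcpy wrappers, invented here to mirror that convention:

#include <stdio.h>
#include <string.h>

/* "from_buffer" fills the request's pages FROM a linear buffer,
 * "to_buffer" drains them TO one, matching the sg_copy_* naming. */
static void copy_from_buffer(char *sg_data, const char *buf, size_t n)
{
	memcpy(sg_data, buf, n);	/* device buffer -> request */
}

static void copy_to_buffer(const char *sg_data, char *buf, size_t n)
{
	memcpy(buf, sg_data, n);	/* request -> device buffer */
}

int main(void)
{
	char devbuf[8] = "DATA123";	/* what the controller produced */
	char sg_data[8] = { 0 };	/* the caller's destination pages */

	/* Read completion: results flow from the device buffer out. */
	copy_from_buffer(sg_data, devbuf, sizeof(devbuf));
	printf("read returned: %s\n", sg_data);

	/* Write preparation would go the other way via copy_to_buffer(). */
	return 0;
}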
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 5aa2c9404e92..be53044086c7 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -1976,7 +1976,7 @@ static void atmci_read_data_pio(struct atmel_mci *host)
1976 do { 1976 do {
1977 value = atmci_readl(host, ATMCI_RDR); 1977 value = atmci_readl(host, ATMCI_RDR);
1978 if (likely(offset + 4 <= sg->length)) { 1978 if (likely(offset + 4 <= sg->length)) {
1979 sg_pcopy_to_buffer(sg, 1, &value, sizeof(u32), offset); 1979 sg_pcopy_from_buffer(sg, 1, &value, sizeof(u32), offset);
1980 1980
1981 offset += 4; 1981 offset += 4;
1982 nbytes += 4; 1982 nbytes += 4;
@@ -1993,7 +1993,7 @@ static void atmci_read_data_pio(struct atmel_mci *host)
1993 } else { 1993 } else {
1994 unsigned int remaining = sg->length - offset; 1994 unsigned int remaining = sg->length - offset;
1995 1995
1996 sg_pcopy_to_buffer(sg, 1, &value, remaining, offset); 1996 sg_pcopy_from_buffer(sg, 1, &value, remaining, offset);
1997 nbytes += remaining; 1997 nbytes += remaining;
1998 1998
1999 flush_dcache_page(sg_page(sg)); 1999 flush_dcache_page(sg_page(sg));
@@ -2003,7 +2003,7 @@ static void atmci_read_data_pio(struct atmel_mci *host)
2003 goto done; 2003 goto done;
2004 2004
2005 offset = 4 - remaining; 2005 offset = 4 - remaining;
2006 sg_pcopy_to_buffer(sg, 1, (u8 *)&value + remaining, 2006 sg_pcopy_from_buffer(sg, 1, (u8 *)&value + remaining,
2007 offset, 0); 2007 offset, 0);
2008 nbytes += offset; 2008 nbytes += offset;
2009 } 2009 }
@@ -2042,7 +2042,7 @@ static void atmci_write_data_pio(struct atmel_mci *host)
2042 2042
2043 do { 2043 do {
2044 if (likely(offset + 4 <= sg->length)) { 2044 if (likely(offset + 4 <= sg->length)) {
2045 sg_pcopy_from_buffer(sg, 1, &value, sizeof(u32), offset); 2045 sg_pcopy_to_buffer(sg, 1, &value, sizeof(u32), offset);
2046 atmci_writel(host, ATMCI_TDR, value); 2046 atmci_writel(host, ATMCI_TDR, value);
2047 2047
2048 offset += 4; 2048 offset += 4;
@@ -2059,7 +2059,7 @@ static void atmci_write_data_pio(struct atmel_mci *host)
2059 unsigned int remaining = sg->length - offset; 2059 unsigned int remaining = sg->length - offset;
2060 2060
2061 value = 0; 2061 value = 0;
2062 sg_pcopy_from_buffer(sg, 1, &value, remaining, offset); 2062 sg_pcopy_to_buffer(sg, 1, &value, remaining, offset);
2063 nbytes += remaining; 2063 nbytes += remaining;
2064 2064
2065 host->sg = sg = sg_next(sg); 2065 host->sg = sg = sg_next(sg);
@@ -2070,7 +2070,7 @@ static void atmci_write_data_pio(struct atmel_mci *host)
2070 } 2070 }
2071 2071
2072 offset = 4 - remaining; 2072 offset = 4 - remaining;
2073 sg_pcopy_from_buffer(sg, 1, (u8 *)&value + remaining, 2073 sg_pcopy_to_buffer(sg, 1, (u8 *)&value + remaining,
2074 offset, 0); 2074 offset, 0);
2075 atmci_writel(host, ATMCI_TDR, value); 2075 atmci_writel(host, ATMCI_TDR, value);
2076 nbytes += offset; 2076 nbytes += offset;
diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c
index 09cb89645d06..2cfec33178c1 100644
--- a/drivers/mmc/host/meson-mx-sdio.c
+++ b/drivers/mmc/host/meson-mx-sdio.c
@@ -517,19 +517,23 @@ static struct mmc_host_ops meson_mx_mmc_ops = {
517static struct platform_device *meson_mx_mmc_slot_pdev(struct device *parent) 517static struct platform_device *meson_mx_mmc_slot_pdev(struct device *parent)
518{ 518{
519 struct device_node *slot_node; 519 struct device_node *slot_node;
520 struct platform_device *pdev;
520 521
521 /* 522 /*
522 * TODO: the MMC core framework currently does not support 523 * TODO: the MMC core framework currently does not support
523 * controllers with multiple slots properly. So we only register 524 * controllers with multiple slots properly. So we only register
524 * the first slot for now 525 * the first slot for now
525 */ 526 */
526 slot_node = of_find_compatible_node(parent->of_node, NULL, "mmc-slot"); 527 slot_node = of_get_compatible_child(parent->of_node, "mmc-slot");
527 if (!slot_node) { 528 if (!slot_node) {
528 dev_warn(parent, "no 'mmc-slot' sub-node found\n"); 529 dev_warn(parent, "no 'mmc-slot' sub-node found\n");
529 return ERR_PTR(-ENOENT); 530 return ERR_PTR(-ENOENT);
530 } 531 }
531 532
532 return of_platform_device_create(slot_node, NULL, parent); 533 pdev = of_platform_device_create(slot_node, NULL, parent);
534 of_node_put(slot_node);
535
536 return pdev;
533} 537}
534 538
535static int meson_mx_mmc_add_host(struct meson_mx_mmc_host *host) 539static int meson_mx_mmc_add_host(struct meson_mx_mmc_host *host)
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 071693ebfe18..68760d4a5d3d 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -2177,6 +2177,7 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
2177 dma_release_channel(host->tx_chan); 2177 dma_release_channel(host->tx_chan);
2178 dma_release_channel(host->rx_chan); 2178 dma_release_channel(host->rx_chan);
2179 2179
2180 dev_pm_clear_wake_irq(host->dev);
2180 pm_runtime_dont_use_autosuspend(host->dev); 2181 pm_runtime_dont_use_autosuspend(host->dev);
2181 pm_runtime_put_sync(host->dev); 2182 pm_runtime_put_sync(host->dev);
2182 pm_runtime_disable(host->dev); 2183 pm_runtime_disable(host->dev);
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index 35cc0de6be67..ca0b43973769 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -45,14 +45,16 @@
45/* DM_CM_RST */ 45/* DM_CM_RST */
46#define RST_DTRANRST1 BIT(9) 46#define RST_DTRANRST1 BIT(9)
47#define RST_DTRANRST0 BIT(8) 47#define RST_DTRANRST0 BIT(8)
48#define RST_RESERVED_BITS GENMASK_ULL(32, 0) 48#define RST_RESERVED_BITS GENMASK_ULL(31, 0)
49 49
50/* DM_CM_INFO1 and DM_CM_INFO1_MASK */ 50/* DM_CM_INFO1 and DM_CM_INFO1_MASK */
51#define INFO1_CLEAR 0 51#define INFO1_CLEAR 0
52#define INFO1_MASK_CLEAR GENMASK_ULL(31, 0)
52#define INFO1_DTRANEND1 BIT(17) 53#define INFO1_DTRANEND1 BIT(17)
53#define INFO1_DTRANEND0 BIT(16) 54#define INFO1_DTRANEND0 BIT(16)
54 55
55/* DM_CM_INFO2 and DM_CM_INFO2_MASK */ 56/* DM_CM_INFO2 and DM_CM_INFO2_MASK */
57#define INFO2_MASK_CLEAR GENMASK_ULL(31, 0)
56#define INFO2_DTRANERR1 BIT(17) 58#define INFO2_DTRANERR1 BIT(17)
57#define INFO2_DTRANERR0 BIT(16) 59#define INFO2_DTRANERR0 BIT(16)
58 60
@@ -252,6 +254,12 @@ renesas_sdhi_internal_dmac_request_dma(struct tmio_mmc_host *host,
252{ 254{
253 struct renesas_sdhi *priv = host_to_priv(host); 255 struct renesas_sdhi *priv = host_to_priv(host);
254 256
257 /* Disable DMAC interrupts, we don't use them */
258 renesas_sdhi_internal_dmac_dm_write(host, DM_CM_INFO1_MASK,
259 INFO1_MASK_CLEAR);
260 renesas_sdhi_internal_dmac_dm_write(host, DM_CM_INFO2_MASK,
261 INFO2_MASK_CLEAR);
262
255 /* Each value is set to non-zero to assume "enabling" each DMA */ 263 /* Each value is set to non-zero to assume "enabling" each DMA */
256 host->chan_rx = host->chan_tx = (void *)0xdeadbeaf; 264 host->chan_rx = host->chan_tx = (void *)0xdeadbeaf;
257 265
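GENMASK_ULL(h, l) sets bits h down to l inclusive, so a mask for a 32-bit register is (31, 0); the old (32, 0) silently covered 33 bits. A minimal sketch of the macro and the off-by-one it fixes:

#include <stdio.h>

/* Bits h..l inclusive, like the kernel's GENMASK_ULL(). */
#define GENMASK_ULL(h, l) \
	(((~0ULL) >> (63 - (h))) & ((~0ULL) << (l)))

int main(void)
{
	/* The register is 32 bits wide: the mask must stop at bit 31. */
	printf("GENMASK_ULL(31, 0) = %#llx (32 bits)\n", GENMASK_ULL(31, 0));
	printf("GENMASK_ULL(32, 0) = %#llx (33 bits!)\n", GENMASK_ULL(32, 0));
	return 0;
}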
diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c
index ca18612c4201..67b2065e7a19 100644
--- a/drivers/mtd/nand/raw/denali.c
+++ b/drivers/mtd/nand/raw/denali.c
@@ -1338,6 +1338,11 @@ int denali_init(struct denali_nand_info *denali)
1338 1338
1339 denali_enable_irq(denali); 1339 denali_enable_irq(denali);
1340 denali_reset_banks(denali); 1340 denali_reset_banks(denali);
1341 if (!denali->max_banks) {
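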
1342 /* Error out earlier if no chip is found for some reason. */
1343 ret = -ENODEV;
1344 goto disable_irq;
1345 }
1341 1346
1342 denali->active_bank = DENALI_INVALID_BANK; 1347 denali->active_bank = DENALI_INVALID_BANK;
1343 1348
diff --git a/drivers/mtd/nand/raw/docg4.c b/drivers/mtd/nand/raw/docg4.c
index a3f04315c05c..427fcbc1b71c 100644
--- a/drivers/mtd/nand/raw/docg4.c
+++ b/drivers/mtd/nand/raw/docg4.c
@@ -1218,7 +1218,7 @@ static int docg4_resume(struct platform_device *pdev)
1218 return 0; 1218 return 0;
1219} 1219}
1220 1220
1221static void __init init_mtd_structs(struct mtd_info *mtd) 1221static void init_mtd_structs(struct mtd_info *mtd)
1222{ 1222{
1223 /* initialize mtd and nand data structures */ 1223 /* initialize mtd and nand data structures */
1224 1224
@@ -1290,7 +1290,7 @@ static void __init init_mtd_structs(struct mtd_info *mtd)
1290 1290
1291} 1291}
1292 1292
1293static int __init read_id_reg(struct mtd_info *mtd) 1293static int read_id_reg(struct mtd_info *mtd)
1294{ 1294{
1295 struct nand_chip *nand = mtd_to_nand(mtd); 1295 struct nand_chip *nand = mtd_to_nand(mtd);
1296 struct docg4_priv *doc = nand_get_controller_data(nand); 1296 struct docg4_priv *doc = nand_get_controller_data(nand);
diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c
index 9375cef22420..3d27616d9c85 100644
--- a/drivers/net/appletalk/ipddp.c
+++ b/drivers/net/appletalk/ipddp.c
@@ -283,8 +283,12 @@ static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
283 case SIOCFINDIPDDPRT: 283 case SIOCFINDIPDDPRT:
284 spin_lock_bh(&ipddp_route_lock); 284 spin_lock_bh(&ipddp_route_lock);
285 rp = __ipddp_find_route(&rcp); 285 rp = __ipddp_find_route(&rcp);
286 if (rp) 286 if (rp) {
287 memcpy(&rcp2, rp, sizeof(rcp2)); 287 memset(&rcp2, 0, sizeof(rcp2));
288 rcp2.ip = rp->ip;
289 rcp2.at = rp->at;
290 rcp2.flags = rp->flags;
291 }
288 spin_unlock_bh(&ipddp_route_lock); 292 spin_unlock_bh(&ipddp_route_lock);
289 293
290 if (rp) { 294 if (rp) {
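
The ipddp change swaps a whole-struct memcpy for memset plus explicit field copies before the route is handed to user space: a whole-struct copy also duplicates compiler-inserted padding, which can carry stale kernel bytes. A self-contained userspace sketch of the difference (struct route_copy is hypothetical; the byte dumps assume a little-endian ABI with 4-byte int alignment):

#include <stdio.h>
#include <string.h>

/* Hypothetical struct: the compiler inserts padding after 'flags' */
struct route_copy {
    char flags;
    int  ip;
};

static void dump(const char *tag, const void *p, size_t n)
{
    const unsigned char *b = p;

    printf("%-8s", tag);
    for (size_t i = 0; i < n; i++)
        printf(" %02x", b[i]);
    printf("\n");
}

int main(void)
{
    struct route_copy src, out;

    memset(&src, 0xaa, sizeof(src));    /* stand-in for stale stack data */
    src.flags = 1;
    src.ip = 42;

    memcpy(&out, &src, sizeof(out));    /* old code: padding rides along */
    dump("memcpy:", &out, sizeof(out)); /* 01 aa aa aa 2a 00 00 00 */

    memset(&out, 0, sizeof(out));       /* new code: zero, then copy fields */
    out.flags = src.flags;
    out.ip = src.ip;
    dump("fields:", &out, sizeof(out)); /* 01 00 00 00 2a 00 00 00 */

    return 0;
}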
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
index 7c791c1da4b9..bef01331266f 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.h
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -128,7 +128,7 @@
128#define MV88E6XXX_G1_ATU_OP_GET_CLR_VIOLATION 0x7000 128#define MV88E6XXX_G1_ATU_OP_GET_CLR_VIOLATION 0x7000
129#define MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION BIT(7) 129#define MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION BIT(7)
130#define MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION BIT(6) 130#define MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION BIT(6)
131#define MV88E6XXX_G1_ATU_OP_MISS_VIOLTATION BIT(5) 131#define MV88E6XXX_G1_ATU_OP_MISS_VIOLATION BIT(5)
132#define MV88E6XXX_G1_ATU_OP_FULL_VIOLATION BIT(4) 132#define MV88E6XXX_G1_ATU_OP_FULL_VIOLATION BIT(4)
133 133
134/* Offset 0x0C: ATU Data Register */ 134/* Offset 0x0C: ATU Data Register */
diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c
index 307410898fc9..5200e4bdce93 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_atu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c
@@ -349,7 +349,7 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
349 chip->ports[entry.portvec].atu_member_violation++; 349 chip->ports[entry.portvec].atu_member_violation++;
350 } 350 }
351 351
352 if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) { 352 if (val & MV88E6XXX_G1_ATU_OP_MISS_VIOLATION) {
353 dev_err_ratelimited(chip->dev, 353 dev_err_ratelimited(chip->dev,
354 "ATU miss violation for %pM portvec %x\n", 354 "ATU miss violation for %pM portvec %x\n",
355 entry.mac, entry.portvec); 355 entry.mac, entry.portvec);
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 17f12c18d225..7635c38e77dd 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -459,12 +459,12 @@ static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_qu
459 cqe = &admin_queue->cq.entries[head_masked]; 459 cqe = &admin_queue->cq.entries[head_masked];
460 460
461 /* Go over all the completions */ 461 /* Go over all the completions */
462 while ((cqe->acq_common_descriptor.flags & 462 while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
463 ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) { 463 ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
464 /* Do not read the rest of the completion entry before the 464 /* Do not read the rest of the completion entry before the
465 * phase bit was validated 465 * phase bit was validated
466 */ 466 */
467 rmb(); 467 dma_rmb();
468 ena_com_handle_single_admin_completion(admin_queue, cqe); 468 ena_com_handle_single_admin_completion(admin_queue, cqe);
469 469
470 head_masked++; 470 head_masked++;
@@ -627,17 +627,10 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
627 mmio_read_reg |= mmio_read->seq_num & 627 mmio_read_reg |= mmio_read->seq_num &
628 ENA_REGS_MMIO_REG_READ_REQ_ID_MASK; 628 ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;
629 629
630 /* make sure read_resp->req_id get updated before the hw can write 630 writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
631 * there
632 */
633 wmb();
634
635 writel_relaxed(mmio_read_reg,
636 ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
637 631
638 mmiowb();
639 for (i = 0; i < timeout; i++) { 632 for (i = 0; i < timeout; i++) {
640 if (read_resp->req_id == mmio_read->seq_num) 633 if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
641 break; 634 break;
642 635
643 udelay(1); 636 udelay(1);
@@ -1796,8 +1789,13 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
1796 aenq_common = &aenq_e->aenq_common_desc; 1789 aenq_common = &aenq_e->aenq_common_desc;
1797 1790
1798 /* Go over all the events */ 1791 /* Go over all the events */
1799 while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == 1792 while ((READ_ONCE(aenq_common->flags) &
1800 phase) { 1793 ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
1794 /* Make sure the phase bit (ownership) is as expected before
1795 * reading the rest of the descriptor.
1796 */
1797 dma_rmb();
1798
1801 pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n", 1799 pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
1802 aenq_common->group, aenq_common->syndrom, 1800 aenq_common->group, aenq_common->syndrom,
1803 (u64)aenq_common->timestamp_low + 1801 (u64)aenq_common->timestamp_low +
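
The ena_com hunks above (and the ena_eth_com.c hunks just below) all apply one pattern: read the descriptor's phase/ownership bit with READ_ONCE(), then issue dma_rmb() before touching the rest of the descriptor, so the CPU cannot use payload bytes prefetched from before the device flipped the phase bit. A condensed, hypothetical consumer loop in that style (struct cdesc and the ring layout are illustrative, not the ena_com structures; depth is assumed to be a power of two):

#include <linux/types.h>
#include <linux/compiler.h>    /* READ_ONCE() */
#include <asm/barrier.h>       /* dma_rmb() */

struct cdesc {
    u8  flags;    /* bit 0: phase (ownership) bit */
    u8  rsvd;
    u16 req_id;   /* device-written payload */
};

#define CDESC_PHASE_MASK    0x1

static void handle_one(struct cdesc *cd) { /* process the payload */ }

static void consume_ring(struct cdesc *ring, u16 depth, u16 *head, u8 *phase)
{
    struct cdesc *cd = &ring[*head & (depth - 1)];

    while ((READ_ONCE(cd->flags) & CDESC_PHASE_MASK) == *phase) {
        /* The phase bit says the device finished this entry, but the
         * CPU may have prefetched payload fields from before the
         * device wrote them; dma_rmb() forbids that reordering.
         */
        dma_rmb();

        handle_one(cd);

        (*head)++;
        if ((*head & (depth - 1)) == 0)    /* ring wrapped */
            *phase ^= 1;
        cd = &ring[*head & (depth - 1)];
    }
}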
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
index ea149c134e15..1c682b76190f 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -51,6 +51,11 @@ static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
51 if (desc_phase != expected_phase) 51 if (desc_phase != expected_phase)
52 return NULL; 52 return NULL;
53 53
54 /* Make sure we read the rest of the descriptor after the phase bit
55 * has been read
56 */
57 dma_rmb();
58
54 return cdesc; 59 return cdesc;
55} 60}
56 61
@@ -493,6 +498,7 @@ int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
493 if (cdesc_phase != expected_phase) 498 if (cdesc_phase != expected_phase)
494 return -EAGAIN; 499 return -EAGAIN;
495 500
501 dma_rmb();
496 if (unlikely(cdesc->req_id >= io_cq->q_depth)) { 502 if (unlikely(cdesc->req_id >= io_cq->q_depth)) {
497 pr_err("Invalid req id %d\n", cdesc->req_id); 503 pr_err("Invalid req id %d\n", cdesc->req_id);
498 return -EINVAL; 504 return -EINVAL;
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
index 6fdc753d9483..2f7657227cfe 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
@@ -107,8 +107,7 @@ static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq)
107 return io_sq->q_depth - 1 - cnt; 107 return io_sq->q_depth - 1 - cnt;
108} 108}
109 109
110static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq, 110static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
111 bool relaxed)
112{ 111{
113 u16 tail; 112 u16 tail;
114 113
@@ -117,10 +116,7 @@ static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq,
117 pr_debug("write submission queue doorbell for queue: %d tail: %d\n", 116 pr_debug("write submission queue doorbell for queue: %d tail: %d\n",
118 io_sq->qid, tail); 117 io_sq->qid, tail);
119 118
120 if (relaxed) 119 writel(tail, io_sq->db_addr);
121 writel_relaxed(tail, io_sq->db_addr);
122 else
123 writel(tail, io_sq->db_addr);
124 120
125 return 0; 121 return 0;
126} 122}
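
Dropping the relaxed parameter works because writel() on Linux is defined to order prior normal memory stores (the submission-queue descriptors) before the MMIO store, which is also why the callers can shed their explicit wmb()/mmiowb() pairs. A hedged sketch of the resulting doorbell idiom; the structures are illustrative, not the ena_com ones:

#include <linux/io.h>
#include <linux/types.h>

struct my_desc { u64 addr; u32 len; u32 ctrl; };

struct my_sq {
    struct my_desc *ring;    /* host memory, power-of-two depth */
    u16 tail, depth;
    void __iomem *db_addr;   /* device doorbell register */
};

static void post_and_ring(struct my_sq *sq, const struct my_desc *d)
{
    sq->ring[sq->tail & (sq->depth - 1)] = *d;    /* plain memory store */
    sq->tail++;

    /* writel() orders the descriptor store before the MMIO store on
     * all architectures, so no explicit wmb()/mmiowb() is needed --
     * exactly why the hunks above can drop the relaxed variant.
     */
    writel(sq->tail, sq->db_addr);
}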
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index c673ac2df65b..29b5774dd32d 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -76,7 +76,7 @@ MODULE_DEVICE_TABLE(pci, ena_pci_tbl);
76 76
77static int ena_rss_init_default(struct ena_adapter *adapter); 77static int ena_rss_init_default(struct ena_adapter *adapter);
78static void check_for_admin_com_state(struct ena_adapter *adapter); 78static void check_for_admin_com_state(struct ena_adapter *adapter);
79static void ena_destroy_device(struct ena_adapter *adapter); 79static void ena_destroy_device(struct ena_adapter *adapter, bool graceful);
80static int ena_restore_device(struct ena_adapter *adapter); 80static int ena_restore_device(struct ena_adapter *adapter);
81 81
82static void ena_tx_timeout(struct net_device *dev) 82static void ena_tx_timeout(struct net_device *dev)
@@ -461,7 +461,7 @@ static inline int ena_alloc_rx_page(struct ena_ring *rx_ring,
461 return -ENOMEM; 461 return -ENOMEM;
462 } 462 }
463 463
464 dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, 464 dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
465 DMA_FROM_DEVICE); 465 DMA_FROM_DEVICE);
466 if (unlikely(dma_mapping_error(rx_ring->dev, dma))) { 466 if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
467 u64_stats_update_begin(&rx_ring->syncp); 467 u64_stats_update_begin(&rx_ring->syncp);
@@ -478,7 +478,7 @@ static inline int ena_alloc_rx_page(struct ena_ring *rx_ring,
478 rx_info->page_offset = 0; 478 rx_info->page_offset = 0;
479 ena_buf = &rx_info->ena_buf; 479 ena_buf = &rx_info->ena_buf;
480 ena_buf->paddr = dma; 480 ena_buf->paddr = dma;
481 ena_buf->len = PAGE_SIZE; 481 ena_buf->len = ENA_PAGE_SIZE;
482 482
483 return 0; 483 return 0;
484} 484}
@@ -495,7 +495,7 @@ static void ena_free_rx_page(struct ena_ring *rx_ring,
495 return; 495 return;
496 } 496 }
497 497
498 dma_unmap_page(rx_ring->dev, ena_buf->paddr, PAGE_SIZE, 498 dma_unmap_page(rx_ring->dev, ena_buf->paddr, ENA_PAGE_SIZE,
499 DMA_FROM_DEVICE); 499 DMA_FROM_DEVICE);
500 500
501 __free_page(page); 501 __free_page(page);
@@ -551,14 +551,9 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
551 rx_ring->qid, i, num); 551 rx_ring->qid, i, num);
552 } 552 }
553 553
554 if (likely(i)) { 554 /* ena_com_write_sq_doorbell issues a wmb() */
555 /* Add memory barrier to make sure the desc were written before 555 if (likely(i))
556 * issue a doorbell 556 ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);
557 */
558 wmb();
559 ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq, true);
560 mmiowb();
561 }
562 557
563 rx_ring->next_to_use = next_to_use; 558 rx_ring->next_to_use = next_to_use;
564 559
@@ -916,10 +911,10 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
916 do { 911 do {
917 dma_unmap_page(rx_ring->dev, 912 dma_unmap_page(rx_ring->dev,
918 dma_unmap_addr(&rx_info->ena_buf, paddr), 913 dma_unmap_addr(&rx_info->ena_buf, paddr),
919 PAGE_SIZE, DMA_FROM_DEVICE); 914 ENA_PAGE_SIZE, DMA_FROM_DEVICE);
920 915
921 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page, 916 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
922 rx_info->page_offset, len, PAGE_SIZE); 917 rx_info->page_offset, len, ENA_PAGE_SIZE);
923 918
924 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, 919 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
925 "rx skb updated. len %d. data_len %d\n", 920 "rx skb updated. len %d. data_len %d\n",
@@ -1900,7 +1895,7 @@ static int ena_close(struct net_device *netdev)
1900 "Destroy failure, restarting device\n"); 1895 "Destroy failure, restarting device\n");
1901 ena_dump_stats_to_dmesg(adapter); 1896 ena_dump_stats_to_dmesg(adapter);
1902 /* rtnl lock already obtained in dev_ioctl() layer */ 1897 /* rtnl lock already obtained in dev_ioctl() layer */
1903 ena_destroy_device(adapter); 1898 ena_destroy_device(adapter, false);
1904 ena_restore_device(adapter); 1899 ena_restore_device(adapter);
1905 } 1900 }
1906 1901
@@ -2112,12 +2107,6 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
2112 tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use, 2107 tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
2113 tx_ring->ring_size); 2108 tx_ring->ring_size);
2114 2109
2115 /* This WMB is aimed to:
2116 * 1 - perform smp barrier before reading next_to_completion
2117 * 2 - make sure the desc were written before trigger DB
2118 */
2119 wmb();
2120
2121 /* stop the queue when no more space available, the packet can have up 2110 /* stop the queue when no more space available, the packet can have up
2122 * to sgl_size + 2. one for the meta descriptor and one for header 2111 * to sgl_size + 2. one for the meta descriptor and one for header
2123 * (if the header is larger than tx_max_header_size). 2112 * (if the header is larger than tx_max_header_size).
@@ -2136,10 +2125,11 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
2136 * stop the queue but meanwhile clean_tx_irq updates 2125 * stop the queue but meanwhile clean_tx_irq updates
2137 * next_to_completion and terminates. 2126 * next_to_completion and terminates.
2138 * The queue will remain stopped forever. 2127 * The queue will remain stopped forever.
2139 * To solve this issue this function perform rmb, check 2128 * To solve this issue add a mb() to make sure that
2140 * the wakeup condition and wake up the queue if needed. 2129 * netif_tx_stop_queue() write is visible before checking if
2130 * there is additional space in the queue.
2141 */ 2131 */
2142 smp_rmb(); 2132 smp_mb();
2143 2133
2144 if (ena_com_sq_empty_space(tx_ring->ena_com_io_sq) 2134 if (ena_com_sq_empty_space(tx_ring->ena_com_io_sq)
2145 > ENA_TX_WAKEUP_THRESH) { 2135 > ENA_TX_WAKEUP_THRESH) {
@@ -2151,8 +2141,10 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
2151 } 2141 }
2152 2142
2153 if (netif_xmit_stopped(txq) || !skb->xmit_more) { 2143 if (netif_xmit_stopped(txq) || !skb->xmit_more) {
2154 /* trigger the dma engine */ 2144 /* trigger the dma engine. ena_com_write_sq_doorbell()
2155 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq, false); 2145 * has a mb
2146 */
2147 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
2156 u64_stats_update_begin(&tx_ring->syncp); 2148 u64_stats_update_begin(&tx_ring->syncp);
2157 tx_ring->tx_stats.doorbells++; 2149 tx_ring->tx_stats.doorbells++;
2158 u64_stats_update_end(&tx_ring->syncp); 2150 u64_stats_update_end(&tx_ring->syncp);
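
The smp_rmb() to smp_mb() change closes the classic stop-queue race: the xmit path must make its netif_tx_stop_queue() store visible before it re-reads the free-space counter, pairing with the completion path that updates the counter and then checks the stopped bit. A condensed sketch of the two sides; struct my_tx_ring, tx_space(), free_completed_descs() and the MY_* thresholds are hypothetical:

#include <linux/netdevice.h>

/* Producer side (xmit path) */
static void my_xmit_tail(struct netdev_queue *txq, struct my_tx_ring *tx)
{
    if (unlikely(tx_space(tx) < MY_MAX_DESC_PER_SKB)) {
        netif_tx_stop_queue(txq);

        /* Full barrier: make the stopped bit visible before the second
         * look at the space counter. With only smp_rmb(), the
         * completion path could free descriptors, see the queue still
         * running, skip the wakeup -- and the queue stalls forever.
         */
        smp_mb();

        if (tx_space(tx) > MY_WAKEUP_THRESH)
            netif_tx_wake_queue(txq);
    }
}

/* Consumer side (TX completion) */
static void my_tx_clean_tail(struct netdev_queue *txq, struct my_tx_ring *tx)
{
    free_completed_descs(tx);    /* updates the space counter */

    smp_mb();                    /* pairs with the producer barrier */
    if (netif_tx_queue_stopped(txq) && tx_space(tx) > MY_WAKEUP_THRESH)
        netif_tx_wake_queue(txq);
}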
@@ -2550,12 +2542,15 @@ err_disable_msix:
2550 return rc; 2542 return rc;
2551} 2543}
2552 2544
2553static void ena_destroy_device(struct ena_adapter *adapter) 2545static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
2554{ 2546{
2555 struct net_device *netdev = adapter->netdev; 2547 struct net_device *netdev = adapter->netdev;
2556 struct ena_com_dev *ena_dev = adapter->ena_dev; 2548 struct ena_com_dev *ena_dev = adapter->ena_dev;
2557 bool dev_up; 2549 bool dev_up;
2558 2550
2551 if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
2552 return;
2553
2559 netif_carrier_off(netdev); 2554 netif_carrier_off(netdev);
2560 2555
2561 del_timer_sync(&adapter->timer_service); 2556 del_timer_sync(&adapter->timer_service);
@@ -2563,7 +2558,8 @@ static void ena_destroy_device(struct ena_adapter *adapter)
2563 dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags); 2558 dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2564 adapter->dev_up_before_reset = dev_up; 2559 adapter->dev_up_before_reset = dev_up;
2565 2560
2566 ena_com_set_admin_running_state(ena_dev, false); 2561 if (!graceful)
2562 ena_com_set_admin_running_state(ena_dev, false);
2567 2563
2568 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) 2564 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
2569 ena_down(adapter); 2565 ena_down(adapter);
@@ -2591,6 +2587,7 @@ static void ena_destroy_device(struct ena_adapter *adapter)
2591 adapter->reset_reason = ENA_REGS_RESET_NORMAL; 2587 adapter->reset_reason = ENA_REGS_RESET_NORMAL;
2592 2588
2593 clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); 2589 clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
2590 clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
2594} 2591}
2595 2592
2596static int ena_restore_device(struct ena_adapter *adapter) 2593static int ena_restore_device(struct ena_adapter *adapter)
@@ -2635,6 +2632,7 @@ static int ena_restore_device(struct ena_adapter *adapter)
2635 } 2632 }
2636 } 2633 }
2637 2634
2635 set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
2638 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); 2636 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
2639 dev_err(&pdev->dev, "Device reset completed successfully\n"); 2637 dev_err(&pdev->dev, "Device reset completed successfully\n");
2640 2638
@@ -2665,7 +2663,7 @@ static void ena_fw_reset_device(struct work_struct *work)
2665 return; 2663 return;
2666 } 2664 }
2667 rtnl_lock(); 2665 rtnl_lock();
2668 ena_destroy_device(adapter); 2666 ena_destroy_device(adapter, false);
2669 ena_restore_device(adapter); 2667 ena_restore_device(adapter);
2670 rtnl_unlock(); 2668 rtnl_unlock();
2671} 2669}
@@ -3409,30 +3407,24 @@ static void ena_remove(struct pci_dev *pdev)
3409 netdev->rx_cpu_rmap = NULL; 3407 netdev->rx_cpu_rmap = NULL;
3410 } 3408 }
3411#endif /* CONFIG_RFS_ACCEL */ 3409#endif /* CONFIG_RFS_ACCEL */
3412
3413 unregister_netdev(netdev);
3414 del_timer_sync(&adapter->timer_service); 3410 del_timer_sync(&adapter->timer_service);
3415 3411
3416 cancel_work_sync(&adapter->reset_task); 3412 cancel_work_sync(&adapter->reset_task);
3417 3413
3418 /* Reset the device only if the device is running. */ 3414 unregister_netdev(netdev);
3419 if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
3420 ena_com_dev_reset(ena_dev, adapter->reset_reason);
3421 3415
3422 ena_free_mgmnt_irq(adapter); 3416 /* If the device is running, reset it to guarantee that it
3417 * issues no further events.
3418 */
3419 if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
3420 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3423 3421
3424 ena_disable_msix(adapter); 3422 rtnl_lock();
3423 ena_destroy_device(adapter, true);
3424 rtnl_unlock();
3425 3425
3426 free_netdev(netdev); 3426 free_netdev(netdev);
3427 3427
3428 ena_com_mmio_reg_read_request_destroy(ena_dev);
3429
3430 ena_com_abort_admin_commands(ena_dev);
3431
3432 ena_com_wait_for_abort_completion(ena_dev);
3433
3434 ena_com_admin_destroy(ena_dev);
3435
3436 ena_com_rss_destroy(ena_dev); 3428 ena_com_rss_destroy(ena_dev);
3437 3429
3438 ena_com_delete_debug_area(ena_dev); 3430 ena_com_delete_debug_area(ena_dev);
@@ -3467,7 +3459,7 @@ static int ena_suspend(struct pci_dev *pdev, pm_message_t state)
3467 "ignoring device reset request as the device is being suspended\n"); 3459 "ignoring device reset request as the device is being suspended\n");
3468 clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); 3460 clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3469 } 3461 }
3470 ena_destroy_device(adapter); 3462 ena_destroy_device(adapter, true);
3471 rtnl_unlock(); 3463 rtnl_unlock();
3472 return 0; 3464 return 0;
3473} 3465}
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index f1972b5ab650..7c7ae56c52cf 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -355,4 +355,15 @@ void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf);
355 355
356int ena_get_sset_count(struct net_device *netdev, int sset); 356int ena_get_sset_count(struct net_device *netdev, int sset);
357 357
358/* The ENA buffer length field is 16 bits long. So when PAGE_SIZE == 64kB the
359 * driver passes 0.
360 * Since the max packet size the ENA handles is ~9kB, limit the buffer length to
361 * 16kB.
362 */
363#if PAGE_SIZE > SZ_16K
364#define ENA_PAGE_SIZE SZ_16K
365#else
366#define ENA_PAGE_SIZE PAGE_SIZE
367#endif
368
358#endif /* !(ENA_H) */ 369#endif /* !(ENA_H) */
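
ENA_PAGE_SIZE exists because the device's buffer length field is a u16: with 64 kB pages the value 65536 truncates to 0. A self-contained userspace sketch of the cap (MY_PAGE_SIZE stands in for a 64 kB PAGE_SIZE build):

#include <stdint.h>
#include <stdio.h>

#define SZ_16K        0x4000
#define MY_PAGE_SIZE  65536    /* pretend 64 kB pages, as on some arm64/ppc configs */

#if MY_PAGE_SIZE > SZ_16K
#define MY_BUF_SIZE   SZ_16K
#else
#define MY_BUF_SIZE   MY_PAGE_SIZE
#endif

int main(void)
{
    uint16_t uncapped = (uint16_t)MY_PAGE_SIZE;    /* 65536 truncates to 0 */
    uint16_t capped   = (uint16_t)MY_BUF_SIZE;     /* 16384 fits */

    printf("uncapped length field: %u\n", uncapped);    /* 0 */
    printf("capped length field:   %u\n", capped);      /* 16384 */
    return 0;
}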
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 8bb1e38b1681..177587f9c3f1 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -5913,12 +5913,12 @@ unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
5913 return bp->hw_resc.max_cp_rings; 5913 return bp->hw_resc.max_cp_rings;
5914} 5914}
5915 5915
5916void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max) 5916unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
5917{ 5917{
5918 bp->hw_resc.max_cp_rings = max; 5918 return bp->hw_resc.max_cp_rings - bnxt_get_ulp_msix_num(bp);
5919} 5919}
5920 5920
5921unsigned int bnxt_get_max_func_irqs(struct bnxt *bp) 5921static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
5922{ 5922{
5923 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 5923 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5924 5924
@@ -6684,6 +6684,8 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
6684 hw_resc->resv_rx_rings = 0; 6684 hw_resc->resv_rx_rings = 0;
6685 hw_resc->resv_hw_ring_grps = 0; 6685 hw_resc->resv_hw_ring_grps = 0;
6686 hw_resc->resv_vnics = 0; 6686 hw_resc->resv_vnics = 0;
6687 bp->tx_nr_rings = 0;
6688 bp->rx_nr_rings = 0;
6687 } 6689 }
6688 return rc; 6690 return rc;
6689} 6691}
@@ -8025,7 +8027,7 @@ static int bnxt_change_mac_addr(struct net_device *dev, void *p)
8025 if (ether_addr_equal(addr->sa_data, dev->dev_addr)) 8027 if (ether_addr_equal(addr->sa_data, dev->dev_addr))
8026 return 0; 8028 return 0;
8027 8029
8028 rc = bnxt_approve_mac(bp, addr->sa_data); 8030 rc = bnxt_approve_mac(bp, addr->sa_data, true);
8029 if (rc) 8031 if (rc)
8030 return rc; 8032 return rc;
8031 8033
@@ -8629,7 +8631,8 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
8629 8631
8630 *max_tx = hw_resc->max_tx_rings; 8632 *max_tx = hw_resc->max_tx_rings;
8631 *max_rx = hw_resc->max_rx_rings; 8633 *max_rx = hw_resc->max_rx_rings;
8632 *max_cp = min_t(int, hw_resc->max_irqs, hw_resc->max_cp_rings); 8634 *max_cp = min_t(int, bnxt_get_max_func_cp_rings_for_en(bp),
8635 hw_resc->max_irqs);
8633 *max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs); 8636 *max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs);
8634 max_ring_grps = hw_resc->max_hw_ring_grps; 8637 max_ring_grps = hw_resc->max_hw_ring_grps;
8635 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) { 8638 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
@@ -8769,20 +8772,25 @@ static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
8769 if (bp->tx_nr_rings) 8772 if (bp->tx_nr_rings)
8770 return 0; 8773 return 0;
8771 8774
8775 bnxt_ulp_irq_stop(bp);
8776 bnxt_clear_int_mode(bp);
8772 rc = bnxt_set_dflt_rings(bp, true); 8777 rc = bnxt_set_dflt_rings(bp, true);
8773 if (rc) { 8778 if (rc) {
8774 netdev_err(bp->dev, "Not enough rings available.\n"); 8779 netdev_err(bp->dev, "Not enough rings available.\n");
8775 return rc; 8780 goto init_dflt_ring_err;
8776 } 8781 }
8777 rc = bnxt_init_int_mode(bp); 8782 rc = bnxt_init_int_mode(bp);
8778 if (rc) 8783 if (rc)
8779 return rc; 8784 goto init_dflt_ring_err;
8785
8780 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 8786 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
8781 if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) { 8787 if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
8782 bp->flags |= BNXT_FLAG_RFS; 8788 bp->flags |= BNXT_FLAG_RFS;
8783 bp->dev->features |= NETIF_F_NTUPLE; 8789 bp->dev->features |= NETIF_F_NTUPLE;
8784 } 8790 }
8785 return 0; 8791init_dflt_ring_err:
8792 bnxt_ulp_irq_restart(bp, rc);
8793 return rc;
8786} 8794}
8787 8795
8788int bnxt_restore_pf_fw_resources(struct bnxt *bp) 8796int bnxt_restore_pf_fw_resources(struct bnxt *bp)
@@ -8819,14 +8827,19 @@ static int bnxt_init_mac_addr(struct bnxt *bp)
8819 } else { 8827 } else {
8820#ifdef CONFIG_BNXT_SRIOV 8828#ifdef CONFIG_BNXT_SRIOV
8821 struct bnxt_vf_info *vf = &bp->vf; 8829 struct bnxt_vf_info *vf = &bp->vf;
8830 bool strict_approval = true;
8822 8831
8823 if (is_valid_ether_addr(vf->mac_addr)) { 8832 if (is_valid_ether_addr(vf->mac_addr)) {
8824 /* overwrite netdev dev_addr with admin VF MAC */ 8833 /* overwrite netdev dev_addr with admin VF MAC */
8825 memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN); 8834 memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
8835 /* Older PF driver or firmware may not approve this
8836 * correctly.
8837 */
8838 strict_approval = false;
8826 } else { 8839 } else {
8827 eth_hw_addr_random(bp->dev); 8840 eth_hw_addr_random(bp->dev);
8828 } 8841 }
8829 rc = bnxt_approve_mac(bp, bp->dev->dev_addr); 8842 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
8830#endif 8843#endif
8831 } 8844 }
8832 return rc; 8845 return rc;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index fefa011320e0..bde384630a75 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1481,8 +1481,7 @@ int bnxt_hwrm_set_coal(struct bnxt *);
1481unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp); 1481unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp);
1482void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max); 1482void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max);
1483unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp); 1483unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp);
1484void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max); 1484unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp);
1485unsigned int bnxt_get_max_func_irqs(struct bnxt *bp);
1486int bnxt_get_avail_msix(struct bnxt *bp, int num); 1485int bnxt_get_avail_msix(struct bnxt *bp, int num);
1487int bnxt_reserve_rings(struct bnxt *bp); 1486int bnxt_reserve_rings(struct bnxt *bp);
1488void bnxt_tx_disable(struct bnxt *bp); 1487void bnxt_tx_disable(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 6d583bcd2a81..3962f6fd543c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -451,7 +451,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
451 451
452 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1); 452 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);
453 453
454 vf_cp_rings = hw_resc->max_cp_rings - bp->cp_nr_rings; 454 vf_cp_rings = bnxt_get_max_func_cp_rings_for_en(bp) - bp->cp_nr_rings;
455 vf_stat_ctx = hw_resc->max_stat_ctxs - bp->num_stat_ctxs; 455 vf_stat_ctx = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
456 if (bp->flags & BNXT_FLAG_AGG_RINGS) 456 if (bp->flags & BNXT_FLAG_AGG_RINGS)
457 vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2; 457 vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2;
@@ -549,7 +549,8 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
549 max_stat_ctxs = hw_resc->max_stat_ctxs; 549 max_stat_ctxs = hw_resc->max_stat_ctxs;
550 550
551 /* Remaining rings are distributed equally amongst VFs for now */ 551 /* Remaining rings are distributed equally amongst VFs for now */
552 vf_cp_rings = (hw_resc->max_cp_rings - bp->cp_nr_rings) / num_vfs; 552 vf_cp_rings = (bnxt_get_max_func_cp_rings_for_en(bp) -
553 bp->cp_nr_rings) / num_vfs;
553 vf_stat_ctx = (max_stat_ctxs - bp->num_stat_ctxs) / num_vfs; 554 vf_stat_ctx = (max_stat_ctxs - bp->num_stat_ctxs) / num_vfs;
554 if (bp->flags & BNXT_FLAG_AGG_RINGS) 555 if (bp->flags & BNXT_FLAG_AGG_RINGS)
555 vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) / 556 vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) /
@@ -643,7 +644,7 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
643 */ 644 */
644 vfs_supported = *num_vfs; 645 vfs_supported = *num_vfs;
645 646
646 avail_cp = hw_resc->max_cp_rings - bp->cp_nr_rings; 647 avail_cp = bnxt_get_max_func_cp_rings_for_en(bp) - bp->cp_nr_rings;
647 avail_stat = hw_resc->max_stat_ctxs - bp->num_stat_ctxs; 648 avail_stat = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
648 avail_cp = min_t(int, avail_cp, avail_stat); 649 avail_cp = min_t(int, avail_cp, avail_stat);
649 650
@@ -1103,7 +1104,7 @@ update_vf_mac_exit:
1103 mutex_unlock(&bp->hwrm_cmd_lock); 1104 mutex_unlock(&bp->hwrm_cmd_lock);
1104} 1105}
1105 1106
1106int bnxt_approve_mac(struct bnxt *bp, u8 *mac) 1107int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
1107{ 1108{
1108 struct hwrm_func_vf_cfg_input req = {0}; 1109 struct hwrm_func_vf_cfg_input req = {0};
1109 int rc = 0; 1110 int rc = 0;
@@ -1121,12 +1122,13 @@ int bnxt_approve_mac(struct bnxt *bp, u8 *mac)
1121 memcpy(req.dflt_mac_addr, mac, ETH_ALEN); 1122 memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
1122 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 1123 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
1123mac_done: 1124mac_done:
1124 if (rc) { 1125 if (rc && strict) {
1125 rc = -EADDRNOTAVAIL; 1126 rc = -EADDRNOTAVAIL;
1126 netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n", 1127 netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
1127 mac); 1128 mac);
1129 return rc;
1128 } 1130 }
1129 return rc; 1131 return 0;
1130} 1132}
1131#else 1133#else
1132 1134
@@ -1143,7 +1145,7 @@ void bnxt_update_vf_mac(struct bnxt *bp)
1143{ 1145{
1144} 1146}
1145 1147
1146int bnxt_approve_mac(struct bnxt *bp, u8 *mac) 1148int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
1147{ 1149{
1148 return 0; 1150 return 0;
1149} 1151}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
index e9b20cd19881..2eed9eda1195 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
@@ -39,5 +39,5 @@ int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs);
39void bnxt_sriov_disable(struct bnxt *); 39void bnxt_sriov_disable(struct bnxt *);
40void bnxt_hwrm_exec_fwd_req(struct bnxt *); 40void bnxt_hwrm_exec_fwd_req(struct bnxt *);
41void bnxt_update_vf_mac(struct bnxt *); 41void bnxt_update_vf_mac(struct bnxt *);
42int bnxt_approve_mac(struct bnxt *, u8 *); 42int bnxt_approve_mac(struct bnxt *, u8 *, bool);
43#endif 43#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 139d96c5a023..092c817f8f11 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -110,16 +110,14 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
110 struct tcf_exts *tc_exts) 110 struct tcf_exts *tc_exts)
111{ 111{
112 const struct tc_action *tc_act; 112 const struct tc_action *tc_act;
113 LIST_HEAD(tc_actions); 113 int i, rc;
114 int rc;
115 114
116 if (!tcf_exts_has_actions(tc_exts)) { 115 if (!tcf_exts_has_actions(tc_exts)) {
117 netdev_info(bp->dev, "no actions"); 116 netdev_info(bp->dev, "no actions");
118 return -EINVAL; 117 return -EINVAL;
119 } 118 }
120 119
121 tcf_exts_to_list(tc_exts, &tc_actions); 120 tcf_exts_for_each_action(i, tc_act, tc_exts) {
122 list_for_each_entry(tc_act, &tc_actions, list) {
123 /* Drop action */ 121 /* Drop action */
124 if (is_tcf_gact_shot(tc_act)) { 122 if (is_tcf_gact_shot(tc_act)) {
125 actions->flags |= BNXT_TC_ACTION_FLAG_DROP; 123 actions->flags |= BNXT_TC_ACTION_FLAG_DROP;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index c37b2842f972..beee61292d5e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -169,7 +169,6 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
169 edev->ulp_tbl[ulp_id].msix_requested = avail_msix; 169 edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
170 } 170 }
171 bnxt_fill_msix_vecs(bp, ent); 171 bnxt_fill_msix_vecs(bp, ent);
172 bnxt_set_max_func_cp_rings(bp, max_cp_rings - avail_msix);
173 edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED; 172 edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
174 return avail_msix; 173 return avail_msix;
175} 174}
@@ -178,7 +177,6 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
178{ 177{
179 struct net_device *dev = edev->net; 178 struct net_device *dev = edev->net;
180 struct bnxt *bp = netdev_priv(dev); 179 struct bnxt *bp = netdev_priv(dev);
181 int max_cp_rings, msix_requested;
182 180
183 ASSERT_RTNL(); 181 ASSERT_RTNL();
184 if (ulp_id != BNXT_ROCE_ULP) 182 if (ulp_id != BNXT_ROCE_ULP)
@@ -187,9 +185,6 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
187 if (!(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED)) 185 if (!(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
188 return 0; 186 return 0;
189 187
190 max_cp_rings = bnxt_get_max_func_cp_rings(bp);
191 msix_requested = edev->ulp_tbl[ulp_id].msix_requested;
192 bnxt_set_max_func_cp_rings(bp, max_cp_rings + msix_requested);
193 edev->ulp_tbl[ulp_id].msix_requested = 0; 188 edev->ulp_tbl[ulp_id].msix_requested = 0;
194 edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED; 189 edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
195 if (netif_running(dev)) { 190 if (netif_running(dev)) {
@@ -220,21 +215,6 @@ int bnxt_get_ulp_msix_base(struct bnxt *bp)
220 return 0; 215 return 0;
221} 216}
222 217
223void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id)
224{
225 ASSERT_RTNL();
226 if (bnxt_ulp_registered(bp->edev, ulp_id)) {
227 struct bnxt_en_dev *edev = bp->edev;
228 unsigned int msix_req, max;
229
230 msix_req = edev->ulp_tbl[ulp_id].msix_requested;
231 max = bnxt_get_max_func_cp_rings(bp);
232 bnxt_set_max_func_cp_rings(bp, max - msix_req);
233 max = bnxt_get_max_func_stat_ctxs(bp);
234 bnxt_set_max_func_stat_ctxs(bp, max - 1);
235 }
236}
237
238static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id, 218static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id,
239 struct bnxt_fw_msg *fw_msg) 219 struct bnxt_fw_msg *fw_msg)
240{ 220{
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index df48ac71729f..d9bea37cd211 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -90,7 +90,6 @@ static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev, int ulp_id)
90 90
91int bnxt_get_ulp_msix_num(struct bnxt *bp); 91int bnxt_get_ulp_msix_num(struct bnxt *bp);
92int bnxt_get_ulp_msix_base(struct bnxt *bp); 92int bnxt_get_ulp_msix_base(struct bnxt *bp);
93void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id);
94void bnxt_ulp_stop(struct bnxt *bp); 93void bnxt_ulp_stop(struct bnxt *bp);
95void bnxt_ulp_start(struct bnxt *bp); 94void bnxt_ulp_start(struct bnxt *bp);
96void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs); 95void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index b773bc07edf7..14b49612aa86 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -186,6 +186,9 @@ struct bcmgenet_mib_counters {
186#define UMAC_MAC1 0x010 186#define UMAC_MAC1 0x010
187#define UMAC_MAX_FRAME_LEN 0x014 187#define UMAC_MAX_FRAME_LEN 0x014
188 188
189#define UMAC_MODE 0x44
190#define MODE_LINK_STATUS (1 << 5)
191
189#define UMAC_EEE_CTRL 0x064 192#define UMAC_EEE_CTRL 0x064
190#define EN_LPI_RX_PAUSE (1 << 0) 193#define EN_LPI_RX_PAUSE (1 << 0)
191#define EN_LPI_TX_PFC (1 << 1) 194#define EN_LPI_TX_PFC (1 << 1)
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 5333274a283c..4241ae928d4a 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -115,8 +115,14 @@ void bcmgenet_mii_setup(struct net_device *dev)
115static int bcmgenet_fixed_phy_link_update(struct net_device *dev, 115static int bcmgenet_fixed_phy_link_update(struct net_device *dev,
116 struct fixed_phy_status *status) 116 struct fixed_phy_status *status)
117{ 117{
118 if (dev && dev->phydev && status) 118 struct bcmgenet_priv *priv;
119 status->link = dev->phydev->link; 119 u32 reg;
120
121 if (dev && dev->phydev && status) {
122 priv = netdev_priv(dev);
123 reg = bcmgenet_umac_readl(priv, UMAC_MODE);
124 status->link = !!(reg & MODE_LINK_STATUS);
125 }
120 126
121 return 0; 127 return 0;
122} 128}
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index dc09f9a8a49b..f1a86b422617 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -482,11 +482,6 @@ static int macb_mii_probe(struct net_device *dev)
482 482
483 if (np) { 483 if (np) {
484 if (of_phy_is_fixed_link(np)) { 484 if (of_phy_is_fixed_link(np)) {
485 if (of_phy_register_fixed_link(np) < 0) {
486 dev_err(&bp->pdev->dev,
487 "broken fixed-link specification\n");
488 return -ENODEV;
489 }
490 bp->phy_node = of_node_get(np); 485 bp->phy_node = of_node_get(np);
491 } else { 486 } else {
492 bp->phy_node = of_parse_phandle(np, "phy-handle", 0); 487 bp->phy_node = of_parse_phandle(np, "phy-handle", 0);
@@ -569,7 +564,7 @@ static int macb_mii_init(struct macb *bp)
569{ 564{
570 struct macb_platform_data *pdata; 565 struct macb_platform_data *pdata;
571 struct device_node *np; 566 struct device_node *np;
572 int err; 567 int err = -ENXIO;
573 568
574 /* Enable management port */ 569 /* Enable management port */
575 macb_writel(bp, NCR, MACB_BIT(MPE)); 570 macb_writel(bp, NCR, MACB_BIT(MPE));
@@ -592,12 +587,23 @@ static int macb_mii_init(struct macb *bp)
592 dev_set_drvdata(&bp->dev->dev, bp->mii_bus); 587 dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
593 588
594 np = bp->pdev->dev.of_node; 589 np = bp->pdev->dev.of_node;
595 if (pdata) 590 if (np && of_phy_is_fixed_link(np)) {
596 bp->mii_bus->phy_mask = pdata->phy_mask; 591 if (of_phy_register_fixed_link(np) < 0) {
592 dev_err(&bp->pdev->dev,
593 "broken fixed-link specification %pOF\n", np);
594 goto err_out_free_mdiobus;
595 }
596
597 err = mdiobus_register(bp->mii_bus);
598 } else {
599 if (pdata)
600 bp->mii_bus->phy_mask = pdata->phy_mask;
601
602 err = of_mdiobus_register(bp->mii_bus, np);
603 }
597 604
598 err = of_mdiobus_register(bp->mii_bus, np);
599 if (err) 605 if (err)
600 goto err_out_free_mdiobus; 606 goto err_out_free_fixed_link;
601 607
602 err = macb_mii_probe(bp->dev); 608 err = macb_mii_probe(bp->dev);
603 if (err) 609 if (err)
@@ -607,6 +613,7 @@ static int macb_mii_init(struct macb *bp)
607 613
608err_out_unregister_bus: 614err_out_unregister_bus:
609 mdiobus_unregister(bp->mii_bus); 615 mdiobus_unregister(bp->mii_bus);
616err_out_free_fixed_link:
610 if (np && of_phy_is_fixed_link(np)) 617 if (np && of_phy_is_fixed_link(np))
611 of_phy_deregister_fixed_link(np); 618 of_phy_deregister_fixed_link(np);
612err_out_free_mdiobus: 619err_out_free_mdiobus:
@@ -642,7 +649,7 @@ static int macb_halt_tx(struct macb *bp)
642 if (!(status & MACB_BIT(TGO))) 649 if (!(status & MACB_BIT(TGO)))
643 return 0; 650 return 0;
644 651
645 usleep_range(10, 250); 652 udelay(250);
646 } while (time_before(halt_time, timeout)); 653 } while (time_before(halt_time, timeout));
647 654
648 return -ETIMEDOUT; 655 return -ETIMEDOUT;
@@ -2028,14 +2035,17 @@ static void macb_reset_hw(struct macb *bp)
2028{ 2035{
2029 struct macb_queue *queue; 2036 struct macb_queue *queue;
2030 unsigned int q; 2037 unsigned int q;
2038 u32 ctrl = macb_readl(bp, NCR);
2031 2039
2032 /* Disable RX and TX (XXX: Should we halt the transmission 2040 /* Disable RX and TX (XXX: Should we halt the transmission
2033 * more gracefully?) 2041 * more gracefully?)
2034 */ 2042 */
2035 macb_writel(bp, NCR, 0); 2043 ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
2036 2044
2037 /* Clear the stats registers (XXX: Update stats first?) */ 2045 /* Clear the stats registers (XXX: Update stats first?) */
2038 macb_writel(bp, NCR, MACB_BIT(CLRSTAT)); 2046 ctrl |= MACB_BIT(CLRSTAT);
2047
2048 macb_writel(bp, NCR, ctrl);
2039 2049
2040 /* Clear all status flags */ 2050 /* Clear all status flags */
2041 macb_writel(bp, TSR, -1); 2051 macb_writel(bp, TSR, -1);
@@ -2223,7 +2233,7 @@ static void macb_init_hw(struct macb *bp)
2223 } 2233 }
2224 2234
2225 /* Enable TX and RX */ 2235 /* Enable TX and RX */
2226 macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE)); 2236 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));
2227} 2237}
2228 2238
2229/* The hash address register is 64 bits long and takes up two 2239/* The hash address register is 64 bits long and takes up two
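
Both macb NCR hunks replace blind writes with read-modify-write so that unrelated control bits, notably MPE (management port enable), survive reset and re-init. A sketch of the idiom; macb_readl()/macb_writel() and the bit names are the driver's, the helper itself is illustrative:

#include <linux/types.h>

static void my_macb_toggle_rx_tx(struct macb *bp, bool enable)
{
    u32 ctrl = macb_readl(bp, NCR);    /* start from the current state */

    if (enable)
        ctrl |= MACB_BIT(RE) | MACB_BIT(TE);
    else
        ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));

    /* A blind macb_writel(bp, NCR, 0) would also clear MPE and kill
     * MDIO access to the PHY; read-modify-write touches only RE/TE.
     */
    macb_writel(bp, NCR, ctrl);
}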
@@ -3827,6 +3837,13 @@ static const struct macb_config at91sam9260_config = {
3827 .init = macb_init, 3837 .init = macb_init,
3828}; 3838};
3829 3839
3840static const struct macb_config sama5d3macb_config = {
3841 .caps = MACB_CAPS_SG_DISABLED
3842 | MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
3843 .clk_init = macb_clk_init,
3844 .init = macb_init,
3845};
3846
3830static const struct macb_config pc302gem_config = { 3847static const struct macb_config pc302gem_config = {
3831 .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, 3848 .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
3832 .dma_burst_length = 16, 3849 .dma_burst_length = 16,
@@ -3894,6 +3911,7 @@ static const struct of_device_id macb_dt_ids[] = {
3894 { .compatible = "cdns,gem", .data = &pc302gem_config }, 3911 { .compatible = "cdns,gem", .data = &pc302gem_config },
3895 { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config }, 3912 { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
3896 { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config }, 3913 { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
3914 { .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config },
3897 { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config }, 3915 { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
3898 { .compatible = "cdns,at91rm9200-emac", .data = &emac_config }, 3916 { .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
3899 { .compatible = "cdns,emac", .data = &emac_config }, 3917 { .compatible = "cdns,emac", .data = &emac_config },
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index 623f73dd7738..c116f96956fe 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -417,10 +417,9 @@ static void cxgb4_process_flow_actions(struct net_device *in,
417 struct ch_filter_specification *fs) 417 struct ch_filter_specification *fs)
418{ 418{
419 const struct tc_action *a; 419 const struct tc_action *a;
420 LIST_HEAD(actions); 420 int i;
421 421
422 tcf_exts_to_list(cls->exts, &actions); 422 tcf_exts_for_each_action(i, a, cls->exts) {
423 list_for_each_entry(a, &actions, list) {
424 if (is_tcf_gact_ok(a)) { 423 if (is_tcf_gact_ok(a)) {
425 fs->action = FILTER_PASS; 424 fs->action = FILTER_PASS;
426 } else if (is_tcf_gact_shot(a)) { 425 } else if (is_tcf_gact_shot(a)) {
@@ -591,10 +590,9 @@ static int cxgb4_validate_flow_actions(struct net_device *dev,
591 bool act_redir = false; 590 bool act_redir = false;
592 bool act_pedit = false; 591 bool act_pedit = false;
593 bool act_vlan = false; 592 bool act_vlan = false;
594 LIST_HEAD(actions); 593 int i;
595 594
596 tcf_exts_to_list(cls->exts, &actions); 595 tcf_exts_for_each_action(i, a, cls->exts) {
597 list_for_each_entry(a, &actions, list) {
598 if (is_tcf_gact_ok(a)) { 596 if (is_tcf_gact_ok(a)) {
599 /* Do nothing */ 597 /* Do nothing */
600 } else if (is_tcf_gact_shot(a)) { 598 } else if (is_tcf_gact_shot(a)) {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
index 18eb2aedd4cb..c7d2b4dc7568 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
@@ -93,14 +93,13 @@ static int fill_action_fields(struct adapter *adap,
93 unsigned int num_actions = 0; 93 unsigned int num_actions = 0;
94 const struct tc_action *a; 94 const struct tc_action *a;
95 struct tcf_exts *exts; 95 struct tcf_exts *exts;
96 LIST_HEAD(actions); 96 int i;
97 97
98 exts = cls->knode.exts; 98 exts = cls->knode.exts;
99 if (!tcf_exts_has_actions(exts)) 99 if (!tcf_exts_has_actions(exts))
100 return -EINVAL; 100 return -EINVAL;
101 101
102 tcf_exts_to_list(exts, &actions); 102 tcf_exts_for_each_action(i, a, exts) {
103 list_for_each_entry(a, &actions, list) {
104 /* Don't allow more than one action per rule. */ 103 /* Don't allow more than one action per rule. */
105 if (num_actions) 104 if (num_actions)
106 return -EINVAL; 105 return -EINVAL;
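
The bnxt_tc hunk earlier and the two cxgb4 hunks above all migrate from tcf_exts_to_list() plus list_for_each_entry() to the index-based tcf_exts_for_each_action() iterator, dropping the temporary list splice on the stack. A hedged sketch of the new style in a hypothetical offload callback (my_validate_actions() is not code from either driver):

#include <linux/errno.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>

static int my_validate_actions(struct tcf_exts *exts)
{
    const struct tc_action *a;
    int i;

    if (!tcf_exts_has_actions(exts))
        return -EINVAL;

    /* Walks the exts action array in place: no LIST_HEAD on the stack
     * and no tcf_exts_to_list() copy, which is what every hunk in this
     * group deletes.
     */
    tcf_exts_for_each_action(i, a, exts) {
        if (is_tcf_gact_shot(a))    /* drop action: supported */
            continue;
        return -EOPNOTSUPP;         /* everything else: reject */
    }
    return 0;
}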
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index ff92ab1daeb8..1e9d882c04ef 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -4500,7 +4500,7 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
4500 port_res->max_vfs += le16_to_cpu(pcie->num_vfs); 4500 port_res->max_vfs += le16_to_cpu(pcie->num_vfs);
4501 } 4501 }
4502 } 4502 }
4503 return status; 4503 goto err;
4504 } 4504 }
4505 4505
4506 pcie = be_get_pcie_desc(resp->func_param, desc_count, 4506 pcie = be_get_pcie_desc(resp->func_param, desc_count,
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index fa5b30f547f6..08a750fb60c4 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -220,10 +220,10 @@ struct hnae_desc_cb {
220 220
221 /* priv data for the desc, e.g. skb when use with ip stack*/ 221 /* priv data for the desc, e.g. skb when use with ip stack*/
222 void *priv; 222 void *priv;
223 u16 page_offset; 223 u32 page_offset;
224 u16 reuse_flag; 224 u32 length; /* length of the buffer */
225 225
226 u16 length; /* length of the buffer */ 226 u16 reuse_flag;
227 227
228 /* desc type, used by the ring user to mark the type of the priv data */ 228 /* desc type, used by the ring user to mark the type of the priv data */
229 u16 type; 229 u16 type;
@@ -486,6 +486,8 @@ struct hnae_ae_ops {
486 u8 *auto_neg, u16 *speed, u8 *duplex); 486 u8 *auto_neg, u16 *speed, u8 *duplex);
487 void (*toggle_ring_irq)(struct hnae_ring *ring, u32 val); 487 void (*toggle_ring_irq)(struct hnae_ring *ring, u32 val);
488 void (*adjust_link)(struct hnae_handle *handle, int speed, int duplex); 488 void (*adjust_link)(struct hnae_handle *handle, int speed, int duplex);
489 bool (*need_adjust_link)(struct hnae_handle *handle,
490 int speed, int duplex);
489 int (*set_loopback)(struct hnae_handle *handle, 491 int (*set_loopback)(struct hnae_handle *handle,
490 enum hnae_loop loop_mode, int en); 492 enum hnae_loop loop_mode, int en);
491 void (*get_ring_bdnum_limit)(struct hnae_queue *queue, 493 void (*get_ring_bdnum_limit)(struct hnae_queue *queue,
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index e6aad30e7e69..b52029e26d15 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -155,6 +155,41 @@ static void hns_ae_put_handle(struct hnae_handle *handle)
155 hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0; 155 hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0;
156} 156}
157 157
158static int hns_ae_wait_flow_down(struct hnae_handle *handle)
159{
160 struct dsaf_device *dsaf_dev;
161 struct hns_ppe_cb *ppe_cb;
162 struct hnae_vf_cb *vf_cb;
163 int ret;
164 int i;
165
166 for (i = 0; i < handle->q_num; i++) {
167 ret = hns_rcb_wait_tx_ring_clean(handle->qs[i]);
168 if (ret)
169 return ret;
170 }
171
172 ppe_cb = hns_get_ppe_cb(handle);
173 ret = hns_ppe_wait_tx_fifo_clean(ppe_cb);
174 if (ret)
175 return ret;
176
177 dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
178 if (!dsaf_dev)
179 return -EINVAL;
180 ret = hns_dsaf_wait_pkt_clean(dsaf_dev, handle->dport_id);
181 if (ret)
182 return ret;
183
184 vf_cb = hns_ae_get_vf_cb(handle);
185 ret = hns_mac_wait_fifo_clean(vf_cb->mac_cb);
186 if (ret)
187 return ret;
188
189 mdelay(10);
190 return 0;
191}
192
158static void hns_ae_ring_enable_all(struct hnae_handle *handle, int val) 193static void hns_ae_ring_enable_all(struct hnae_handle *handle, int val)
159{ 194{
160 int q_num = handle->q_num; 195 int q_num = handle->q_num;
@@ -399,12 +434,41 @@ static int hns_ae_get_mac_info(struct hnae_handle *handle,
399 return hns_mac_get_port_info(mac_cb, auto_neg, speed, duplex); 434 return hns_mac_get_port_info(mac_cb, auto_neg, speed, duplex);
400} 435}
401 436
437static bool hns_ae_need_adjust_link(struct hnae_handle *handle, int speed,
438 int duplex)
439{
440 struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
441
442 return hns_mac_need_adjust_link(mac_cb, speed, duplex);
443}
444
402static void hns_ae_adjust_link(struct hnae_handle *handle, int speed, 445static void hns_ae_adjust_link(struct hnae_handle *handle, int speed,
403 int duplex) 446 int duplex)
404{ 447{
405 struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle); 448 struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
406 449
407 hns_mac_adjust_link(mac_cb, speed, duplex); 450 switch (mac_cb->dsaf_dev->dsaf_ver) {
451 case AE_VERSION_1:
452 hns_mac_adjust_link(mac_cb, speed, duplex);
453 break;
454
455 case AE_VERSION_2:
456 /* chip need to clear all pkt inside */
457 hns_mac_disable(mac_cb, MAC_COMM_MODE_RX);
458 if (hns_ae_wait_flow_down(handle)) {
459 hns_mac_enable(mac_cb, MAC_COMM_MODE_RX);
460 break;
461 }
462
463 hns_mac_adjust_link(mac_cb, speed, duplex);
464 hns_mac_enable(mac_cb, MAC_COMM_MODE_RX);
465 break;
466
467 default:
468 break;
469 }
470
471 return;
408} 472}
409 473
410static void hns_ae_get_ring_bdnum_limit(struct hnae_queue *queue, 474static void hns_ae_get_ring_bdnum_limit(struct hnae_queue *queue,
@@ -902,6 +966,7 @@ static struct hnae_ae_ops hns_dsaf_ops = {
902 .get_status = hns_ae_get_link_status, 966 .get_status = hns_ae_get_link_status,
903 .get_info = hns_ae_get_mac_info, 967 .get_info = hns_ae_get_mac_info,
904 .adjust_link = hns_ae_adjust_link, 968 .adjust_link = hns_ae_adjust_link,
969 .need_adjust_link = hns_ae_need_adjust_link,
905 .set_loopback = hns_ae_config_loopback, 970 .set_loopback = hns_ae_config_loopback,
906 .get_ring_bdnum_limit = hns_ae_get_ring_bdnum_limit, 971 .get_ring_bdnum_limit = hns_ae_get_ring_bdnum_limit,
907 .get_pauseparam = hns_ae_get_pauseparam, 972 .get_pauseparam = hns_ae_get_pauseparam,
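
For AE_VERSION_2 hardware the new adjust_link path quiesces traffic before reprogramming speed/duplex: stop MAC RX, drain every stage (rings, PPE, DSAF, MAC FIFO), rewrite the registers, then re-enable RX. A condensed sketch of that sequence using the names from the diff (the wrapper itself is illustrative and drops error reporting):

static void my_adjust_link_v2(struct hnae_handle *handle,
                              struct hns_mac_cb *mac_cb,
                              int speed, int duplex)
{
    /* 1. Stop ingress so no new packets enter the pipeline */
    hns_mac_disable(mac_cb, MAC_COMM_MODE_RX);

    /* 2. Drain rings, PPE, DSAF and the MAC FIFO (bounded polls) */
    if (hns_ae_wait_flow_down(handle)) {
        /* Drain timed out: restore RX and leave the settings alone */
        hns_mac_enable(mac_cb, MAC_COMM_MODE_RX);
        return;
    }

    /* 3. Safe to rewrite the speed/duplex registers now */
    hns_mac_adjust_link(mac_cb, speed, duplex);

    /* 4. Resume ingress */
    hns_mac_enable(mac_cb, MAC_COMM_MODE_RX);
}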
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
index 5488c6e89f21..09e4061d1fa6 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -257,6 +257,16 @@ static void hns_gmac_get_pausefrm_cfg(void *mac_drv, u32 *rx_pause_en,
257 *tx_pause_en = dsaf_get_bit(pause_en, GMAC_PAUSE_EN_TX_FDFC_B); 257 *tx_pause_en = dsaf_get_bit(pause_en, GMAC_PAUSE_EN_TX_FDFC_B);
258} 258}
259 259
260static bool hns_gmac_need_adjust_link(void *mac_drv, enum mac_speed speed,
261 int duplex)
262{
263 struct mac_driver *drv = (struct mac_driver *)mac_drv;
264 struct hns_mac_cb *mac_cb = drv->mac_cb;
265
266 return (mac_cb->speed != speed) ||
267 (mac_cb->half_duplex == duplex);
268}
269
260static int hns_gmac_adjust_link(void *mac_drv, enum mac_speed speed, 270static int hns_gmac_adjust_link(void *mac_drv, enum mac_speed speed,
261 u32 full_duplex) 271 u32 full_duplex)
262{ 272{
@@ -309,6 +319,30 @@ static void hns_gmac_set_promisc(void *mac_drv, u8 en)
309 hns_gmac_set_uc_match(mac_drv, en); 319 hns_gmac_set_uc_match(mac_drv, en);
310} 320}
311 321
322int hns_gmac_wait_fifo_clean(void *mac_drv)
323{
324 struct mac_driver *drv = (struct mac_driver *)mac_drv;
325 int wait_cnt;
326 u32 val;
327
328 wait_cnt = 0;
329 while (wait_cnt++ < HNS_MAX_WAIT_CNT) {
330 val = dsaf_read_dev(drv, GMAC_FIFO_STATE_REG);
331 /* bits 5~0: number of packets not yet completely sent */
332 if ((val & 0x3f) == 0)
333 break;
334 usleep_range(100, 200);
335 }
336
337 if (wait_cnt >= HNS_MAX_WAIT_CNT) {
338 dev_err(drv->dev,
339 "hns ge %d fifo was not idle.\n", drv->mac_id);
340 return -EBUSY;
341 }
342
343 return 0;
344}
345
312static void hns_gmac_init(void *mac_drv) 346static void hns_gmac_init(void *mac_drv)
313{ 347{
314 u32 port; 348 u32 port;
@@ -690,6 +724,7 @@ void *hns_gmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param)
690 mac_drv->mac_disable = hns_gmac_disable; 724 mac_drv->mac_disable = hns_gmac_disable;
691 mac_drv->mac_free = hns_gmac_free; 725 mac_drv->mac_free = hns_gmac_free;
692 mac_drv->adjust_link = hns_gmac_adjust_link; 726 mac_drv->adjust_link = hns_gmac_adjust_link;
727 mac_drv->need_adjust_link = hns_gmac_need_adjust_link;
693 mac_drv->set_tx_auto_pause_frames = hns_gmac_set_tx_auto_pause_frames; 728 mac_drv->set_tx_auto_pause_frames = hns_gmac_set_tx_auto_pause_frames;
694 mac_drv->config_max_frame_length = hns_gmac_config_max_frame_length; 729 mac_drv->config_max_frame_length = hns_gmac_config_max_frame_length;
695 mac_drv->mac_pausefrm_cfg = hns_gmac_pause_frm_cfg; 730 mac_drv->mac_pausefrm_cfg = hns_gmac_pause_frm_cfg;
@@ -717,6 +752,7 @@ void *hns_gmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param)
717 mac_drv->get_strings = hns_gmac_get_strings; 752 mac_drv->get_strings = hns_gmac_get_strings;
718 mac_drv->update_stats = hns_gmac_update_stats; 753 mac_drv->update_stats = hns_gmac_update_stats;
719 mac_drv->set_promiscuous = hns_gmac_set_promisc; 754 mac_drv->set_promiscuous = hns_gmac_set_promisc;
755 mac_drv->wait_fifo_clean = hns_gmac_wait_fifo_clean;
720 756
721 return (void *)mac_drv; 757 return (void *)mac_drv;
722} 758}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index 1c2326bd76e2..6ed6f142427e 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -114,6 +114,26 @@ int hns_mac_get_port_info(struct hns_mac_cb *mac_cb,
114 return 0; 114 return 0;
115} 115}
116 116
117/**
118 *hns_mac_need_adjust_link - check whether the MAC speed and duplex registers need updating
119 *@mac_cb: mac device
120 *@speed: phy device speed
121 *@duplex: phy device duplex
122 *
123 */
124bool hns_mac_need_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex)
125{
126 struct mac_driver *mac_ctrl_drv;
127
128 mac_ctrl_drv = (struct mac_driver *)(mac_cb->priv.mac);
129
130 if (mac_ctrl_drv->need_adjust_link)
131 return mac_ctrl_drv->need_adjust_link(mac_ctrl_drv,
132 (enum mac_speed)speed, duplex);
133 else
134 return true;
135}
136
117void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex) 137void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex)
118{ 138{
119 int ret; 139 int ret;
@@ -430,6 +450,16 @@ int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vmid, bool enable)
430 return 0; 450 return 0;
431} 451}
432 452
453int hns_mac_wait_fifo_clean(struct hns_mac_cb *mac_cb)
454{
455 struct mac_driver *drv = hns_mac_get_drv(mac_cb);
456
457 if (drv->wait_fifo_clean)
458 return drv->wait_fifo_clean(drv);
459
460 return 0;
461}
462
433void hns_mac_reset(struct hns_mac_cb *mac_cb) 463void hns_mac_reset(struct hns_mac_cb *mac_cb)
434{ 464{
435 struct mac_driver *drv = hns_mac_get_drv(mac_cb); 465 struct mac_driver *drv = hns_mac_get_drv(mac_cb);
@@ -998,6 +1028,20 @@ static int hns_mac_get_max_port_num(struct dsaf_device *dsaf_dev)
998 return DSAF_MAX_PORT_NUM; 1028 return DSAF_MAX_PORT_NUM;
999} 1029}
1000 1030
1031void hns_mac_enable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode)
1032{
1033 struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
1034
1035 mac_ctrl_drv->mac_enable(mac_cb->priv.mac, mode);
1036}
1037
1038void hns_mac_disable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode)
1039{
1040 struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
1041
1042 mac_ctrl_drv->mac_disable(mac_cb->priv.mac, mode);
1043}
1044
1001/** 1045/**
1002 * hns_mac_init - init mac 1046 * hns_mac_init - init mac
1003 * @dsaf_dev: dsa fabric device struct pointer 1047 * @dsaf_dev: dsa fabric device struct pointer
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
index bbc0a98e7ca3..fbc75341bef7 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
@@ -356,6 +356,9 @@ struct mac_driver {
356 /*adjust mac mode of port,include speed and duplex*/ 356 /*adjust mac mode of port,include speed and duplex*/
357 int (*adjust_link)(void *mac_drv, enum mac_speed speed, 357 int (*adjust_link)(void *mac_drv, enum mac_speed speed,
358 u32 full_duplex); 358 u32 full_duplex);
359 /* check whether the link needs adjusting */
360 bool (*need_adjust_link)(void *mac_drv, enum mac_speed speed,
361 int duplex);
359 /* config auto-negotiation mode of port */ 362 /* config auto-negotiation mode of port */
360 void (*set_an_mode)(void *mac_drv, u8 enable); 363 void (*set_an_mode)(void *mac_drv, u8 enable);
361 /* config loopback mode */ 364 /* config loopback mode */
@@ -394,6 +397,7 @@ struct mac_driver {
394 void (*get_info)(void *mac_drv, struct mac_info *mac_info); 397 void (*get_info)(void *mac_drv, struct mac_info *mac_info);
395 398
396 void (*update_stats)(void *mac_drv); 399 void (*update_stats)(void *mac_drv);
400 int (*wait_fifo_clean)(void *mac_drv);
397 401
398 enum mac_mode mac_mode; 402 enum mac_mode mac_mode;
399 u8 mac_id; 403 u8 mac_id;
@@ -427,6 +431,7 @@ void *hns_xgmac_config(struct hns_mac_cb *mac_cb,
427 431
428int hns_mac_init(struct dsaf_device *dsaf_dev); 432int hns_mac_init(struct dsaf_device *dsaf_dev);
429void mac_adjust_link(struct net_device *net_dev); 433void mac_adjust_link(struct net_device *net_dev);
434bool hns_mac_need_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex);
430void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status); 435void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status);
431int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, u32 vmid, char *addr); 436int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, u32 vmid, char *addr);
432int hns_mac_set_multi(struct hns_mac_cb *mac_cb, 437int hns_mac_set_multi(struct hns_mac_cb *mac_cb,
@@ -463,5 +468,8 @@ int hns_mac_add_uc_addr(struct hns_mac_cb *mac_cb, u8 vf_id,
463int hns_mac_rm_uc_addr(struct hns_mac_cb *mac_cb, u8 vf_id, 468int hns_mac_rm_uc_addr(struct hns_mac_cb *mac_cb, u8 vf_id,
464 const unsigned char *addr); 469 const unsigned char *addr);
465int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn); 470int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn);
471void hns_mac_enable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode);
472void hns_mac_disable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode);
473int hns_mac_wait_fifo_clean(struct hns_mac_cb *mac_cb);
466 474
467#endif /* _HNS_DSAF_MAC_H */ 475#endif /* _HNS_DSAF_MAC_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index ca50c2553a9c..e557a4ef5996 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -2727,6 +2727,35 @@ void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev,
2727 soft_mac_entry->index = enable ? entry_index : DSAF_INVALID_ENTRY_IDX; 2727 soft_mac_entry->index = enable ? entry_index : DSAF_INVALID_ENTRY_IDX;
2728} 2728}
2729 2729
2730int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port)
2731{
2732 u32 val, val_tmp;
2733 int wait_cnt;
2734
2735 if (port >= DSAF_SERVICE_NW_NUM)
2736 return 0;
2737
2738 wait_cnt = 0;
2739 while (wait_cnt++ < HNS_MAX_WAIT_CNT) {
2740 val = dsaf_read_dev(dsaf_dev, DSAF_VOQ_IN_PKT_NUM_0_REG +
2741 (port + DSAF_XGE_NUM) * 0x40);
2742 val_tmp = dsaf_read_dev(dsaf_dev, DSAF_VOQ_OUT_PKT_NUM_0_REG +
2743 (port + DSAF_XGE_NUM) * 0x40);
2744 if (val == val_tmp)
2745 break;
2746
2747 usleep_range(100, 200);
2748 }
2749
2750 if (wait_cnt >= HNS_MAX_WAIT_CNT) {
2751 dev_err(dsaf_dev->dev, "hns dsaf clean wait timeout(%u - %u).\n",
2752 val, val_tmp);
2753 return -EBUSY;
2754 }
2755
2756 return 0;
2757}
2758
2730/** 2759/**
2731 * dsaf_probe - probe dsaf dev 2760 * dsaf_probe - probe dsaf dev
2732 * @pdev: dsaf platform device 2761 * @pdev: dsaf platform device
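hns_dsaf_wait_pkt_clean() above, and the PPE/RCB wait helpers added further down, all follow the same bounded-poll shape: read hardware state, compare, sleep 100-200us via usleep_range(), and give up with -EBUSY after HNS_MAX_WAIT_CNT iterations. A minimal userspace sketch of that shape, with the MMIO reads (dsaf_read_dev() in the patch) replaced by a simulated draining counter:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MAX_WAIT_CNT 10000	/* mirrors HNS_MAX_WAIT_CNT */

/* Simulated in/out packet counters; a real driver reads MMIO here. */
static uint32_t sim_in = 5, sim_out;

static uint32_t read_in(void)  { return sim_in; }
static uint32_t read_out(void) { return sim_out < sim_in ? ++sim_out : sim_out; }

static int wait_pkt_clean(void)
{
	int wait_cnt = 0;

	while (wait_cnt++ < MAX_WAIT_CNT) {
		if (read_in() == read_out())	/* all queued packets drained */
			return 0;
		usleep(150);	/* kernel uses usleep_range(100, 200) */
	}
	return -EBUSY;	/* hardware never drained: report busy */
}

int main(void)
{
	printf("wait_pkt_clean() = %d\n", wait_pkt_clean());
	return 0;
}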
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
index 4507e8222683..0e1cd99831a6 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -44,6 +44,8 @@ struct hns_mac_cb;
44#define DSAF_ROCE_CREDIT_CHN 8 44#define DSAF_ROCE_CREDIT_CHN 8
45#define DSAF_ROCE_CHAN_MODE 3 45#define DSAF_ROCE_CHAN_MODE 3
46 46
47#define HNS_MAX_WAIT_CNT 10000
48
47enum dsaf_roce_port_mode { 49enum dsaf_roce_port_mode {
48 DSAF_ROCE_6PORT_MODE, 50 DSAF_ROCE_6PORT_MODE,
49 DSAF_ROCE_4PORT_MODE, 51 DSAF_ROCE_4PORT_MODE,
@@ -463,5 +465,6 @@ int hns_dsaf_rm_mac_addr(
463 465
464int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev, 466int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev,
465 u8 mac_id, u8 port_num); 467 u8 mac_id, u8 port_num);
468int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port);
466 469
467#endif /* __HNS_DSAF_MAIN_H__ */ 470#endif /* __HNS_DSAF_MAIN_H__ */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
index d160d8c9e45b..0942e4916d9d 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
@@ -275,6 +275,29 @@ static void hns_ppe_exc_irq_en(struct hns_ppe_cb *ppe_cb, int en)
275 dsaf_write_dev(ppe_cb, PPE_INTEN_REG, msk_vlue & vld_msk); 275 dsaf_write_dev(ppe_cb, PPE_INTEN_REG, msk_vlue & vld_msk);
276} 276}
277 277
278int hns_ppe_wait_tx_fifo_clean(struct hns_ppe_cb *ppe_cb)
279{
280 int wait_cnt;
281 u32 val;
282
283 wait_cnt = 0;
284 while (wait_cnt++ < HNS_MAX_WAIT_CNT) {
285 val = dsaf_read_dev(ppe_cb, PPE_CURR_TX_FIFO0_REG) & 0x3ffU;
286 if (!val)
287 break;
288
289 usleep_range(100, 200);
290 }
291
292 if (wait_cnt >= HNS_MAX_WAIT_CNT) {
293 dev_err(ppe_cb->dev, "hns ppe tx fifo clean wait timeout, still has %u pkt.\n",
294 val);
295 return -EBUSY;
296 }
297
298 return 0;
299}
300
278/** 301/**
279 * ppe_init_hw - init ppe 302 * ppe_init_hw - init ppe
280 * @ppe_cb: ppe device 303 * @ppe_cb: ppe device
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
index 9d8e643e8aa6..f670e63a5a01 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
@@ -100,6 +100,7 @@ struct ppe_common_cb {
100 100
101}; 101};
102 102
103int hns_ppe_wait_tx_fifo_clean(struct hns_ppe_cb *ppe_cb);
103int hns_ppe_init(struct dsaf_device *dsaf_dev); 104int hns_ppe_init(struct dsaf_device *dsaf_dev);
104 105
105void hns_ppe_uninit(struct dsaf_device *dsaf_dev); 106void hns_ppe_uninit(struct dsaf_device *dsaf_dev);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index 9d76e2e54f9d..5d64519b9b1d 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -66,6 +66,29 @@ void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag)
66 "queue(%d) wait fbd(%d) clean fail!!\n", i, fbd_num); 66 "queue(%d) wait fbd(%d) clean fail!!\n", i, fbd_num);
67} 67}
68 68
69int hns_rcb_wait_tx_ring_clean(struct hnae_queue *qs)
70{
71 u32 head, tail;
72 int wait_cnt;
73
74 tail = dsaf_read_dev(&qs->tx_ring, RCB_REG_TAIL);
75 wait_cnt = 0;
76 while (wait_cnt++ < HNS_MAX_WAIT_CNT) {
77 head = dsaf_read_dev(&qs->tx_ring, RCB_REG_HEAD);
78 if (tail == head)
79 break;
80
81 usleep_range(100, 200);
82 }
83
84 if (wait_cnt >= HNS_MAX_WAIT_CNT) {
85 dev_err(qs->dev->dev, "rcb wait timeout, head not equal to tail.\n");
86 return -EBUSY;
87 }
88
89 return 0;
90}
91
69/** 92/**
70 *hns_rcb_reset_ring_hw - ring reset 93 *hns_rcb_reset_ring_hw - ring reset
71 *@q: ring struct pointer 94 *@q: ring struct pointer
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
index 602816498c8d..2319b772a271 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
@@ -136,6 +136,7 @@ void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag);
136void hns_rcb_init_hw(struct ring_pair_cb *ring); 136void hns_rcb_init_hw(struct ring_pair_cb *ring);
137void hns_rcb_reset_ring_hw(struct hnae_queue *q); 137void hns_rcb_reset_ring_hw(struct hnae_queue *q);
138void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag); 138void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag);
139int hns_rcb_wait_tx_ring_clean(struct hnae_queue *qs);
139u32 hns_rcb_get_rx_coalesced_frames( 140u32 hns_rcb_get_rx_coalesced_frames(
140 struct rcb_common_cb *rcb_common, u32 port_idx); 141 struct rcb_common_cb *rcb_common, u32 port_idx);
141u32 hns_rcb_get_tx_coalesced_frames( 142u32 hns_rcb_get_tx_coalesced_frames(
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index 886cbbf25761..74d935d82cbc 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -464,6 +464,7 @@
464#define RCB_RING_INTMSK_TX_OVERTIME_REG 0x000C4 464#define RCB_RING_INTMSK_TX_OVERTIME_REG 0x000C4
465#define RCB_RING_INTSTS_TX_OVERTIME_REG 0x000C8 465#define RCB_RING_INTSTS_TX_OVERTIME_REG 0x000C8
466 466
467#define GMAC_FIFO_STATE_REG 0x0000UL
467#define GMAC_DUPLEX_TYPE_REG 0x0008UL 468#define GMAC_DUPLEX_TYPE_REG 0x0008UL
468#define GMAC_FD_FC_TYPE_REG 0x000CUL 469#define GMAC_FD_FC_TYPE_REG 0x000CUL
469#define GMAC_TX_WATER_LINE_REG 0x0010UL 470#define GMAC_TX_WATER_LINE_REG 0x0010UL
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 9f2b552aee33..f56855e63c96 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -406,113 +406,13 @@ out_net_tx_busy:
406 return NETDEV_TX_BUSY; 406 return NETDEV_TX_BUSY;
407} 407}
408 408
409/**
410 * hns_nic_get_headlen - determine size of header for RSC/LRO/GRO/FCOE
411 * @data: pointer to the start of the headers
412 * @max: total length of section to find headers in
413 *
414 * This function is meant to determine the length of headers that will
415 * be recognized by hardware for LRO, GRO, and RSC offloads. The main
416 * motivation of doing this is to only perform one pull for IPv4 TCP
417 * packets so that we can do basic things like calculating the gso_size
418 * based on the average data per packet.
419 **/
420static unsigned int hns_nic_get_headlen(unsigned char *data, u32 flag,
421 unsigned int max_size)
422{
423 unsigned char *network;
424 u8 hlen;
425
426 /* this should never happen, but better safe than sorry */
427 if (max_size < ETH_HLEN)
428 return max_size;
429
430 /* initialize network frame pointer */
431 network = data;
432
433 /* set first protocol and move network header forward */
434 network += ETH_HLEN;
435
436 /* handle any vlan tag if present */
437 if (hnae_get_field(flag, HNS_RXD_VLAN_M, HNS_RXD_VLAN_S)
438 == HNS_RX_FLAG_VLAN_PRESENT) {
439 if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN))
440 return max_size;
441
442 network += VLAN_HLEN;
443 }
444
445 /* handle L3 protocols */
446 if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
447 == HNS_RX_FLAG_L3ID_IPV4) {
448 if ((typeof(max_size))(network - data) >
449 (max_size - sizeof(struct iphdr)))
450 return max_size;
451
452 /* access ihl as a u8 to avoid unaligned access on ia64 */
453 hlen = (network[0] & 0x0F) << 2;
454
455 /* verify hlen meets minimum size requirements */
456 if (hlen < sizeof(struct iphdr))
457 return network - data;
458
459 /* record next protocol if header is present */
460 } else if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
461 == HNS_RX_FLAG_L3ID_IPV6) {
462 if ((typeof(max_size))(network - data) >
463 (max_size - sizeof(struct ipv6hdr)))
464 return max_size;
465
466 /* record next protocol */
467 hlen = sizeof(struct ipv6hdr);
468 } else {
469 return network - data;
470 }
471
472 /* relocate pointer to start of L4 header */
473 network += hlen;
474
475 /* finally sort out TCP/UDP */
476 if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
477 == HNS_RX_FLAG_L4ID_TCP) {
478 if ((typeof(max_size))(network - data) >
479 (max_size - sizeof(struct tcphdr)))
480 return max_size;
481
482 /* access doff as a u8 to avoid unaligned access on ia64 */
483 hlen = (network[12] & 0xF0) >> 2;
484
485 /* verify hlen meets minimum size requirements */
486 if (hlen < sizeof(struct tcphdr))
487 return network - data;
488
489 network += hlen;
490 } else if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
491 == HNS_RX_FLAG_L4ID_UDP) {
492 if ((typeof(max_size))(network - data) >
493 (max_size - sizeof(struct udphdr)))
494 return max_size;
495
496 network += sizeof(struct udphdr);
497 }
498
499 /* If everything has gone correctly network should be the
500 * data section of the packet and will be the end of the header.
501 * If not then it probably represents the end of the last recognized
502 * header.
503 */
504 if ((typeof(max_size))(network - data) < max_size)
505 return network - data;
506 else
507 return max_size;
508}
509
510static void hns_nic_reuse_page(struct sk_buff *skb, int i, 409static void hns_nic_reuse_page(struct sk_buff *skb, int i,
511 struct hnae_ring *ring, int pull_len, 410 struct hnae_ring *ring, int pull_len,
512 struct hnae_desc_cb *desc_cb) 411 struct hnae_desc_cb *desc_cb)
513{ 412{
514 struct hnae_desc *desc; 413 struct hnae_desc *desc;
515 int truesize, size; 414 u32 truesize;
415 int size;
516 int last_offset; 416 int last_offset;
517 bool twobufs; 417 bool twobufs;
518 418
@@ -530,7 +430,7 @@ static void hns_nic_reuse_page(struct sk_buff *skb, int i,
530 } 430 }
531 431
532 skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len, 432 skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
533 size - pull_len, truesize - pull_len); 433 size - pull_len, truesize);
534 434
535 /* avoid reusing remote pages; the flag defaults to no-reuse */ 435 /* avoid reusing remote pages; the flag defaults to no-reuse */
536 if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id())) 436 if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
@@ -695,7 +595,7 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
695 } else { 595 } else {
696 ring->stats.seg_pkt_cnt++; 596 ring->stats.seg_pkt_cnt++;
697 597
698 pull_len = hns_nic_get_headlen(va, bnum_flag, HNS_RX_HEAD_SIZE); 598 pull_len = eth_get_headlen(va, HNS_RX_HEAD_SIZE);
699 memcpy(__skb_put(skb, pull_len), va, 599 memcpy(__skb_put(skb, pull_len), va,
700 ALIGN(pull_len, sizeof(long))); 600 ALIGN(pull_len, sizeof(long)));
701 601
@@ -1212,11 +1112,26 @@ static void hns_nic_adjust_link(struct net_device *ndev)
1212 struct hnae_handle *h = priv->ae_handle; 1112 struct hnae_handle *h = priv->ae_handle;
1213 int state = 1; 1113 int state = 1;
1214 1114
1115 /* If there is no phy, there is no need to adjust the link */
1215 if (ndev->phydev) { 1116 if (ndev->phydev) {
1216 h->dev->ops->adjust_link(h, ndev->phydev->speed, 1117 /* When the phy link is down, do nothing */
1217 ndev->phydev->duplex); 1118 if (ndev->phydev->link == 0)
1218 state = ndev->phydev->link; 1119 return;
1120
1121 if (h->dev->ops->need_adjust_link(h, ndev->phydev->speed,
1122 ndev->phydev->duplex)) {
1123 /* The Hi161X chip can't change GMAC speed and duplex
1124 * while traffic is flowing. Delay 200ms to make sure
1125 * there is no more data in the chip FIFO.
1126 */
1127 netif_carrier_off(ndev);
1128 msleep(200);
1129 h->dev->ops->adjust_link(h, ndev->phydev->speed,
1130 ndev->phydev->duplex);
1131 netif_carrier_on(ndev);
1132 }
1219 } 1133 }
1134
1220 state = state && h->dev->ops->get_status(h); 1135 state = state && h->dev->ops->get_status(h);
1221 1136
1222 if (state != priv->link) { 1137 if (state != priv->link) {
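The reworked hns_nic_adjust_link() only reprograms the MAC when need_adjust_link() says so, and brackets the change with netif_carrier_off()/netif_carrier_on() plus a 200ms drain delay, because the Hi161X cannot switch speed/duplex under traffic. A userspace sketch of that quiesce-reconfigure-resume sequence; carrier_off/carrier_on/program_mac are stand-ins for the kernel calls:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static void carrier_off(void) { puts("carrier off: stack stops queuing tx"); }
static void carrier_on(void)  { puts("carrier on: tx resumes"); }
static void program_mac(int speed, int duplex)
{
	printf("mac reprogrammed: %d Mb/s, %s duplex\n",
	       speed, duplex ? "full" : "half");
}

/* Stand-in for ops->need_adjust_link(): only true when the requested
 * settings differ from what the hardware currently runs. */
static bool need_adjust(int cur_speed, int speed, int cur_dup, int duplex)
{
	return cur_speed != speed || cur_dup != duplex;
}

static void adjust_link(int speed, int duplex)
{
	static int cur_speed = 100, cur_duplex = 1;

	if (!need_adjust(cur_speed, speed, cur_duplex, duplex))
		return;		/* nothing to do; avoid a needless outage */

	carrier_off();		/* stop new traffic reaching the MAC */
	usleep(200 * 1000);	/* ~200ms: let the chip FIFO drain */
	program_mac(speed, duplex);
	cur_speed = speed;
	cur_duplex = duplex;
	carrier_on();
}

int main(void)
{
	adjust_link(1000, 1);
	return 0;
}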
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 08f3c4743f74..774beda040a1 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -243,7 +243,9 @@ static int hns_nic_set_link_ksettings(struct net_device *net_dev,
243 } 243 }
244 244
245 if (h->dev->ops->adjust_link) { 245 if (h->dev->ops->adjust_link) {
246 netif_carrier_off(net_dev);
246 h->dev->ops->adjust_link(h, (int)speed, cmd->base.duplex); 247 h->dev->ops->adjust_link(h, (int)speed, cmd->base.duplex);
248 netif_carrier_on(net_dev);
247 return 0; 249 return 0;
248 } 250 }
249 251
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 3554dca7a680..955c4ab18b03 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -2019,7 +2019,8 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
2019 struct hns3_desc_cb *desc_cb) 2019 struct hns3_desc_cb *desc_cb)
2020{ 2020{
2021 struct hns3_desc *desc; 2021 struct hns3_desc *desc;
2022 int truesize, size; 2022 u32 truesize;
2023 int size;
2023 int last_offset; 2024 int last_offset;
2024 bool twobufs; 2025 bool twobufs;
2025 2026
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index a02a96aee2a2..cb450d7ec8c1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -284,11 +284,11 @@ struct hns3_desc_cb {
284 284
285 /* priv data for the desc, e.g. skb when used with the IP stack */ 285 /* priv data for the desc, e.g. skb when used with the IP stack */
286 void *priv; 286 void *priv;
287 u16 page_offset; 287 u32 page_offset;
288 u16 reuse_flag;
289
290 u32 length; /* length of the buffer */ 288 u32 length; /* length of the buffer */
291 289
290 u16 reuse_flag;
291
292 /* desc type, used by the ring user to mark the type of the priv data */ 292 /* desc type, used by the ring user to mark the type of the priv data */
293 u16 type; 293 u16 type;
294}; 294};
diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c
index c8c7ad2eff77..9b5a68b65432 100644
--- a/drivers/net/ethernet/hp/hp100.c
+++ b/drivers/net/ethernet/hp/hp100.c
@@ -2634,7 +2634,7 @@ static int hp100_login_to_vg_hub(struct net_device *dev, u_short force_relogin)
2634 /* Wait for link to drop */ 2634 /* Wait for link to drop */
2635 time = jiffies + (HZ / 10); 2635 time = jiffies + (HZ / 10);
2636 do { 2636 do {
2637 if (~(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST)) 2637 if (!(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST))
2638 break; 2638 break;
2639 if (!in_interrupt()) 2639 if (!in_interrupt())
2640 schedule_timeout_interruptible(1); 2640 schedule_timeout_interruptible(1);
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 354c0982847b..372664686309 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -494,9 +494,6 @@ static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_s
494 case 16384: 494 case 16384:
495 ret |= EMAC_MR1_RFS_16K; 495 ret |= EMAC_MR1_RFS_16K;
496 break; 496 break;
497 case 8192:
498 ret |= EMAC4_MR1_RFS_8K;
499 break;
500 case 4096: 497 case 4096:
501 ret |= EMAC_MR1_RFS_4K; 498 ret |= EMAC_MR1_RFS_4K;
502 break; 499 break;
@@ -537,6 +534,9 @@ static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_
537 case 16384: 534 case 16384:
538 ret |= EMAC4_MR1_RFS_16K; 535 ret |= EMAC4_MR1_RFS_16K;
539 break; 536 break;
537 case 8192:
538 ret |= EMAC4_MR1_RFS_8K;
539 break;
540 case 4096: 540 case 4096:
541 ret |= EMAC4_MR1_RFS_4K; 541 ret |= EMAC4_MR1_RFS_4K;
542 break; 542 break;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index dafdd4ade705..4f0daf67b18d 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1823,11 +1823,17 @@ static int do_reset(struct ibmvnic_adapter *adapter,
1823 adapter->map_id = 1; 1823 adapter->map_id = 1;
1824 release_rx_pools(adapter); 1824 release_rx_pools(adapter);
1825 release_tx_pools(adapter); 1825 release_tx_pools(adapter);
1826 init_rx_pools(netdev); 1826 rc = init_rx_pools(netdev);
1827 init_tx_pools(netdev); 1827 if (rc)
1828 return rc;
1829 rc = init_tx_pools(netdev);
1830 if (rc)
1831 return rc;
1828 1832
1829 release_napi(adapter); 1833 release_napi(adapter);
1830 init_napi(adapter); 1834 rc = init_napi(adapter);
1835 if (rc)
1836 return rc;
1831 } else { 1837 } else {
1832 rc = reset_tx_pools(adapter); 1838 rc = reset_tx_pools(adapter);
1833 if (rc) 1839 if (rc)
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index bdb3f8e65ed4..2569a168334c 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -624,14 +624,14 @@ static int e1000_set_ringparam(struct net_device *netdev,
624 adapter->tx_ring = tx_old; 624 adapter->tx_ring = tx_old;
625 e1000_free_all_rx_resources(adapter); 625 e1000_free_all_rx_resources(adapter);
626 e1000_free_all_tx_resources(adapter); 626 e1000_free_all_tx_resources(adapter);
627 kfree(tx_old);
628 kfree(rx_old);
629 adapter->rx_ring = rxdr; 627 adapter->rx_ring = rxdr;
630 adapter->tx_ring = txdr; 628 adapter->tx_ring = txdr;
631 err = e1000_up(adapter); 629 err = e1000_up(adapter);
632 if (err) 630 if (err)
633 goto err_setup; 631 goto err_setup;
634 } 632 }
633 kfree(tx_old);
634 kfree(rx_old);
635 635
636 clear_bit(__E1000_RESETTING, &adapter->flags); 636 clear_bit(__E1000_RESETTING, &adapter->flags);
637 return 0; 637 return 0;
@@ -644,7 +644,8 @@ err_setup_rx:
644err_alloc_rx: 644err_alloc_rx:
645 kfree(txdr); 645 kfree(txdr);
646err_alloc_tx: 646err_alloc_tx:
647 e1000_up(adapter); 647 if (netif_running(adapter->netdev))
648 e1000_up(adapter);
648err_setup: 649err_setup:
649 clear_bit(__E1000_RESETTING, &adapter->flags); 650 clear_bit(__E1000_RESETTING, &adapter->flags);
650 return err; 651 return err;
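The e1000_set_ringparam fix is an ordering change: the old rings are now freed only after the new rings are installed and e1000_up() has succeeded, and the error path only restarts the interface if it was actually running. A small sketch of the build-new-then-free-old shape, under simplified assumptions (no rollback path, illustrative names):

#include <errno.h>
#include <stdlib.h>

struct ring { size_t count; void *descs; };

static int ring_alloc(struct ring *r, size_t count)
{
	r->count = count;
	r->descs = calloc(count, 64);	/* 64 bytes per descriptor, say */
	return r->descs ? 0 : -ENOMEM;
}

/* Resize by building the new ring first; the old ring is freed only
 * once the new one is live, so failure leaves the old ring intact. */
static int ring_resize(struct ring **live, size_t new_count)
{
	struct ring *old = *live;
	struct ring *newr = malloc(sizeof(*newr));

	if (!newr)
		return -ENOMEM;
	if (ring_alloc(newr, new_count)) {
		free(newr);
		return -ENOMEM;	/* *live still points at a valid ring */
	}
	*live = newr;		/* install replacement (the "e1000_up" point) */
	free(old->descs);	/* only now is the old ring released */
	free(old);
	return 0;
}

int main(void)
{
	struct ring *r = malloc(sizeof(*r));

	if (!r || ring_alloc(r, 256))
		return 1;
	return ring_resize(&r, 512) ? 1 : 0;
}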
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index abcd096ede14..5ff6caa83948 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -2013,7 +2013,7 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data)
2013 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) 2013 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
2014 i40e_add_stat_strings(&data, i40e_gstrings_pfc_stats, i); 2014 i40e_add_stat_strings(&data, i40e_gstrings_pfc_stats, i);
2015 2015
2016 WARN_ONCE(p - data != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN, 2016 WARN_ONCE(data - p != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN,
2017 "stat strings count mismatch!"); 2017 "stat strings count mismatch!");
2018} 2018}
2019 2019
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index f2c622e78802..ac685ad4d877 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -5122,15 +5122,17 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
5122 u8 *bw_share) 5122 u8 *bw_share)
5123{ 5123{
5124 struct i40e_aqc_configure_vsi_tc_bw_data bw_data; 5124 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
5125 struct i40e_pf *pf = vsi->back;
5125 i40e_status ret; 5126 i40e_status ret;
5126 int i; 5127 int i;
5127 5128
5128 if (vsi->back->flags & I40E_FLAG_TC_MQPRIO) 5129 /* There is no need to reset BW when mqprio mode is on. */
5130 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5129 return 0; 5131 return 0;
5130 if (!vsi->mqprio_qopt.qopt.hw) { 5132 if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
5131 ret = i40e_set_bw_limit(vsi, vsi->seid, 0); 5133 ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
5132 if (ret) 5134 if (ret)
5133 dev_info(&vsi->back->pdev->dev, 5135 dev_info(&pf->pdev->dev,
5134 "Failed to reset tx rate for vsi->seid %u\n", 5136 "Failed to reset tx rate for vsi->seid %u\n",
5135 vsi->seid); 5137 vsi->seid);
5136 return ret; 5138 return ret;
@@ -5139,12 +5141,11 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
5139 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) 5141 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5140 bw_data.tc_bw_credits[i] = bw_share[i]; 5142 bw_data.tc_bw_credits[i] = bw_share[i];
5141 5143
5142 ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data, 5144 ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL);
5143 NULL);
5144 if (ret) { 5145 if (ret) {
5145 dev_info(&vsi->back->pdev->dev, 5146 dev_info(&pf->pdev->dev,
5146 "AQ command Config VSI BW allocation per TC failed = %d\n", 5147 "AQ command Config VSI BW allocation per TC failed = %d\n",
5147 vsi->back->hw.aq.asq_last_status); 5148 pf->hw.aq.asq_last_status);
5148 return -EINVAL; 5149 return -EINVAL;
5149 } 5150 }
5150 5151
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index d8b5fff581e7..868f4a1d0f72 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -89,6 +89,13 @@ extern const char ice_drv_ver[];
89#define ice_for_each_rxq(vsi, i) \ 89#define ice_for_each_rxq(vsi, i) \
90 for ((i) = 0; (i) < (vsi)->num_rxq; (i)++) 90 for ((i) = 0; (i) < (vsi)->num_rxq; (i)++)
91 91
92/* Macros for each allocated tx/rx ring whether used or not in a VSI */
93#define ice_for_each_alloc_txq(vsi, i) \
94 for ((i) = 0; (i) < (vsi)->alloc_txq; (i)++)
95
96#define ice_for_each_alloc_rxq(vsi, i) \
97 for ((i) = 0; (i) < (vsi)->alloc_rxq; (i)++)
98
92struct ice_tc_info { 99struct ice_tc_info {
93 u16 qoffset; 100 u16 qoffset;
94 u16 qcount; 101 u16 qcount;
@@ -189,9 +196,9 @@ struct ice_vsi {
189 struct list_head tmp_sync_list; /* MAC filters to be synced */ 196 struct list_head tmp_sync_list; /* MAC filters to be synced */
190 struct list_head tmp_unsync_list; /* MAC filters to be unsynced */ 197 struct list_head tmp_unsync_list; /* MAC filters to be unsynced */
191 198
192 bool irqs_ready; 199 u8 irqs_ready;
193 bool current_isup; /* Sync 'link up' logging */ 200 u8 current_isup; /* Sync 'link up' logging */
194 bool stat_offsets_loaded; 201 u8 stat_offsets_loaded;
195 202
196 /* queue information */ 203 /* queue information */
197 u8 tx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */ 204 u8 tx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
@@ -262,7 +269,7 @@ struct ice_pf {
262 struct ice_hw_port_stats stats; 269 struct ice_hw_port_stats stats;
263 struct ice_hw_port_stats stats_prev; 270 struct ice_hw_port_stats stats_prev;
264 struct ice_hw hw; 271 struct ice_hw hw;
265 bool stat_prev_loaded; /* has previous stats been loaded */ 272 u8 stat_prev_loaded; /* has previous stats been loaded */
266 char int_name[ICE_INT_NAME_STR_LEN]; 273 char int_name[ICE_INT_NAME_STR_LEN];
267}; 274};
268 275
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 7541ec2270b3..a0614f472658 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -329,19 +329,19 @@ struct ice_aqc_vsi_props {
329 /* VLAN section */ 329 /* VLAN section */
330 __le16 pvid; /* VLANS include priority bits */ 330 __le16 pvid; /* VLANS include priority bits */
331 u8 pvlan_reserved[2]; 331 u8 pvlan_reserved[2];
332 u8 port_vlan_flags; 332 u8 vlan_flags;
333#define ICE_AQ_VSI_PVLAN_MODE_S 0 333#define ICE_AQ_VSI_VLAN_MODE_S 0
334#define ICE_AQ_VSI_PVLAN_MODE_M (0x3 << ICE_AQ_VSI_PVLAN_MODE_S) 334#define ICE_AQ_VSI_VLAN_MODE_M (0x3 << ICE_AQ_VSI_VLAN_MODE_S)
335#define ICE_AQ_VSI_PVLAN_MODE_UNTAGGED 0x1 335#define ICE_AQ_VSI_VLAN_MODE_UNTAGGED 0x1
336#define ICE_AQ_VSI_PVLAN_MODE_TAGGED 0x2 336#define ICE_AQ_VSI_VLAN_MODE_TAGGED 0x2
337#define ICE_AQ_VSI_PVLAN_MODE_ALL 0x3 337#define ICE_AQ_VSI_VLAN_MODE_ALL 0x3
338#define ICE_AQ_VSI_PVLAN_INSERT_PVID BIT(2) 338#define ICE_AQ_VSI_PVLAN_INSERT_PVID BIT(2)
339#define ICE_AQ_VSI_PVLAN_EMOD_S 3 339#define ICE_AQ_VSI_VLAN_EMOD_S 3
340#define ICE_AQ_VSI_PVLAN_EMOD_M (0x3 << ICE_AQ_VSI_PVLAN_EMOD_S) 340#define ICE_AQ_VSI_VLAN_EMOD_M (0x3 << ICE_AQ_VSI_VLAN_EMOD_S)
341#define ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH (0x0 << ICE_AQ_VSI_PVLAN_EMOD_S) 341#define ICE_AQ_VSI_VLAN_EMOD_STR_BOTH (0x0 << ICE_AQ_VSI_VLAN_EMOD_S)
342#define ICE_AQ_VSI_PVLAN_EMOD_STR_UP (0x1 << ICE_AQ_VSI_PVLAN_EMOD_S) 342#define ICE_AQ_VSI_VLAN_EMOD_STR_UP (0x1 << ICE_AQ_VSI_VLAN_EMOD_S)
343#define ICE_AQ_VSI_PVLAN_EMOD_STR (0x2 << ICE_AQ_VSI_PVLAN_EMOD_S) 343#define ICE_AQ_VSI_VLAN_EMOD_STR (0x2 << ICE_AQ_VSI_VLAN_EMOD_S)
344#define ICE_AQ_VSI_PVLAN_EMOD_NOTHING (0x3 << ICE_AQ_VSI_PVLAN_EMOD_S) 344#define ICE_AQ_VSI_VLAN_EMOD_NOTHING (0x3 << ICE_AQ_VSI_VLAN_EMOD_S)
345 u8 pvlan_reserved2[3]; 345 u8 pvlan_reserved2[3];
346 /* ingress egress up sections */ 346 /* ingress egress up sections */
347 __le32 ingress_table; /* bitmap, 3 bits per up */ 347 __le32 ingress_table; /* bitmap, 3 bits per up */
@@ -594,6 +594,7 @@ struct ice_sw_rule_lg_act {
594#define ICE_LG_ACT_GENERIC_OFFSET_M (0x7 << ICE_LG_ACT_GENERIC_OFFSET_S) 594#define ICE_LG_ACT_GENERIC_OFFSET_M (0x7 << ICE_LG_ACT_GENERIC_OFFSET_S)
595#define ICE_LG_ACT_GENERIC_PRIORITY_S 22 595#define ICE_LG_ACT_GENERIC_PRIORITY_S 22
596#define ICE_LG_ACT_GENERIC_PRIORITY_M (0x7 << ICE_LG_ACT_GENERIC_PRIORITY_S) 596#define ICE_LG_ACT_GENERIC_PRIORITY_M (0x7 << ICE_LG_ACT_GENERIC_PRIORITY_S)
597#define ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX 7
597 598
598 /* Action = 7 - Set Stat count */ 599 /* Action = 7 - Set Stat count */
599#define ICE_LG_ACT_STAT_COUNT 0x7 600#define ICE_LG_ACT_STAT_COUNT 0x7
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 71d032cc5fa7..661beea6af79 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -45,6 +45,9 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw)
45/** 45/**
46 * ice_clear_pf_cfg - Clear PF configuration 46 * ice_clear_pf_cfg - Clear PF configuration
47 * @hw: pointer to the hardware structure 47 * @hw: pointer to the hardware structure
48 *
49 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
50 * configuration, flow director filters, etc.).
48 */ 51 */
49enum ice_status ice_clear_pf_cfg(struct ice_hw *hw) 52enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
50{ 53{
@@ -1483,7 +1486,7 @@ enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
1483 struct ice_phy_info *phy_info; 1486 struct ice_phy_info *phy_info;
1484 enum ice_status status = 0; 1487 enum ice_status status = 0;
1485 1488
1486 if (!pi) 1489 if (!pi || !link_up)
1487 return ICE_ERR_PARAM; 1490 return ICE_ERR_PARAM;
1488 1491
1489 phy_info = &pi->phy; 1492 phy_info = &pi->phy;
@@ -1619,20 +1622,23 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
1619 } 1622 }
1620 1623
1621 /* LUT size is only valid for Global and PF table types */ 1624 /* LUT size is only valid for Global and PF table types */
1622 if (lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128) { 1625 switch (lut_size) {
1623 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG << 1626 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
1624 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & 1627 break;
1625 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; 1628 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
1626 } else if (lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512) {
1627 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG << 1629 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
1628 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & 1630 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
1629 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; 1631 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
1630 } else if ((lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K) && 1632 break;
1631 (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF)) { 1633 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
1632 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG << 1634 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
1633 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & 1635 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
1634 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; 1636 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
1635 } else { 1637 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
1638 break;
1639 }
1640 /* fall-through */
1641 default:
1636 status = ICE_ERR_PARAM; 1642 status = ICE_ERR_PARAM;
1637 goto ice_aq_get_set_rss_lut_exit; 1643 goto ice_aq_get_set_rss_lut_exit;
1638 } 1644 }
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index 7c511f144ed6..62be72fdc8f3 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -597,10 +597,14 @@ static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
597 return 0; 597 return 0;
598 598
599init_ctrlq_free_rq: 599init_ctrlq_free_rq:
600 ice_shutdown_rq(hw, cq); 600 if (cq->rq.head) {
601 ice_shutdown_sq(hw, cq); 601 ice_shutdown_rq(hw, cq);
602 mutex_destroy(&cq->sq_lock); 602 mutex_destroy(&cq->rq_lock);
603 mutex_destroy(&cq->rq_lock); 603 }
604 if (cq->sq.head) {
605 ice_shutdown_sq(hw, cq);
606 mutex_destroy(&cq->sq_lock);
607 }
604 return status; 608 return status;
605} 609}
606 610
@@ -706,10 +710,14 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
706 return; 710 return;
707 } 711 }
708 712
709 ice_shutdown_sq(hw, cq); 713 if (cq->sq.head) {
710 ice_shutdown_rq(hw, cq); 714 ice_shutdown_sq(hw, cq);
711 mutex_destroy(&cq->sq_lock); 715 mutex_destroy(&cq->sq_lock);
712 mutex_destroy(&cq->rq_lock); 716 }
717 if (cq->rq.head) {
718 ice_shutdown_rq(hw, cq);
719 mutex_destroy(&cq->rq_lock);
720 }
713} 721}
714 722
715/** 723/**
@@ -1057,8 +1065,11 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
1057 1065
1058clean_rq_elem_out: 1066clean_rq_elem_out:
1059 /* Set pending if needed, unlock and return */ 1067 /* Set pending if needed, unlock and return */
1060 if (pending) 1068 if (pending) {
1069 /* re-read HW head to calculate actual pending messages */
1070 ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
1061 *pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc)); 1071 *pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
1072 }
1062clean_rq_elem_err: 1073clean_rq_elem_err:
1063 mutex_unlock(&cq->rq_lock); 1074 mutex_unlock(&cq->rq_lock);
1064 1075
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 1db304c01d10..c71a9b528d6d 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -26,7 +26,7 @@ static int ice_q_stats_len(struct net_device *netdev)
26{ 26{
27 struct ice_netdev_priv *np = netdev_priv(netdev); 27 struct ice_netdev_priv *np = netdev_priv(netdev);
28 28
29 return ((np->vsi->num_txq + np->vsi->num_rxq) * 29 return ((np->vsi->alloc_txq + np->vsi->alloc_rxq) *
30 (sizeof(struct ice_q_stats) / sizeof(u64))); 30 (sizeof(struct ice_q_stats) / sizeof(u64)));
31} 31}
32 32
@@ -218,7 +218,7 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
218 p += ETH_GSTRING_LEN; 218 p += ETH_GSTRING_LEN;
219 } 219 }
220 220
221 ice_for_each_txq(vsi, i) { 221 ice_for_each_alloc_txq(vsi, i) {
222 snprintf(p, ETH_GSTRING_LEN, 222 snprintf(p, ETH_GSTRING_LEN,
223 "tx-queue-%u.tx_packets", i); 223 "tx-queue-%u.tx_packets", i);
224 p += ETH_GSTRING_LEN; 224 p += ETH_GSTRING_LEN;
@@ -226,7 +226,7 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
226 p += ETH_GSTRING_LEN; 226 p += ETH_GSTRING_LEN;
227 } 227 }
228 228
229 ice_for_each_rxq(vsi, i) { 229 ice_for_each_alloc_rxq(vsi, i) {
230 snprintf(p, ETH_GSTRING_LEN, 230 snprintf(p, ETH_GSTRING_LEN,
231 "rx-queue-%u.rx_packets", i); 231 "rx-queue-%u.rx_packets", i);
232 p += ETH_GSTRING_LEN; 232 p += ETH_GSTRING_LEN;
@@ -253,6 +253,24 @@ static int ice_get_sset_count(struct net_device *netdev, int sset)
253{ 253{
254 switch (sset) { 254 switch (sset) {
255 case ETH_SS_STATS: 255 case ETH_SS_STATS:
256 /* The number (and order) of strings reported *must* remain
257 * constant for a given netdevice. This function must not
258 * report a different number based on run time parameters
259 * (such as the number of queues in use, or the setting of
260 * a private ethtool flag). This is due to the nature of the
261 * ethtool stats API.
262 *
263 * User space programs such as ethtool must make 3 separate
264 * ioctl requests, one for size, one for the strings, and
265 * finally one for the stats. Since these cross into
266 * user space, changes to the number or size could result in
267 * undefined memory access or incorrect string<->value
268 * correlations for statistics.
269 *
270 * Even if it appears to be safe, changes to the size or
271 * order of strings will suffer from race conditions and are
272 * not safe.
273 */
256 return ICE_ALL_STATS_LEN(netdev); 274 return ICE_ALL_STATS_LEN(netdev);
257 default: 275 default:
258 return -EOPNOTSUPP; 276 return -EOPNOTSUPP;
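The comment added to ice_get_sset_count() is the contract that drives the rest of this patch: the string set is sized by alloc_txq/alloc_rxq, which is constant for the netdev's lifetime, and ice_get_ethtool_stats() zero-fills slots whose rings are not currently allocated so the three ethtool ioctls (count, strings, values) always agree. A sketch of that fixed-layout reporting:

#include <stdint.h>
#include <stdio.h>

#define ALLOC_TXQ 4	/* fixed at allocation time, never shrinks */

struct ring_stats { uint64_t pkts, bytes; };

/* Rings may come and go at runtime, but the reported layout may not:
 * a missing ring still occupies its two slots, filled with zeros. */
static int fill_stats(struct ring_stats *rings[ALLOC_TXQ], uint64_t *data)
{
	int i, n = 0;

	for (i = 0; i < ALLOC_TXQ; i++) {
		if (rings[i]) {
			data[n++] = rings[i]->pkts;
			data[n++] = rings[i]->bytes;
		} else {
			data[n++] = 0;
			data[n++] = 0;
		}
	}
	return n;	/* always 2 * ALLOC_TXQ, matching get_sset_count */
}

int main(void)
{
	struct ring_stats q0 = { 10, 6400 };
	struct ring_stats *rings[ALLOC_TXQ] = { &q0, NULL, NULL, NULL };
	uint64_t data[2 * ALLOC_TXQ];
	int i, n = fill_stats(rings, data);

	for (i = 0; i < n; i++)
		printf("%llu\n", (unsigned long long)data[i]);
	return 0;
}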
@@ -280,18 +298,26 @@ ice_get_ethtool_stats(struct net_device *netdev,
280 /* populate per queue stats */ 298 /* populate per queue stats */
281 rcu_read_lock(); 299 rcu_read_lock();
282 300
283 ice_for_each_txq(vsi, j) { 301 ice_for_each_alloc_txq(vsi, j) {
284 ring = READ_ONCE(vsi->tx_rings[j]); 302 ring = READ_ONCE(vsi->tx_rings[j]);
285 if (!ring) 303 if (ring) {
286 continue; 304 data[i++] = ring->stats.pkts;
287 data[i++] = ring->stats.pkts; 305 data[i++] = ring->stats.bytes;
288 data[i++] = ring->stats.bytes; 306 } else {
307 data[i++] = 0;
308 data[i++] = 0;
309 }
289 } 310 }
290 311
291 ice_for_each_rxq(vsi, j) { 312 ice_for_each_alloc_rxq(vsi, j) {
292 ring = READ_ONCE(vsi->rx_rings[j]); 313 ring = READ_ONCE(vsi->rx_rings[j]);
293 data[i++] = ring->stats.pkts; 314 if (ring) {
294 data[i++] = ring->stats.bytes; 315 data[i++] = ring->stats.pkts;
316 data[i++] = ring->stats.bytes;
317 } else {
318 data[i++] = 0;
319 data[i++] = 0;
320 }
295 } 321 }
296 322
297 rcu_read_unlock(); 323 rcu_read_unlock();
@@ -519,7 +545,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
519 goto done; 545 goto done;
520 } 546 }
521 547
522 for (i = 0; i < vsi->num_txq; i++) { 548 for (i = 0; i < vsi->alloc_txq; i++) {
523 /* clone ring and setup updated count */ 549 /* clone ring and setup updated count */
524 tx_rings[i] = *vsi->tx_rings[i]; 550 tx_rings[i] = *vsi->tx_rings[i];
525 tx_rings[i].count = new_tx_cnt; 551 tx_rings[i].count = new_tx_cnt;
@@ -551,7 +577,7 @@ process_rx:
551 goto done; 577 goto done;
552 } 578 }
553 579
554 for (i = 0; i < vsi->num_rxq; i++) { 580 for (i = 0; i < vsi->alloc_rxq; i++) {
555 /* clone ring and setup updated count */ 581 /* clone ring and setup updated count */
556 rx_rings[i] = *vsi->rx_rings[i]; 582 rx_rings[i] = *vsi->rx_rings[i];
557 rx_rings[i].count = new_rx_cnt; 583 rx_rings[i].count = new_rx_cnt;
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 499904874b3f..6076fc87df9d 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -121,10 +121,6 @@
121#define PFINT_FW_CTL_CAUSE_ENA_S 30 121#define PFINT_FW_CTL_CAUSE_ENA_S 30
122#define PFINT_FW_CTL_CAUSE_ENA_M BIT(PFINT_FW_CTL_CAUSE_ENA_S) 122#define PFINT_FW_CTL_CAUSE_ENA_M BIT(PFINT_FW_CTL_CAUSE_ENA_S)
123#define PFINT_OICR 0x0016CA00 123#define PFINT_OICR 0x0016CA00
124#define PFINT_OICR_HLP_RDY_S 14
125#define PFINT_OICR_HLP_RDY_M BIT(PFINT_OICR_HLP_RDY_S)
126#define PFINT_OICR_CPM_RDY_S 15
127#define PFINT_OICR_CPM_RDY_M BIT(PFINT_OICR_CPM_RDY_S)
128#define PFINT_OICR_ECC_ERR_S 16 124#define PFINT_OICR_ECC_ERR_S 16
129#define PFINT_OICR_ECC_ERR_M BIT(PFINT_OICR_ECC_ERR_S) 125#define PFINT_OICR_ECC_ERR_M BIT(PFINT_OICR_ECC_ERR_S)
130#define PFINT_OICR_MAL_DETECT_S 19 126#define PFINT_OICR_MAL_DETECT_S 19
@@ -133,10 +129,6 @@
133#define PFINT_OICR_GRST_M BIT(PFINT_OICR_GRST_S) 129#define PFINT_OICR_GRST_M BIT(PFINT_OICR_GRST_S)
134#define PFINT_OICR_PCI_EXCEPTION_S 21 130#define PFINT_OICR_PCI_EXCEPTION_S 21
135#define PFINT_OICR_PCI_EXCEPTION_M BIT(PFINT_OICR_PCI_EXCEPTION_S) 131#define PFINT_OICR_PCI_EXCEPTION_M BIT(PFINT_OICR_PCI_EXCEPTION_S)
136#define PFINT_OICR_GPIO_S 22
137#define PFINT_OICR_GPIO_M BIT(PFINT_OICR_GPIO_S)
138#define PFINT_OICR_STORM_DETECT_S 24
139#define PFINT_OICR_STORM_DETECT_M BIT(PFINT_OICR_STORM_DETECT_S)
140#define PFINT_OICR_HMC_ERR_S 26 132#define PFINT_OICR_HMC_ERR_S 26
141#define PFINT_OICR_HMC_ERR_M BIT(PFINT_OICR_HMC_ERR_S) 133#define PFINT_OICR_HMC_ERR_M BIT(PFINT_OICR_HMC_ERR_S)
142#define PFINT_OICR_PE_CRITERR_S 28 134#define PFINT_OICR_PE_CRITERR_S 28
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index d23a91665b46..068dbc740b76 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -265,6 +265,7 @@ enum ice_rx_flex_desc_status_error_0_bits {
265struct ice_rlan_ctx { 265struct ice_rlan_ctx {
266 u16 head; 266 u16 head;
267 u16 cpuid; /* bigger than needed, see above for reason */ 267 u16 cpuid; /* bigger than needed, see above for reason */
268#define ICE_RLAN_BASE_S 7
268 u64 base; 269 u64 base;
269 u16 qlen; 270 u16 qlen;
270#define ICE_RLAN_CTX_DBUF_S 7 271#define ICE_RLAN_CTX_DBUF_S 7
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 5299caf55a7f..f1e80eed2fd6 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -901,7 +901,7 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
901 case ice_aqc_opc_get_link_status: 901 case ice_aqc_opc_get_link_status:
902 if (ice_handle_link_event(pf)) 902 if (ice_handle_link_event(pf))
903 dev_err(&pf->pdev->dev, 903 dev_err(&pf->pdev->dev,
904 "Could not handle link event"); 904 "Could not handle link event\n");
905 break; 905 break;
906 default: 906 default:
907 dev_dbg(&pf->pdev->dev, 907 dev_dbg(&pf->pdev->dev,
@@ -917,13 +917,27 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
917} 917}
918 918
919/** 919/**
920 * ice_ctrlq_pending - check if there is a difference between ntc and ntu
921 * @hw: pointer to hardware info
922 * @cq: control queue information
923 *
924 * returns true if there are pending messages in a queue, false if there aren't
925 */
926static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
927{
928 u16 ntu;
929
930 ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
931 return cq->rq.next_to_clean != ntu;
932}
933
934/**
920 * ice_clean_adminq_subtask - clean the AdminQ rings 935 * ice_clean_adminq_subtask - clean the AdminQ rings
921 * @pf: board private structure 936 * @pf: board private structure
922 */ 937 */
923static void ice_clean_adminq_subtask(struct ice_pf *pf) 938static void ice_clean_adminq_subtask(struct ice_pf *pf)
924{ 939{
925 struct ice_hw *hw = &pf->hw; 940 struct ice_hw *hw = &pf->hw;
926 u32 val;
927 941
928 if (!test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state)) 942 if (!test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
929 return; 943 return;
@@ -933,9 +947,13 @@ static void ice_clean_adminq_subtask(struct ice_pf *pf)
933 947
934 clear_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state); 948 clear_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
935 949
936 /* re-enable Admin queue interrupt causes */ 950 /* There might be a situation where new messages arrive on a control
937 val = rd32(hw, PFINT_FW_CTL); 951 * queue between processing the last message and clearing the
938 wr32(hw, PFINT_FW_CTL, (val | PFINT_FW_CTL_CAUSE_ENA_M)); 952 * EVENT_PENDING bit. So before exiting, check queue head again (using
953 * ice_ctrlq_pending) and process new messages if any.
954 */
955 if (ice_ctrlq_pending(hw, &hw->adminq))
956 __ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);
939 957
940 ice_flush(hw); 958 ice_flush(hw);
941} 959}
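ice_clean_adminq_subtask() now closes a classic lost-event window: a message that lands between the last __ice_clean_ctrlq() pass and clear_bit() would otherwise sit unprocessed until the next interrupt. The fix re-reads the hardware head after clearing the pending flag and drains again if anything slipped in. A compact sketch of the clear-then-recheck pattern, with the hardware head simulated:

#include <stdbool.h>
#include <stdio.h>

static unsigned int hw_head;		/* advanced by "hardware" */
static unsigned int next_to_clean;	/* driver's consumer index */
static bool event_pending;

static bool ctrlq_pending(void)
{
	/* Re-read the live head; comparing against a stale snapshot is
	 * exactly the race this pattern exists to avoid. */
	return next_to_clean != hw_head;
}

static void clean_ctrlq(void)
{
	while (next_to_clean != hw_head)
		printf("processed message %u\n", next_to_clean++);
}

static void clean_adminq_subtask(void)
{
	if (!event_pending)
		return;
	clean_ctrlq();
	event_pending = false;
	/* A message may have arrived between the drain above and the
	 * flag clear; check the head again before going back to sleep. */
	if (ctrlq_pending())
		clean_ctrlq();
}

int main(void)
{
	hw_head = 2;
	event_pending = true;
	clean_adminq_subtask();
	return 0;
}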
@@ -1295,11 +1313,8 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
1295 qcount = numq_tc; 1313 qcount = numq_tc;
1296 } 1314 }
1297 1315
1298 /* find higher power-of-2 of qcount */ 1316 /* find the (rounded up) power-of-2 of qcount */
1299 pow = ilog2(qcount); 1317 pow = order_base_2(qcount);
1300
1301 if (!is_power_of_2(qcount))
1302 pow++;
1303 1318
1304 for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) { 1319 for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
1305 if (!(vsi->tc_cfg.ena_tc & BIT(i))) { 1320 if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
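order_base_2(n) collapses the old two-step "ilog2, then bump if not a power of two" into one call: it returns the exponent of the smallest power of two >= n, i.e. ceil(log2(n)). A tiny demonstration of the equivalence, with both helpers reimplemented locally rather than taken from the kernel:

#include <stdio.h>

/* ilog2(n): index of the highest set bit, floor(log2(n)). */
static unsigned int ilog2_u(unsigned int n)
{
	unsigned int pow = 0;

	while (n >>= 1)
		pow++;
	return pow;
}

/* order_base_2(n): smallest p with (1 << p) >= n, ceil(log2(n)). */
static unsigned int order_base_2_u(unsigned int n)
{
	return n <= 1 ? 0 : ilog2_u(n - 1) + 1;
}

int main(void)
{
	unsigned int qcount;

	for (qcount = 1; qcount <= 9; qcount++) {
		/* The replaced code: ilog2, +1 when not a power of two. */
		unsigned int pow = ilog2_u(qcount);

		if (qcount & (qcount - 1))
			pow++;
		printf("qcount=%u old=%u order_base_2=%u\n",
		       qcount, pow, order_base_2_u(qcount));
	}
	return 0;
}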
@@ -1352,14 +1367,15 @@ static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
1352 ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE; 1367 ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
1353 /* Traffic from VSI can be sent to LAN */ 1368 /* Traffic from VSI can be sent to LAN */
1354 ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA; 1369 ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
1355 /* Allow all packets untagged/tagged */ 1370
1356 ctxt->info.port_vlan_flags = ((ICE_AQ_VSI_PVLAN_MODE_ALL & 1371 /* By default bits 3 and 4 in vlan_flags are 0's which results in legacy
1357 ICE_AQ_VSI_PVLAN_MODE_M) >> 1372 * behavior (show VLAN, DEI, and UP) in descriptor. Also, allow all
1358 ICE_AQ_VSI_PVLAN_MODE_S); 1373 * packets untagged/tagged.
1359 /* Show VLAN/UP from packets in Rx descriptors */ 1374 */
1360 ctxt->info.port_vlan_flags |= ((ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH & 1375 ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL &
1361 ICE_AQ_VSI_PVLAN_EMOD_M) >> 1376 ICE_AQ_VSI_VLAN_MODE_M) >>
1362 ICE_AQ_VSI_PVLAN_EMOD_S); 1377 ICE_AQ_VSI_VLAN_MODE_S);
1378
1363 /* Have 1:1 UP mapping for both ingress/egress tables */ 1379 /* Have 1:1 UP mapping for both ingress/egress tables */
1364 table |= ICE_UP_TABLE_TRANSLATE(0, 0); 1380 table |= ICE_UP_TABLE_TRANSLATE(0, 0);
1365 table |= ICE_UP_TABLE_TRANSLATE(1, 1); 1381 table |= ICE_UP_TABLE_TRANSLATE(1, 1);
@@ -1688,15 +1704,12 @@ static void ice_ena_misc_vector(struct ice_pf *pf)
1688 wr32(hw, PFINT_OICR_ENA, 0); /* disable all */ 1704 wr32(hw, PFINT_OICR_ENA, 0); /* disable all */
1689 rd32(hw, PFINT_OICR); /* read to clear */ 1705 rd32(hw, PFINT_OICR); /* read to clear */
1690 1706
1691 val = (PFINT_OICR_HLP_RDY_M | 1707 val = (PFINT_OICR_ECC_ERR_M |
1692 PFINT_OICR_CPM_RDY_M |
1693 PFINT_OICR_ECC_ERR_M |
1694 PFINT_OICR_MAL_DETECT_M | 1708 PFINT_OICR_MAL_DETECT_M |
1695 PFINT_OICR_GRST_M | 1709 PFINT_OICR_GRST_M |
1696 PFINT_OICR_PCI_EXCEPTION_M | 1710 PFINT_OICR_PCI_EXCEPTION_M |
1697 PFINT_OICR_GPIO_M | 1711 PFINT_OICR_HMC_ERR_M |
1698 PFINT_OICR_STORM_DETECT_M | 1712 PFINT_OICR_PE_CRITERR_M);
1699 PFINT_OICR_HMC_ERR_M);
1700 1713
1701 wr32(hw, PFINT_OICR_ENA, val); 1714 wr32(hw, PFINT_OICR_ENA, val);
1702 1715
@@ -2058,15 +2071,13 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
2058skip_req_irq: 2071skip_req_irq:
2059 ice_ena_misc_vector(pf); 2072 ice_ena_misc_vector(pf);
2060 2073
2061 val = (pf->oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) | 2074 val = ((pf->oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
2062 (ICE_RX_ITR & PFINT_OICR_CTL_ITR_INDX_M) | 2075 PFINT_OICR_CTL_CAUSE_ENA_M);
2063 PFINT_OICR_CTL_CAUSE_ENA_M;
2064 wr32(hw, PFINT_OICR_CTL, val); 2076 wr32(hw, PFINT_OICR_CTL, val);
2065 2077
2066 /* This enables Admin queue Interrupt causes */ 2078 /* This enables Admin queue Interrupt causes */
2067 val = (pf->oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) | 2079 val = ((pf->oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) |
2068 (ICE_RX_ITR & PFINT_FW_CTL_ITR_INDX_M) | 2080 PFINT_FW_CTL_CAUSE_ENA_M);
2069 PFINT_FW_CTL_CAUSE_ENA_M;
2070 wr32(hw, PFINT_FW_CTL, val); 2081 wr32(hw, PFINT_FW_CTL, val);
2071 2082
2072 itr_gran = hw->itr_gran_200; 2083 itr_gran = hw->itr_gran_200;
@@ -3246,8 +3257,10 @@ static void ice_clear_interrupt_scheme(struct ice_pf *pf)
3246 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) 3257 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
3247 ice_dis_msix(pf); 3258 ice_dis_msix(pf);
3248 3259
3249 devm_kfree(&pf->pdev->dev, pf->irq_tracker); 3260 if (pf->irq_tracker) {
3250 pf->irq_tracker = NULL; 3261 devm_kfree(&pf->pdev->dev, pf->irq_tracker);
3262 pf->irq_tracker = NULL;
3263 }
3251} 3264}
3252 3265
3253/** 3266/**
@@ -3271,7 +3284,7 @@ static int ice_probe(struct pci_dev *pdev,
3271 3284
3272 err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev)); 3285 err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev));
3273 if (err) { 3286 if (err) {
3274 dev_err(&pdev->dev, "I/O map error %d\n", err); 3287 dev_err(&pdev->dev, "BAR0 I/O map error %d\n", err);
3275 return err; 3288 return err;
3276 } 3289 }
3277 3290
@@ -3720,10 +3733,10 @@ static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
3720 enum ice_status status; 3733 enum ice_status status;
3721 3734
3722 /* Here we are configuring the VSI to let the driver add VLAN tags by 3735 /* Here we are configuring the VSI to let the driver add VLAN tags by
3723 * setting port_vlan_flags to ICE_AQ_VSI_PVLAN_MODE_ALL. The actual VLAN 3736 * setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. The actual VLAN tag
3724 * tag insertion happens in the Tx hot path, in ice_tx_map. 3737 * insertion happens in the Tx hot path, in ice_tx_map.
3725 */ 3738 */
3726 ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_MODE_ALL; 3739 ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
3727 3740
3728 ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); 3741 ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
3729 ctxt.vsi_num = vsi->vsi_num; 3742 ctxt.vsi_num = vsi->vsi_num;
@@ -3735,7 +3748,7 @@ static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
3735 return -EIO; 3748 return -EIO;
3736 } 3749 }
3737 3750
3738 vsi->info.port_vlan_flags = ctxt.info.port_vlan_flags; 3751 vsi->info.vlan_flags = ctxt.info.vlan_flags;
3739 return 0; 3752 return 0;
3740} 3753}
3741 3754
@@ -3757,12 +3770,15 @@ static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
3757 */ 3770 */
3758 if (ena) { 3771 if (ena) {
3759 /* Strip VLAN tag from Rx packet and put it in the desc */ 3772 /* Strip VLAN tag from Rx packet and put it in the desc */
3760 ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH; 3773 ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
3761 } else { 3774 } else {
3762 /* Disable stripping. Leave tag in packet */ 3775 /* Disable stripping. Leave tag in packet */
3763 ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_EMOD_NOTHING; 3776 ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
3764 } 3777 }
3765 3778
3779 /* Allow all packets untagged/tagged */
3780 ctxt.info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;
3781
3766 ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); 3782 ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
3767 ctxt.vsi_num = vsi->vsi_num; 3783 ctxt.vsi_num = vsi->vsi_num;
3768 3784
@@ -3773,7 +3789,7 @@ static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
3773 return -EIO; 3789 return -EIO;
3774 } 3790 }
3775 3791
3776 vsi->info.port_vlan_flags = ctxt.info.port_vlan_flags; 3792 vsi->info.vlan_flags = ctxt.info.vlan_flags;
3777 return 0; 3793 return 0;
3778} 3794}
3779 3795
@@ -3986,7 +4002,7 @@ static int ice_setup_rx_ctx(struct ice_ring *ring)
3986 /* clear the context structure first */ 4002 /* clear the context structure first */
3987 memset(&rlan_ctx, 0, sizeof(rlan_ctx)); 4003 memset(&rlan_ctx, 0, sizeof(rlan_ctx));
3988 4004
3989 rlan_ctx.base = ring->dma >> 7; 4005 rlan_ctx.base = ring->dma >> ICE_RLAN_BASE_S;
3990 4006
3991 rlan_ctx.qlen = ring->count; 4007 rlan_ctx.qlen = ring->count;
3992 4008
@@ -4098,11 +4114,12 @@ static int ice_vsi_cfg(struct ice_vsi *vsi)
4098{ 4114{
4099 int err; 4115 int err;
4100 4116
4101 ice_set_rx_mode(vsi->netdev); 4117 if (vsi->netdev) {
4102 4118 ice_set_rx_mode(vsi->netdev);
4103 err = ice_restore_vlan(vsi); 4119 err = ice_restore_vlan(vsi);
4104 if (err) 4120 if (err)
4105 return err; 4121 return err;
4122 }
4106 4123
4107 err = ice_vsi_cfg_txqs(vsi); 4124 err = ice_vsi_cfg_txqs(vsi);
4108 if (!err) 4125 if (!err)
@@ -4868,7 +4885,7 @@ int ice_down(struct ice_vsi *vsi)
4868 */ 4885 */
4869static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi) 4886static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
4870{ 4887{
4871 int i, err; 4888 int i, err = 0;
4872 4889
4873 if (!vsi->num_txq) { 4890 if (!vsi->num_txq) {
4874 dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Tx queues\n", 4891 dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Tx queues\n",
@@ -4893,7 +4910,7 @@ static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
4893 */ 4910 */
4894static int ice_vsi_setup_rx_rings(struct ice_vsi *vsi) 4911static int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
4895{ 4912{
4896 int i, err; 4913 int i, err = 0;
4897 4914
4898 if (!vsi->num_rxq) { 4915 if (!vsi->num_rxq) {
4899 dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Rx queues\n", 4916 dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Rx queues\n",
@@ -5235,7 +5252,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
5235 u8 count = 0; 5252 u8 count = 0;
5236 5253
5237 if (new_mtu == netdev->mtu) { 5254 if (new_mtu == netdev->mtu) {
5238 netdev_warn(netdev, "mtu is already %d\n", netdev->mtu); 5255 netdev_warn(netdev, "mtu is already %u\n", netdev->mtu);
5239 return 0; 5256 return 0;
5240 } 5257 }
5241 5258
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index 92da0a626ce0..295a8cd87fc1 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -131,9 +131,8 @@ ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
131 * 131 *
132 * This function will request NVM ownership. 132 * This function will request NVM ownership.
133 */ 133 */
134static enum 134static enum ice_status
135ice_status ice_acquire_nvm(struct ice_hw *hw, 135ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access)
136 enum ice_aq_res_access_type access)
137{ 136{
138 if (hw->nvm.blank_nvm_mode) 137 if (hw->nvm.blank_nvm_mode)
139 return 0; 138 return 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 2e6c1d92cc88..eeae199469b6 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -1576,8 +1576,7 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc,
1576 return status; 1576 return status;
1577 } 1577 }
1578 1578
1579 if (owner == ICE_SCHED_NODE_OWNER_LAN) 1579 vsi->max_lanq[tc] = new_numqs;
1580 vsi->max_lanq[tc] = new_numqs;
1581 1580
1582 return status; 1581 return status;
1583} 1582}
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 723d15f1e90b..6b7ec2ae5ad6 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -645,14 +645,14 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
645 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M; 645 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
646 lg_act->pdata.lg_act.act[1] = cpu_to_le32(act); 646 lg_act->pdata.lg_act.act[1] = cpu_to_le32(act);
647 647
648 act = (7 << ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_VALUE_M; 648 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
649 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
649 650
650 /* Third action Marker value */ 651 /* Third action Marker value */
651 act |= ICE_LG_ACT_GENERIC; 652 act |= ICE_LG_ACT_GENERIC;
652 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) & 653 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
653 ICE_LG_ACT_GENERIC_VALUE_M; 654 ICE_LG_ACT_GENERIC_VALUE_M;
654 655
655 act |= (0 << ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_VALUE_M;
656 lg_act->pdata.lg_act.act[2] = cpu_to_le32(act); 656 lg_act->pdata.lg_act.act[2] = cpu_to_le32(act);
657 657
658 /* call the fill switch rule to fill the lookup tx rx structure */ 658 /* call the fill switch rule to fill the lookup tx rx structure */
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index 6f4a0d159dbf..9b8ec128ee31 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -17,7 +17,7 @@ struct ice_vsi_ctx {
17 u16 vsis_unallocated; 17 u16 vsis_unallocated;
18 u16 flags; 18 u16 flags;
19 struct ice_aqc_vsi_props info; 19 struct ice_aqc_vsi_props info;
20 bool alloc_from_pool; 20 u8 alloc_from_pool;
21}; 21};
22 22
23enum ice_sw_fwd_act_type { 23enum ice_sw_fwd_act_type {
@@ -94,8 +94,8 @@ struct ice_fltr_info {
94 u8 qgrp_size; 94 u8 qgrp_size;
95 95
96 /* Rule creations populate these indicators basing on the switch type */ 96 /* Rule creations populate these indicators basing on the switch type */
97 bool lb_en; /* Indicate if packet can be looped back */ 97 u8 lb_en; /* Indicate if packet can be looped back */
98 bool lan_en; /* Indicate if packet can be forwarded to the uplink */ 98 u8 lan_en; /* Indicate if packet can be forwarded to the uplink */
99}; 99};
100 100
101/* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list id */ 101/* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list id */
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 567067b650c4..31bc998fe200 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -143,7 +143,7 @@ struct ice_ring {
143 u16 next_to_use; 143 u16 next_to_use;
144 u16 next_to_clean; 144 u16 next_to_clean;
145 145
146 bool ring_active; /* is ring online or not */ 146 u8 ring_active; /* is ring online or not */
147 147
148 /* stats structs */ 148 /* stats structs */
149 struct ice_q_stats stats; 149 struct ice_q_stats stats;
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 99c8a9a71b5e..97c366e0ca59 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -83,7 +83,7 @@ struct ice_link_status {
83 u64 phy_type_low; 83 u64 phy_type_low;
84 u16 max_frame_size; 84 u16 max_frame_size;
85 u16 link_speed; 85 u16 link_speed;
86 bool lse_ena; /* Link Status Event notification */ 86 u8 lse_ena; /* Link Status Event notification */
87 u8 link_info; 87 u8 link_info;
88 u8 an_info; 88 u8 an_info;
89 u8 ext_info; 89 u8 ext_info;
@@ -101,7 +101,7 @@ struct ice_phy_info {
101 struct ice_link_status link_info_old; 101 struct ice_link_status link_info_old;
102 u64 phy_type_low; 102 u64 phy_type_low;
103 enum ice_media_type media_type; 103 enum ice_media_type media_type;
104 bool get_link_info; 104 u8 get_link_info;
105}; 105};
106 106
107/* Common HW capabilities for SW use */ 107/* Common HW capabilities for SW use */
@@ -167,7 +167,7 @@ struct ice_nvm_info {
167 u32 oem_ver; /* OEM version info */ 167 u32 oem_ver; /* OEM version info */
168 u16 sr_words; /* Shadow RAM size in words */ 168 u16 sr_words; /* Shadow RAM size in words */
169 u16 ver; /* NVM package version */ 169 u16 ver; /* NVM package version */
170 bool blank_nvm_mode; /* is NVM empty (no FW present) */ 170 u8 blank_nvm_mode; /* is NVM empty (no FW present) */
171}; 171};
172 172
173/* Max number of port to queue branches w.r.t topology */ 173/* Max number of port to queue branches w.r.t topology */
@@ -181,7 +181,7 @@ struct ice_sched_node {
181 struct ice_aqc_txsched_elem_data info; 181 struct ice_aqc_txsched_elem_data info;
182 u32 agg_id; /* aggregator group id */ 182 u32 agg_id; /* aggregator group id */
183 u16 vsi_id; 183 u16 vsi_id;
184 bool in_use; /* suspended or in use */ 184 u8 in_use; /* suspended or in use */
185 u8 tx_sched_layer; /* Logical Layer (1-9) */ 185 u8 tx_sched_layer; /* Logical Layer (1-9) */
186 u8 num_children; 186 u8 num_children;
187 u8 tc_num; 187 u8 tc_num;
@@ -218,7 +218,7 @@ struct ice_sched_vsi_info {
218struct ice_sched_tx_policy { 218struct ice_sched_tx_policy {
219 u16 max_num_vsis; 219 u16 max_num_vsis;
220 u8 max_num_lan_qs_per_tc[ICE_MAX_TRAFFIC_CLASS]; 220 u8 max_num_lan_qs_per_tc[ICE_MAX_TRAFFIC_CLASS];
221 bool rdma_ena; 221 u8 rdma_ena;
222}; 222};
223 223
224struct ice_port_info { 224struct ice_port_info {
@@ -243,7 +243,7 @@ struct ice_port_info {
 243 struct list_head agg_list; /* lists all aggregators */ 243 struct list_head agg_list; /* lists all aggregators */
244 u8 lport; 244 u8 lport;
245#define ICE_LPORT_MASK 0xff 245#define ICE_LPORT_MASK 0xff
246 bool is_vf; 246 u8 is_vf;
247}; 247};
248 248
249struct ice_switch_info { 249struct ice_switch_info {
@@ -287,7 +287,7 @@ struct ice_hw {
287 u8 max_cgds; 287 u8 max_cgds;
288 u8 sw_entry_point_layer; 288 u8 sw_entry_point_layer;
289 289
290 bool evb_veb; /* true for VEB, false for VEPA */ 290 u8 evb_veb; /* true for VEB, false for VEPA */
291 struct ice_bus_info bus; 291 struct ice_bus_info bus;
292 struct ice_nvm_info nvm; 292 struct ice_nvm_info nvm;
293 struct ice_hw_dev_caps dev_caps; /* device capabilities */ 293 struct ice_hw_dev_caps dev_caps; /* device capabilities */
@@ -318,7 +318,7 @@ struct ice_hw {
318 u8 itr_gran_100; 318 u8 itr_gran_100;
319 u8 itr_gran_50; 319 u8 itr_gran_50;
320 u8 itr_gran_25; 320 u8 itr_gran_25;
321 bool ucast_shared; /* true if VSIs can share unicast addr */ 321 u8 ucast_shared; /* true if VSIs can share unicast addr */
322 322
323}; 323};
324 324
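Note: every bool-to-u8 conversion in these ice headers follows the same reasoning. The C standard leaves the size of _Bool to the ABI, so structures that are shared with firmware or kept tightly packed use fixed-width types instead. A sketch of the idea with a hypothetical structure, not one from the driver:

#include <linux/types.h>

struct example_ctx {
	u16 flags;
	u8  lb_en;	/* 0 or 1, always exactly one byte */
	u8  lan_en;	/* a bool here could change size or padding per ABI */
};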
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index f92f7918112d..5acf3b743876 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1649,7 +1649,7 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
1649 if (hw->phy.type == e1000_phy_m88) 1649 if (hw->phy.type == e1000_phy_m88)
1650 igb_phy_disable_receiver(adapter); 1650 igb_phy_disable_receiver(adapter);
1651 1651
1652 mdelay(500); 1652 msleep(500);
1653 return 0; 1653 return 0;
1654} 1654}
1655 1655
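Note: replacing mdelay(500) with msleep(500) turns a half-second busy-wait into a sleep, freeing the CPU while the PHY loopback settles; this is only valid because the call runs in a sleepable context. A small sketch, with a hypothetical helper name:

#include <linux/delay.h>

static void wait_for_phy_settle(void)
{
	msleep(500);	/* sleeps; the scheduler can run other tasks */
	/* mdelay(500) would spin the CPU for the full 500 ms and is
	 * only appropriate in atomic context where sleeping is illegal.
	 */
}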
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index d03c2f0d7592..a32c576c1e65 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3873,7 +3873,7 @@ static int igb_sw_init(struct igb_adapter *adapter)
3873 3873
3874 adapter->mac_table = kcalloc(hw->mac.rar_entry_count, 3874 adapter->mac_table = kcalloc(hw->mac.rar_entry_count,
3875 sizeof(struct igb_mac_addr), 3875 sizeof(struct igb_mac_addr),
3876 GFP_ATOMIC); 3876 GFP_KERNEL);
3877 if (!adapter->mac_table) 3877 if (!adapter->mac_table)
3878 return -ENOMEM; 3878 return -ENOMEM;
3879 3879
@@ -3883,7 +3883,7 @@ static int igb_sw_init(struct igb_adapter *adapter)
3883 3883
3884 /* Setup and initialize a copy of the hw vlan table array */ 3884 /* Setup and initialize a copy of the hw vlan table array */
3885 adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32), 3885 adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32),
3886 GFP_ATOMIC); 3886 GFP_KERNEL);
3887 if (!adapter->shadow_vfta) 3887 if (!adapter->shadow_vfta)
3888 return -ENOMEM; 3888 return -ENOMEM;
3889 3889
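Note: these kcalloc() calls run from the sleepable igb_sw_init() path, so GFP_KERNEL is the appropriate flag. Unlike GFP_ATOMIC it may sleep and trigger reclaim, which makes the allocation far less likely to fail and leaves the atomic reserves alone. The ixgbe hunks below make the same conversion. A minimal sketch with hypothetical sizes:

#include <linux/slab.h>

static void *alloc_table(size_t nr_entries, size_t entry_size)
{
	/* Process context: prefer GFP_KERNEL, which can sleep and
	 * reclaim; GFP_ATOMIC is for IRQ or other atomic paths only.
	 */
	return kcalloc(nr_entries, entry_size, GFP_KERNEL);
}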
@@ -5816,7 +5816,8 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
5816 5816
5817 if (skb->ip_summed != CHECKSUM_PARTIAL) { 5817 if (skb->ip_summed != CHECKSUM_PARTIAL) {
5818csum_failed: 5818csum_failed:
5819 if (!(first->tx_flags & IGB_TX_FLAGS_VLAN)) 5819 if (!(first->tx_flags & IGB_TX_FLAGS_VLAN) &&
5820 !tx_ring->launchtime_enable)
5820 return; 5821 return;
5821 goto no_csum; 5822 goto no_csum;
5822 } 5823 }
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 43664adf7a3c..d3e72d0f66ef 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -771,14 +771,13 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
771 rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc); 771 rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
772 rxdr->size = ALIGN(rxdr->size, 4096); 772 rxdr->size = ALIGN(rxdr->size, 4096);
773 773
774 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, 774 rxdr->desc = dma_zalloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
775 GFP_KERNEL); 775 GFP_KERNEL);
776 776
777 if (!rxdr->desc) { 777 if (!rxdr->desc) {
778 vfree(rxdr->buffer_info); 778 vfree(rxdr->buffer_info);
779 return -ENOMEM; 779 return -ENOMEM;
780 } 780 }
781 memset(rxdr->desc, 0, rxdr->size);
782 781
783 rxdr->next_to_clean = 0; 782 rxdr->next_to_clean = 0;
784 rxdr->next_to_use = 0; 783 rxdr->next_to_use = 0;
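Note: dma_zalloc_coherent() is the allocate-and-zero variant of dma_alloc_coherent(), so the follow-up memset() becomes redundant and is dropped. A sketch of the combined call, with a hypothetical ring-allocation helper:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

static void *alloc_desc_ring(struct device *dev, size_t size,
			     dma_addr_t *dma)
{
	/* one call replaces dma_alloc_coherent() + memset(desc, 0, size) */
	return dma_zalloc_coherent(dev, size, dma, GFP_KERNEL);
}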
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index 94b3165ff543..ccd852ad62a4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -192,7 +192,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
192 } 192 }
193 193
194 /* alloc the udl from per cpu ddp pool */ 194 /* alloc the udl from per cpu ddp pool */
195 ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp); 195 ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_KERNEL, &ddp->udp);
196 if (!ddp->udl) { 196 if (!ddp->udl) {
 197 e_err(drv, "failed to allocate ddp context\n"); 197 e_err(drv, "failed to allocate ddp context\n");
198 goto out_noddp_unmap; 198 goto out_noddp_unmap;
@@ -760,7 +760,7 @@ int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
760 return 0; 760 return 0;
761 761
762 /* Extra buffer to be shared by all DDPs for HW work around */ 762 /* Extra buffer to be shared by all DDPs for HW work around */
763 buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC); 763 buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_KERNEL);
764 if (!buffer) 764 if (!buffer)
765 return -ENOMEM; 765 return -ENOMEM;
766 766
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 447098005490..9a23d33a47ed 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -6201,7 +6201,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
6201 6201
6202 adapter->mac_table = kcalloc(hw->mac.num_rar_entries, 6202 adapter->mac_table = kcalloc(hw->mac.num_rar_entries,
6203 sizeof(struct ixgbe_mac_addr), 6203 sizeof(struct ixgbe_mac_addr),
6204 GFP_ATOMIC); 6204 GFP_KERNEL);
6205 if (!adapter->mac_table) 6205 if (!adapter->mac_table)
6206 return -ENOMEM; 6206 return -ENOMEM;
6207 6207
@@ -6620,8 +6620,18 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
6620 struct ixgbe_adapter *adapter = netdev_priv(netdev); 6620 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6621 6621
6622 if (adapter->xdp_prog) { 6622 if (adapter->xdp_prog) {
6623 e_warn(probe, "MTU cannot be changed while XDP program is loaded\n"); 6623 int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN +
6624 return -EPERM; 6624 VLAN_HLEN;
6625 int i;
6626
6627 for (i = 0; i < adapter->num_rx_queues; i++) {
6628 struct ixgbe_ring *ring = adapter->rx_ring[i];
6629
6630 if (new_frame_size > ixgbe_rx_bufsz(ring)) {
6631 e_warn(probe, "Requested MTU size is not supported with XDP\n");
6632 return -EINVAL;
6633 }
6634 }
6625 } 6635 }
6626 6636
6627 /* 6637 /*
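Note: rather than rejecting every MTU change while an XDP program is loaded, the driver now computes the resulting frame size, new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, and only refuses when some RX ring's buffer cannot hold it. For example, new_mtu = 3000 gives 3000 + 14 + 4 + 4 = 3022 bytes, which does not fit a 2 KB ring buffer (so -EINVAL) but does fit rings configured with 3 KB buffers.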
@@ -8983,6 +8993,15 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
8983 8993
8984#ifdef CONFIG_IXGBE_DCB 8994#ifdef CONFIG_IXGBE_DCB
8985 if (tc) { 8995 if (tc) {
8996 if (adapter->xdp_prog) {
8997 e_warn(probe, "DCB is not supported with XDP\n");
8998
8999 ixgbe_init_interrupt_scheme(adapter);
9000 if (netif_running(dev))
9001 ixgbe_open(dev);
9002 return -EINVAL;
9003 }
9004
8986 netdev_set_num_tc(dev, tc); 9005 netdev_set_num_tc(dev, tc);
8987 ixgbe_set_prio_tc_map(adapter); 9006 ixgbe_set_prio_tc_map(adapter);
8988 9007
@@ -9171,14 +9190,12 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter,
9171 struct tcf_exts *exts, u64 *action, u8 *queue) 9190 struct tcf_exts *exts, u64 *action, u8 *queue)
9172{ 9191{
9173 const struct tc_action *a; 9192 const struct tc_action *a;
9174 LIST_HEAD(actions); 9193 int i;
9175 9194
9176 if (!tcf_exts_has_actions(exts)) 9195 if (!tcf_exts_has_actions(exts))
9177 return -EINVAL; 9196 return -EINVAL;
9178 9197
9179 tcf_exts_to_list(exts, &actions); 9198 tcf_exts_for_each_action(i, a, exts) {
9180 list_for_each_entry(a, &actions, list) {
9181
9182 /* Drop action */ 9199 /* Drop action */
9183 if (is_tcf_gact_shot(a)) { 9200 if (is_tcf_gact_shot(a)) {
9184 *action = IXGBE_FDIR_DROP_QUEUE; 9201 *action = IXGBE_FDIR_DROP_QUEUE;
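Note: this hunk, together with the mlx5, mlxsw, and nfp hunks later in the series, migrates from tcf_exts_to_list() plus list_for_each_entry() to the new tcf_exts_for_each_action() iterator, which walks the actions by index and removes the intermediate list head. A sketch of the usage pattern, with a hypothetical helper that just counts drop actions:

#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>

static int count_drop_actions(struct tcf_exts *exts)
{
	const struct tc_action *a;
	int i, drops = 0;

	tcf_exts_for_each_action(i, a, exts) {
		if (is_tcf_gact_shot(a))	/* gact "drop" */
			drops++;
	}
	return drops;
}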
@@ -9936,6 +9953,11 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
9936 int tcs = adapter->hw_tcs ? : 1; 9953 int tcs = adapter->hw_tcs ? : 1;
9937 int pool, err; 9954 int pool, err;
9938 9955
9956 if (adapter->xdp_prog) {
9957 e_warn(probe, "L2FW offload is not supported with XDP\n");
9958 return ERR_PTR(-EINVAL);
9959 }
9960
9939 /* The hardware supported by ixgbe only filters on the destination MAC 9961 /* The hardware supported by ixgbe only filters on the destination MAC
9940 * address. In order to avoid issues we only support offloading modes 9962 * address. In order to avoid issues we only support offloading modes
9941 * where the hardware can actually provide the functionality. 9963 * where the hardware can actually provide the functionality.
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 6f59933cdff7..3c6f01c41b78 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -53,6 +53,11 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
53 struct ixgbe_hw *hw = &adapter->hw; 53 struct ixgbe_hw *hw = &adapter->hw;
54 int i; 54 int i;
55 55
56 if (adapter->xdp_prog) {
57 e_warn(probe, "SRIOV is not supported with XDP\n");
58 return -EINVAL;
59 }
60
56 /* Enable VMDq flag so device will be set in VM mode */ 61 /* Enable VMDq flag so device will be set in VM mode */
57 adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED | 62 adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED |
58 IXGBE_FLAG_VMDQ_ENABLED; 63 IXGBE_FLAG_VMDQ_ENABLED;
@@ -688,8 +693,13 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
688static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) 693static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
689{ 694{
690 struct ixgbe_hw *hw = &adapter->hw; 695 struct ixgbe_hw *hw = &adapter->hw;
696 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
691 struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; 697 struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
698 u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
692 u8 num_tcs = adapter->hw_tcs; 699 u8 num_tcs = adapter->hw_tcs;
700 u32 reg_val;
701 u32 queue;
702 u32 word;
693 703
 694 /* remove VLAN filters belonging to this VF */ 704 /* remove VLAN filters belonging to this VF */
695 ixgbe_clear_vf_vlans(adapter, vf); 705 ixgbe_clear_vf_vlans(adapter, vf);
@@ -726,6 +736,27 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
726 736
727 /* reset VF api back to unknown */ 737 /* reset VF api back to unknown */
728 adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10; 738 adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10;
739
740 /* Restart each queue for given VF */
741 for (queue = 0; queue < q_per_pool; queue++) {
742 unsigned int reg_idx = (vf * q_per_pool) + queue;
743
744 reg_val = IXGBE_READ_REG(hw, IXGBE_PVFTXDCTL(reg_idx));
745
746 /* Re-enabling only configured queues */
747 if (reg_val) {
748 reg_val |= IXGBE_TXDCTL_ENABLE;
749 IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(reg_idx), reg_val);
750 reg_val &= ~IXGBE_TXDCTL_ENABLE;
751 IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(reg_idx), reg_val);
752 }
753 }
754
755 /* Clear VF's mailbox memory */
756 for (word = 0; word < IXGBE_VFMAILBOX_SIZE; word++)
757 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf), word, 0);
758
759 IXGBE_WRITE_FLUSH(hw);
729} 760}
730 761
731static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, 762static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 44cfb2021145..41bcbb337e83 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -2518,6 +2518,7 @@ enum {
2518/* Translated register #defines */ 2518/* Translated register #defines */
2519#define IXGBE_PVFTDH(P) (0x06010 + (0x40 * (P))) 2519#define IXGBE_PVFTDH(P) (0x06010 + (0x40 * (P)))
2520#define IXGBE_PVFTDT(P) (0x06018 + (0x40 * (P))) 2520#define IXGBE_PVFTDT(P) (0x06018 + (0x40 * (P)))
2521#define IXGBE_PVFTXDCTL(P) (0x06028 + (0x40 * (P)))
2521#define IXGBE_PVFTDWBAL(P) (0x06038 + (0x40 * (P))) 2522#define IXGBE_PVFTDWBAL(P) (0x06038 + (0x40 * (P)))
2522#define IXGBE_PVFTDWBAH(P) (0x0603C + (0x40 * (P))) 2523#define IXGBE_PVFTDWBAH(P) (0x0603C + (0x40 * (P)))
2523 2524
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 7a637b51c7d2..e08301d833e2 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -274,6 +274,7 @@ ltq_etop_hw_init(struct net_device *dev)
274 struct ltq_etop_chan *ch = &priv->ch[i]; 274 struct ltq_etop_chan *ch = &priv->ch[i];
275 275
276 ch->idx = ch->dma.nr = i; 276 ch->idx = ch->dma.nr = i;
277 ch->dma.dev = &priv->pdev->dev;
277 278
278 if (IS_TX(i)) { 279 if (IS_TX(i)) {
279 ltq_dma_alloc_tx(&ch->dma); 280 ltq_dma_alloc_tx(&ch->dma);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 32d785b616e1..702fec82d806 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -58,6 +58,8 @@ static struct {
58 */ 58 */
59static void mvpp2_mac_config(struct net_device *dev, unsigned int mode, 59static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
60 const struct phylink_link_state *state); 60 const struct phylink_link_state *state);
61static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode,
62 phy_interface_t interface, struct phy_device *phy);
61 63
62/* Queue modes */ 64/* Queue modes */
63#define MVPP2_QDIST_SINGLE_MODE 0 65#define MVPP2_QDIST_SINGLE_MODE 0
@@ -3142,6 +3144,7 @@ static void mvpp2_start_dev(struct mvpp2_port *port)
3142 mvpp22_mode_reconfigure(port); 3144 mvpp22_mode_reconfigure(port);
3143 3145
3144 if (port->phylink) { 3146 if (port->phylink) {
3147 netif_carrier_off(port->dev);
3145 phylink_start(port->phylink); 3148 phylink_start(port->phylink);
3146 } else { 3149 } else {
3147 /* Phylink isn't used as of now for ACPI, so the MAC has to be 3150 /* Phylink isn't used as of now for ACPI, so the MAC has to be
@@ -3150,9 +3153,10 @@ static void mvpp2_start_dev(struct mvpp2_port *port)
3150 */ 3153 */
3151 struct phylink_link_state state = { 3154 struct phylink_link_state state = {
3152 .interface = port->phy_interface, 3155 .interface = port->phy_interface,
3153 .link = 1,
3154 }; 3156 };
3155 mvpp2_mac_config(port->dev, MLO_AN_INBAND, &state); 3157 mvpp2_mac_config(port->dev, MLO_AN_INBAND, &state);
3158 mvpp2_mac_link_up(port->dev, MLO_AN_INBAND, port->phy_interface,
3159 NULL);
3156 } 3160 }
3157 3161
3158 netif_tx_start_all_queues(port->dev); 3162 netif_tx_start_all_queues(port->dev);
@@ -4495,10 +4499,6 @@ static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
4495 return; 4499 return;
4496 } 4500 }
4497 4501
4498 netif_tx_stop_all_queues(port->dev);
4499 if (!port->has_phy)
4500 netif_carrier_off(port->dev);
4501
4502 /* Make sure the port is disabled when reconfiguring the mode */ 4502 /* Make sure the port is disabled when reconfiguring the mode */
4503 mvpp2_port_disable(port); 4503 mvpp2_port_disable(port);
4504 4504
@@ -4523,16 +4523,7 @@ static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
4523 if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK) 4523 if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK)
4524 mvpp2_port_loopback_set(port, state); 4524 mvpp2_port_loopback_set(port, state);
4525 4525
4526 /* If the port already was up, make sure it's still in the same state */ 4526 mvpp2_port_enable(port);
4527 if (state->link || !port->has_phy) {
4528 mvpp2_port_enable(port);
4529
4530 mvpp2_egress_enable(port);
4531 mvpp2_ingress_enable(port);
4532 if (!port->has_phy)
4533 netif_carrier_on(dev);
4534 netif_tx_wake_all_queues(dev);
4535 }
4536} 4527}
4537 4528
4538static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode, 4529static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode,
@@ -4803,6 +4794,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
4803 dev->min_mtu = ETH_MIN_MTU; 4794 dev->min_mtu = ETH_MIN_MTU;
4804 /* 9704 == 9728 - 20 and rounding to 8 */ 4795 /* 9704 == 9728 - 20 and rounding to 8 */
4805 dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE; 4796 dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
4797 dev->dev.of_node = port_node;
4806 4798
4807 /* Phylink isn't used w/ ACPI as of now */ 4799 /* Phylink isn't used w/ ACPI as of now */
4808 if (port_node) { 4800 if (port_node) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index b994b80d5714..37ba7c78859d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -132,11 +132,11 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
132 delayed_event_start(priv); 132 delayed_event_start(priv);
133 133
134 dev_ctx->context = intf->add(dev); 134 dev_ctx->context = intf->add(dev);
135 set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
136 if (intf->attach)
137 set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
138
139 if (dev_ctx->context) { 135 if (dev_ctx->context) {
136 set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
137 if (intf->attach)
138 set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
139
140 spin_lock_irq(&priv->ctx_lock); 140 spin_lock_irq(&priv->ctx_lock);
141 list_add_tail(&dev_ctx->list, &priv->ctx_list); 141 list_add_tail(&dev_ctx->list, &priv->ctx_list);
142 142
@@ -211,12 +211,17 @@ static void mlx5_attach_interface(struct mlx5_interface *intf, struct mlx5_priv
211 if (intf->attach) { 211 if (intf->attach) {
212 if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state)) 212 if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
213 goto out; 213 goto out;
214 intf->attach(dev, dev_ctx->context); 214 if (intf->attach(dev, dev_ctx->context))
215 goto out;
216
215 set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state); 217 set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
216 } else { 218 } else {
217 if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state)) 219 if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
218 goto out; 220 goto out;
219 dev_ctx->context = intf->add(dev); 221 dev_ctx->context = intf->add(dev);
222 if (!dev_ctx->context)
223 goto out;
224
220 set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state); 225 set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
221 } 226 }
222 227
@@ -391,16 +396,17 @@ void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
391 } 396 }
392} 397}
393 398
394static u16 mlx5_gen_pci_id(struct mlx5_core_dev *dev) 399static u32 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
395{ 400{
396 return (u16)((dev->pdev->bus->number << 8) | 401 return (u32)((pci_domain_nr(dev->pdev->bus) << 16) |
402 (dev->pdev->bus->number << 8) |
397 PCI_SLOT(dev->pdev->devfn)); 403 PCI_SLOT(dev->pdev->devfn));
398} 404}
399 405
400/* Must be called with intf_mutex held */ 406/* Must be called with intf_mutex held */
401struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev) 407struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
402{ 408{
403 u16 pci_id = mlx5_gen_pci_id(dev); 409 u32 pci_id = mlx5_gen_pci_id(dev);
404 struct mlx5_core_dev *res = NULL; 410 struct mlx5_core_dev *res = NULL;
405 struct mlx5_core_dev *tmp_dev; 411 struct mlx5_core_dev *tmp_dev;
406 struct mlx5_priv *priv; 412 struct mlx5_priv *priv;
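Note: widening the generated ID from u16 to u32 makes room for the PCI domain (segment) number, so two adapters that share a bus number and slot but live in different domains no longer alias each other when mlx5_get_next_phys_dev() pairs physical ports. A sketch of the layout implied by the shifts above:

#include <linux/types.h>

/* domain in bits 31:16, bus in bits 15:8, slot in bits 7:0 */
static u32 gen_id(u16 domain, u8 bus, u8 slot)
{
	return ((u32)domain << 16) | ((u32)bus << 8) | slot;
}

/* e.g. domain 1, bus 0x3b, slot 0 yields 0x00013b00, which can no
 * longer collide with domain 0's 0x3b00 under the old u16 scheme
 */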
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index 75bb981e00b7..41cde926cdab 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -191,7 +191,7 @@ set_udp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v,
191{ 191{
192 if (psrc_m) { 192 if (psrc_m) {
193 MLX5E_FTE_SET(headers_c, udp_sport, 0xffff); 193 MLX5E_FTE_SET(headers_c, udp_sport, 0xffff);
194 MLX5E_FTE_SET(headers_c, udp_sport, ntohs(psrc_v)); 194 MLX5E_FTE_SET(headers_v, udp_sport, ntohs(psrc_v));
195 } 195 }
196 196
197 if (pdst_m) { 197 if (pdst_m) {
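Note: the one-character fix above matters because flow-steering match criteria come in pairs, with headers_c holding the mask and headers_v the value. Writing the UDP source port into headers_c twice left the value side zeroed, so the rule never matched the requested port. A sketch of the intended pairing, using the driver-local MLX5E_FTE_SET() helper:

static void set_udp_sport(void *headers_c, void *headers_v, __be16 psrc)
{
	MLX5E_FTE_SET(headers_c, udp_sport, 0xffff);	  /* the mask  */
	MLX5E_FTE_SET(headers_v, udp_sport, ntohs(psrc)); /* the value */
}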
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 9131a1376e7d..9fed54017659 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1982,14 +1982,15 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
1982 goto out_ok; 1982 goto out_ok;
1983 1983
1984 modify_ip_header = false; 1984 modify_ip_header = false;
1985 tcf_exts_to_list(exts, &actions); 1985 tcf_exts_for_each_action(i, a, exts) {
1986 list_for_each_entry(a, &actions, list) { 1986 int k;
1987
1987 if (!is_tcf_pedit(a)) 1988 if (!is_tcf_pedit(a))
1988 continue; 1989 continue;
1989 1990
1990 nkeys = tcf_pedit_nkeys(a); 1991 nkeys = tcf_pedit_nkeys(a);
1991 for (i = 0; i < nkeys; i++) { 1992 for (k = 0; k < nkeys; k++) {
1992 htype = tcf_pedit_htype(a, i); 1993 htype = tcf_pedit_htype(a, k);
1993 if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 || 1994 if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 ||
1994 htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP6) { 1995 htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP6) {
1995 modify_ip_header = true; 1996 modify_ip_header = true;
@@ -2053,15 +2054,14 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
2053 const struct tc_action *a; 2054 const struct tc_action *a;
2054 LIST_HEAD(actions); 2055 LIST_HEAD(actions);
2055 u32 action = 0; 2056 u32 action = 0;
2056 int err; 2057 int err, i;
2057 2058
2058 if (!tcf_exts_has_actions(exts)) 2059 if (!tcf_exts_has_actions(exts))
2059 return -EINVAL; 2060 return -EINVAL;
2060 2061
2061 attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG; 2062 attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
2062 2063
2063 tcf_exts_to_list(exts, &actions); 2064 tcf_exts_for_each_action(i, a, exts) {
2064 list_for_each_entry(a, &actions, list) {
2065 if (is_tcf_gact_shot(a)) { 2065 if (is_tcf_gact_shot(a)) {
2066 action |= MLX5_FLOW_CONTEXT_ACTION_DROP; 2066 action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
2067 if (MLX5_CAP_FLOWTABLE(priv->mdev, 2067 if (MLX5_CAP_FLOWTABLE(priv->mdev,
@@ -2666,7 +2666,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
2666 LIST_HEAD(actions); 2666 LIST_HEAD(actions);
2667 bool encap = false; 2667 bool encap = false;
2668 u32 action = 0; 2668 u32 action = 0;
2669 int err; 2669 int err, i;
2670 2670
2671 if (!tcf_exts_has_actions(exts)) 2671 if (!tcf_exts_has_actions(exts))
2672 return -EINVAL; 2672 return -EINVAL;
@@ -2674,8 +2674,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
2674 attr->in_rep = rpriv->rep; 2674 attr->in_rep = rpriv->rep;
2675 attr->in_mdev = priv->mdev; 2675 attr->in_mdev = priv->mdev;
2676 2676
2677 tcf_exts_to_list(exts, &actions); 2677 tcf_exts_for_each_action(i, a, exts) {
2678 list_for_each_entry(a, &actions, list) {
2679 if (is_tcf_gact_shot(a)) { 2678 if (is_tcf_gact_shot(a)) {
2680 action |= MLX5_FLOW_CONTEXT_ACTION_DROP | 2679 action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
2681 MLX5_FLOW_CONTEXT_ACTION_COUNT; 2680 MLX5_FLOW_CONTEXT_ACTION_COUNT;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index f72b5c9dcfe9..3028e8d90920 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -663,6 +663,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
663 if (err) 663 if (err)
664 goto miss_rule_err; 664 goto miss_rule_err;
665 665
666 kvfree(flow_group_in);
666 return 0; 667 return 0;
667 668
668miss_rule_err: 669miss_rule_err:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index f418541af7cf..37d114c668b7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1578,6 +1578,33 @@ static u64 matched_fgs_get_version(struct list_head *match_head)
1578 return version; 1578 return version;
1579} 1579}
1580 1580
1581static struct fs_fte *
1582lookup_fte_locked(struct mlx5_flow_group *g,
1583 u32 *match_value,
1584 bool take_write)
1585{
1586 struct fs_fte *fte_tmp;
1587
1588 if (take_write)
1589 nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
1590 else
1591 nested_down_read_ref_node(&g->node, FS_LOCK_PARENT);
1592 fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value,
1593 rhash_fte);
1594 if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
1595 fte_tmp = NULL;
1596 goto out;
1597 }
1598
1599 nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
1600out:
1601 if (take_write)
1602 up_write_ref_node(&g->node);
1603 else
1604 up_read_ref_node(&g->node);
1605 return fte_tmp;
1606}
1607
1581static struct mlx5_flow_handle * 1608static struct mlx5_flow_handle *
1582try_add_to_existing_fg(struct mlx5_flow_table *ft, 1609try_add_to_existing_fg(struct mlx5_flow_table *ft,
1583 struct list_head *match_head, 1610 struct list_head *match_head,
@@ -1600,10 +1627,6 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
1600 if (IS_ERR(fte)) 1627 if (IS_ERR(fte))
1601 return ERR_PTR(-ENOMEM); 1628 return ERR_PTR(-ENOMEM);
1602 1629
1603 list_for_each_entry(iter, match_head, list) {
1604 nested_down_read_ref_node(&iter->g->node, FS_LOCK_PARENT);
1605 }
1606
1607search_again_locked: 1630search_again_locked:
1608 version = matched_fgs_get_version(match_head); 1631 version = matched_fgs_get_version(match_head);
1609 /* Try to find a fg that already contains a matching fte */ 1632 /* Try to find a fg that already contains a matching fte */
@@ -1611,20 +1634,9 @@ search_again_locked:
1611 struct fs_fte *fte_tmp; 1634 struct fs_fte *fte_tmp;
1612 1635
1613 g = iter->g; 1636 g = iter->g;
1614 fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, spec->match_value, 1637 fte_tmp = lookup_fte_locked(g, spec->match_value, take_write);
1615 rhash_fte); 1638 if (!fte_tmp)
1616 if (!fte_tmp || !tree_get_node(&fte_tmp->node))
1617 continue; 1639 continue;
1618
1619 nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
1620 if (!take_write) {
1621 list_for_each_entry(iter, match_head, list)
1622 up_read_ref_node(&iter->g->node);
1623 } else {
1624 list_for_each_entry(iter, match_head, list)
1625 up_write_ref_node(&iter->g->node);
1626 }
1627
1628 rule = add_rule_fg(g, spec->match_value, 1640 rule = add_rule_fg(g, spec->match_value,
1629 flow_act, dest, dest_num, fte_tmp); 1641 flow_act, dest, dest_num, fte_tmp);
1630 up_write_ref_node(&fte_tmp->node); 1642 up_write_ref_node(&fte_tmp->node);
@@ -1633,19 +1645,6 @@ search_again_locked:
1633 return rule; 1645 return rule;
1634 } 1646 }
1635 1647
1636 /* No group with matching fte found. Try to add a new fte to any
1637 * matching fg.
1638 */
1639
1640 if (!take_write) {
1641 list_for_each_entry(iter, match_head, list)
1642 up_read_ref_node(&iter->g->node);
1643 list_for_each_entry(iter, match_head, list)
1644 nested_down_write_ref_node(&iter->g->node,
1645 FS_LOCK_PARENT);
1646 take_write = true;
1647 }
1648
1649 /* Check the ft version, for case that new flow group 1648 /* Check the ft version, for case that new flow group
1650 * was added while the fgs weren't locked 1649 * was added while the fgs weren't locked
1651 */ 1650 */
@@ -1657,27 +1656,30 @@ search_again_locked:
1657 /* Check the fgs version, for case the new FTE with the 1656 /* Check the fgs version, for case the new FTE with the
1658 * same values was added while the fgs weren't locked 1657 * same values was added while the fgs weren't locked
1659 */ 1658 */
1660 if (version != matched_fgs_get_version(match_head)) 1659 if (version != matched_fgs_get_version(match_head)) {
1660 take_write = true;
1661 goto search_again_locked; 1661 goto search_again_locked;
1662 }
1662 1663
1663 list_for_each_entry(iter, match_head, list) { 1664 list_for_each_entry(iter, match_head, list) {
1664 g = iter->g; 1665 g = iter->g;
1665 1666
1666 if (!g->node.active) 1667 if (!g->node.active)
1667 continue; 1668 continue;
1669
1670 nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
1671
1668 err = insert_fte(g, fte); 1672 err = insert_fte(g, fte);
1669 if (err) { 1673 if (err) {
1674 up_write_ref_node(&g->node);
1670 if (err == -ENOSPC) 1675 if (err == -ENOSPC)
1671 continue; 1676 continue;
1672 list_for_each_entry(iter, match_head, list)
1673 up_write_ref_node(&iter->g->node);
1674 kmem_cache_free(steering->ftes_cache, fte); 1677 kmem_cache_free(steering->ftes_cache, fte);
1675 return ERR_PTR(err); 1678 return ERR_PTR(err);
1676 } 1679 }
1677 1680
1678 nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD); 1681 nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
1679 list_for_each_entry(iter, match_head, list) 1682 up_write_ref_node(&g->node);
1680 up_write_ref_node(&iter->g->node);
1681 rule = add_rule_fg(g, spec->match_value, 1683 rule = add_rule_fg(g, spec->match_value,
1682 flow_act, dest, dest_num, fte); 1684 flow_act, dest, dest_num, fte);
1683 up_write_ref_node(&fte->node); 1685 up_write_ref_node(&fte->node);
@@ -1686,8 +1688,6 @@ search_again_locked:
1686 } 1688 }
1687 rule = ERR_PTR(-ENOENT); 1689 rule = ERR_PTR(-ENOENT);
1688out: 1690out:
1689 list_for_each_entry(iter, match_head, list)
1690 up_write_ref_node(&iter->g->node);
1691 kmem_cache_free(steering->ftes_cache, fte); 1691 kmem_cache_free(steering->ftes_cache, fte);
1692 return rule; 1692 return rule;
1693} 1693}
@@ -1726,6 +1726,8 @@ search_again_locked:
1726 if (err) { 1726 if (err) {
1727 if (take_write) 1727 if (take_write)
1728 up_write_ref_node(&ft->node); 1728 up_write_ref_node(&ft->node);
1729 else
1730 up_read_ref_node(&ft->node);
1729 return ERR_PTR(err); 1731 return ERR_PTR(err);
1730 } 1732 }
1731 1733
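Note: the fs_core rework narrows the locking scope. Instead of read-locking every candidate flow group for the entire search, lookup_fte_locked() holds a single group's lock only while probing its hash table, and the version checks catch a concurrent insert, retrying with take_write set. A simplified sketch of the per-group probe, assuming an rwsem-protected group (the real code uses the driver's ref-counted tree nodes):

#include <linux/rwsem.h>
#include <linux/rhashtable.h>

struct group {
	struct rw_semaphore lock;
	struct rhashtable ftes_hash;
};

static void *probe_group(struct group *g, const void *key,
			 struct rhashtable_params params, bool write)
{
	void *fte;

	if (write)
		down_write(&g->lock);
	else
		down_read(&g->lock);

	fte = rhashtable_lookup_fast(&g->ftes_hash, key, params);

	if (write)
		up_write(&g->lock);
	else
		up_read(&g->lock);

	return fte;	/* NULL means try the next group or retry */
}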
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index d39b0b7011b2..9f39aeca863f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -331,9 +331,17 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
331 add_timer(&health->timer); 331 add_timer(&health->timer);
332} 332}
333 333
334void mlx5_stop_health_poll(struct mlx5_core_dev *dev) 334void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health)
335{ 335{
336 struct mlx5_core_health *health = &dev->priv.health; 336 struct mlx5_core_health *health = &dev->priv.health;
337 unsigned long flags;
338
339 if (disable_health) {
340 spin_lock_irqsave(&health->wq_lock, flags);
341 set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
342 set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
343 spin_unlock_irqrestore(&health->wq_lock, flags);
344 }
337 345
338 del_timer_sync(&health->timer); 346 del_timer_sync(&health->timer);
339} 347}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index cf3e4a659052..b5e9f664fc66 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -878,8 +878,10 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
878 priv->numa_node = dev_to_node(&dev->pdev->dev); 878 priv->numa_node = dev_to_node(&dev->pdev->dev);
879 879
880 priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root); 880 priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root);
881 if (!priv->dbg_root) 881 if (!priv->dbg_root) {
882 dev_err(&pdev->dev, "Cannot create debugfs dir, aborting\n");
882 return -ENOMEM; 883 return -ENOMEM;
884 }
883 885
884 err = mlx5_pci_enable_device(dev); 886 err = mlx5_pci_enable_device(dev);
885 if (err) { 887 if (err) {
@@ -928,7 +930,7 @@ static void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
928 pci_clear_master(dev->pdev); 930 pci_clear_master(dev->pdev);
929 release_bar(dev->pdev); 931 release_bar(dev->pdev);
930 mlx5_pci_disable_device(dev); 932 mlx5_pci_disable_device(dev);
931 debugfs_remove(priv->dbg_root); 933 debugfs_remove_recursive(priv->dbg_root);
932} 934}
933 935
934static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv) 936static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
@@ -1286,7 +1288,7 @@ err_cleanup_once:
1286 mlx5_cleanup_once(dev); 1288 mlx5_cleanup_once(dev);
1287 1289
1288err_stop_poll: 1290err_stop_poll:
1289 mlx5_stop_health_poll(dev); 1291 mlx5_stop_health_poll(dev, boot);
1290 if (mlx5_cmd_teardown_hca(dev)) { 1292 if (mlx5_cmd_teardown_hca(dev)) {
1291 dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n"); 1293 dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
1292 goto out_err; 1294 goto out_err;
@@ -1346,7 +1348,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
1346 mlx5_free_irq_vectors(dev); 1348 mlx5_free_irq_vectors(dev);
1347 if (cleanup) 1349 if (cleanup)
1348 mlx5_cleanup_once(dev); 1350 mlx5_cleanup_once(dev);
1349 mlx5_stop_health_poll(dev); 1351 mlx5_stop_health_poll(dev, cleanup);
1350 err = mlx5_cmd_teardown_hca(dev); 1352 err = mlx5_cmd_teardown_hca(dev);
1351 if (err) { 1353 if (err) {
1352 dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n"); 1354 dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
@@ -1608,7 +1610,7 @@ static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
 1608 * with the HCA, so the health poll is no longer needed. 1610 * with the HCA, so the health poll is no longer needed.
1609 */ 1611 */
1610 mlx5_drain_health_wq(dev); 1612 mlx5_drain_health_wq(dev);
1611 mlx5_stop_health_poll(dev); 1613 mlx5_stop_health_poll(dev, false);
1612 1614
1613 ret = mlx5_cmd_force_teardown_hca(dev); 1615 ret = mlx5_cmd_force_teardown_hca(dev);
1614 if (ret) { 1616 if (ret) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
index 86478a6b99c5..68e7f8df2a6d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -39,9 +39,9 @@ u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
39 return (u32)wq->fbc.sz_m1 + 1; 39 return (u32)wq->fbc.sz_m1 + 1;
40} 40}
41 41
42u32 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq) 42u16 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq)
43{ 43{
44 return (u32)wq->fbc.frag_sz_m1 + 1; 44 return wq->fbc.frag_sz_m1 + 1;
45} 45}
46 46
47u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq) 47u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
@@ -138,15 +138,16 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
138 void *qpc, struct mlx5_wq_qp *wq, 138 void *qpc, struct mlx5_wq_qp *wq,
139 struct mlx5_wq_ctrl *wq_ctrl) 139 struct mlx5_wq_ctrl *wq_ctrl)
140{ 140{
141 u32 sq_strides_offset; 141 u16 sq_strides_offset;
142 u32 rq_pg_remainder;
142 int err; 143 int err;
143 144
144 mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4, 145 mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4,
145 MLX5_GET(qpc, qpc, log_rq_size), 146 MLX5_GET(qpc, qpc, log_rq_size),
146 &wq->rq.fbc); 147 &wq->rq.fbc);
147 148
148 sq_strides_offset = 149 rq_pg_remainder = mlx5_wq_cyc_get_byte_size(&wq->rq) % PAGE_SIZE;
149 ((wq->rq.fbc.frag_sz_m1 + 1) % PAGE_SIZE) / MLX5_SEND_WQE_BB; 150 sq_strides_offset = rq_pg_remainder / MLX5_SEND_WQE_BB;
150 151
151 mlx5_fill_fbc_offset(ilog2(MLX5_SEND_WQE_BB), 152 mlx5_fill_fbc_offset(ilog2(MLX5_SEND_WQE_BB),
152 MLX5_GET(qpc, qpc, log_sq_size), 153 MLX5_GET(qpc, qpc, log_sq_size),
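Note: the corrected sq_strides_offset derives the SQ's starting stride from the remainder of the whole RQ byte size within a page rather than from one fragment's size. Worked example, taking MLX5_SEND_WQE_BB as the 64-byte send-WQE basic block: an RQ of 6144 bytes on 4096-byte pages leaves rq_pg_remainder = 6144 % 4096 = 2048, so the SQ starts at stride 2048 / 64 = 32 into that page.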
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
index 2bd4c3184eba..3a1a170bb2d7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -80,7 +80,7 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
80 void *wqc, struct mlx5_wq_cyc *wq, 80 void *wqc, struct mlx5_wq_cyc *wq,
81 struct mlx5_wq_ctrl *wq_ctrl); 81 struct mlx5_wq_ctrl *wq_ctrl);
82u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq); 82u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
83u32 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq); 83u16 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq);
84 84
85int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, 85int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
86 void *qpc, struct mlx5_wq_qp *wq, 86 void *qpc, struct mlx5_wq_qp *wq,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 6070d1591d1e..930700413b1d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1346,8 +1346,7 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
1346 return -ENOMEM; 1346 return -ENOMEM;
1347 mall_tc_entry->cookie = f->cookie; 1347 mall_tc_entry->cookie = f->cookie;
1348 1348
1349 tcf_exts_to_list(f->exts, &actions); 1349 a = tcf_exts_first_action(f->exts);
1350 a = list_first_entry(&actions, struct tc_action, list);
1351 1350
1352 if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) { 1351 if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
1353 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror; 1352 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 3ae930196741..3cdb7aca90b7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -414,6 +414,8 @@ mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
414void 414void
415mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan); 415mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
416void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif); 416void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
417void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
418 struct net_device *dev);
417 419
418/* spectrum_kvdl.c */ 420/* spectrum_kvdl.c */
419enum mlxsw_sp_kvdl_entry_type { 421enum mlxsw_sp_kvdl_entry_type {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 4327487553c5..3589432d1643 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -337,14 +337,14 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = {
337 MLXSW_SP_SB_CM(1500, 9, 0), 337 MLXSW_SP_SB_CM(1500, 9, 0),
338 MLXSW_SP_SB_CM(1500, 9, 0), 338 MLXSW_SP_SB_CM(1500, 9, 0),
339 MLXSW_SP_SB_CM(1500, 9, 0), 339 MLXSW_SP_SB_CM(1500, 9, 0),
340 MLXSW_SP_SB_CM(0, 0, 0), 340 MLXSW_SP_SB_CM(0, 140000, 15),
341 MLXSW_SP_SB_CM(0, 0, 0), 341 MLXSW_SP_SB_CM(0, 140000, 15),
342 MLXSW_SP_SB_CM(0, 0, 0), 342 MLXSW_SP_SB_CM(0, 140000, 15),
343 MLXSW_SP_SB_CM(0, 0, 0), 343 MLXSW_SP_SB_CM(0, 140000, 15),
344 MLXSW_SP_SB_CM(0, 0, 0), 344 MLXSW_SP_SB_CM(0, 140000, 15),
345 MLXSW_SP_SB_CM(0, 0, 0), 345 MLXSW_SP_SB_CM(0, 140000, 15),
346 MLXSW_SP_SB_CM(0, 0, 0), 346 MLXSW_SP_SB_CM(0, 140000, 15),
347 MLXSW_SP_SB_CM(0, 0, 0), 347 MLXSW_SP_SB_CM(0, 140000, 15),
348 MLXSW_SP_SB_CM(1, 0xff, 0), 348 MLXSW_SP_SB_CM(1, 0xff, 0),
349}; 349};
350 350
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index ebd1b24ebaa5..8d211972c5e9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -21,8 +21,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
21 struct netlink_ext_ack *extack) 21 struct netlink_ext_ack *extack)
22{ 22{
23 const struct tc_action *a; 23 const struct tc_action *a;
24 LIST_HEAD(actions); 24 int err, i;
25 int err;
26 25
27 if (!tcf_exts_has_actions(exts)) 26 if (!tcf_exts_has_actions(exts))
28 return 0; 27 return 0;
@@ -32,8 +31,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
32 if (err) 31 if (err)
33 return err; 32 return err;
34 33
35 tcf_exts_to_list(exts, &actions); 34 tcf_exts_for_each_action(i, a, exts) {
36 list_for_each_entry(a, &actions, list) {
37 if (is_tcf_gact_ok(a)) { 35 if (is_tcf_gact_ok(a)) {
38 err = mlxsw_sp_acl_rulei_act_terminate(rulei); 36 err = mlxsw_sp_acl_rulei_act_terminate(rulei);
39 if (err) { 37 if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 3a96307f51b0..2ab9cf25a08a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -6234,6 +6234,17 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
6234 mlxsw_sp_vr_put(mlxsw_sp, vr); 6234 mlxsw_sp_vr_put(mlxsw_sp, vr);
6235} 6235}
6236 6236
6237void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
6238 struct net_device *dev)
6239{
6240 struct mlxsw_sp_rif *rif;
6241
6242 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6243 if (!rif)
6244 return;
6245 mlxsw_sp_rif_destroy(rif);
6246}
6247
6237static void 6248static void
6238mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params, 6249mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
6239 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) 6250 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 0d8444aaba01..db715da7bab7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -127,6 +127,24 @@ bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
127 return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev); 127 return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
128} 128}
129 129
130static int mlxsw_sp_bridge_device_upper_rif_destroy(struct net_device *dev,
131 void *data)
132{
133 struct mlxsw_sp *mlxsw_sp = data;
134
135 mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
136 return 0;
137}
138
139static void mlxsw_sp_bridge_device_rifs_destroy(struct mlxsw_sp *mlxsw_sp,
140 struct net_device *dev)
141{
142 mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
143 netdev_walk_all_upper_dev_rcu(dev,
144 mlxsw_sp_bridge_device_upper_rif_destroy,
145 mlxsw_sp);
146}
147
130static struct mlxsw_sp_bridge_device * 148static struct mlxsw_sp_bridge_device *
131mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge, 149mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
132 struct net_device *br_dev) 150 struct net_device *br_dev)
@@ -165,6 +183,8 @@ static void
165mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge, 183mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
166 struct mlxsw_sp_bridge_device *bridge_device) 184 struct mlxsw_sp_bridge_device *bridge_device)
167{ 185{
186 mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp,
187 bridge_device->dev);
168 list_del(&bridge_device->list); 188 list_del(&bridge_device->list);
169 if (bridge_device->vlan_enabled) 189 if (bridge_device->vlan_enabled)
170 bridge->vlan_enabled_exists = false; 190 bridge->vlan_enabled_exists = false;
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index e7dce79ff2c9..001b5f714c1b 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -2850,7 +2850,7 @@ static void lan743x_pcidev_shutdown(struct pci_dev *pdev)
2850 lan743x_hardware_cleanup(adapter); 2850 lan743x_hardware_cleanup(adapter);
2851} 2851}
2852 2852
2853#ifdef CONFIG_PM 2853#ifdef CONFIG_PM_SLEEP
2854static u16 lan743x_pm_wakeframe_crc16(const u8 *buf, int len) 2854static u16 lan743x_pm_wakeframe_crc16(const u8 *buf, int len)
2855{ 2855{
2856 return bitrev16(crc16(0xFFFF, buf, len)); 2856 return bitrev16(crc16(0xFFFF, buf, len));
@@ -3016,7 +3016,7 @@ static int lan743x_pm_resume(struct device *dev)
3016static const struct dev_pm_ops lan743x_pm_ops = { 3016static const struct dev_pm_ops lan743x_pm_ops = {
3017 SET_SYSTEM_SLEEP_PM_OPS(lan743x_pm_suspend, lan743x_pm_resume) 3017 SET_SYSTEM_SLEEP_PM_OPS(lan743x_pm_suspend, lan743x_pm_resume)
3018}; 3018};
3019#endif /*CONFIG_PM */ 3019#endif /* CONFIG_PM_SLEEP */
3020 3020
3021static const struct pci_device_id lan743x_pcidev_tbl[] = { 3021static const struct pci_device_id lan743x_pcidev_tbl[] = {
3022 { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) }, 3022 { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) },
@@ -3028,7 +3028,7 @@ static struct pci_driver lan743x_pcidev_driver = {
3028 .id_table = lan743x_pcidev_tbl, 3028 .id_table = lan743x_pcidev_tbl,
3029 .probe = lan743x_pcidev_probe, 3029 .probe = lan743x_pcidev_probe,
3030 .remove = lan743x_pcidev_remove, 3030 .remove = lan743x_pcidev_remove,
3031#ifdef CONFIG_PM 3031#ifdef CONFIG_PM_SLEEP
3032 .driver.pm = &lan743x_pm_ops, 3032 .driver.pm = &lan743x_pm_ops,
3033#endif 3033#endif
3034 .shutdown = lan743x_pcidev_shutdown, 3034 .shutdown = lan743x_pcidev_shutdown,
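Note: switching the guards from CONFIG_PM to CONFIG_PM_SLEEP matches SET_SYSTEM_SLEEP_PM_OPS(), which only fills in the system-sleep callbacks when CONFIG_PM_SLEEP is set; with CONFIG_PM=y but CONFIG_PM_SLEEP=n the old guards compiled suspend/resume handlers that nothing referenced. A sketch of the consistent form, with hypothetical callbacks:

#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int my_suspend(struct device *dev) { return 0; }
static int my_resume(struct device *dev) { return 0; }

static const struct dev_pm_ops my_pm_ops = {
	/* expands to the sleep callbacks only under CONFIG_PM_SLEEP */
	SET_SYSTEM_SLEEP_PM_OPS(my_suspend, my_resume)
};
#endif /* CONFIG_PM_SLEEP */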
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 0ba0356ec4e6..46ba0cf257c6 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -52,6 +52,7 @@
52#define NFP_FL_TUNNEL_CSUM cpu_to_be16(0x01) 52#define NFP_FL_TUNNEL_CSUM cpu_to_be16(0x01)
53#define NFP_FL_TUNNEL_KEY cpu_to_be16(0x04) 53#define NFP_FL_TUNNEL_KEY cpu_to_be16(0x04)
54#define NFP_FL_TUNNEL_GENEVE_OPT cpu_to_be16(0x0800) 54#define NFP_FL_TUNNEL_GENEVE_OPT cpu_to_be16(0x0800)
55#define NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS IP_TUNNEL_INFO_TX
55#define NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS (NFP_FL_TUNNEL_CSUM | \ 56#define NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS (NFP_FL_TUNNEL_CSUM | \
56 NFP_FL_TUNNEL_KEY | \ 57 NFP_FL_TUNNEL_KEY | \
57 NFP_FL_TUNNEL_GENEVE_OPT) 58 NFP_FL_TUNNEL_GENEVE_OPT)
@@ -741,11 +742,16 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
741 nfp_fl_push_vlan(psh_v, a); 742 nfp_fl_push_vlan(psh_v, a);
742 *a_len += sizeof(struct nfp_fl_push_vlan); 743 *a_len += sizeof(struct nfp_fl_push_vlan);
743 } else if (is_tcf_tunnel_set(a)) { 744 } else if (is_tcf_tunnel_set(a)) {
745 struct ip_tunnel_info *ip_tun = tcf_tunnel_info(a);
744 struct nfp_repr *repr = netdev_priv(netdev); 746 struct nfp_repr *repr = netdev_priv(netdev);
747
745 *tun_type = nfp_fl_get_tun_from_act_l4_port(repr->app, a); 748 *tun_type = nfp_fl_get_tun_from_act_l4_port(repr->app, a);
746 if (*tun_type == NFP_FL_TUNNEL_NONE) 749 if (*tun_type == NFP_FL_TUNNEL_NONE)
747 return -EOPNOTSUPP; 750 return -EOPNOTSUPP;
748 751
752 if (ip_tun->mode & ~NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS)
753 return -EOPNOTSUPP;
754
749 /* Pre-tunnel action is required for tunnel encap. 755 /* Pre-tunnel action is required for tunnel encap.
750 * This checks for next hop entries on NFP. 756 * This checks for next hop entries on NFP.
751 * If none, the packet falls back before applying other actions. 757 * If none, the packet falls back before applying other actions.
@@ -796,11 +802,10 @@ int nfp_flower_compile_action(struct nfp_app *app,
796 struct net_device *netdev, 802 struct net_device *netdev,
797 struct nfp_fl_payload *nfp_flow) 803 struct nfp_fl_payload *nfp_flow)
798{ 804{
799 int act_len, act_cnt, err, tun_out_cnt, out_cnt; 805 int act_len, act_cnt, err, tun_out_cnt, out_cnt, i;
800 enum nfp_flower_tun_type tun_type; 806 enum nfp_flower_tun_type tun_type;
801 const struct tc_action *a; 807 const struct tc_action *a;
802 u32 csum_updated = 0; 808 u32 csum_updated = 0;
803 LIST_HEAD(actions);
804 809
805 memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ); 810 memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
806 nfp_flow->meta.act_len = 0; 811 nfp_flow->meta.act_len = 0;
@@ -810,8 +815,7 @@ int nfp_flower_compile_action(struct nfp_app *app,
810 tun_out_cnt = 0; 815 tun_out_cnt = 0;
811 out_cnt = 0; 816 out_cnt = 0;
812 817
813 tcf_exts_to_list(flow->exts, &actions); 818 tcf_exts_for_each_action(i, a, flow->exts) {
814 list_for_each_entry(a, &actions, list) {
815 err = nfp_flower_loop_action(app, a, flow, nfp_flow, &act_len, 819 err = nfp_flower_loop_action(app, a, flow, nfp_flow, &act_len,
816 netdev, &tun_type, &tun_out_cnt, 820 netdev, &tun_type, &tun_out_cnt,
817 &out_cnt, &csum_updated); 821 &out_cnt, &csum_updated);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index 85f8209bf007..81d941ab895c 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -70,6 +70,7 @@ struct nfp_app;
70#define NFP_FL_FEATS_GENEVE BIT(0) 70#define NFP_FL_FEATS_GENEVE BIT(0)
71#define NFP_FL_NBI_MTU_SETTING BIT(1) 71#define NFP_FL_NBI_MTU_SETTING BIT(1)
72#define NFP_FL_FEATS_GENEVE_OPT BIT(2) 72#define NFP_FL_FEATS_GENEVE_OPT BIT(2)
73#define NFP_FL_FEATS_VLAN_PCP BIT(3)
73#define NFP_FL_FEATS_LAG BIT(31) 74#define NFP_FL_FEATS_LAG BIT(31)
74 75
75struct nfp_fl_mask_id { 76struct nfp_fl_mask_id {
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index a0c72f277faa..17acb8cc6044 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -56,7 +56,7 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *frame,
56 FLOW_DISSECTOR_KEY_VLAN, 56 FLOW_DISSECTOR_KEY_VLAN,
57 target); 57 target);
58 /* Populate the tci field. */ 58 /* Populate the tci field. */
59 if (flow_vlan->vlan_id) { 59 if (flow_vlan->vlan_id || flow_vlan->vlan_priority) {
60 tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, 60 tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
61 flow_vlan->vlan_priority) | 61 flow_vlan->vlan_priority) |
62 FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, 62 FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 2edab01c3beb..bd19624f10cf 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -192,6 +192,17 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
192 key_size += sizeof(struct nfp_flower_mac_mpls); 192 key_size += sizeof(struct nfp_flower_mac_mpls);
193 } 193 }
194 194
195 if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
196 struct flow_dissector_key_vlan *flow_vlan;
197
198 flow_vlan = skb_flow_dissector_target(flow->dissector,
199 FLOW_DISSECTOR_KEY_VLAN,
200 flow->mask);
201 if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) &&
202 flow_vlan->vlan_priority)
203 return -EOPNOTSUPP;
204 }
205
195 if (dissector_uses_key(flow->dissector, 206 if (dissector_uses_key(flow->dissector,
196 FLOW_DISSECTOR_KEY_ENC_CONTROL)) { 207 FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
197 struct flow_dissector_key_ipv4_addrs *mask_ipv4 = NULL; 208 struct flow_dissector_key_ipv4_addrs *mask_ipv4 = NULL;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index a8b9fbab5f73..253bdaef1505 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -229,29 +229,16 @@ done:
229 spin_unlock_bh(&nn->reconfig_lock); 229 spin_unlock_bh(&nn->reconfig_lock);
230} 230}
231 231
232/** 232static void nfp_net_reconfig_sync_enter(struct nfp_net *nn)
233 * nfp_net_reconfig() - Reconfigure the firmware
234 * @nn: NFP Net device to reconfigure
235 * @update: The value for the update field in the BAR config
236 *
 237 * Write the update word to the BAR and ping the reconfig queue. Then
238 * poll until the firmware has acknowledged the update by zeroing the
239 * update word.
240 *
241 * Return: Negative errno on error, 0 on success
242 */
243int nfp_net_reconfig(struct nfp_net *nn, u32 update)
244{ 233{
245 bool cancelled_timer = false; 234 bool cancelled_timer = false;
246 u32 pre_posted_requests; 235 u32 pre_posted_requests;
247 int ret;
248 236
249 spin_lock_bh(&nn->reconfig_lock); 237 spin_lock_bh(&nn->reconfig_lock);
250 238
251 nn->reconfig_sync_present = true; 239 nn->reconfig_sync_present = true;
252 240
253 if (nn->reconfig_timer_active) { 241 if (nn->reconfig_timer_active) {
254 del_timer(&nn->reconfig_timer);
255 nn->reconfig_timer_active = false; 242 nn->reconfig_timer_active = false;
256 cancelled_timer = true; 243 cancelled_timer = true;
257 } 244 }
@@ -260,14 +247,43 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update)
 
 	spin_unlock_bh(&nn->reconfig_lock);
 
-	if (cancelled_timer)
+	if (cancelled_timer) {
+		del_timer_sync(&nn->reconfig_timer);
 		nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires);
+	}
 
 	/* Run the posted reconfigs which were issued before we started */
 	if (pre_posted_requests) {
 		nfp_net_reconfig_start(nn, pre_posted_requests);
 		nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
 	}
+}
+
+static void nfp_net_reconfig_wait_posted(struct nfp_net *nn)
+{
+	nfp_net_reconfig_sync_enter(nn);
+
+	spin_lock_bh(&nn->reconfig_lock);
+	nn->reconfig_sync_present = false;
+	spin_unlock_bh(&nn->reconfig_lock);
+}
+
+/**
+ * nfp_net_reconfig() - Reconfigure the firmware
+ * @nn:      NFP Net device to reconfigure
+ * @update:  The value for the update field in the BAR config
+ *
+ * Write the update word to the BAR and ping the reconfig queue.  The
+ * poll until the firmware has acknowledged the update by zeroing the
+ * update word.
+ *
+ * Return: Negative errno on error, 0 on success
+ */
+int nfp_net_reconfig(struct nfp_net *nn, u32 update)
+{
+	int ret;
+
+	nfp_net_reconfig_sync_enter(nn);
 
 	nfp_net_reconfig_start(nn, update);
 	ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
@@ -3633,6 +3649,7 @@ struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
  */
 void nfp_net_free(struct nfp_net *nn)
 {
+	WARN_ON(timer_pending(&nn->reconfig_timer) || nn->reconfig_posted);
 	if (nn->dp.netdev)
 		free_netdev(nn->dp.netdev);
 	else
@@ -3920,4 +3937,5 @@ void nfp_net_clean(struct nfp_net *nn)
 		return;
 
 	unregister_netdev(nn->dp.netdev);
+	nfp_net_reconfig_wait_posted(nn);
 }
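The nfp_net_common.c refactor above replaces del_timer() with del_timer_sync(): cancellation now also waits for a handler that may already be running on another CPU. A userspace analog of the same "cancel, then synchronize" idea, sketched with pthreads (names are hypothetical and kernel timers differ in detail; this only illustrates why requesting cancellation is not enough before tearing down shared state):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool timer_active = true;
static pthread_t timer_thread;

static void *timer_fn(void *arg)
{
	(void)arg;
	for (;;) {
		pthread_mutex_lock(&lock);
		bool active = timer_active;
		pthread_mutex_unlock(&lock);
		if (!active)
			break;		/* cancelled: stop rearming */
		usleep(10 * 1000);	/* pretend to service posted work */
	}
	return NULL;
}

static void sync_enter(void)
{
	pthread_mutex_lock(&lock);
	timer_active = false;	/* analog of clearing reconfig_timer_active */
	pthread_mutex_unlock(&lock);

	/* Analog of del_timer_sync(): do not just request cancellation,
	 * wait until the handler can no longer be running. */
	pthread_join(timer_thread, NULL);
}

int main(void)
{
	pthread_create(&timer_thread, NULL, timer_fn, NULL);
	sync_enter();
	puts("safe: handler can no longer run");
	return 0;
}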
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
index d9ab5add27a8..34193c2f1699 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -407,7 +407,7 @@ static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
 
 	if (i == QED_INIT_MAX_POLL_COUNT) {
 		DP_ERR(p_hwfn,
-		       "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparsion %08x)]\n",
+		       "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n",
 		       addr, le32_to_cpu(cmd->expected_val),
 		       val, le32_to_cpu(cmd->op_data));
 	}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index d89a0e22f6e4..5d37ec7e9b0b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -48,7 +48,7 @@
 #include "qed_reg_addr.h"
 #include "qed_sriov.h"
 
-#define CHIP_MCP_RESP_ITER_US	10
+#define QED_MCP_RESP_ITER_US	10
 
 #define QED_DRV_MB_MAX_RETRIES	(500 * 1000)	/* Account for 5 sec */
 #define QED_MCP_RESET_RETRIES	(50 * 1000)	/* Account for 500 msec */
@@ -183,18 +183,57 @@ int qed_mcp_free(struct qed_hwfn *p_hwfn)
 	return 0;
 }
 
+/* Maximum of 1 sec to wait for the SHMEM ready indication */
+#define QED_MCP_SHMEM_RDY_MAX_RETRIES	20
+#define QED_MCP_SHMEM_RDY_ITER_MS	50
+
 static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
 	struct qed_mcp_info *p_info = p_hwfn->mcp_info;
+	u8 cnt = QED_MCP_SHMEM_RDY_MAX_RETRIES;
+	u8 msec = QED_MCP_SHMEM_RDY_ITER_MS;
 	u32 drv_mb_offsize, mfw_mb_offsize;
 	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
 
 	p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
-	if (!p_info->public_base)
-		return 0;
+	if (!p_info->public_base) {
+		DP_NOTICE(p_hwfn,
+			  "The address of the MCP scratch-pad is not configured\n");
+		return -EINVAL;
+	}
 
 	p_info->public_base |= GRCBASE_MCP;
 
+	/* Get the MFW MB address and number of supported messages */
+	mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
+				SECTION_OFFSIZE_ADDR(p_info->public_base,
+						     PUBLIC_MFW_MB));
+	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
+	p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt,
+					    p_info->mfw_mb_addr +
+					    offsetof(struct public_mfw_mb,
+						     sup_msgs));
+
+	/* The driver can notify that there was an MCP reset, and might read the
+	 * SHMEM values before the MFW has completed initializing them.
+	 * To avoid this, the "sup_msgs" field in the MFW mailbox is used as a
+	 * data ready indication.
+	 */
+	while (!p_info->mfw_mb_length && --cnt) {
+		msleep(msec);
+		p_info->mfw_mb_length =
+			(u16)qed_rd(p_hwfn, p_ptt,
+				    p_info->mfw_mb_addr +
+				    offsetof(struct public_mfw_mb, sup_msgs));
+	}
+
+	if (!cnt) {
+		DP_NOTICE(p_hwfn,
+			  "Failed to get the SHMEM ready notification after %d msec\n",
+			  QED_MCP_SHMEM_RDY_MAX_RETRIES * msec);
+		return -EBUSY;
+	}
+
 	/* Calculate the driver and MFW mailbox address */
 	drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
 				SECTION_OFFSIZE_ADDR(p_info->public_base,
@@ -204,13 +243,6 @@ static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
204 "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n", 243 "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
205 drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id); 244 drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
206 245
207 /* Set the MFW MB address */
208 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
209 SECTION_OFFSIZE_ADDR(p_info->public_base,
210 PUBLIC_MFW_MB));
211 p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
212 p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr);
213
214 /* Get the current driver mailbox sequence before sending 246 /* Get the current driver mailbox sequence before sending
215 * the first command 247 * the first command
216 */ 248 */
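The new SHMEM-ready wait added above is a bounded poll: re-read a "data ready" word, sleep between attempts, and give up with -EBUSY once the retry budget is spent. A self-contained sketch of that shape, where read_ready_word() and fake_ready_after are stand-ins for the qed_rd() of sup_msgs:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

#define RDY_MAX_RETRIES	20
#define RDY_ITER_MS	50	/* 20 * 50 ms = 1 s budget, as in the patch */

static unsigned int fake_ready_after = 3;	/* hypothetical stand-in */

static unsigned int read_ready_word(void)
{
	return fake_ready_after ? (--fake_ready_after ? 0 : 1) : 1;
}

static int wait_shmem_ready(void)
{
	unsigned int cnt = RDY_MAX_RETRIES;
	unsigned int val = read_ready_word();

	/* re-read with a sleep between attempts, bounded by cnt */
	while (!val && --cnt) {
		usleep(RDY_ITER_MS * 1000);
		val = read_ready_word();
	}
	if (!cnt)
		return -EBUSY;	/* the other side never initialized it */
	return 0;
}

int main(void)
{
	printf("wait_shmem_ready() = %d\n", wait_shmem_ready());
	return 0;
}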
@@ -285,9 +317,15 @@ static void qed_mcp_reread_offsets(struct qed_hwfn *p_hwfn,
 
 int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-	u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0;
+	u32 org_mcp_reset_seq, seq, delay = QED_MCP_RESP_ITER_US, cnt = 0;
 	int rc = 0;
 
+	if (p_hwfn->mcp_info->b_block_cmd) {
+		DP_NOTICE(p_hwfn,
+			  "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
+		return -EBUSY;
+	}
+
 	/* Ensure that only a single thread is accessing the mailbox */
 	spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
 
@@ -413,14 +451,41 @@ static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
 		    (p_mb_params->cmd | seq_num), p_mb_params->param);
 }
 
+static void qed_mcp_cmd_set_blocking(struct qed_hwfn *p_hwfn, bool block_cmd)
+{
+	p_hwfn->mcp_info->b_block_cmd = block_cmd;
+
+	DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n",
+		block_cmd ? "Block" : "Unblock");
+}
+
+static void qed_mcp_print_cpu_info(struct qed_hwfn *p_hwfn,
+				   struct qed_ptt *p_ptt)
+{
+	u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;
+	u32 delay = QED_MCP_RESP_ITER_US;
+
+	cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+	cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
+	cpu_pc_0 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
+	udelay(delay);
+	cpu_pc_1 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
+	udelay(delay);
+	cpu_pc_2 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
+
+	DP_NOTICE(p_hwfn,
+		  "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
+		  cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
+}
+
 static int
 _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
 		       struct qed_ptt *p_ptt,
 		       struct qed_mcp_mb_params *p_mb_params,
-		       u32 max_retries, u32 delay)
+		       u32 max_retries, u32 usecs)
 {
+	u32 cnt = 0, msecs = DIV_ROUND_UP(usecs, 1000);
 	struct qed_mcp_cmd_elem *p_cmd_elem;
-	u32 cnt = 0;
 	u16 seq_num;
 	int rc = 0;
 
@@ -443,7 +508,11 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
 			goto err;
 
 		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
-		udelay(delay);
+
+		if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
+			msleep(msecs);
+		else
+			udelay(usecs);
 	} while (++cnt < max_retries);
 
 	if (cnt >= max_retries) {
@@ -472,7 +541,11 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
 	 * The spinlock stays locked until the list element is removed.
 	 */
 
-	udelay(delay);
+	if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
+		msleep(msecs);
+	else
+		udelay(usecs);
+
 	spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
 
 	if (p_cmd_elem->b_is_completed)
@@ -491,11 +564,15 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
 		DP_NOTICE(p_hwfn,
 			  "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
 			  p_mb_params->cmd, p_mb_params->param);
+		qed_mcp_print_cpu_info(p_hwfn, p_ptt);
 
 		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
 		qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
 		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
 
+		if (!QED_MB_FLAGS_IS_SET(p_mb_params, AVOID_BLOCK))
+			qed_mcp_cmd_set_blocking(p_hwfn, true);
+
 		return -EAGAIN;
 	}
 
@@ -507,7 +584,7 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
507 "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n", 584 "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
508 p_mb_params->mcp_resp, 585 p_mb_params->mcp_resp,
509 p_mb_params->mcp_param, 586 p_mb_params->mcp_param,
510 (cnt * delay) / 1000, (cnt * delay) % 1000); 587 (cnt * usecs) / 1000, (cnt * usecs) % 1000);
511 588
512 /* Clear the sequence number from the MFW response */ 589 /* Clear the sequence number from the MFW response */
513 p_mb_params->mcp_resp &= FW_MSG_CODE_MASK; 590 p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;
@@ -525,7 +602,7 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
 {
 	size_t union_data_size = sizeof(union drv_union_data);
 	u32 max_retries = QED_DRV_MB_MAX_RETRIES;
-	u32 delay = CHIP_MCP_RESP_ITER_US;
+	u32 usecs = QED_MCP_RESP_ITER_US;
 
 	/* MCP not initialized */
 	if (!qed_mcp_is_init(p_hwfn)) {
@@ -533,6 +610,13 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
 		return -EBUSY;
 	}
 
+	if (p_hwfn->mcp_info->b_block_cmd) {
+		DP_NOTICE(p_hwfn,
+			  "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
+			  p_mb_params->cmd, p_mb_params->param);
+		return -EBUSY;
+	}
+
 	if (p_mb_params->data_src_size > union_data_size ||
 	    p_mb_params->data_dst_size > union_data_size) {
 		DP_ERR(p_hwfn,
@@ -542,8 +626,13 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
 		return -EINVAL;
 	}
 
+	if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) {
+		max_retries = DIV_ROUND_UP(max_retries, 1000);
+		usecs *= 1000;
+	}
+
 	return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
-				      delay);
+				      usecs);
 }
 
 int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
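When CAN_SLEEP is set, the hunk above divides the retry count by 1000 and multiplies the per-iteration delay by 1000, so the total wait budget is unchanged; only the granularity moves from udelay() to msleep(). A quick standalone arithmetic check of that invariant, using the constants from this patch:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int max_retries = 500 * 1000;	/* QED_DRV_MB_MAX_RETRIES */
	unsigned int usecs = 10;		/* QED_MCP_RESP_ITER_US */

	printf("busy-wait budget: %u us\n", max_retries * usecs);

	/* CAN_SLEEP path: fewer, longer iterations, same product */
	max_retries = DIV_ROUND_UP(max_retries, 1000);	/* 500 iterations */
	usecs *= 1000;					/* 10 ms each */
	printf("sleeping budget:  %u us\n", max_retries * usecs);
	return 0;	/* both lines print 5000000 us */
}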
@@ -761,6 +850,7 @@ __qed_mcp_load_req(struct qed_hwfn *p_hwfn,
 	mb_params.data_src_size = sizeof(load_req);
 	mb_params.p_data_dst = &load_rsp;
 	mb_params.data_dst_size = sizeof(load_rsp);
+	mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;
 
 	DP_VERBOSE(p_hwfn, QED_MSG_SP,
 		   "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
@@ -982,7 +1072,8 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
 
 int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-	u32 wol_param, mcp_resp, mcp_param;
+	struct qed_mcp_mb_params mb_params;
+	u32 wol_param;
 
 	switch (p_hwfn->cdev->wol_config) {
 	case QED_OV_WOL_DISABLED:
@@ -1000,8 +1091,12 @@ int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 		wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
 	}
 
-	return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param,
-			   &mcp_resp, &mcp_param);
+	memset(&mb_params, 0, sizeof(mb_params));
+	mb_params.cmd = DRV_MSG_CODE_UNLOAD_REQ;
+	mb_params.param = wol_param;
+	mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;
+
+	return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
 }
 
 int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
@@ -2077,31 +2172,65 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
 	return rc;
 }
 
+/* A maximal 100 msec waiting time for the MCP to halt */
+#define QED_MCP_HALT_SLEEP_MS		10
+#define QED_MCP_HALT_MAX_RETRIES	10
+
 int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-	u32 resp = 0, param = 0;
+	u32 resp = 0, param = 0, cpu_state, cnt = 0;
 	int rc;
 
 	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
 			 &param);
-	if (rc)
+	if (rc) {
 		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+		return rc;
+	}
 
-	return rc;
+	do {
+		msleep(QED_MCP_HALT_SLEEP_MS);
+		cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
+		if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
+			break;
+	} while (++cnt < QED_MCP_HALT_MAX_RETRIES);
+
+	if (cnt == QED_MCP_HALT_MAX_RETRIES) {
+		DP_NOTICE(p_hwfn,
+			  "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
+			  qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
+		return -EBUSY;
+	}
+
+	qed_mcp_cmd_set_blocking(p_hwfn, true);
+
+	return 0;
 }
 
+#define QED_MCP_RESUME_SLEEP_MS	10
+
 int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-	u32 value, cpu_mode;
+	u32 cpu_mode, cpu_state;
 
 	qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
 
-	value = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
-	value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
-	qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
 	cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+	cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
+	qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);
+	msleep(QED_MCP_RESUME_SLEEP_MS);
+	cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
 
-	return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -EAGAIN : 0;
+	if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
+		DP_NOTICE(p_hwfn,
+			  "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
+			  cpu_mode, cpu_state);
+		return -EBUSY;
+	}
+
+	qed_mcp_cmd_set_blocking(p_hwfn, false);
+
+	return 0;
 }
 
 int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 047976d5c6e9..85e6b3989e7a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -635,11 +635,14 @@ struct qed_mcp_info {
 	 */
 	spinlock_t				cmd_lock;
 
+	/* Flag to indicate whether sending a MFW mailbox command is blocked */
+	bool					b_block_cmd;
+
 	/* Spinlock used for syncing SW link-changes and link-changes
 	 * originating from attention context.
 	 */
 	spinlock_t				link_lock;
-	bool					block_mb_sending;
+
 	u32					public_base;
 	u32					drv_mb_addr;
 	u32					mfw_mb_addr;
@@ -660,14 +663,20 @@ struct qed_mcp_info {
 };
 
 struct qed_mcp_mb_params {
-	u32 cmd;
-	u32 param;
-	void *p_data_src;
-	u8 data_src_size;
-	void *p_data_dst;
-	u8 data_dst_size;
-	u32 mcp_resp;
-	u32 mcp_param;
+	u32			cmd;
+	u32			param;
+	void			*p_data_src;
+	void			*p_data_dst;
+	u8			data_src_size;
+	u8			data_dst_size;
+	u32			mcp_resp;
+	u32			mcp_param;
+	u32			flags;
+#define QED_MB_FLAG_CAN_SLEEP	(0x1 << 0)
+#define QED_MB_FLAG_AVOID_BLOCK	(0x1 << 1)
+#define QED_MB_FLAGS_IS_SET(params, flag) \
+	({ typeof(params) __params = (params); \
+	   (__params && (__params->flags & QED_MB_FLAG_ ## flag)); })
 };
 
 struct qed_drv_tlv_hdr {
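QED_MB_FLAGS_IS_SET above is a GNU C statement expression: evaluating params once into __params makes the macro single-evaluation and NULL-safe. A standalone illustration, where the mb_params struct is a stripped-down stand-in for the driver's:

#include <stdio.h>

#define FLAG_CAN_SLEEP		(0x1 << 0)
#define FLAG_AVOID_BLOCK	(0x1 << 1)

struct mb_params {
	unsigned int flags;
};

/* __params is assigned once, so the argument has no double evaluation,
 * and the && short-circuits safely on a NULL pointer. */
#define MB_FLAGS_IS_SET(params, flag) \
	({ typeof(params) __params = (params); \
	   (__params && (__params->flags & FLAG_ ## flag)); })

int main(void)
{
	struct mb_params p = { .flags = FLAG_CAN_SLEEP };

	printf("%d\n", MB_FLAGS_IS_SET(&p, CAN_SLEEP));	/* 1 */
	printf("%d\n", MB_FLAGS_IS_SET(&p, AVOID_BLOCK));	/* 0 */
	printf("%d\n", MB_FLAGS_IS_SET((struct mb_params *)NULL,
				       CAN_SLEEP));		/* 0: NULL-safe */
	return 0;
}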
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index d8ad2dcad8d5..f736f70956fd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -562,8 +562,10 @@
 	0
 #define MCP_REG_CPU_STATE \
 	0xe05004UL
+#define MCP_REG_CPU_STATE_SOFT_HALTED	(0x1UL << 10)
 #define MCP_REG_CPU_EVENT_MASK \
 	0xe05008UL
+#define MCP_REG_CPU_PROGRAM_COUNTER	0xe0501cUL
 #define PGLUE_B_REG_PF_BAR0_SIZE \
 	0x2aae60UL
 #define PGLUE_B_REG_PF_BAR1_SIZE \
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index 9673d19308e6..b16ce7d93caf 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -2006,18 +2006,16 @@ unlock:
 static int qede_parse_actions(struct qede_dev *edev,
 			      struct tcf_exts *exts)
 {
-	int rc = -EINVAL, num_act = 0;
+	int rc = -EINVAL, num_act = 0, i;
 	const struct tc_action *a;
 	bool is_drop = false;
-	LIST_HEAD(actions);
 
 	if (!tcf_exts_has_actions(exts)) {
 		DP_NOTICE(edev, "No tc actions received\n");
 		return rc;
 	}
 
-	tcf_exts_to_list(exts, &actions);
-	list_for_each_entry(a, &actions, list) {
+	tcf_exts_for_each_action(i, a, exts) {
 		num_act++;
 
 		if (is_tcf_gact_shot(a))
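The qede_filter.c conversion swaps a temporary action list for an indexed for-each macro. A toy version of such a macro over a plain array; the exts/action types here are simplified stand-ins, not the kernel tc API:

#include <stdio.h>

struct action { const char *name; };

struct exts {
	int nr_actions;
	struct action *actions;
};

/* Walk the array, binding a to each element; the assignment in the
 * condition yields a non-NULL pointer, so the loop runs nr_actions times. */
#define exts_for_each_action(i, a, e) \
	for ((i) = 0; (i) < (e)->nr_actions && ((a) = &(e)->actions[(i)]); (i)++)

int main(void)
{
	struct action acts[] = { { "drop" }, { "mirred" } };
	struct exts e = { .nr_actions = 2, .actions = acts };
	struct action *a;
	int i, num_act = 0;

	exts_for_each_action(i, a, &e) {
		num_act++;
		printf("action %d: %s\n", i, a->name);
	}
	printf("num_act = %d\n", num_act);
	return 0;
}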
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 353f1c129af1..059ba9429e51 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -2384,26 +2384,20 @@ static int qlge_update_hw_vlan_features(struct net_device *ndev,
 	return status;
 }
 
-static netdev_features_t qlge_fix_features(struct net_device *ndev,
-	netdev_features_t features)
-{
-	int err;
-
-	/* Update the behavior of vlan accel in the adapter */
-	err = qlge_update_hw_vlan_features(ndev, features);
-	if (err)
-		return err;
-
-	return features;
-}
-
 static int qlge_set_features(struct net_device *ndev,
 	netdev_features_t features)
 {
 	netdev_features_t changed = ndev->features ^ features;
+	int err;
+
+	if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
+		/* Update the behavior of vlan accel in the adapter */
+		err = qlge_update_hw_vlan_features(ndev, features);
+		if (err)
+			return err;
 
-	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
 		qlge_vlan_mode(ndev, features);
+	}
 
 	return 0;
 }
@@ -4719,7 +4713,6 @@ static const struct net_device_ops qlge_netdev_ops = {
 	.ndo_set_mac_address	= qlge_set_mac_address,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_tx_timeout		= qlge_tx_timeout,
-	.ndo_fix_features	= qlge_fix_features,
 	.ndo_set_features	= qlge_set_features,
 	.ndo_vlan_rx_add_vid	= qlge_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid	= qlge_vlan_rx_kill_vid,
diff --git a/drivers/net/ethernet/qualcomm/qca_7k.c b/drivers/net/ethernet/qualcomm/qca_7k.c
index ffe7a16bdfc8..6c8543fb90c0 100644
--- a/drivers/net/ethernet/qualcomm/qca_7k.c
+++ b/drivers/net/ethernet/qualcomm/qca_7k.c
@@ -45,34 +45,33 @@ qcaspi_read_register(struct qcaspi *qca, u16 reg, u16 *result)
 {
 	__be16 rx_data;
 	__be16 tx_data;
-	struct spi_transfer *transfer;
-	struct spi_message *msg;
+	struct spi_transfer transfer[2];
+	struct spi_message msg;
 	int ret;
 
+	memset(transfer, 0, sizeof(transfer));
+
+	spi_message_init(&msg);
+
 	tx_data = cpu_to_be16(QCA7K_SPI_READ | QCA7K_SPI_INTERNAL | reg);
+	*result = 0;
+
+	transfer[0].tx_buf = &tx_data;
+	transfer[0].len = QCASPI_CMD_LEN;
+	transfer[1].rx_buf = &rx_data;
+	transfer[1].len = QCASPI_CMD_LEN;
+
+	spi_message_add_tail(&transfer[0], &msg);
 
 	if (qca->legacy_mode) {
-		msg = &qca->spi_msg1;
-		transfer = &qca->spi_xfer1;
-		transfer->tx_buf = &tx_data;
-		transfer->rx_buf = NULL;
-		transfer->len = QCASPI_CMD_LEN;
-		spi_sync(qca->spi_dev, msg);
-	} else {
-		msg = &qca->spi_msg2;
-		transfer = &qca->spi_xfer2[0];
-		transfer->tx_buf = &tx_data;
-		transfer->rx_buf = NULL;
-		transfer->len = QCASPI_CMD_LEN;
-		transfer = &qca->spi_xfer2[1];
+		spi_sync(qca->spi_dev, &msg);
+		spi_message_init(&msg);
 	}
-	transfer->tx_buf = NULL;
-	transfer->rx_buf = &rx_data;
-	transfer->len = QCASPI_CMD_LEN;
-	ret = spi_sync(qca->spi_dev, msg);
+	spi_message_add_tail(&transfer[1], &msg);
+	ret = spi_sync(qca->spi_dev, &msg);
 
 	if (!ret)
-		ret = msg->status;
+		ret = msg.status;
 
 	if (ret)
 		qcaspi_spi_error(qca);
@@ -86,35 +85,32 @@ int
 qcaspi_write_register(struct qcaspi *qca, u16 reg, u16 value)
 {
 	__be16 tx_data[2];
-	struct spi_transfer *transfer;
-	struct spi_message *msg;
+	struct spi_transfer transfer[2];
+	struct spi_message msg;
 	int ret;
 
+	memset(&transfer, 0, sizeof(transfer));
+
+	spi_message_init(&msg);
+
 	tx_data[0] = cpu_to_be16(QCA7K_SPI_WRITE | QCA7K_SPI_INTERNAL | reg);
 	tx_data[1] = cpu_to_be16(value);
 
+	transfer[0].tx_buf = &tx_data[0];
+	transfer[0].len = QCASPI_CMD_LEN;
+	transfer[1].tx_buf = &tx_data[1];
+	transfer[1].len = QCASPI_CMD_LEN;
+
+	spi_message_add_tail(&transfer[0], &msg);
 	if (qca->legacy_mode) {
-		msg = &qca->spi_msg1;
-		transfer = &qca->spi_xfer1;
-		transfer->tx_buf = &tx_data[0];
-		transfer->rx_buf = NULL;
-		transfer->len = QCASPI_CMD_LEN;
-		spi_sync(qca->spi_dev, msg);
-	} else {
-		msg = &qca->spi_msg2;
-		transfer = &qca->spi_xfer2[0];
-		transfer->tx_buf = &tx_data[0];
-		transfer->rx_buf = NULL;
-		transfer->len = QCASPI_CMD_LEN;
-		transfer = &qca->spi_xfer2[1];
+		spi_sync(qca->spi_dev, &msg);
+		spi_message_init(&msg);
 	}
-	transfer->tx_buf = &tx_data[1];
-	transfer->rx_buf = NULL;
-	transfer->len = QCASPI_CMD_LEN;
-	ret = spi_sync(qca->spi_dev, msg);
+	spi_message_add_tail(&transfer[1], &msg);
+	ret = spi_sync(qca->spi_dev, &msg);
 
 	if (!ret)
-		ret = msg->status;
+		ret = msg.status;
 
 	if (ret)
 		qcaspi_spi_error(qca);
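The qca_7k rework builds the spi_message and its transfers on the stack for every call instead of reusing buffers cached in struct qcaspi, which could be corrupted by overlapping callers. A toy model of the init/add_tail pattern with a hand-rolled singly linked list (not the real SPI API; all types here are simplified stand-ins):

#include <stdio.h>
#include <string.h>

struct xfer {
	const void *tx_buf;
	void *rx_buf;
	unsigned int len;
	struct xfer *next;
};

struct message {
	struct xfer *head, *tail;
	unsigned int total_len;
};

static void message_init(struct message *m)
{
	memset(m, 0, sizeof(*m));
}

static void message_add_tail(struct xfer *x, struct message *m)
{
	x->next = NULL;
	if (m->tail)
		m->tail->next = x;
	else
		m->head = x;
	m->tail = x;
	m->total_len += x->len;
}

int main(void)
{
	struct xfer t[2];
	struct message m;
	unsigned short cmd = 0xC000;
	unsigned char payload[4] = { 1, 2, 3, 4 };

	memset(t, 0, sizeof(t));	/* as the patch memsets spi_transfer */
	message_init(&m);

	t[0].tx_buf = &cmd;	t[0].len = sizeof(cmd);
	t[1].tx_buf = payload;	t[1].len = sizeof(payload);
	message_add_tail(&t[0], &m);
	message_add_tail(&t[1], &m);

	/* everything lives on this stack frame: reentrant by construction */
	printf("message carries %u bytes in 2 transfers\n", m.total_len);
	return 0;
}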
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 206f0266463e..66b775d462fd 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -99,22 +99,24 @@ static u32
 qcaspi_write_burst(struct qcaspi *qca, u8 *src, u32 len)
 {
 	__be16 cmd;
-	struct spi_message *msg = &qca->spi_msg2;
-	struct spi_transfer *transfer = &qca->spi_xfer2[0];
+	struct spi_message msg;
+	struct spi_transfer transfer[2];
 	int ret;
 
+	memset(&transfer, 0, sizeof(transfer));
+	spi_message_init(&msg);
+
 	cmd = cpu_to_be16(QCA7K_SPI_WRITE | QCA7K_SPI_EXTERNAL);
-	transfer->tx_buf = &cmd;
-	transfer->rx_buf = NULL;
-	transfer->len = QCASPI_CMD_LEN;
-	transfer = &qca->spi_xfer2[1];
-	transfer->tx_buf = src;
-	transfer->rx_buf = NULL;
-	transfer->len = len;
+	transfer[0].tx_buf = &cmd;
+	transfer[0].len = QCASPI_CMD_LEN;
+	transfer[1].tx_buf = src;
+	transfer[1].len = len;
 
-	ret = spi_sync(qca->spi_dev, msg);
+	spi_message_add_tail(&transfer[0], &msg);
+	spi_message_add_tail(&transfer[1], &msg);
+	ret = spi_sync(qca->spi_dev, &msg);
 
-	if (ret || (msg->actual_length != QCASPI_CMD_LEN + len)) {
+	if (ret || (msg.actual_length != QCASPI_CMD_LEN + len)) {
 		qcaspi_spi_error(qca);
 		return 0;
 	}
@@ -125,17 +127,20 @@ qcaspi_write_burst(struct qcaspi *qca, u8 *src, u32 len)
 static u32
 qcaspi_write_legacy(struct qcaspi *qca, u8 *src, u32 len)
 {
-	struct spi_message *msg = &qca->spi_msg1;
-	struct spi_transfer *transfer = &qca->spi_xfer1;
+	struct spi_message msg;
+	struct spi_transfer transfer;
 	int ret;
 
-	transfer->tx_buf = src;
-	transfer->rx_buf = NULL;
-	transfer->len = len;
+	memset(&transfer, 0, sizeof(transfer));
+	spi_message_init(&msg);
+
+	transfer.tx_buf = src;
+	transfer.len = len;
 
-	ret = spi_sync(qca->spi_dev, msg);
+	spi_message_add_tail(&transfer, &msg);
+	ret = spi_sync(qca->spi_dev, &msg);
 
-	if (ret || (msg->actual_length != len)) {
+	if (ret || (msg.actual_length != len)) {
 		qcaspi_spi_error(qca);
 		return 0;
 	}
@@ -146,23 +151,25 @@ qcaspi_write_legacy(struct qcaspi *qca, u8 *src, u32 len)
 static u32
 qcaspi_read_burst(struct qcaspi *qca, u8 *dst, u32 len)
 {
-	struct spi_message *msg = &qca->spi_msg2;
+	struct spi_message msg;
 	__be16 cmd;
-	struct spi_transfer *transfer = &qca->spi_xfer2[0];
+	struct spi_transfer transfer[2];
 	int ret;
 
+	memset(&transfer, 0, sizeof(transfer));
+	spi_message_init(&msg);
+
 	cmd = cpu_to_be16(QCA7K_SPI_READ | QCA7K_SPI_EXTERNAL);
-	transfer->tx_buf = &cmd;
-	transfer->rx_buf = NULL;
-	transfer->len = QCASPI_CMD_LEN;
-	transfer = &qca->spi_xfer2[1];
-	transfer->tx_buf = NULL;
-	transfer->rx_buf = dst;
-	transfer->len = len;
+	transfer[0].tx_buf = &cmd;
+	transfer[0].len = QCASPI_CMD_LEN;
+	transfer[1].rx_buf = dst;
+	transfer[1].len = len;
 
-	ret = spi_sync(qca->spi_dev, msg);
+	spi_message_add_tail(&transfer[0], &msg);
+	spi_message_add_tail(&transfer[1], &msg);
+	ret = spi_sync(qca->spi_dev, &msg);
 
-	if (ret || (msg->actual_length != QCASPI_CMD_LEN + len)) {
+	if (ret || (msg.actual_length != QCASPI_CMD_LEN + len)) {
 		qcaspi_spi_error(qca);
 		return 0;
 	}
@@ -173,17 +180,20 @@ qcaspi_read_burst(struct qcaspi *qca, u8 *dst, u32 len)
 static u32
 qcaspi_read_legacy(struct qcaspi *qca, u8 *dst, u32 len)
 {
-	struct spi_message *msg = &qca->spi_msg1;
-	struct spi_transfer *transfer = &qca->spi_xfer1;
+	struct spi_message msg;
+	struct spi_transfer transfer;
 	int ret;
 
-	transfer->tx_buf = NULL;
-	transfer->rx_buf = dst;
-	transfer->len = len;
+	memset(&transfer, 0, sizeof(transfer));
+	spi_message_init(&msg);
 
-	ret = spi_sync(qca->spi_dev, msg);
+	transfer.rx_buf = dst;
+	transfer.len = len;
 
-	if (ret || (msg->actual_length != len)) {
+	spi_message_add_tail(&transfer, &msg);
+	ret = spi_sync(qca->spi_dev, &msg);
+
+	if (ret || (msg.actual_length != len)) {
 		qcaspi_spi_error(qca);
 		return 0;
 	}
@@ -195,19 +205,23 @@ static int
 qcaspi_tx_cmd(struct qcaspi *qca, u16 cmd)
 {
 	__be16 tx_data;
-	struct spi_message *msg = &qca->spi_msg1;
-	struct spi_transfer *transfer = &qca->spi_xfer1;
+	struct spi_message msg;
+	struct spi_transfer transfer;
 	int ret;
 
+	memset(&transfer, 0, sizeof(transfer));
+
+	spi_message_init(&msg);
+
 	tx_data = cpu_to_be16(cmd);
-	transfer->len = sizeof(tx_data);
-	transfer->tx_buf = &tx_data;
-	transfer->rx_buf = NULL;
+	transfer.len = sizeof(cmd);
+	transfer.tx_buf = &tx_data;
+	spi_message_add_tail(&transfer, &msg);
 
-	ret = spi_sync(qca->spi_dev, msg);
+	ret = spi_sync(qca->spi_dev, &msg);
 
 	if (!ret)
-		ret = msg->status;
+		ret = msg.status;
 
 	if (ret)
 		qcaspi_spi_error(qca);
@@ -835,16 +849,6 @@ qcaspi_netdev_setup(struct net_device *dev)
 	qca = netdev_priv(dev);
 	memset(qca, 0, sizeof(struct qcaspi));
 
-	memset(&qca->spi_xfer1, 0, sizeof(struct spi_transfer));
-	memset(&qca->spi_xfer2, 0, sizeof(struct spi_transfer) * 2);
-
-	spi_message_init(&qca->spi_msg1);
-	spi_message_add_tail(&qca->spi_xfer1, &qca->spi_msg1);
-
-	spi_message_init(&qca->spi_msg2);
-	spi_message_add_tail(&qca->spi_xfer2[0], &qca->spi_msg2);
-	spi_message_add_tail(&qca->spi_xfer2[1], &qca->spi_msg2);
-
 	memset(&qca->txr, 0, sizeof(qca->txr));
 	qca->txr.count = TX_RING_MAX_LEN;
 }
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h
index fc4beb1b32d1..fc0e98726b36 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.h
+++ b/drivers/net/ethernet/qualcomm/qca_spi.h
@@ -83,11 +83,6 @@ struct qcaspi {
 	struct tx_ring txr;
 	struct qcaspi_stats stats;
 
-	struct spi_message spi_msg1;
-	struct spi_message spi_msg2;
-	struct spi_transfer spi_xfer1;
-	struct spi_transfer spi_xfer2[2];
-
 	u8 *rx_buffer;
 	u32 buffer_size;
 	u8 sync;
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 0efa977c422d..bb529ff2ca81 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -13,6 +13,7 @@
 #include <linux/pci.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
+#include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/ethtool.h>
 #include <linux/phy.h>
@@ -218,6 +219,7 @@ static const struct pci_device_id rtl8169_pci_tbl[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8161), 0, 0, RTL_CFG_1 },
 	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8167), 0, 0, RTL_CFG_0 },
 	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8168), 0, 0, RTL_CFG_1 },
+	{ PCI_DEVICE(PCI_VENDOR_ID_NCUBE,	0x8168), 0, 0, RTL_CFG_1 },
 	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8169), 0, 0, RTL_CFG_0 },
 	{ PCI_VENDOR_ID_DLINK,			0x4300,
 	  PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 },
@@ -630,7 +632,7 @@ struct rtl8169_tc_offsets {
 };
 
 enum rtl_flag {
-	RTL_FLAG_TASK_ENABLED,
+	RTL_FLAG_TASK_ENABLED = 0,
 	RTL_FLAG_TASK_SLOW_PENDING,
 	RTL_FLAG_TASK_RESET_PENDING,
 	RTL_FLAG_MAX
@@ -664,6 +666,7 @@ struct rtl8169_private {
 
 	u16 event_slow;
 	const struct rtl_coalesce_info *coalesce_info;
+	struct clk *clk;
 
 	struct mdio_ops {
 		void (*write)(struct rtl8169_private *, int, int);
@@ -4522,7 +4525,7 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
 	rtl_hw_reset(tp);
 }
 
-static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
+static void rtl_set_tx_config_registers(struct rtl8169_private *tp)
 {
 	/* Set DMA burst size and Interframe Gap Time */
 	RTL_W32(tp, TxConfig, (TX_DMA_BURST << TxDMAShift) |
@@ -4633,12 +4636,14 @@ static void rtl_hw_start(struct rtl8169_private *tp)
 
 	rtl_set_rx_max_size(tp);
 	rtl_set_rx_tx_desc_registers(tp);
-	rtl_set_rx_tx_config_registers(tp);
 	RTL_W8(tp, Cfg9346, Cfg9346_Lock);
 
 	/* Initially a 10 us delay. Turned it into a PCI commit. - FR */
 	RTL_R8(tp, IntrMask);
 	RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
+	rtl_init_rxcfg(tp);
+	rtl_set_tx_config_registers(tp);
+
 	rtl_set_rx_mode(tp->dev);
 	/* no early-rx interrupts */
 	RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000);
@@ -4772,12 +4777,14 @@ static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable)
 static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
 {
 	if (enable) {
-		RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn);
 		RTL_W8(tp, Config5, RTL_R8(tp, Config5) | ASPM_en);
+		RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn);
 	} else {
 		RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
 		RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
 	}
+
+	udelay(10);
 }
 
 static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
@@ -5622,6 +5629,8 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
 
 static void rtl_hw_start_8106(struct rtl8169_private *tp)
 {
+	rtl_hw_aspm_clkreq_enable(tp, false);
+
 	/* Force LAN exit from ASPM if Rx/Tx are not idle */
 	RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);
 
@@ -5630,6 +5639,7 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
 	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
 
 	rtl_pcie_state_l2l3_enable(tp, false);
+	rtl_hw_aspm_clkreq_enable(tp, true);
 }
 
 static void rtl_hw_start_8101(struct rtl8169_private *tp)
@@ -6652,7 +6662,8 @@ static int rtl8169_close(struct net_device *dev)
 	rtl8169_update_counters(tp);
 
 	rtl_lock_work(tp);
-	clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
+	/* Clear all task flags */
+	bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
 
 	rtl8169_down(dev);
 	rtl_unlock_work(tp);
@@ -6835,7 +6846,9 @@ static void rtl8169_net_suspend(struct net_device *dev)
 
 	rtl_lock_work(tp);
 	napi_disable(&tp->napi);
-	clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
+	/* Clear all task flags */
+	bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
+
 	rtl_unlock_work(tp);
 
 	rtl_pll_power_down(tp);
@@ -7251,6 +7264,11 @@ static int rtl_jumbo_max(struct rtl8169_private *tp)
 	}
 }
 
+static void rtl_disable_clk(void *data)
+{
+	clk_disable_unprepare(data);
+}
+
 static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
@@ -7271,6 +7289,32 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
 	tp->supports_gmii = cfg->has_gmii;
 
+	/* Get the *optional* external "ether_clk" used on some boards */
+	tp->clk = devm_clk_get(&pdev->dev, "ether_clk");
+	if (IS_ERR(tp->clk)) {
+		rc = PTR_ERR(tp->clk);
+		if (rc == -ENOENT) {
+			/* clk-core allows NULL (for suspend / resume) */
+			tp->clk = NULL;
+		} else if (rc == -EPROBE_DEFER) {
+			return rc;
+		} else {
+			dev_err(&pdev->dev, "failed to get clk: %d\n", rc);
+			return rc;
+		}
+	} else {
+		rc = clk_prepare_enable(tp->clk);
+		if (rc) {
+			dev_err(&pdev->dev, "failed to enable clk: %d\n", rc);
+			return rc;
+		}
+
+		rc = devm_add_action_or_reset(&pdev->dev, rtl_disable_clk,
+					      tp->clk);
+		if (rc)
+			return rc;
+	}
+
 	/* enable device (incl. PCI PM wakeup and hotplug setup) */
 	rc = pcim_enable_device(pdev);
 	if (rc < 0) {
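The r8169 probe change treats "ether_clk" as optional: -ENOENT degrades to a NULL clock, -EPROBE_DEFER propagates, and a successfully enabled clock gets a managed action so it is disabled automatically on teardown. A userspace caricature of that decision tree, where get_clk() and the atexit() cleanup are mere stand-ins for devm_clk_get() and devm_add_action_or_reset():

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct clk { const char *name; };

static struct clk ether_clk = { "ether_clk" };
static int fake_lookup_result;	/* 0 = found, else -errno (stand-in) */

static struct clk *get_clk(int *err)
{
	*err = fake_lookup_result;
	return *err ? NULL : &ether_clk;
}

static void disable_clk(void)	/* analog of the registered devm action */
{
	printf("clk disabled on teardown\n");
}

static int probe(void)
{
	int err;
	struct clk *clk = get_clk(&err);

	if (!clk) {
		if (err == -ENOENT)
			return 0;	/* optional: proceed without clock */
		return err;		/* defer or real failure: bail out */
	}
	printf("enabled %s\n", clk->name);
	atexit(disable_clk);	/* cleanup runs without an explicit remove() */
	return 0;
}

int main(void)
{
	fake_lookup_result = -ENOENT;
	printf("probe without clk: %d\n", probe());
	fake_lookup_result = 0;
	printf("probe with clk:    %d\n", probe());
	return 0;
}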
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index f3f7477043ce..bb0ebdfd4459 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 #
 # Renesas device configuration
 #
diff --git a/drivers/net/ethernet/renesas/Makefile b/drivers/net/ethernet/renesas/Makefile
index a05102a7df02..f21ab8c02af0 100644
--- a/drivers/net/ethernet/renesas/Makefile
+++ b/drivers/net/ethernet/renesas/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 #
 # Makefile for the Renesas device drivers.
 #
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index b81f4faf7b10..1470fc12282b 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Renesas Ethernet AVB device driver
  *
  * Copyright (C) 2014-2015 Renesas Electronics Corporation
@@ -5,10 +6,6 @@
  * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
  *
  * Based on the SuperH Ethernet driver
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License version 2,
- * as published by the Free Software Foundation.
  */
 
 #ifndef __RAVB_H__
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index c06f2df895c2..aff5516b781e 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Renesas Ethernet AVB device driver
  *
  * Copyright (C) 2014-2015 Renesas Electronics Corporation
@@ -5,10 +6,6 @@
  * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
  *
  * Based on the SuperH Ethernet driver
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License version 2,
- * as published by the Free Software Foundation.
  */
 
 #include <linux/cache.h>
diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c
index eede70ec37f8..0721b5c35d91 100644
--- a/drivers/net/ethernet/renesas/ravb_ptp.c
+++ b/drivers/net/ethernet/renesas/ravb_ptp.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /* PTP 1588 clock using the Renesas Ethernet AVB
  *
  * Copyright (C) 2013-2015 Renesas Electronics Corporation
  * Copyright (C) 2015 Renesas Solutions Corp.
  * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
  */
 
 #include "ravb.h"
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 5573199c4536..f27a0dc8c563 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*  SuperH Ethernet device driver
  *
  *  Copyright (C) 2014 Renesas Electronics Corporation
@@ -5,18 +6,6 @@
  *  Copyright (C) 2008-2014 Renesas Solutions Corp.
  *  Copyright (C) 2013-2017 Cogent Embedded, Inc.
  *  Copyright (C) 2014 Codethink Limited
- *
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms and conditions of the GNU General Public License,
- *  version 2, as published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- *  more details.
- *
- *  The full GNU General Public License is included in this distribution in
- *  the file called "COPYING".
  */
 
 #include <linux/module.h>
@@ -809,6 +798,41 @@ static struct sh_eth_cpu_data r8a77980_data = {
 	.magic		= 1,
 	.cexcr		= 1,
 };
+
+/* R7S9210 */
+static struct sh_eth_cpu_data r7s9210_data = {
+	.soft_reset	= sh_eth_soft_reset,
+
+	.set_duplex	= sh_eth_set_duplex,
+	.set_rate	= sh_eth_set_rate_rcar,
+
+	.register_type	= SH_ETH_REG_FAST_SH4,
+
+	.edtrr_trns	= EDTRR_TRNS_ETHER,
+	.ecsr_value	= ECSR_ICD,
+	.ecsipr_value	= ECSIPR_ICDIP,
+	.eesipr_value	= EESIPR_TWBIP | EESIPR_TABTIP | EESIPR_RABTIP |
+			  EESIPR_RFCOFIP | EESIPR_ECIIP | EESIPR_FTCIP |
+			  EESIPR_TDEIP | EESIPR_TFUFIP | EESIPR_FRIP |
+			  EESIPR_RDEIP | EESIPR_RFOFIP | EESIPR_CNDIP |
+			  EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP |
+			  EESIPR_RMAFIP | EESIPR_RRFIP | EESIPR_RTLFIP |
+			  EESIPR_RTSFIP | EESIPR_PREIP | EESIPR_CERFIP,
+
+	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO,
+	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
+			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
+
+	.fdr_value	= 0x0000070f,
+
+	.apr		= 1,
+	.mpr		= 1,
+	.tpauser	= 1,
+	.hw_swap	= 1,
+	.rpadir		= 1,
+	.no_ade		= 1,
+	.xdfar_rw	= 1,
+};
 #endif /* CONFIG_OF */
 
 static void sh_eth_set_rate_sh7724(struct net_device *ndev)
@@ -3132,6 +3156,7 @@ static const struct of_device_id sh_eth_match_table[] = {
 	{ .compatible = "renesas,ether-r8a7794", .data = &rcar_gen2_data },
 	{ .compatible = "renesas,gether-r8a77980", .data = &r8a77980_data },
 	{ .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data },
+	{ .compatible = "renesas,ether-r7s9210", .data = &r7s9210_data },
 	{ .compatible = "renesas,rcar-gen1-ether", .data = &rcar_gen1_data },
 	{ .compatible = "renesas,rcar-gen2-ether", .data = &rcar_gen2_data },
 	{ }
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index f94be99cf400..0c18650bbfe6 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*  SuperH Ethernet device driver
  *
  *  Copyright (C) 2006-2012 Nobuhiro Iwamatsu
  *  Copyright (C) 2008-2012 Renesas Solutions Corp.
- *
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms and conditions of the GNU General Public License,
- *  version 2, as published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- *  more details.
- *
- *  The full GNU General Public License is included in this distribution in
- *  the file called "COPYING".
  */
 
 #ifndef __SH_ETH_H__
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index edf20361ea5f..324049eebb9b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -33,7 +33,7 @@ config DWMAC_DWC_QOS_ETH
 	select PHYLIB
 	select CRC32
 	select MII
-	depends on OF && COMMON_CLK && HAS_DMA
+	depends on OF && HAS_DMA
 	help
 	  Support for chips using the snps,dwc-qos-ethernet.txt DT binding.
 
@@ -57,7 +57,7 @@ config DWMAC_ANARION
 config DWMAC_IPQ806X
 	tristate "QCA IPQ806x DWMAC support"
 	default ARCH_QCOM
-	depends on OF && COMMON_CLK && (ARCH_QCOM || COMPILE_TEST)
+	depends on OF && (ARCH_QCOM || COMPILE_TEST)
 	select MFD_SYSCON
 	help
 	  Support for QCA IPQ806X DWMAC Ethernet.
@@ -100,7 +100,7 @@ config DWMAC_OXNAS
 config DWMAC_ROCKCHIP
 	tristate "Rockchip dwmac support"
 	default ARCH_ROCKCHIP
-	depends on OF && COMMON_CLK && (ARCH_ROCKCHIP || COMPILE_TEST)
+	depends on OF && (ARCH_ROCKCHIP || COMPILE_TEST)
 	select MFD_SYSCON
 	help
 	  Support for Ethernet controller on Rockchip RK3288 SoC.
@@ -110,7 +110,7 @@ config DWMAC_ROCKCHIP
 
 config DWMAC_SOCFPGA
 	tristate "SOCFPGA dwmac support"
-	default ARCH_SOCFPGA
+	default (ARCH_SOCFPGA || ARCH_STRATIX10)
 	depends on OF && (ARCH_SOCFPGA || ARCH_STRATIX10 || COMPILE_TEST)
 	select MFD_SYSCON
 	help
@@ -123,7 +123,7 @@ config DWMAC_SOCFPGA
 config DWMAC_STI
 	tristate "STi GMAC support"
 	default ARCH_STI
-	depends on OF && COMMON_CLK && (ARCH_STI || COMPILE_TEST)
+	depends on OF && (ARCH_STI || COMPILE_TEST)
 	select MFD_SYSCON
 	---help---
 	  Support for ethernet controller on STi SOCs.
@@ -147,7 +147,7 @@ config DWMAC_STM32
 config DWMAC_SUNXI
 	tristate "Allwinner GMAC support"
 	default ARCH_SUNXI
-	depends on OF && COMMON_CLK && (ARCH_SUNXI || COMPILE_TEST)
+	depends on OF && (ARCH_SUNXI || COMPILE_TEST)
 	---help---
 	  Support for Allwinner A20/A31 GMAC ethernet controllers.
 
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 76649adf8fb0..c0a855b7ab3b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -112,7 +112,6 @@ struct stmmac_priv {
 	u32 tx_count_frames;
 	u32 tx_coal_frames;
 	u32 tx_coal_timer;
-	bool tx_timer_armed;
 
 	int tx_coalesce;
 	int hwts_tx_en;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index ff1ffb46198a..9f458bb16f2a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3147,16 +3147,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * element in case of no SG.
 	 */
 	priv->tx_count_frames += nfrags + 1;
-	if (likely(priv->tx_coal_frames > priv->tx_count_frames) &&
-	    !priv->tx_timer_armed) {
+	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
 		mod_timer(&priv->txtimer,
 			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
-		priv->tx_timer_armed = true;
 	} else {
 		priv->tx_count_frames = 0;
 		stmmac_set_tx_ic(priv, desc);
 		priv->xstats.tx_set_ic_bit++;
-		priv->tx_timer_armed = false;
 	}
 
 	skb_tx_timestamp(skb);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 3609c7b696c7..2b800ce1d5bf 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -67,7 +67,7 @@ static int dwmac1000_validate_mcast_bins(int mcast_bins)
67 * Description: 67 * Description:
68 * This function validates the number of Unicast address entries supported 68 * This function validates the number of Unicast address entries supported
69 * by a particular Synopsys 10/100/1000 controller. The Synopsys controller 69 * by a particular Synopsys 10/100/1000 controller. The Synopsys controller
70 * supports 1, 32, 64, or 128 Unicast filter entries for its Unicast filter 70 * supports 1..32, 64, or 128 Unicast filter entries for its Unicast filter
71 * logic. This function validates a valid, supported configuration is 71 * logic. This function validates a valid, supported configuration is
72 * selected, and defaults to 1 Unicast address if an unsupported 72 * selected, and defaults to 1 Unicast address if an unsupported
73 * configuration is selected. 73 * configuration is selected.
@@ -77,8 +77,7 @@ static int dwmac1000_validate_ucast_entries(int ucast_entries)
77 int x = ucast_entries; 77 int x = ucast_entries;
78 78
79 switch (x) { 79 switch (x) {
80 case 1: 80 case 1 ... 32:
81 case 32:
82 case 64: 81 case 64:
83 case 128: 82 case 128:
84 break; 83 break;
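The patch above replaces the separate `case 1:` and `case 32:` labels with the GCC/Clang case-range extension, so any count from 1 to 32 is accepted. A minimal user-space sketch of the same validation pattern (illustrative names; the range syntax is a compiler extension, not standard C):

    #include <stdio.h>

    static int validate_ucast_entries(int entries)
    {
        switch (entries) {
        case 1 ... 32:      /* any value from 1 to 32 inclusive */
        case 64:
        case 128:
            return entries;
        default:            /* unsupported: fall back to one entry */
            return 1;
        }
    }

    int main(void)
    {
        printf("%d %d %d\n",
               validate_ucast_entries(7),    /* 7   */
               validate_ucast_entries(33),   /* 1   */
               validate_ucast_entries(128)); /* 128 */
        return 0;
    }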
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 1a96dd9c1091..531294f4978b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -61,7 +61,7 @@ static int tc_fill_actions(struct stmmac_tc_entry *entry,
61 struct stmmac_tc_entry *action_entry = entry; 61 struct stmmac_tc_entry *action_entry = entry;
62 const struct tc_action *act; 62 const struct tc_action *act;
63 struct tcf_exts *exts; 63 struct tcf_exts *exts;
64 LIST_HEAD(actions); 64 int i;
65 65
66 exts = cls->knode.exts; 66 exts = cls->knode.exts;
67 if (!tcf_exts_has_actions(exts)) 67 if (!tcf_exts_has_actions(exts))
@@ -69,8 +69,7 @@ static int tc_fill_actions(struct stmmac_tc_entry *entry,
69 if (frag) 69 if (frag)
70 action_entry = frag; 70 action_entry = frag;
71 71
72 tcf_exts_to_list(exts, &actions); 72 tcf_exts_for_each_action(i, act, exts) {
73 list_for_each_entry(act, &actions, list) {
74 /* Accept */ 73 /* Accept */
75 if (is_tcf_gact_ok(act)) { 74 if (is_tcf_gact_ok(act)) {
76 action_entry->val.af = 1; 75 action_entry->val.af = 1;
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 9263d638bd6d..f932923f7d56 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -41,6 +41,7 @@ config TI_DAVINCI_MDIO
41config TI_DAVINCI_CPDMA 41config TI_DAVINCI_CPDMA
42 tristate "TI DaVinci CPDMA Support" 42 tristate "TI DaVinci CPDMA Support"
43 depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST 43 depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST
44 select GENERIC_ALLOCATOR
44 ---help--- 45 ---help---
45 This driver supports TI's DaVinci CPDMA dma engine. 46 This driver supports TI's DaVinci CPDMA dma engine.
46 47
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c
index 0c1adad7415d..396e1cd10667 100644
--- a/drivers/net/ethernet/ti/cpsw-phy-sel.c
+++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c
@@ -170,10 +170,13 @@ void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave)
170 struct device_node *node; 170 struct device_node *node;
171 struct cpsw_phy_sel_priv *priv; 171 struct cpsw_phy_sel_priv *priv;
172 172
173 node = of_get_child_by_name(dev->of_node, "cpsw-phy-sel"); 173 node = of_parse_phandle(dev->of_node, "cpsw-phy-sel", 0);
174 if (!node) { 174 if (!node) {
175 dev_err(dev, "Phy mode driver DT not found\n"); 175 node = of_get_child_by_name(dev->of_node, "cpsw-phy-sel");
176 return; 176 if (!node) {
177 dev_err(dev, "Phy mode driver DT not found\n");
178 return;
179 }
177 } 180 }
178 181
179 dev = bus_find_device(&platform_bus_type, NULL, node, match); 182 dev = bus_find_device(&platform_bus_type, NULL, node, match);
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 31c3d77b4733..fe01e141c8f8 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -1203,6 +1203,9 @@ static void netvsc_send_vf(struct net_device *ndev,
1203 1203
1204 net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated; 1204 net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
1205 net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial; 1205 net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
1206 netdev_info(ndev, "VF slot %u %s\n",
1207 net_device_ctx->vf_serial,
1208 net_device_ctx->vf_alloc ? "added" : "removed");
1206} 1209}
1207 1210
1208static void netvsc_receive_inband(struct net_device *ndev, 1211static void netvsc_receive_inband(struct net_device *ndev,
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 507f68190cb1..3af6d8d15233 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -29,6 +29,7 @@
29#include <linux/netdevice.h> 29#include <linux/netdevice.h>
30#include <linux/inetdevice.h> 30#include <linux/inetdevice.h>
31#include <linux/etherdevice.h> 31#include <linux/etherdevice.h>
32#include <linux/pci.h>
32#include <linux/skbuff.h> 33#include <linux/skbuff.h>
33#include <linux/if_vlan.h> 34#include <linux/if_vlan.h>
34#include <linux/in.h> 35#include <linux/in.h>
@@ -1893,20 +1894,6 @@ out_unlock:
1893 rtnl_unlock(); 1894 rtnl_unlock();
1894} 1895}
1895 1896
1896static struct net_device *get_netvsc_bymac(const u8 *mac)
1897{
1898 struct net_device_context *ndev_ctx;
1899
1900 list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
1901 struct net_device *dev = hv_get_drvdata(ndev_ctx->device_ctx);
1902
1903 if (ether_addr_equal(mac, dev->perm_addr))
1904 return dev;
1905 }
1906
1907 return NULL;
1908}
1909
1910static struct net_device *get_netvsc_byref(struct net_device *vf_netdev) 1897static struct net_device *get_netvsc_byref(struct net_device *vf_netdev)
1911{ 1898{
1912 struct net_device_context *net_device_ctx; 1899 struct net_device_context *net_device_ctx;
@@ -2035,22 +2022,48 @@ static void netvsc_vf_setup(struct work_struct *w)
2035 rtnl_unlock(); 2022 rtnl_unlock();
2036} 2023}
2037 2024
2025/* Find netvsc by VMBus serial number.
2026 * The PCI hyperv controller records the serial number as the slot.
2027 */
2028static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
2029{
2030 struct device *parent = vf_netdev->dev.parent;
2031 struct net_device_context *ndev_ctx;
2032 struct pci_dev *pdev;
2033
2034 if (!parent || !dev_is_pci(parent))
2035 return NULL; /* not a PCI device */
2036
2037 pdev = to_pci_dev(parent);
2038 if (!pdev->slot) {
2039 netdev_notice(vf_netdev, "no PCI slot information\n");
2040 return NULL;
2041 }
2042
2043 list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
2044 if (!ndev_ctx->vf_alloc)
2045 continue;
2046
2047 if (ndev_ctx->vf_serial == pdev->slot->number)
2048 return hv_get_drvdata(ndev_ctx->device_ctx);
2049 }
2050
2051 netdev_notice(vf_netdev,
2052 "no netdev found for slot %u\n", pdev->slot->number);
2053 return NULL;
2054}
2055
2038static int netvsc_register_vf(struct net_device *vf_netdev) 2056static int netvsc_register_vf(struct net_device *vf_netdev)
2039{ 2057{
2040 struct net_device *ndev;
2041 struct net_device_context *net_device_ctx; 2058 struct net_device_context *net_device_ctx;
2042 struct netvsc_device *netvsc_dev; 2059 struct netvsc_device *netvsc_dev;
2060 struct net_device *ndev;
2043 int ret; 2061 int ret;
2044 2062
2045 if (vf_netdev->addr_len != ETH_ALEN) 2063 if (vf_netdev->addr_len != ETH_ALEN)
2046 return NOTIFY_DONE; 2064 return NOTIFY_DONE;
2047 2065
2048 /* 2066 ndev = get_netvsc_byslot(vf_netdev);
2049 * We will use the MAC address to locate the synthetic interface to
2050 * associate with the VF interface. If we don't find a matching
2051 * synthetic interface, move on.
2052 */
2053 ndev = get_netvsc_bymac(vf_netdev->perm_addr);
2054 if (!ndev) 2067 if (!ndev)
2055 return NOTIFY_DONE; 2068 return NOTIFY_DONE;
2056 2069
@@ -2201,6 +2214,16 @@ static int netvsc_probe(struct hv_device *dev,
2201 2214
2202 memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); 2215 memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
2203 2216
2217 /* We must get rtnl lock before scheduling nvdev->subchan_work,
2218 * otherwise netvsc_subchan_work() can get rtnl lock first and wait
2219 * all subchannels to show up, but that may not happen because
2220 * netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer()
2221 * -> ... -> device_add() -> ... -> __device_attach() can't get
2222 * the device lock, so all the subchannels can't be processed --
2223 * finally netvsc_subchan_work() hangs for ever.
2224 */
2225 rtnl_lock();
2226
2204 if (nvdev->num_chn > 1) 2227 if (nvdev->num_chn > 1)
2205 schedule_work(&nvdev->subchan_work); 2228 schedule_work(&nvdev->subchan_work);
2206 2229
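The comment above describes a classic ordering fix: take the lock *before* scheduling the worker, so the worker cannot enter its critical section (and wait on state that only the probe path can produce) until probe has finished. A user-space sketch of that discipline, with pthreads standing in for rtnl_lock() and the workqueue (all names illustrative); compile with -pthread:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int ready;   /* state the "probe" path must publish */

    static void *worker(void *arg)
    {
        pthread_mutex_lock(&lock);   /* blocks until probe releases it */
        printf("worker sees ready=%d\n", ready);
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_mutex_lock(&lock);              /* lock first ...      */
        pthread_create(&t, NULL, worker, NULL); /* ... then schedule   */
        ready = 1;                              /* finish critical work */
        pthread_mutex_unlock(&lock);

        pthread_join(t, NULL);
        return 0;
    }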
@@ -2219,7 +2242,6 @@ static int netvsc_probe(struct hv_device *dev,
2219 else 2242 else
2220 net->max_mtu = ETH_DATA_LEN; 2243 net->max_mtu = ETH_DATA_LEN;
2221 2244
2222 rtnl_lock();
2223 ret = register_netdevice(net); 2245 ret = register_netdevice(net);
2224 if (ret != 0) { 2246 if (ret != 0) {
2225 pr_err("Unable to register netdev.\n"); 2247 pr_err("Unable to register netdev.\n");
@@ -2258,17 +2280,15 @@ static int netvsc_remove(struct hv_device *dev)
2258 2280
2259 cancel_delayed_work_sync(&ndev_ctx->dwork); 2281 cancel_delayed_work_sync(&ndev_ctx->dwork);
2260 2282
2261 rcu_read_lock(); 2283 rtnl_lock();
2262 nvdev = rcu_dereference(ndev_ctx->nvdev); 2284 nvdev = rtnl_dereference(ndev_ctx->nvdev);
2263 2285 if (nvdev)
2264 if (nvdev)
2265 cancel_work_sync(&nvdev->subchan_work); 2286 cancel_work_sync(&nvdev->subchan_work);
2266 2287
2267 /* 2288 /*
2268 * Call to the vsc driver to let it know that the device is being 2289 * Call to the vsc driver to let it know that the device is being
2269 * removed. Also blocks mtu and channel changes. 2290 * removed. Also blocks mtu and channel changes.
2270 */ 2291 */
2271 rtnl_lock();
2272 vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); 2292 vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
2273 if (vf_netdev) 2293 if (vf_netdev)
2274 netvsc_unregister_vf(vf_netdev); 2294 netvsc_unregister_vf(vf_netdev);
@@ -2280,7 +2300,6 @@ static int netvsc_remove(struct hv_device *dev)
2280 list_del(&ndev_ctx->list); 2300 list_del(&ndev_ctx->list);
2281 2301
2282 rtnl_unlock(); 2302 rtnl_unlock();
2283 rcu_read_unlock();
2284 2303
2285 hv_set_drvdata(dev, NULL); 2304 hv_set_drvdata(dev, NULL);
2286 2305
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 4637d980310e..52fffb98fde9 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -398,7 +398,6 @@ static umode_t sfp_hwmon_is_visible(const void *data,
398 switch (type) { 398 switch (type) {
399 case hwmon_temp: 399 case hwmon_temp:
400 switch (attr) { 400 switch (attr) {
401 case hwmon_temp_input:
402 case hwmon_temp_min_alarm: 401 case hwmon_temp_min_alarm:
403 case hwmon_temp_max_alarm: 402 case hwmon_temp_max_alarm:
404 case hwmon_temp_lcrit_alarm: 403 case hwmon_temp_lcrit_alarm:
@@ -407,13 +406,16 @@ static umode_t sfp_hwmon_is_visible(const void *data,
407 case hwmon_temp_max: 406 case hwmon_temp_max:
408 case hwmon_temp_lcrit: 407 case hwmon_temp_lcrit:
409 case hwmon_temp_crit: 408 case hwmon_temp_crit:
409 if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN))
410 return 0;
411 /* fall through */
412 case hwmon_temp_input:
410 return 0444; 413 return 0444;
411 default: 414 default:
412 return 0; 415 return 0;
413 } 416 }
414 case hwmon_in: 417 case hwmon_in:
415 switch (attr) { 418 switch (attr) {
416 case hwmon_in_input:
417 case hwmon_in_min_alarm: 419 case hwmon_in_min_alarm:
418 case hwmon_in_max_alarm: 420 case hwmon_in_max_alarm:
419 case hwmon_in_lcrit_alarm: 421 case hwmon_in_lcrit_alarm:
@@ -422,13 +424,16 @@ static umode_t sfp_hwmon_is_visible(const void *data,
422 case hwmon_in_max: 424 case hwmon_in_max:
423 case hwmon_in_lcrit: 425 case hwmon_in_lcrit:
424 case hwmon_in_crit: 426 case hwmon_in_crit:
427 if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN))
428 return 0;
429 /* fall through */
430 case hwmon_in_input:
425 return 0444; 431 return 0444;
426 default: 432 default:
427 return 0; 433 return 0;
428 } 434 }
429 case hwmon_curr: 435 case hwmon_curr:
430 switch (attr) { 436 switch (attr) {
431 case hwmon_curr_input:
432 case hwmon_curr_min_alarm: 437 case hwmon_curr_min_alarm:
433 case hwmon_curr_max_alarm: 438 case hwmon_curr_max_alarm:
434 case hwmon_curr_lcrit_alarm: 439 case hwmon_curr_lcrit_alarm:
@@ -437,6 +442,10 @@ static umode_t sfp_hwmon_is_visible(const void *data,
437 case hwmon_curr_max: 442 case hwmon_curr_max:
438 case hwmon_curr_lcrit: 443 case hwmon_curr_lcrit:
439 case hwmon_curr_crit: 444 case hwmon_curr_crit:
445 if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN))
446 return 0;
447 /* fall through */
448 case hwmon_curr_input:
440 return 0444; 449 return 0444;
441 default: 450 default:
442 return 0; 451 return 0;
@@ -452,7 +461,6 @@ static umode_t sfp_hwmon_is_visible(const void *data,
452 channel == 1) 461 channel == 1)
453 return 0; 462 return 0;
454 switch (attr) { 463 switch (attr) {
455 case hwmon_power_input:
456 case hwmon_power_min_alarm: 464 case hwmon_power_min_alarm:
457 case hwmon_power_max_alarm: 465 case hwmon_power_max_alarm:
458 case hwmon_power_lcrit_alarm: 466 case hwmon_power_lcrit_alarm:
@@ -461,6 +469,10 @@ static umode_t sfp_hwmon_is_visible(const void *data,
461 case hwmon_power_max: 469 case hwmon_power_max:
462 case hwmon_power_lcrit: 470 case hwmon_power_lcrit:
463 case hwmon_power_crit: 471 case hwmon_power_crit:
472 if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN))
473 return 0;
474 /* fall through */
475 case hwmon_power_input:
464 return 0444; 476 return 0444;
465 default: 477 default:
466 return 0; 478 return 0;
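The sfp change above repeats one pattern four times: limit and alarm attributes become visible only when the module advertises alarm/warning support, while the raw input attribute stays visible unconditionally via a gated fall-through. A standalone sketch of that pattern (illustrative names and flag values):

    #include <stdio.h>

    #define HAS_ALARMWARN 0x1

    enum attr { ATTR_INPUT, ATTR_MIN_ALARM, ATTR_MAX_ALARM };

    static unsigned int attr_mode(unsigned int features, enum attr attr)
    {
        switch (attr) {
        case ATTR_MIN_ALARM:
        case ATTR_MAX_ALARM:
            if (!(features & HAS_ALARMWARN))
                return 0;   /* hide limit attrs without the feature */
            /* fall through */
        case ATTR_INPUT:
            return 0444;    /* world-readable */
        default:
            return 0;
        }
    }

    int main(void)
    {
        printf("%o %o %o\n",
               attr_mode(0, ATTR_INPUT),                  /* 444 */
               attr_mode(0, ATTR_MIN_ALARM),              /* 0   */
               attr_mode(HAS_ALARMWARN, ATTR_MIN_ALARM)); /* 444 */
        return 0;
    }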
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index ce61231e96ea..62dc564b251d 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -429,6 +429,9 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev,
429 if (!skb) 429 if (!skb)
430 goto out; 430 goto out;
431 431
432 if (skb_mac_header_len(skb) < ETH_HLEN)
433 goto drop;
434
432 if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr))) 435 if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr)))
433 goto drop; 436 goto drop;
434 437
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index cb0cc30c3d6a..533b6fb8d923 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -967,6 +967,13 @@ static const struct usb_device_id products[] = {
967 USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7), 967 USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7),
968 .driver_info = (unsigned long)&qmi_wwan_info, 968 .driver_info = (unsigned long)&qmi_wwan_info,
969 }, 969 },
970 { /* Quectel EP06/EG06/EM06 */
971 USB_DEVICE_AND_INTERFACE_INFO(0x2c7c, 0x0306,
972 USB_CLASS_VENDOR_SPEC,
973 USB_SUBCLASS_VENDOR_SPEC,
974 0xff),
975 .driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr,
976 },
970 977
971 /* 3. Combined interface devices matching on interface number */ 978 /* 3. Combined interface devices matching on interface number */
972 {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */ 979 {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
@@ -1206,13 +1213,13 @@ static const struct usb_device_id products[] = {
1206 {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */ 1213 {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */
1207 {QMI_FIXED_INTF(0x1199, 0x9063, 8)}, /* Sierra Wireless EM7305 */ 1214 {QMI_FIXED_INTF(0x1199, 0x9063, 8)}, /* Sierra Wireless EM7305 */
1208 {QMI_FIXED_INTF(0x1199, 0x9063, 10)}, /* Sierra Wireless EM7305 */ 1215 {QMI_FIXED_INTF(0x1199, 0x9063, 10)}, /* Sierra Wireless EM7305 */
1209 {QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx */ 1216 {QMI_QUIRK_SET_DTR(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx */
1210 {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx */ 1217 {QMI_QUIRK_SET_DTR(0x1199, 0x9071, 10)},/* Sierra Wireless MC74xx */
1211 {QMI_FIXED_INTF(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */ 1218 {QMI_QUIRK_SET_DTR(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */
1212 {QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */ 1219 {QMI_QUIRK_SET_DTR(0x1199, 0x9079, 10)},/* Sierra Wireless EM74xx */
1213 {QMI_FIXED_INTF(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */ 1220 {QMI_QUIRK_SET_DTR(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */
1214 {QMI_FIXED_INTF(0x1199, 0x907b, 10)}, /* Sierra Wireless EM74xx */ 1221 {QMI_QUIRK_SET_DTR(0x1199, 0x907b, 10)},/* Sierra Wireless EM74xx */
1215 {QMI_FIXED_INTF(0x1199, 0x9091, 8)}, /* Sierra Wireless EM7565 */ 1222 {QMI_QUIRK_SET_DTR(0x1199, 0x9091, 8)}, /* Sierra Wireless EM7565 */
1216 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ 1223 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
1217 {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ 1224 {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
1218 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ 1225 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
@@ -1255,7 +1262,6 @@ static const struct usb_device_id products[] = {
1255 {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */ 1262 {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
1256 {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */ 1263 {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */
1257 {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */ 1264 {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */
1258 {QMI_QUIRK_SET_DTR(0x2c7c, 0x0306, 4)}, /* Quectel EP06 Mini PCIe */
1259 1265
1260 /* 4. Gobi 1000 devices */ 1266 /* 4. Gobi 1000 devices */
1261 {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ 1267 {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
@@ -1331,6 +1337,19 @@ static bool quectel_ec20_detected(struct usb_interface *intf)
1331 return false; 1337 return false;
1332} 1338}
1333 1339
1340static bool quectel_ep06_diag_detected(struct usb_interface *intf)
1341{
1342 struct usb_device *dev = interface_to_usbdev(intf);
1343 struct usb_interface_descriptor intf_desc = intf->cur_altsetting->desc;
1344
1345 if (le16_to_cpu(dev->descriptor.idVendor) == 0x2c7c &&
1346 le16_to_cpu(dev->descriptor.idProduct) == 0x0306 &&
1347 intf_desc.bNumEndpoints == 2)
1348 return true;
1349
1350 return false;
1351}
1352
1334static int qmi_wwan_probe(struct usb_interface *intf, 1353static int qmi_wwan_probe(struct usb_interface *intf,
1335 const struct usb_device_id *prod) 1354 const struct usb_device_id *prod)
1336{ 1355{
@@ -1365,6 +1384,15 @@ static int qmi_wwan_probe(struct usb_interface *intf,
1365 return -ENODEV; 1384 return -ENODEV;
1366 } 1385 }
1367 1386
1387 /* Quectel EP06/EM06/EG06 supports dynamic interface configuration, so
1388 * we need to match on class/subclass/protocol. These values are
1389 * identical for the diagnostic- and QMI-interface, but bNumEndpoints is
1390 * different. Ignore the current interface if the number of endpoints
1391 * matches the number for the diag interface (two).
1392 */
1393 if (quectel_ep06_diag_detected(intf))
1394 return -ENODEV;
1395
1368 return usbnet_probe(intf, id); 1396 return usbnet_probe(intf, id);
1369} 1397}
1370 1398
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 97742708460b..2cd71bdb6484 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -5217,8 +5217,8 @@ static int rtl8152_probe(struct usb_interface *intf,
5217 netdev->hw_features &= ~NETIF_F_RXCSUM; 5217 netdev->hw_features &= ~NETIF_F_RXCSUM;
5218 } 5218 }
5219 5219
5220 if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 && 5220 if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 && udev->serial &&
5221 udev->serial && !strcmp(udev->serial, "000001000000")) { 5221 (!strcmp(udev->serial, "000001000000") || !strcmp(udev->serial, "000002000000"))) {
5222 dev_info(&udev->dev, "Dell TB16 Dock, disable RX aggregation"); 5222 dev_info(&udev->dev, "Dell TB16 Dock, disable RX aggregation");
5223 set_bit(DELL_TB_RX_AGG_BUG, &tp->flags); 5223 set_bit(DELL_TB_RX_AGG_BUG, &tp->flags);
5224 } 5224 }
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 8d679c8b7f25..41a00cd76955 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -463,6 +463,8 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb,
463 int mac_len, delta, off; 463 int mac_len, delta, off;
464 struct xdp_buff xdp; 464 struct xdp_buff xdp;
465 465
466 skb_orphan(skb);
467
466 rcu_read_lock(); 468 rcu_read_lock();
467 xdp_prog = rcu_dereference(rq->xdp_prog); 469 xdp_prog = rcu_dereference(rq->xdp_prog);
468 if (unlikely(!xdp_prog)) { 470 if (unlikely(!xdp_prog)) {
@@ -508,8 +510,6 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb,
508 skb_copy_header(nskb, skb); 510 skb_copy_header(nskb, skb);
509 head_off = skb_headroom(nskb) - skb_headroom(skb); 511 head_off = skb_headroom(nskb) - skb_headroom(skb);
510 skb_headers_offset_update(nskb, head_off); 512 skb_headers_offset_update(nskb, head_off);
511 if (skb->sk)
512 skb_set_owner_w(nskb, skb->sk);
513 consume_skb(skb); 513 consume_skb(skb);
514 skb = nskb; 514 skb = nskb;
515 } 515 }
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index b4c3a957c102..73969dbeb5c5 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -985,15 +985,12 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
985 const u8 *nvm_chan = cfg->nvm_type == IWL_NVM_EXT ? 985 const u8 *nvm_chan = cfg->nvm_type == IWL_NVM_EXT ?
986 iwl_ext_nvm_channels : iwl_nvm_channels; 986 iwl_ext_nvm_channels : iwl_nvm_channels;
987 struct ieee80211_regdomain *regd, *copy_rd; 987 struct ieee80211_regdomain *regd, *copy_rd;
988 int size_of_regd, regd_to_copy, wmms_to_copy; 988 int size_of_regd, regd_to_copy;
989 int size_of_wmms = 0;
990 struct ieee80211_reg_rule *rule; 989 struct ieee80211_reg_rule *rule;
991 struct ieee80211_wmm_rule *wmm_rule, *d_wmm, *s_wmm;
992 struct regdb_ptrs *regdb_ptrs; 990 struct regdb_ptrs *regdb_ptrs;
993 enum nl80211_band band; 991 enum nl80211_band band;
994 int center_freq, prev_center_freq = 0; 992 int center_freq, prev_center_freq = 0;
995 int valid_rules = 0, n_wmms = 0; 993 int valid_rules = 0;
996 int i;
997 bool new_rule; 994 bool new_rule;
998 int max_num_ch = cfg->nvm_type == IWL_NVM_EXT ? 995 int max_num_ch = cfg->nvm_type == IWL_NVM_EXT ?
999 IWL_NVM_NUM_CHANNELS_EXT : IWL_NVM_NUM_CHANNELS; 996 IWL_NVM_NUM_CHANNELS_EXT : IWL_NVM_NUM_CHANNELS;
@@ -1012,11 +1009,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
1012 sizeof(struct ieee80211_regdomain) + 1009 sizeof(struct ieee80211_regdomain) +
1013 num_of_ch * sizeof(struct ieee80211_reg_rule); 1010 num_of_ch * sizeof(struct ieee80211_reg_rule);
1014 1011
1015 if (geo_info & GEO_WMM_ETSI_5GHZ_INFO) 1012 regd = kzalloc(size_of_regd, GFP_KERNEL);
1016 size_of_wmms =
1017 num_of_ch * sizeof(struct ieee80211_wmm_rule);
1018
1019 regd = kzalloc(size_of_regd + size_of_wmms, GFP_KERNEL);
1020 if (!regd) 1013 if (!regd)
1021 return ERR_PTR(-ENOMEM); 1014 return ERR_PTR(-ENOMEM);
1022 1015
@@ -1030,8 +1023,6 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
1030 regd->alpha2[0] = fw_mcc >> 8; 1023 regd->alpha2[0] = fw_mcc >> 8;
1031 regd->alpha2[1] = fw_mcc & 0xff; 1024 regd->alpha2[1] = fw_mcc & 0xff;
1032 1025
1033 wmm_rule = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd);
1034
1035 for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) { 1026 for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
1036 ch_flags = (u16)__le32_to_cpup(channels + ch_idx); 1027 ch_flags = (u16)__le32_to_cpup(channels + ch_idx);
1037 band = (ch_idx < NUM_2GHZ_CHANNELS) ? 1028 band = (ch_idx < NUM_2GHZ_CHANNELS) ?
@@ -1085,26 +1076,10 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
1085 band == NL80211_BAND_2GHZ) 1076 band == NL80211_BAND_2GHZ)
1086 continue; 1077 continue;
1087 1078
1088 if (!reg_query_regdb_wmm(regd->alpha2, center_freq, 1079 reg_query_regdb_wmm(regd->alpha2, center_freq, rule);
1089 &regdb_ptrs[n_wmms].token, wmm_rule)) {
1090 /* Add only new rules */
1091 for (i = 0; i < n_wmms; i++) {
1092 if (regdb_ptrs[i].token ==
1093 regdb_ptrs[n_wmms].token) {
1094 rule->wmm_rule = regdb_ptrs[i].rule;
1095 break;
1096 }
1097 }
1098 if (i == n_wmms) {
1099 rule->wmm_rule = wmm_rule;
1100 regdb_ptrs[n_wmms++].rule = wmm_rule;
1101 wmm_rule++;
1102 }
1103 }
1104 } 1080 }
1105 1081
1106 regd->n_reg_rules = valid_rules; 1082 regd->n_reg_rules = valid_rules;
1107 regd->n_wmm_rules = n_wmms;
1108 1083
1109 /* 1084 /*
1110 * Narrow down regdom for unused regulatory rules to prevent hole 1085 * Narrow down regdom for unused regulatory rules to prevent hole
@@ -1113,28 +1088,13 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
1113 regd_to_copy = sizeof(struct ieee80211_regdomain) + 1088 regd_to_copy = sizeof(struct ieee80211_regdomain) +
1114 valid_rules * sizeof(struct ieee80211_reg_rule); 1089 valid_rules * sizeof(struct ieee80211_reg_rule);
1115 1090
1116 wmms_to_copy = sizeof(struct ieee80211_wmm_rule) * n_wmms; 1091 copy_rd = kzalloc(regd_to_copy, GFP_KERNEL);
1117
1118 copy_rd = kzalloc(regd_to_copy + wmms_to_copy, GFP_KERNEL);
1119 if (!copy_rd) { 1092 if (!copy_rd) {
1120 copy_rd = ERR_PTR(-ENOMEM); 1093 copy_rd = ERR_PTR(-ENOMEM);
1121 goto out; 1094 goto out;
1122 } 1095 }
1123 1096
1124 memcpy(copy_rd, regd, regd_to_copy); 1097 memcpy(copy_rd, regd, regd_to_copy);
1125 memcpy((u8 *)copy_rd + regd_to_copy, (u8 *)regd + size_of_regd,
1126 wmms_to_copy);
1127
1128 d_wmm = (struct ieee80211_wmm_rule *)((u8 *)copy_rd + regd_to_copy);
1129 s_wmm = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd);
1130
1131 for (i = 0; i < regd->n_reg_rules; i++) {
1132 if (!regd->reg_rules[i].wmm_rule)
1133 continue;
1134
1135 copy_rd->reg_rules[i].wmm_rule = d_wmm +
1136 (regd->reg_rules[i].wmm_rule - s_wmm);
1137 }
1138 1098
1139out: 1099out:
1140 kfree(regdb_ptrs); 1100 kfree(regdb_ptrs);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 998dfac0fcff..1068757ec42e 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -34,6 +34,7 @@
34#include <net/net_namespace.h> 34#include <net/net_namespace.h>
35#include <net/netns/generic.h> 35#include <net/netns/generic.h>
36#include <linux/rhashtable.h> 36#include <linux/rhashtable.h>
37#include <linux/nospec.h>
37#include "mac80211_hwsim.h" 38#include "mac80211_hwsim.h"
38 39
39#define WARN_QUEUE 100 40#define WARN_QUEUE 100
@@ -2820,9 +2821,6 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
2820 IEEE80211_VHT_CAP_SHORT_GI_80 | 2821 IEEE80211_VHT_CAP_SHORT_GI_80 |
2821 IEEE80211_VHT_CAP_SHORT_GI_160 | 2822 IEEE80211_VHT_CAP_SHORT_GI_160 |
2822 IEEE80211_VHT_CAP_TXSTBC | 2823 IEEE80211_VHT_CAP_TXSTBC |
2823 IEEE80211_VHT_CAP_RXSTBC_1 |
2824 IEEE80211_VHT_CAP_RXSTBC_2 |
2825 IEEE80211_VHT_CAP_RXSTBC_3 |
2826 IEEE80211_VHT_CAP_RXSTBC_4 | 2824 IEEE80211_VHT_CAP_RXSTBC_4 |
2827 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; 2825 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
2828 sband->vht_cap.vht_mcs.rx_mcs_map = 2826 sband->vht_cap.vht_mcs.rx_mcs_map =
@@ -3317,6 +3315,11 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
3317 if (info->attrs[HWSIM_ATTR_CHANNELS]) 3315 if (info->attrs[HWSIM_ATTR_CHANNELS])
3318 param.channels = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]); 3316 param.channels = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]);
3319 3317
3318 if (param.channels < 1) {
3319 GENL_SET_ERR_MSG(info, "must have at least one channel");
3320 return -EINVAL;
3321 }
3322
3320 if (param.channels > CFG80211_MAX_NUM_DIFFERENT_CHANNELS) { 3323 if (param.channels > CFG80211_MAX_NUM_DIFFERENT_CHANNELS) {
3321 GENL_SET_ERR_MSG(info, "too many channels specified"); 3324 GENL_SET_ERR_MSG(info, "too many channels specified");
3322 return -EINVAL; 3325 return -EINVAL;
@@ -3350,6 +3353,9 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
3350 kfree(hwname); 3353 kfree(hwname);
3351 return -EINVAL; 3354 return -EINVAL;
3352 } 3355 }
3356
3357 idx = array_index_nospec(idx,
3358 ARRAY_SIZE(hwsim_world_regdom_custom));
3353 param.regd = hwsim_world_regdom_custom[idx]; 3359 param.regd = hwsim_world_regdom_custom[idx];
3354 } 3360 }
3355 3361
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 73f596a90c69..f17f602e6171 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -87,8 +87,7 @@ struct netfront_cb {
87/* IRQ name is queue name with "-tx" or "-rx" appended */ 87/* IRQ name is queue name with "-tx" or "-rx" appended */
88#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3) 88#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
89 89
90static DECLARE_WAIT_QUEUE_HEAD(module_load_q); 90static DECLARE_WAIT_QUEUE_HEAD(module_wq);
91static DECLARE_WAIT_QUEUE_HEAD(module_unload_q);
92 91
93struct netfront_stats { 92struct netfront_stats {
94 u64 packets; 93 u64 packets;
@@ -909,7 +908,11 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
909 BUG_ON(pull_to <= skb_headlen(skb)); 908 BUG_ON(pull_to <= skb_headlen(skb));
910 __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); 909 __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
911 } 910 }
912 BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS); 911 if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
912 queue->rx.rsp_cons = ++cons;
913 kfree_skb(nskb);
914 return ~0U;
915 }
913 916
914 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, 917 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
915 skb_frag_page(nfrag), 918 skb_frag_page(nfrag),
@@ -1046,6 +1049,8 @@ err:
1046 skb->len += rx->status; 1049 skb->len += rx->status;
1047 1050
1048 i = xennet_fill_frags(queue, skb, &tmpq); 1051 i = xennet_fill_frags(queue, skb, &tmpq);
1052 if (unlikely(i == ~0U))
1053 goto err;
1049 1054
1050 if (rx->flags & XEN_NETRXF_csum_blank) 1055 if (rx->flags & XEN_NETRXF_csum_blank)
1051 skb->ip_summed = CHECKSUM_PARTIAL; 1056 skb->ip_summed = CHECKSUM_PARTIAL;
@@ -1332,11 +1337,11 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
1332 netif_carrier_off(netdev); 1337 netif_carrier_off(netdev);
1333 1338
1334 xenbus_switch_state(dev, XenbusStateInitialising); 1339 xenbus_switch_state(dev, XenbusStateInitialising);
1335 wait_event(module_load_q, 1340 wait_event(module_wq,
1336 xenbus_read_driver_state(dev->otherend) != 1341 xenbus_read_driver_state(dev->otherend) !=
1337 XenbusStateClosed && 1342 XenbusStateClosed &&
1338 xenbus_read_driver_state(dev->otherend) != 1343 xenbus_read_driver_state(dev->otherend) !=
1339 XenbusStateUnknown); 1344 XenbusStateUnknown);
1340 return netdev; 1345 return netdev;
1341 1346
1342 exit: 1347 exit:
@@ -2010,15 +2015,14 @@ static void netback_changed(struct xenbus_device *dev,
2010 2015
2011 dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state)); 2016 dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
2012 2017
2018 wake_up_all(&module_wq);
2019
2013 switch (backend_state) { 2020 switch (backend_state) {
2014 case XenbusStateInitialising: 2021 case XenbusStateInitialising:
2015 case XenbusStateInitialised: 2022 case XenbusStateInitialised:
2016 case XenbusStateReconfiguring: 2023 case XenbusStateReconfiguring:
2017 case XenbusStateReconfigured: 2024 case XenbusStateReconfigured:
2018 break;
2019
2020 case XenbusStateUnknown: 2025 case XenbusStateUnknown:
2021 wake_up_all(&module_unload_q);
2022 break; 2026 break;
2023 2027
2024 case XenbusStateInitWait: 2028 case XenbusStateInitWait:
@@ -2034,12 +2038,10 @@ static void netback_changed(struct xenbus_device *dev,
2034 break; 2038 break;
2035 2039
2036 case XenbusStateClosed: 2040 case XenbusStateClosed:
2037 wake_up_all(&module_unload_q);
2038 if (dev->state == XenbusStateClosed) 2041 if (dev->state == XenbusStateClosed)
2039 break; 2042 break;
2040 /* Missed the backend's CLOSING state -- fallthrough */ 2043 /* Missed the backend's CLOSING state -- fallthrough */
2041 case XenbusStateClosing: 2044 case XenbusStateClosing:
2042 wake_up_all(&module_unload_q);
2043 xenbus_frontend_closed(dev); 2045 xenbus_frontend_closed(dev);
2044 break; 2046 break;
2045 } 2047 }
@@ -2147,14 +2149,14 @@ static int xennet_remove(struct xenbus_device *dev)
2147 2149
2148 if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) { 2150 if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) {
2149 xenbus_switch_state(dev, XenbusStateClosing); 2151 xenbus_switch_state(dev, XenbusStateClosing);
2150 wait_event(module_unload_q, 2152 wait_event(module_wq,
2151 xenbus_read_driver_state(dev->otherend) == 2153 xenbus_read_driver_state(dev->otherend) ==
2152 XenbusStateClosing || 2154 XenbusStateClosing ||
2153 xenbus_read_driver_state(dev->otherend) == 2155 xenbus_read_driver_state(dev->otherend) ==
2154 XenbusStateUnknown); 2156 XenbusStateUnknown);
2155 2157
2156 xenbus_switch_state(dev, XenbusStateClosed); 2158 xenbus_switch_state(dev, XenbusStateClosed);
2157 wait_event(module_unload_q, 2159 wait_event(module_wq,
2158 xenbus_read_driver_state(dev->otherend) == 2160 xenbus_read_driver_state(dev->otherend) ==
2159 XenbusStateClosed || 2161 XenbusStateClosed ||
2160 xenbus_read_driver_state(dev->otherend) == 2162 xenbus_read_driver_state(dev->otherend) ==
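The xen-netfront rework above collapses module_load_q and module_unload_q into one queue: every backend state change calls wake_up_all(), and each sleeper re-checks its own wait_event() condition, so a single queue serves both the load and unload paths. A kernel-style fragment of that pattern (illustrative state values):

    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(module_wq);
    static int backend_state;

    static void backend_changed(int new_state)
    {
        backend_state = new_state;
        wake_up_all(&module_wq);    /* wake every sleeper on any change */
    }

    static void wait_until(int wanted)
    {
        /* each caller re-checks its own condition after every wakeup */
        wait_event(module_wq, backend_state == wanted);
    }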
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 1b9951d2067e..d668682f91df 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -316,6 +316,14 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
316 old_value = *dbbuf_db; 316 old_value = *dbbuf_db;
317 *dbbuf_db = value; 317 *dbbuf_db = value;
318 318
319 /*
320 * Ensure that the doorbell is updated before reading the event
321 * index from memory. The controller needs to provide similar
322 * ordering to ensure the event index is updated before reading
323 * the doorbell.
324 */
325 mb();
326
319 if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value)) 327 if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value))
320 return false; 328 return false;
321 } 329 }
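The mb() inserted above enforces store-then-load ordering on each side of the shared doorbell buffer: the host must publish its doorbell write before reading the device's event index, and the device must do the mirror image, or both can observe stale values and an event is missed. A user-space C11 sketch of the same pairing, with a seq_cst fence playing the role of mb() (illustrative names):

    #include <stdatomic.h>

    static _Atomic unsigned int dbbuf_db;   /* host-written doorbell      */
    static _Atomic unsigned int dbbuf_ei;   /* device-written event index */

    static unsigned int ring_doorbell(unsigned int value)
    {
        atomic_store_explicit(&dbbuf_db, value, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst); /* store before load */
        return atomic_load_explicit(&dbbuf_ei, memory_order_relaxed);
    }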
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index ebf3e7a6c49e..b5ec96abd048 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -1210,7 +1210,7 @@ static int __init nvmet_init(void)
1210 1210
1211 error = nvmet_init_discovery(); 1211 error = nvmet_init_discovery();
1212 if (error) 1212 if (error)
1213 goto out; 1213 goto out_free_work_queue;
1214 1214
1215 error = nvmet_init_configfs(); 1215 error = nvmet_init_configfs();
1216 if (error) 1216 if (error)
@@ -1219,6 +1219,8 @@ static int __init nvmet_init(void)
1219 1219
1220out_exit_discovery: 1220out_exit_discovery:
1221 nvmet_exit_discovery(); 1221 nvmet_exit_discovery();
1222out_free_work_queue:
1223 destroy_workqueue(buffered_io_wq);
1222out: 1224out:
1223 return error; 1225 return error;
1224} 1226}
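The nvmet_init fix above is the standard goto-unwind idiom: every resource acquired before a failing step needs a label on the error path, and teardown runs in reverse order of acquisition. A standalone sketch of the shape of the bug and the fix (illustrative resources):

    #include <stdlib.h>

    static int init(void)
    {
        void *wq, *disc;

        wq = malloc(16);            /* step 1: work queue */
        if (!wq)
            goto out;
        disc = malloc(16);          /* step 2: discovery  */
        if (!disc)
            goto out_free_wq;       /* must undo step 1 -- the bug fixed above
                                     * was jumping to "out" and leaking wq */

        return 0;                   /* both resources stay live on success */

    out_free_wq:
        free(wq);
    out:
        return -1;
    }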
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 34712def81b1..5251689a1d9a 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -311,7 +311,7 @@ fcloop_tgt_lsrqst_done_work(struct work_struct *work)
311 struct fcloop_tport *tport = tls_req->tport; 311 struct fcloop_tport *tport = tls_req->tport;
312 struct nvmefc_ls_req *lsreq = tls_req->lsreq; 312 struct nvmefc_ls_req *lsreq = tls_req->lsreq;
313 313
314 if (tport->remoteport) 314 if (!tport || tport->remoteport)
315 lsreq->done(lsreq, tls_req->status); 315 lsreq->done(lsreq, tls_req->status);
316} 316}
317 317
@@ -329,6 +329,7 @@ fcloop_ls_req(struct nvme_fc_local_port *localport,
329 329
330 if (!rport->targetport) { 330 if (!rport->targetport) {
331 tls_req->status = -ECONNREFUSED; 331 tls_req->status = -ECONNREFUSED;
332 tls_req->tport = NULL;
332 schedule_work(&tls_req->work); 333 schedule_work(&tls_req->work);
333 return ret; 334 return ret;
334 } 335 }
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 3533e918ea37..bfc4da660bb4 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -66,6 +66,7 @@ struct nvmet_rdma_rsp {
66 66
67 struct nvmet_req req; 67 struct nvmet_req req;
68 68
69 bool allocated;
69 u8 n_rdma; 70 u8 n_rdma;
70 u32 flags; 71 u32 flags;
71 u32 invalidate_rkey; 72 u32 invalidate_rkey;
@@ -174,11 +175,19 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
174 unsigned long flags; 175 unsigned long flags;
175 176
176 spin_lock_irqsave(&queue->rsps_lock, flags); 177 spin_lock_irqsave(&queue->rsps_lock, flags);
177 rsp = list_first_entry(&queue->free_rsps, 178 rsp = list_first_entry_or_null(&queue->free_rsps,
178 struct nvmet_rdma_rsp, free_list); 179 struct nvmet_rdma_rsp, free_list);
179 list_del(&rsp->free_list); 180 if (likely(rsp))
181 list_del(&rsp->free_list);
180 spin_unlock_irqrestore(&queue->rsps_lock, flags); 182 spin_unlock_irqrestore(&queue->rsps_lock, flags);
181 183
184 if (unlikely(!rsp)) {
185 rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
186 if (unlikely(!rsp))
187 return NULL;
188 rsp->allocated = true;
189 }
190
182 return rsp; 191 return rsp;
183} 192}
184 193
@@ -187,6 +196,11 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
187{ 196{
188 unsigned long flags; 197 unsigned long flags;
189 198
199 if (rsp->allocated) {
200 kfree(rsp);
201 return;
202 }
203
190 spin_lock_irqsave(&rsp->queue->rsps_lock, flags); 204 spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
191 list_add_tail(&rsp->free_list, &rsp->queue->free_rsps); 205 list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
192 spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags); 206 spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
@@ -776,6 +790,15 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
776 790
777 cmd->queue = queue; 791 cmd->queue = queue;
778 rsp = nvmet_rdma_get_rsp(queue); 792 rsp = nvmet_rdma_get_rsp(queue);
793 if (unlikely(!rsp)) {
794 /*
795 * we get here only under memory pressure,
796 * silently drop and have the host retry
797 * as we can't even fail it.
798 */
799 nvmet_rdma_post_recv(queue->dev, cmd);
800 return;
801 }
779 rsp->queue = queue; 802 rsp->queue = queue;
780 rsp->cmd = cmd; 803 rsp->cmd = cmd;
781 rsp->flags = 0; 804 rsp->flags = 0;
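The nvmet-rdma change above turns a hard assumption (the free list is never empty) into a graceful fallback: take from the preallocated pool when possible, fall back to the heap under pressure, and tag each object so release knows which path to take. A single-threaded user-space sketch of the strategy (the real code additionally holds a spinlock around the list; names are illustrative):

    #include <stdbool.h>
    #include <stdlib.h>

    struct rsp {
        struct rsp *next;   /* free-list linkage */
        bool allocated;     /* true if it came from calloc() */
    };

    static struct rsp *free_list;

    static struct rsp *get_rsp(void)
    {
        struct rsp *rsp = free_list;

        if (rsp) {
            free_list = rsp->next;      /* fast path: pooled object */
            return rsp;
        }
        rsp = calloc(1, sizeof(*rsp));  /* slow path: heap fallback */
        if (rsp)
            rsp->allocated = true;
        return rsp;                     /* may be NULL: caller must cope */
    }

    static void put_rsp(struct rsp *rsp)
    {
        if (rsp->allocated) {
            free(rsp);                  /* heap objects go back to the heap */
            return;
        }
        rsp->next = free_list;          /* pooled objects rejoin the list */
        free_list = rsp;
    }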
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 466e3c8582f0..74eaedd5b860 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -54,6 +54,28 @@ DEFINE_MUTEX(of_mutex);
54 */ 54 */
55DEFINE_RAW_SPINLOCK(devtree_lock); 55DEFINE_RAW_SPINLOCK(devtree_lock);
56 56
57bool of_node_name_eq(const struct device_node *np, const char *name)
58{
59 const char *node_name;
60 size_t len;
61
62 if (!np)
63 return false;
64
65 node_name = kbasename(np->full_name);
66 len = strchrnul(node_name, '@') - node_name;
67
68 return (strlen(name) == len) && (strncmp(node_name, name, len) == 0);
69}
70
71bool of_node_name_prefix(const struct device_node *np, const char *prefix)
72{
73 if (!np)
74 return false;
75
76 return strncmp(kbasename(np->full_name), prefix, strlen(prefix)) == 0;
77}
78
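The new of_node_name_eq() above compares the node's basename only up to (but not including) any '@<unit-address>' suffix. A standalone demonstration of the same comparison using libc equivalents of kbasename() and strchrnul() (GNU extension, hence _GNU_SOURCE):

    #define _GNU_SOURCE     /* for strchrnul() */
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static bool node_name_eq(const char *full_name, const char *name)
    {
        const char *base = strrchr(full_name, '/');
        size_t len;

        base = base ? base + 1 : full_name;   /* kbasename()          */
        len = strchrnul(base, '@') - base;    /* stop at unit address */
        return strlen(name) == len && strncmp(base, name, len) == 0;
    }

    int main(void)
    {
        printf("%d %d\n",
               node_name_eq("/soc/ethernet@ff0e0000", "ethernet"), /* 1 */
               node_name_eq("/soc/ethernet@ff0e0000", "ether"));   /* 0 */
        return 0;
    }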
57int of_n_addr_cells(struct device_node *np) 79int of_n_addr_cells(struct device_node *np)
58{ 80{
59 u32 cells; 81 u32 cells;
@@ -118,6 +140,9 @@ void of_populate_phandle_cache(void)
118 if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL) 140 if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
119 phandles++; 141 phandles++;
120 142
143 if (!phandles)
144 goto out;
145
121 cache_entries = roundup_pow_of_two(phandles); 146 cache_entries = roundup_pow_of_two(phandles);
122 phandle_cache_mask = cache_entries - 1; 147 phandle_cache_mask = cache_entries - 1;
123 148
@@ -720,6 +745,31 @@ struct device_node *of_get_next_available_child(const struct device_node *node,
720EXPORT_SYMBOL(of_get_next_available_child); 745EXPORT_SYMBOL(of_get_next_available_child);
721 746
722/** 747/**
748 * of_get_compatible_child - Find compatible child node
749 * @parent: parent node
750 * @compatible: compatible string
751 *
752 * Lookup child node whose compatible property contains the given compatible
753 * string.
754 *
755 * Returns a node pointer with refcount incremented, use of_node_put() on it
756 * when done; or NULL if not found.
757 */
758struct device_node *of_get_compatible_child(const struct device_node *parent,
759 const char *compatible)
760{
761 struct device_node *child;
762
763 for_each_child_of_node(parent, child) {
764 if (of_device_is_compatible(child, compatible))
765 break;
766 }
767
768 return child;
769}
770EXPORT_SYMBOL(of_get_compatible_child);
771
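A kernel-style usage sketch for the new helper above; the compatible string and surrounding function are illustrative. Per the kernel-doc, the returned node carries a reference the caller must drop:

    #include <linux/errno.h>
    #include <linux/of.h>

    static int probe_child(struct device_node *parent)
    {
        struct device_node *child;

        child = of_get_compatible_child(parent, "vendor,some-block");
        if (!child)
            return -ENODEV;

        /* ... use child ... */

        of_node_put(child);     /* release the reference taken for us */
        return 0;
    }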
772/**
723 * of_get_child_by_name - Find the child node by name for a given parent 773 * of_get_child_by_name - Find the child node by name for a given parent
724 * @node: parent node 774 * @node: parent node
725 * @name: child name to look for. 775 * @name: child name to look for.
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 7ba90c290a42..6c59673933e9 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -241,6 +241,10 @@ static struct amba_device *of_amba_device_create(struct device_node *node,
241 if (!dev) 241 if (!dev)
242 goto err_clear_flag; 242 goto err_clear_flag;
243 243
244 /* AMBA devices only support a single DMA mask */
245 dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
246 dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
247
244 /* setup generic device info */ 248 /* setup generic device info */
245 dev->dev.of_node = of_node_get(node); 249 dev->dev.of_node = of_node_get(node);
246 dev->dev.fwnode = &node->fwnode; 250 dev->dev.fwnode = &node->fwnode;
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index c00f82cc54aa..ee80e79db21a 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -89,6 +89,9 @@ static enum pci_protocol_version_t pci_protocol_version;
89 89
90#define STATUS_REVISION_MISMATCH 0xC0000059 90#define STATUS_REVISION_MISMATCH 0xC0000059
91 91
92/* space for 32bit serial number as string */
93#define SLOT_NAME_SIZE 11
94
92/* 95/*
93 * Message Types 96 * Message Types
94 */ 97 */
@@ -494,6 +497,7 @@ struct hv_pci_dev {
494 struct list_head list_entry; 497 struct list_head list_entry;
495 refcount_t refs; 498 refcount_t refs;
496 enum hv_pcichild_state state; 499 enum hv_pcichild_state state;
500 struct pci_slot *pci_slot;
497 struct pci_function_description desc; 501 struct pci_function_description desc;
498 bool reported_missing; 502 bool reported_missing;
499 struct hv_pcibus_device *hbus; 503 struct hv_pcibus_device *hbus;
@@ -1457,6 +1461,34 @@ static void prepopulate_bars(struct hv_pcibus_device *hbus)
1457 spin_unlock_irqrestore(&hbus->device_list_lock, flags); 1461 spin_unlock_irqrestore(&hbus->device_list_lock, flags);
1458} 1462}
1459 1463
1464/*
1465 * Assign entries in sysfs pci slot directory.
1466 *
1467 * Note that this function does not need to lock the children list
1468 * because it is called from pci_devices_present_work, which is
1469 * serialized with hv_eject_device_work on the same ordered
1470 * workqueue. Therefore the hbus->children list will not change
1471 * even when pci_create_slot sleeps.
1472 */
1473static void hv_pci_assign_slots(struct hv_pcibus_device *hbus)
1474{
1475 struct hv_pci_dev *hpdev;
1476 char name[SLOT_NAME_SIZE];
1477 int slot_nr;
1478
1479 list_for_each_entry(hpdev, &hbus->children, list_entry) {
1480 if (hpdev->pci_slot)
1481 continue;
1482
1483 slot_nr = PCI_SLOT(wslot_to_devfn(hpdev->desc.win_slot.slot));
1484 snprintf(name, SLOT_NAME_SIZE, "%u", hpdev->desc.ser);
1485 hpdev->pci_slot = pci_create_slot(hbus->pci_bus, slot_nr,
1486 name, NULL);
1487 if (!hpdev->pci_slot)
1488 pr_warn("pci_create slot %s failed\n", name);
1489 }
1490}
1491
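The SLOT_NAME_SIZE of 11 used above follows from printing a 32-bit serial in decimal: the largest u32, 4294967295, is 10 digits, so 11 bytes hold any value plus the terminating NUL. A quick standalone check:

    #include <stdint.h>
    #include <stdio.h>

    #define SLOT_NAME_SIZE 11

    int main(void)
    {
        char name[SLOT_NAME_SIZE];
        uint32_t serial = UINT32_MAX;
        int n = snprintf(name, sizeof(name), "%u", (unsigned int)serial);

        /* prints: "4294967295" used 11 of 11 bytes */
        printf("\"%s\" used %d of %d bytes\n", name, n + 1, SLOT_NAME_SIZE);
        return 0;
    }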
1460/** 1492/**
1461 * create_root_hv_pci_bus() - Expose a new root PCI bus 1493 * create_root_hv_pci_bus() - Expose a new root PCI bus
1462 * @hbus: Root PCI bus, as understood by this driver 1494 * @hbus: Root PCI bus, as understood by this driver
@@ -1480,6 +1512,7 @@ static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus)
1480 pci_lock_rescan_remove(); 1512 pci_lock_rescan_remove();
1481 pci_scan_child_bus(hbus->pci_bus); 1513 pci_scan_child_bus(hbus->pci_bus);
1482 pci_bus_assign_resources(hbus->pci_bus); 1514 pci_bus_assign_resources(hbus->pci_bus);
1515 hv_pci_assign_slots(hbus);
1483 pci_bus_add_devices(hbus->pci_bus); 1516 pci_bus_add_devices(hbus->pci_bus);
1484 pci_unlock_rescan_remove(); 1517 pci_unlock_rescan_remove();
1485 hbus->state = hv_pcibus_installed; 1518 hbus->state = hv_pcibus_installed;
@@ -1742,6 +1775,7 @@ static void pci_devices_present_work(struct work_struct *work)
1742 */ 1775 */
1743 pci_lock_rescan_remove(); 1776 pci_lock_rescan_remove();
1744 pci_scan_child_bus(hbus->pci_bus); 1777 pci_scan_child_bus(hbus->pci_bus);
1778 hv_pci_assign_slots(hbus);
1745 pci_unlock_rescan_remove(); 1779 pci_unlock_rescan_remove();
1746 break; 1780 break;
1747 1781
@@ -1858,6 +1892,9 @@ static void hv_eject_device_work(struct work_struct *work)
1858 list_del(&hpdev->list_entry); 1892 list_del(&hpdev->list_entry);
1859 spin_unlock_irqrestore(&hpdev->hbus->device_list_lock, flags); 1893 spin_unlock_irqrestore(&hpdev->hbus->device_list_lock, flags);
1860 1894
1895 if (hpdev->pci_slot)
1896 pci_destroy_slot(hpdev->pci_slot);
1897
1861 memset(&ctxt, 0, sizeof(ctxt)); 1898 memset(&ctxt, 0, sizeof(ctxt));
1862 ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message; 1899 ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message;
1863 ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE; 1900 ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE;
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 7136e3430925..a938abdb41ce 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -496,7 +496,7 @@ int pciehp_power_on_slot(struct slot *slot)
496 u16 slot_status; 496 u16 slot_status;
497 int retval; 497 int retval;
498 498
499 /* Clear sticky power-fault bit from previous power failures */ 499 /* Clear power-fault bit from previous power failures */
500 pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status); 500 pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
501 if (slot_status & PCI_EXP_SLTSTA_PFD) 501 if (slot_status & PCI_EXP_SLTSTA_PFD)
502 pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, 502 pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
@@ -646,6 +646,14 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
646 pciehp_handle_button_press(slot); 646 pciehp_handle_button_press(slot);
647 } 647 }
648 648
649 /* Check Power Fault Detected */
650 if ((events & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) {
651 ctrl->power_fault_detected = 1;
652 ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(slot));
653 pciehp_set_attention_status(slot, 1);
654 pciehp_green_led_off(slot);
655 }
656
649 /* 657 /*
650 * Disable requests have higher priority than Presence Detect Changed 658 * Disable requests have higher priority than Presence Detect Changed
651 * or Data Link Layer State Changed events. 659 * or Data Link Layer State Changed events.
@@ -657,14 +665,6 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
657 pciehp_handle_presence_or_link_change(slot, events); 665 pciehp_handle_presence_or_link_change(slot, events);
658 up_read(&ctrl->reset_lock); 666 up_read(&ctrl->reset_lock);
659 667
660 /* Check Power Fault Detected */
661 if ((events & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) {
662 ctrl->power_fault_detected = 1;
663 ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(slot));
664 pciehp_set_attention_status(slot, 1);
665 pciehp_green_led_off(slot);
666 }
667
668 pci_config_pm_runtime_put(pdev); 668 pci_config_pm_runtime_put(pdev);
669 wake_up(&ctrl->requester); 669 wake_up(&ctrl->requester);
670 return IRQ_HANDLED; 670 return IRQ_HANDLED;
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 29ff9619b5fa..1835f3a7aa8d 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -4547,6 +4547,7 @@ int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
4547 4547
4548 return pci_dev_wait(dev, "bus reset", PCIE_RESET_READY_POLL_MS); 4548 return pci_dev_wait(dev, "bus reset", PCIE_RESET_READY_POLL_MS);
4549} 4549}
4550EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset);
4550 4551
4551static int pci_parent_bus_reset(struct pci_dev *dev, int probe) 4552static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
4552{ 4553{
@@ -5200,7 +5201,7 @@ static int __pci_reset_bus(struct pci_bus *bus)
5200 */ 5201 */
5201int pci_reset_bus(struct pci_dev *pdev) 5202int pci_reset_bus(struct pci_dev *pdev)
5202{ 5203{
5203 return pci_probe_reset_slot(pdev->slot) ? 5204 return (!pci_probe_reset_slot(pdev->slot)) ?
5204 __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus); 5205 __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus);
5205} 5206}
5206EXPORT_SYMBOL_GPL(pci_reset_bus); 5207EXPORT_SYMBOL_GPL(pci_reset_bus);
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index ec784009a36b..201f9e5ff55c 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -2074,6 +2074,7 @@ static void pci_configure_eetlp_prefix(struct pci_dev *dev)
2074{ 2074{
2075#ifdef CONFIG_PCI_PASID 2075#ifdef CONFIG_PCI_PASID
2076 struct pci_dev *bridge; 2076 struct pci_dev *bridge;
2077 int pcie_type;
2077 u32 cap; 2078 u32 cap;
2078 2079
2079 if (!pci_is_pcie(dev)) 2080 if (!pci_is_pcie(dev))
@@ -2083,7 +2084,9 @@ static void pci_configure_eetlp_prefix(struct pci_dev *dev)
2083 if (!(cap & PCI_EXP_DEVCAP2_EE_PREFIX)) 2084 if (!(cap & PCI_EXP_DEVCAP2_EE_PREFIX))
2084 return; 2085 return;
2085 2086
2086 if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) 2087 pcie_type = pci_pcie_type(dev);
2088 if (pcie_type == PCI_EXP_TYPE_ROOT_PORT ||
2089 pcie_type == PCI_EXP_TYPE_RC_END)
2087 dev->eetlp_prefix_path = 1; 2090 dev->eetlp_prefix_path = 1;
2088 else { 2091 else {
2089 bridge = pci_upstream_bridge(dev); 2092 bridge = pci_upstream_bridge(dev);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index ef7143a274e0..6bc27b7fd452 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -4355,11 +4355,6 @@ static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags)
4355 * 4355 *
4356 * 0x9d10-0x9d1b PCI Express Root port #{1-12} 4356 * 0x9d10-0x9d1b PCI Express Root port #{1-12}
4357 * 4357 *
4358 * The 300 series chipset suffers from the same bug so include those root
4359 * ports here as well.
4360 *
4361 * 0xa32c-0xa343 PCI Express Root port #{0-24}
4362 *
4363 * [1] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html 4358 * [1] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html
4364 * [2] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html 4359 * [2] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html
4365 * [3] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-spec-update.html 4360 * [3] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-spec-update.html
@@ -4377,7 +4372,6 @@ static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev)
4377 case 0xa110 ... 0xa11f: case 0xa167 ... 0xa16a: /* Sunrise Point */ 4372 case 0xa110 ... 0xa11f: case 0xa167 ... 0xa16a: /* Sunrise Point */
4378 case 0xa290 ... 0xa29f: case 0xa2e7 ... 0xa2ee: /* Union Point */ 4373 case 0xa290 ... 0xa29f: case 0xa2e7 ... 0xa2ee: /* Union Point */
4379 case 0x9d10 ... 0x9d1b: /* 7th & 8th Gen Mobile */ 4374 case 0x9d10 ... 0x9d1b: /* 7th & 8th Gen Mobile */
4380 case 0xa32c ... 0xa343: /* 300 series */
4381 return true; 4375 return true;
4382 } 4376 }
4383 4377
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index 9940cc70f38b..54a8b30dda38 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -14,6 +14,8 @@
14#include <linux/poll.h> 14#include <linux/poll.h>
15#include <linux/wait.h> 15#include <linux/wait.h>
16 16
17#include <linux/nospec.h>
18
17MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver"); 19MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
18MODULE_VERSION("0.1"); 20MODULE_VERSION("0.1");
19MODULE_LICENSE("GPL"); 21MODULE_LICENSE("GPL");
@@ -909,6 +911,8 @@ static int ioctl_port_to_pff(struct switchtec_dev *stdev,
909 default: 911 default:
910 if (p.port > ARRAY_SIZE(pcfg->dsp_pff_inst_id)) 912 if (p.port > ARRAY_SIZE(pcfg->dsp_pff_inst_id))
911 return -EINVAL; 913 return -EINVAL;
914 p.port = array_index_nospec(p.port,
915 ARRAY_SIZE(pcfg->dsp_pff_inst_id) + 1);
912 p.pff = ioread32(&pcfg->dsp_pff_inst_id[p.port - 1]); 916 p.pff = ioread32(&pcfg->dsp_pff_inst_id[p.port - 1]);
913 break; 917 break;
914 } 918 }
diff --git a/drivers/pinctrl/cirrus/pinctrl-madera-core.c b/drivers/pinctrl/cirrus/pinctrl-madera-core.c
index ece41fb2848f..c4f4d904e4a6 100644
--- a/drivers/pinctrl/cirrus/pinctrl-madera-core.c
+++ b/drivers/pinctrl/cirrus/pinctrl-madera-core.c
@@ -1040,7 +1040,7 @@ static int madera_pin_probe(struct platform_device *pdev)
1040 } 1040 }
1041 1041
1042 /* if the configuration is provided through pdata, apply it */ 1042 /* if the configuration is provided through pdata, apply it */
1043 if (pdata) { 1043 if (pdata && pdata->gpio_configs) {
1044 ret = pinctrl_register_mappings(pdata->gpio_configs, 1044 ret = pinctrl_register_mappings(pdata->gpio_configs,
1045 pdata->n_gpio_configs); 1045 pdata->n_gpio_configs);
1046 if (ret) { 1046 if (ret) {
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index 6a1b6058b991..628817c40e3b 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -793,7 +793,7 @@ static int ingenic_pinctrl_probe(struct platform_device *pdev)
793 793
794 err = pinctrl_generic_add_group(jzpc->pctl, group->name, 794 err = pinctrl_generic_add_group(jzpc->pctl, group->name,
795 group->pins, group->num_pins, group->data); 795 group->pins, group->num_pins, group->data);
796 if (err) { 796 if (err < 0) {
797 dev_err(dev, "Failed to register group %s\n", 797 dev_err(dev, "Failed to register group %s\n",
798 group->name); 798 group->name);
799 return err; 799 return err;
@@ -806,7 +806,7 @@ static int ingenic_pinctrl_probe(struct platform_device *pdev)
806 err = pinmux_generic_add_function(jzpc->pctl, func->name, 806 err = pinmux_generic_add_function(jzpc->pctl, func->name,
807 func->group_names, func->num_group_names, 807 func->group_names, func->num_group_names,
808 func->data); 808 func->data);
809 if (err) { 809 if (err < 0) {
810 dev_err(dev, "Failed to register function %s\n", 810 dev_err(dev, "Failed to register function %s\n",
811 func->name); 811 func->name);
812 return err; 812 return err;
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 2155a30c282b..5d72ffad32c2 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -634,6 +634,29 @@ static void msm_gpio_irq_mask(struct irq_data *d)
634 raw_spin_lock_irqsave(&pctrl->lock, flags); 634 raw_spin_lock_irqsave(&pctrl->lock, flags);
635 635
636 val = readl(pctrl->regs + g->intr_cfg_reg); 636 val = readl(pctrl->regs + g->intr_cfg_reg);
637 /*
638 * There are two bits that control interrupt forwarding to the CPU. The
639 * RAW_STATUS_EN bit causes the level or edge sensed on the line to be
640 * latched into the interrupt status register when the hardware detects
641 * an irq that it's configured for (either edge for edge type or level
642 * for level type irq). The 'non-raw' status enable bit causes the
643 * hardware to assert the summary interrupt to the CPU if the latched
644 * status bit is set. There's a bug though, the edge detection logic
645 * seems to have a problem where toggling the RAW_STATUS_EN bit may
646 * cause the status bit to latch spuriously when there isn't any edge
647 * so we can't touch that bit for edge type irqs and we have to keep
648 * the bit set anyway so that edges are latched while the line is masked.
649 *
650 * To make matters more complicated, leaving the RAW_STATUS_EN bit
651 * enabled all the time causes level interrupts to re-latch into the
652 * status register because the level is still present on the line after
653 * we ack it. We clear the raw status enable bit during mask here and
654 * set the bit on unmask so the interrupt can't latch into the hardware
655 * while it's masked.
656 */
657 if (irqd_get_trigger_type(d) & IRQ_TYPE_LEVEL_MASK)
658 val &= ~BIT(g->intr_raw_status_bit);
659
637 val &= ~BIT(g->intr_enable_bit); 660 val &= ~BIT(g->intr_enable_bit);
638 writel(val, pctrl->regs + g->intr_cfg_reg); 661 writel(val, pctrl->regs + g->intr_cfg_reg);
639 662
@@ -655,6 +678,7 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
655 raw_spin_lock_irqsave(&pctrl->lock, flags); 678 raw_spin_lock_irqsave(&pctrl->lock, flags);
656 679
657 val = readl(pctrl->regs + g->intr_cfg_reg); 680 val = readl(pctrl->regs + g->intr_cfg_reg);
681 val |= BIT(g->intr_raw_status_bit);
658 val |= BIT(g->intr_enable_bit); 682 val |= BIT(g->intr_enable_bit);
659 writel(val, pctrl->regs + g->intr_cfg_reg); 683 writel(val, pctrl->regs + g->intr_cfg_reg);
660 684
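A toy model of the mask/unmask pairing the comment describes, with invented register layout and bit positions: level-type lines drop RAW_STATUS_EN while masked so the still-asserted level cannot re-latch, edge-type lines keep it so edges arriving while masked are not lost, and unmask restores it unconditionally.

    #include <stdio.h>
    #include <stdint.h>

    #define RAW_STATUS_EN (1u << 4)   /* illustrative bit positions */
    #define ENABLE        (1u << 0)

    enum trig { EDGE, LEVEL };

    static uint32_t cfg = RAW_STATUS_EN | ENABLE;

    static void irq_mask(enum trig t)
    {
        if (t == LEVEL)
            cfg &= ~RAW_STATUS_EN;   /* stop re-latching the level */
        cfg &= ~ENABLE;
    }

    static void irq_unmask(void)
    {
        cfg |= RAW_STATUS_EN;        /* safe to set again for any type */
        cfg |= ENABLE;
    }

    int main(void)
    {
        irq_mask(LEVEL);
        printf("masked level irq: cfg=0x%02x\n", (unsigned)cfg);  /* 0x00 */
        irq_unmask();
        printf("unmasked:         cfg=0x%02x\n", (unsigned)cfg);  /* 0x11 */
        return 0;
    }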
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index ec891bc7d10a..f039266b275d 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -872,8 +872,6 @@ static int hex2bitmap(const char *str, unsigned long *bitmap, int bits)
872 if (bits & 0x07) 872 if (bits & 0x07)
873 return -EINVAL; 873 return -EINVAL;
874 874
875 memset(bitmap, 0, bits / 8);
876
877 if (str[0] == '0' && str[1] == 'x') 875 if (str[0] == '0' && str[1] == 'x')
878 str++; 876 str++;
879 if (*str == 'x') 877 if (*str == 'x')
@@ -895,25 +893,23 @@ static int hex2bitmap(const char *str, unsigned long *bitmap, int bits)
895} 893}
896 894
897/* 895/*
898 * str2clrsetmasks() - parse bitmask argument and set the clear and 896 * modify_bitmap() - parse bitmask argument and modify an existing
899 * the set bitmap mask. A concatenation (done with ',') of these terms 897 * bit mask accordingly. A concatenation (done with ',') of these
900 * is recognized: 898 * terms is recognized:
901 * +<bitnr>[-<bitnr>] or -<bitnr>[-<bitnr>] 899 * +<bitnr>[-<bitnr>] or -<bitnr>[-<bitnr>]
902 * <bitnr> may be any valid number (hex, decimal or octal) in the range 900 * <bitnr> may be any valid number (hex, decimal or octal) in the range
903 * 0...bits-1; the leading + or - is required. Here are some examples: 901 * 0...bits-1; the leading + or - is required. Here are some examples:
904 * +0-15,+32,-128,-0xFF 902 * +0-15,+32,-128,-0xFF
905 * -0-255,+1-16,+0x128 903 * -0-255,+1-16,+0x128
906 * +1,+2,+3,+4,-5,-7-10 904 * +1,+2,+3,+4,-5,-7-10
907 * Returns a clear and a set bitmask. Every positive value in the string 905 * Returns the new bitmap after all changes have been applied. Every
908 * results in a bit set in the set mask and every negative value in the 906 * positive value in the string will set a bit and every negative value
909 * string results in a bit SET in the clear mask. As a bit may be touched 907 * in the string will clear a bit. As a bit may be touched more than once,
910 * more than once, the last 'operation' wins: +0-255,-128 = all but bit 908 * the last 'operation' wins:
911 * 128 set in the set mask, only bit 128 set in the clear mask. 909 * +0-255,-128 = first bits 0-255 will be set, then bit 128 will be
910 * cleared again. All other bits are unmodified.
912 */ 911 */
913static int str2clrsetmasks(const char *str, 912static int modify_bitmap(const char *str, unsigned long *bitmap, int bits)
914 unsigned long *clrmap,
915 unsigned long *setmap,
916 int bits)
917{ 913{
918 int a, i, z; 914 int a, i, z;
919 char *np, sign; 915 char *np, sign;
@@ -922,9 +918,6 @@ static int str2clrsetmasks(const char *str,
922 if (bits & 0x07) 918 if (bits & 0x07)
923 return -EINVAL; 919 return -EINVAL;
924 920
925 memset(clrmap, 0, bits / 8);
926 memset(setmap, 0, bits / 8);
927
928 while (*str) { 921 while (*str) {
929 sign = *str++; 922 sign = *str++;
930 if (sign != '+' && sign != '-') 923 if (sign != '+' && sign != '-')
@@ -940,13 +933,10 @@ static int str2clrsetmasks(const char *str,
940 str = np; 933 str = np;
941 } 934 }
942 for (i = a; i <= z; i++) 935 for (i = a; i <= z; i++)
943 if (sign == '+') { 936 if (sign == '+')
944 set_bit_inv(i, setmap); 937 set_bit_inv(i, bitmap);
945 clear_bit_inv(i, clrmap); 938 else
946 } else { 939 clear_bit_inv(i, bitmap);
947 clear_bit_inv(i, setmap);
948 set_bit_inv(i, clrmap);
949 }
950 while (*str == ',' || *str == '\n') 940 while (*str == ',' || *str == '\n')
951 str++; 941 str++;
952 } 942 }
@@ -970,44 +960,34 @@ static int process_mask_arg(const char *str,
970 unsigned long *bitmap, int bits, 960 unsigned long *bitmap, int bits,
971 struct mutex *lock) 961 struct mutex *lock)
972{ 962{
973 int i; 963 unsigned long *newmap, size;
964 int rc;
974 965
975 /* bits needs to be a multiple of 8 */ 966 /* bits needs to be a multiple of 8 */
976 if (bits & 0x07) 967 if (bits & 0x07)
977 return -EINVAL; 968 return -EINVAL;
978 969
970 size = BITS_TO_LONGS(bits)*sizeof(unsigned long);
971 newmap = kmalloc(size, GFP_KERNEL);
972 if (!newmap)
973 return -ENOMEM;
974 if (mutex_lock_interruptible(lock)) {
975 kfree(newmap);
976 return -ERESTARTSYS;
977 }
978
979 if (*str == '+' || *str == '-') { 979 if (*str == '+' || *str == '-') {
980 DECLARE_BITMAP(clrm, bits); 980 memcpy(newmap, bitmap, size);
981 DECLARE_BITMAP(setm, bits); 981 rc = modify_bitmap(str, newmap, bits);
982
983 i = str2clrsetmasks(str, clrm, setm, bits);
984 if (i)
985 return i;
986 if (mutex_lock_interruptible(lock))
987 return -ERESTARTSYS;
988 for (i = 0; i < bits; i++) {
989 if (test_bit_inv(i, clrm))
990 clear_bit_inv(i, bitmap);
991 if (test_bit_inv(i, setm))
992 set_bit_inv(i, bitmap);
993 }
994 } else { 982 } else {
995 DECLARE_BITMAP(setm, bits); 983 memset(newmap, 0, size);
996 984 rc = hex2bitmap(str, newmap, bits);
997 i = hex2bitmap(str, setm, bits);
998 if (i)
999 return i;
1000 if (mutex_lock_interruptible(lock))
1001 return -ERESTARTSYS;
1002 for (i = 0; i < bits; i++)
1003 if (test_bit_inv(i, setm))
1004 set_bit_inv(i, bitmap);
1005 else
1006 clear_bit_inv(i, bitmap);
1007 } 985 }
986 if (rc == 0)
987 memcpy(bitmap, newmap, size);
1008 mutex_unlock(lock); 988 mutex_unlock(lock);
1009 989 kfree(newmap);
1010 return 0; 990 return rc;
1011} 991}
1012 992
1013/* 993/*
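A self-contained userspace sketch of the grammar modify_bitmap() documents above, to show the "last operation wins" semantics. Bit numbering here is plain LSB-first within each byte, unlike the kernel's set_bit_inv()/clear_bit_inv() ordering, and error handling is reduced to a single return code.

    #include <stdio.h>
    #include <stdlib.h>

    static int modify_bitmap(const char *str, unsigned char *map, int bits)
    {
        while (*str) {
            char sign = *str++;
            char *end;
            long a, z;

            if (sign != '+' && sign != '-')
                return -1;
            a = strtol(str, &end, 0);   /* base 0: hex, decimal, octal */
            if (end == str)
                return -1;
            str = end;
            z = a;
            if (*str == '-') {          /* optional "-<bitnr>" range end */
                z = strtol(++str, &end, 0);
                if (end == str)
                    return -1;
                str = end;
            }
            if (a < 0 || z >= bits || a > z)
                return -1;
            for (long i = a; i <= z; i++) {
                if (sign == '+')
                    map[i / 8] |= 1u << (i % 8);
                else
                    map[i / 8] &= ~(1u << (i % 8));
            }
            while (*str == ',' || *str == '\n')
                str++;
        }
        return 0;
    }

    int main(void)
    {
        unsigned char map[32] = { 0 };                /* 256 bits */
        modify_bitmap("+0-255,-128", map, 256);
        printf("bit 127=%d bit 128=%d bit 129=%d\n",  /* 1 0 1 */
               !!(map[15] & 0x80), !!(map[16] & 0x01),
               !!(map[16] & 0x02));
        return 0;
    }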
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 49f64eb3eab0..de8282420f96 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -25,6 +25,7 @@
25#include <linux/netdevice.h> 25#include <linux/netdevice.h>
26#include <linux/netdev_features.h> 26#include <linux/netdev_features.h>
27#include <linux/skbuff.h> 27#include <linux/skbuff.h>
28#include <linux/vmalloc.h>
28 29
29#include <net/iucv/af_iucv.h> 30#include <net/iucv/af_iucv.h>
30#include <net/dsfield.h> 31#include <net/dsfield.h>
@@ -4699,7 +4700,7 @@ static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
4699 4700
4700 priv.buffer_len = oat_data.buffer_len; 4701 priv.buffer_len = oat_data.buffer_len;
4701 priv.response_len = 0; 4702 priv.response_len = 0;
4702 priv.buffer = kzalloc(oat_data.buffer_len, GFP_KERNEL); 4703 priv.buffer = vzalloc(oat_data.buffer_len);
4703 if (!priv.buffer) { 4704 if (!priv.buffer) {
4704 rc = -ENOMEM; 4705 rc = -ENOMEM;
4705 goto out; 4706 goto out;
@@ -4740,7 +4741,7 @@ static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
4740 rc = -EFAULT; 4741 rc = -EFAULT;
4741 4742
4742out_free: 4743out_free:
4743 kfree(priv.buffer); 4744 vfree(priv.buffer);
4744out: 4745out:
4745 return rc; 4746 return rc;
4746} 4747}
@@ -5706,6 +5707,8 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
5706 dev->priv_flags &= ~IFF_TX_SKB_SHARING; 5707 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
5707 dev->hw_features |= NETIF_F_SG; 5708 dev->hw_features |= NETIF_F_SG;
5708 dev->vlan_features |= NETIF_F_SG; 5709 dev->vlan_features |= NETIF_F_SG;
5710 if (IS_IQD(card))
5711 dev->features |= NETIF_F_SG;
5709 } 5712 }
5710 5713
5711 return dev; 5714 return dev;
@@ -5768,8 +5771,10 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
5768 qeth_update_from_chp_desc(card); 5771 qeth_update_from_chp_desc(card);
5769 5772
5770 card->dev = qeth_alloc_netdev(card); 5773 card->dev = qeth_alloc_netdev(card);
5771 if (!card->dev) 5774 if (!card->dev) {
5775 rc = -ENOMEM;
5772 goto err_card; 5776 goto err_card;
5777 }
5773 5778
5774 qeth_determine_capabilities(card); 5779 qeth_determine_capabilities(card);
5775 enforced_disc = qeth_enforce_discipline(card); 5780 enforced_disc = qeth_enforce_discipline(card);
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 710fa74892ae..b5e38531733f 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -423,7 +423,7 @@ static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
423 default: 423 default:
424 dev_kfree_skb_any(skb); 424 dev_kfree_skb_any(skb);
425 QETH_CARD_TEXT(card, 3, "inbunkno"); 425 QETH_CARD_TEXT(card, 3, "inbunkno");
426 QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN); 426 QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr));
427 continue; 427 continue;
428 } 428 }
429 work_done++; 429 work_done++;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 7175086677fb..ada258c01a08 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1390,7 +1390,7 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
1390 default: 1390 default:
1391 dev_kfree_skb_any(skb); 1391 dev_kfree_skb_any(skb);
1392 QETH_CARD_TEXT(card, 3, "inbunkno"); 1392 QETH_CARD_TEXT(card, 3, "inbunkno");
1393 QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN); 1393 QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr));
1394 continue; 1394 continue;
1395 } 1395 }
1396 work_done++; 1396 work_done++;
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 8fc851a9e116..7c097006c54d 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -52,12 +52,12 @@ config SCSI_MQ_DEFAULT
52 default y 52 default y
53 depends on SCSI 53 depends on SCSI
54 ---help--- 54 ---help---
55 This option enables the new blk-mq based I/O path for SCSI 55 This option enables the blk-mq based I/O path for SCSI devices by
56 devices by default. With the option the scsi_mod.use_blk_mq 56 default. With this option the scsi_mod.use_blk_mq module/boot
57 module/boot option defaults to Y, without it to N, but it can 57 option defaults to Y, without it to N, but it can still be
58 still be overridden either way. 58 overridden either way.
59 59
60 If unsure say N. 60 If unsure say Y.
61 61
62config SCSI_PROC_FS 62config SCSI_PROC_FS
63 bool "legacy /proc/scsi/ support" 63 bool "legacy /proc/scsi/ support"
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 29bf1e60f542..39eb415987fc 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -1346,7 +1346,7 @@ struct fib {
1346struct aac_hba_map_info { 1346struct aac_hba_map_info {
1347 __le32 rmw_nexus; /* nexus for native HBA devices */ 1347 __le32 rmw_nexus; /* nexus for native HBA devices */
1348 u8 devtype; /* device type */ 1348 u8 devtype; /* device type */
1349 u8 reset_state; /* 0 - no reset, 1..x - */ 1349 s8 reset_state; /* 0 - no reset, 1..x - */
1350 /* after xth TM LUN reset */ 1350 /* after xth TM LUN reset */
1351 u16 qd_limit; 1351 u16 qd_limit;
1352 u32 scan_counter; 1352 u32 scan_counter;
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
index 23d07e9f87d0..e51923886475 100644
--- a/drivers/scsi/csiostor/csio_hw.c
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -1602,6 +1602,46 @@ fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16)
1602} 1602}
1603 1603
1604/** 1604/**
1605 * fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits
1606 * @caps32: a 32-bit Port Capabilities value
1607 *
1608 * Returns the equivalent 16-bit Port Capabilities value. Note that
1609 * not all 32-bit Port Capabilities can be represented in the 16-bit
1610 * Port Capabilities and some fields/values may not make it.
1611 */
1612fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32)
1613{
1614 fw_port_cap16_t caps16 = 0;
1615
1616 #define CAP32_TO_CAP16(__cap) \
1617 do { \
1618 if (caps32 & FW_PORT_CAP32_##__cap) \
1619 caps16 |= FW_PORT_CAP_##__cap; \
1620 } while (0)
1621
1622 CAP32_TO_CAP16(SPEED_100M);
1623 CAP32_TO_CAP16(SPEED_1G);
1624 CAP32_TO_CAP16(SPEED_10G);
1625 CAP32_TO_CAP16(SPEED_25G);
1626 CAP32_TO_CAP16(SPEED_40G);
1627 CAP32_TO_CAP16(SPEED_100G);
1628 CAP32_TO_CAP16(FC_RX);
1629 CAP32_TO_CAP16(FC_TX);
1630 CAP32_TO_CAP16(802_3_PAUSE);
1631 CAP32_TO_CAP16(802_3_ASM_DIR);
1632 CAP32_TO_CAP16(ANEG);
1633 CAP32_TO_CAP16(FORCE_PAUSE);
1634 CAP32_TO_CAP16(MDIAUTO);
1635 CAP32_TO_CAP16(MDISTRAIGHT);
1636 CAP32_TO_CAP16(FEC_RS);
1637 CAP32_TO_CAP16(FEC_BASER_RS);
1638
1639 #undef CAP32_TO_CAP16
1640
1641 return caps16;
1642}
1643
1644/**
1605 * lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities 1645 * lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities
1606 * @lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value 1646 * @lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value
1607 * 1647 *
@@ -1759,7 +1799,7 @@ csio_enable_ports(struct csio_hw *hw)
1759 val = 1; 1799 val = 1;
1760 1800
1761 csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, 1801 csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO,
1762 hw->pfn, 0, 1, &param, &val, false, 1802 hw->pfn, 0, 1, &param, &val, true,
1763 NULL); 1803 NULL);
1764 1804
1765 if (csio_mb_issue(hw, mbp)) { 1805 if (csio_mb_issue(hw, mbp)) {
@@ -1769,16 +1809,9 @@ csio_enable_ports(struct csio_hw *hw)
1769 return -EINVAL; 1809 return -EINVAL;
1770 } 1810 }
1771 1811
1772 csio_mb_process_read_params_rsp(hw, mbp, &retval, 1, 1812 csio_mb_process_read_params_rsp(hw, mbp, &retval,
1773 &val); 1813 0, NULL);
1774 if (retval != FW_SUCCESS) { 1814 fw_caps = retval ? FW_CAPS16 : FW_CAPS32;
1775 csio_err(hw, "FW_PARAMS_CMD(r) port:%d failed: 0x%x\n",
1776 portid, retval);
1777 mempool_free(mbp, hw->mb_mempool);
1778 return -EINVAL;
1779 }
1780
1781 fw_caps = val;
1782 } 1815 }
1783 1816
1784 /* Read PORT information */ 1817 /* Read PORT information */
@@ -2364,8 +2397,8 @@ bye:
2364} 2397}
2365 2398
2366/* 2399/*
2367 * Returns -EINVAL if attempts to flash the firmware failed 2400 * Returns -EINVAL if attempts to flash the firmware failed,
2368 * else returns 0, 2401 * -ENOMEM if memory allocation failed else returns 0,
2369 * if flashing was not attempted because the card had the 2402 * if flashing was not attempted because the card had the
2370 * latest firmware ECANCELED is returned 2403 * latest firmware ECANCELED is returned
2371 */ 2404 */
@@ -2393,6 +2426,13 @@ csio_hw_flash_fw(struct csio_hw *hw, int *reset)
2393 return -EINVAL; 2426 return -EINVAL;
2394 } 2427 }
2395 2428
2429 /* allocate memory to read the header of the firmware on the
2430 * card
2431 */
2432 card_fw = kmalloc(sizeof(*card_fw), GFP_KERNEL);
2433 if (!card_fw)
2434 return -ENOMEM;
2435
2396 if (csio_is_t5(pci_dev->device & CSIO_HW_CHIP_MASK)) 2436 if (csio_is_t5(pci_dev->device & CSIO_HW_CHIP_MASK))
2397 fw_bin_file = FW_FNAME_T5; 2437 fw_bin_file = FW_FNAME_T5;
2398 else 2438 else
@@ -2406,11 +2446,6 @@ csio_hw_flash_fw(struct csio_hw *hw, int *reset)
2406 fw_size = fw->size; 2446 fw_size = fw->size;
2407 } 2447 }
2408 2448
2409 /* allocate memory to read the header of the firmware on the
2410 * card
2411 */
2412 card_fw = kmalloc(sizeof(*card_fw), GFP_KERNEL);
2413
2414 /* upgrade FW logic */ 2449 /* upgrade FW logic */
2415 ret = csio_hw_prep_fw(hw, fw_info, fw_data, fw_size, card_fw, 2450 ret = csio_hw_prep_fw(hw, fw_info, fw_data, fw_size, card_fw,
2416 hw->fw_state, reset); 2451 hw->fw_state, reset);
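The new helper is the inverse of fwcaps16_to_caps32(), and as its kernel-doc notes, the narrowing is lossy: any 32-bit capability without a 16-bit encoding is silently dropped. A toy re-creation of the CAP32_TO_CAP16 pattern with made-up flag values shows the effect:

    #include <stdio.h>
    #include <stdint.h>

    #define CAP32_SPEED_1G   (1u << 0)   /* invented values */
    #define CAP32_SPEED_10G  (1u << 1)
    #define CAP32_SPEED_50G  (1u << 2)   /* no 16-bit counterpart */
    #define CAP16_SPEED_1G   (1u << 0)
    #define CAP16_SPEED_10G  (1u << 1)

    static uint16_t caps32_to_caps16(uint32_t caps32)
    {
        uint16_t caps16 = 0;
    #define CAP32_TO_CAP16(cap) do { \
            if (caps32 & CAP32_##cap) \
                caps16 |= CAP16_##cap; \
        } while (0)
        CAP32_TO_CAP16(SPEED_1G);
        CAP32_TO_CAP16(SPEED_10G);
        /* SPEED_50G has no CAP16_ equivalent, so it is dropped */
    #undef CAP32_TO_CAP16
        return caps16;
    }

    int main(void)
    {
        uint32_t c32 = CAP32_SPEED_1G | CAP32_SPEED_50G;
        printf("0x%x -> 0x%x\n", c32, caps32_to_caps16(c32)); /* 0x5 -> 0x1 */
        return 0;
    }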
diff --git a/drivers/scsi/csiostor/csio_hw.h b/drivers/scsi/csiostor/csio_hw.h
index 9e73ef771eb7..e351af6e7c81 100644
--- a/drivers/scsi/csiostor/csio_hw.h
+++ b/drivers/scsi/csiostor/csio_hw.h
@@ -639,6 +639,7 @@ int csio_handle_intr_status(struct csio_hw *, unsigned int,
639 639
640fw_port_cap32_t fwcap_to_fwspeed(fw_port_cap32_t acaps); 640fw_port_cap32_t fwcap_to_fwspeed(fw_port_cap32_t acaps);
641fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16); 641fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16);
642fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32);
642fw_port_cap32_t lstatus_to_fwcap(u32 lstatus); 643fw_port_cap32_t lstatus_to_fwcap(u32 lstatus);
643 644
644int csio_hw_start(struct csio_hw *); 645int csio_hw_start(struct csio_hw *);
diff --git a/drivers/scsi/csiostor/csio_mb.c b/drivers/scsi/csiostor/csio_mb.c
index c026417269c3..6f13673d6aa0 100644
--- a/drivers/scsi/csiostor/csio_mb.c
+++ b/drivers/scsi/csiostor/csio_mb.c
@@ -368,7 +368,7 @@ csio_mb_port(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
368 FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); 368 FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
369 369
370 if (fw_caps == FW_CAPS16) 370 if (fw_caps == FW_CAPS16)
371 cmdp->u.l1cfg.rcap = cpu_to_be32(fc); 371 cmdp->u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(fc));
372 else 372 else
373 cmdp->u.l1cfg32.rcap32 = cpu_to_be32(fc); 373 cmdp->u.l1cfg32.rcap32 = cpu_to_be32(fc);
374} 374}
@@ -395,8 +395,8 @@ csio_mb_process_read_port_rsp(struct csio_hw *hw, struct csio_mb *mbp,
395 *pcaps = fwcaps16_to_caps32(ntohs(rsp->u.info.pcap)); 395 *pcaps = fwcaps16_to_caps32(ntohs(rsp->u.info.pcap));
396 *acaps = fwcaps16_to_caps32(ntohs(rsp->u.info.acap)); 396 *acaps = fwcaps16_to_caps32(ntohs(rsp->u.info.acap));
397 } else { 397 } else {
398 *pcaps = ntohs(rsp->u.info32.pcaps32); 398 *pcaps = be32_to_cpu(rsp->u.info32.pcaps32);
399 *acaps = ntohs(rsp->u.info32.acaps32); 399 *acaps = be32_to_cpu(rsp->u.info32.acaps32);
400 } 400 }
401 } 401 }
402} 402}
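The csio_mb_process_read_port_rsp() hunk fixes an endianness bug: pcaps32 and acaps32 are 32-bit big-endian fields, and ntohs() both truncates its argument to 16 bits and swaps only those 16, so the upper half of each capability word was lost. A small demo, using POSIX ntohl() in place of the kernel's be32_to_cpu():

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    int main(void)
    {
        uint32_t wire = htonl(0x00050001);   /* a 32-bit big-endian field */

        /* Output shown for a little-endian host; the exact garbling
         * differs with host byte order, the data loss does not. */
        printf("ntohs:       0x%08x\n", (unsigned)ntohs((uint16_t)wire));
        printf("be32_to_cpu: 0x%08x\n", (unsigned)ntohl(wire)); /* 0x00050001 */
        return 0;
    }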
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index f02dcc875a09..ea4b0bb0c1cd 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -563,35 +563,13 @@ struct Scsi_Host *scsi_host_get(struct Scsi_Host *shost)
563} 563}
564EXPORT_SYMBOL(scsi_host_get); 564EXPORT_SYMBOL(scsi_host_get);
565 565
566struct scsi_host_mq_in_flight {
567 int cnt;
568};
569
570static void scsi_host_check_in_flight(struct request *rq, void *data,
571 bool reserved)
572{
573 struct scsi_host_mq_in_flight *in_flight = data;
574
575 if (blk_mq_request_started(rq))
576 in_flight->cnt++;
577}
578
579/** 566/**
580 * scsi_host_busy - Return the host busy counter 567 * scsi_host_busy - Return the host busy counter
581 * @shost: Pointer to Scsi_Host to inc. 568 * @shost: Pointer to Scsi_Host to inc.
582 **/ 569 **/
583int scsi_host_busy(struct Scsi_Host *shost) 570int scsi_host_busy(struct Scsi_Host *shost)
584{ 571{
585 struct scsi_host_mq_in_flight in_flight = { 572 return atomic_read(&shost->host_busy);
586 .cnt = 0,
587 };
588
589 if (!shost->use_blk_mq)
590 return atomic_read(&shost->host_busy);
591
592 blk_mq_tagset_busy_iter(&shost->tag_set, scsi_host_check_in_flight,
593 &in_flight);
594 return in_flight.cnt;
595} 573}
596EXPORT_SYMBOL(scsi_host_busy); 574EXPORT_SYMBOL(scsi_host_busy);
597 575
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 58bb70b886d7..c120929d4ffe 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -976,7 +976,7 @@ static struct scsi_host_template hpsa_driver_template = {
976#endif 976#endif
977 .sdev_attrs = hpsa_sdev_attrs, 977 .sdev_attrs = hpsa_sdev_attrs,
978 .shost_attrs = hpsa_shost_attrs, 978 .shost_attrs = hpsa_shost_attrs,
979 .max_sectors = 1024, 979 .max_sectors = 2048,
980 .no_write_same = 1, 980 .no_write_same = 1,
981}; 981};
982 982
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index e0d0da5f43d6..43732e8d1347 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -672,7 +672,7 @@ struct lpfc_hba {
672#define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */ 672#define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */
673#define LS_IGNORE_ERATT 0x4 /* intr handler should ignore ERATT */ 673#define LS_IGNORE_ERATT 0x4 /* intr handler should ignore ERATT */
674#define LS_MDS_LINK_DOWN 0x8 /* MDS Diagnostics Link Down */ 674#define LS_MDS_LINK_DOWN 0x8 /* MDS Diagnostics Link Down */
675#define LS_MDS_LOOPBACK 0x16 /* MDS Diagnostics Link Up (Loopback) */ 675#define LS_MDS_LOOPBACK 0x10 /* MDS Diagnostics Link Up (Loopback) */
676 676
677 uint32_t hba_flag; /* hba generic flags */ 677 uint32_t hba_flag; /* hba generic flags */
678#define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */ 678#define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */
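The LS_MDS_LOOPBACK fix is a classic flag-bit typo: every other define in this block is a single bit (0x1, 0x2, 0x4, 0x8), but 0x16 is 0b10110, which overlaps LS_NPIV_FAB_SUPPORTED and LS_IGNORE_ERATT, so testing the flag could fire when loopback was never set. A minimal demonstration:

    #include <stdio.h>

    #define LS_NPIV_FAB_SUPPORTED 0x2
    #define LS_IGNORE_ERATT       0x4
    #define LS_MDS_LOOPBACK_BAD   0x16   /* 0b10110: overlaps 0x2 and 0x4 */
    #define LS_MDS_LOOPBACK       0x10   /* the intended single bit */

    int main(void)
    {
        unsigned flag = LS_NPIV_FAB_SUPPORTED;   /* loopback NOT set */

        printf("bad test:  %d\n", !!(flag & LS_MDS_LOOPBACK_BAD)); /* 1 */
        printf("good test: %d\n", !!(flag & LS_MDS_LOOPBACK));     /* 0 */
        return 0;
    }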
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 5a25553415f8..057a60abe664 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -5122,16 +5122,16 @@ LPFC_ATTR_R(enable_SmartSAN, 0, 0, 1, "Enable SmartSAN functionality");
5122 5122
5123/* 5123/*
5124# lpfc_fdmi_on: Controls FDMI support. 5124# lpfc_fdmi_on: Controls FDMI support.
5125# 0 No FDMI support (default) 5125# 0 No FDMI support
5126# 1 Traditional FDMI support 5126# 1 Traditional FDMI support (default)
5127# Traditional FDMI support means the driver will assume FDMI-2 support; 5127# Traditional FDMI support means the driver will assume FDMI-2 support;
5128# however, if that fails, it will fallback to FDMI-1. 5128# however, if that fails, it will fallback to FDMI-1.
5129# If lpfc_enable_SmartSAN is set to 1, the driver ignores lpfc_fdmi_on. 5129# If lpfc_enable_SmartSAN is set to 1, the driver ignores lpfc_fdmi_on.
5130# If lpfc_enable_SmartSAN is set 0, the driver uses the current value of 5130# If lpfc_enable_SmartSAN is set 0, the driver uses the current value of
5131# lpfc_fdmi_on. 5131# lpfc_fdmi_on.
5132# Value range [0,1]. Default value is 0. 5132# Value range [0,1]. Default value is 1.
5133*/ 5133*/
5134LPFC_ATTR_R(fdmi_on, 0, 0, 1, "Enable FDMI support"); 5134LPFC_ATTR_R(fdmi_on, 1, 0, 1, "Enable FDMI support");
5135 5135
5136/* 5136/*
5137# Specifies the maximum number of ELS cmds we can have outstanding (for 5137# Specifies the maximum number of ELS cmds we can have outstanding (for
diff --git a/drivers/scsi/qedi/qedi.h b/drivers/scsi/qedi/qedi.h
index fc3babc15fa3..a6f96b35e971 100644
--- a/drivers/scsi/qedi/qedi.h
+++ b/drivers/scsi/qedi/qedi.h
@@ -77,6 +77,11 @@ enum qedi_nvm_tgts {
77 QEDI_NVM_TGT_SEC, 77 QEDI_NVM_TGT_SEC,
78}; 78};
79 79
80struct qedi_nvm_iscsi_image {
81 struct nvm_iscsi_cfg iscsi_cfg;
82 u32 crc;
83};
84
80struct qedi_uio_ctrl { 85struct qedi_uio_ctrl {
81 /* meta data */ 86 /* meta data */
82 u32 uio_hsi_version; 87 u32 uio_hsi_version;
@@ -294,7 +299,7 @@ struct qedi_ctx {
294 void *bdq_pbl_list; 299 void *bdq_pbl_list;
295 dma_addr_t bdq_pbl_list_dma; 300 dma_addr_t bdq_pbl_list_dma;
296 u8 bdq_pbl_list_num_entries; 301 u8 bdq_pbl_list_num_entries;
297 struct nvm_iscsi_cfg *iscsi_cfg; 302 struct qedi_nvm_iscsi_image *iscsi_image;
298 dma_addr_t nvm_buf_dma; 303 dma_addr_t nvm_buf_dma;
299 void __iomem *bdq_primary_prod; 304 void __iomem *bdq_primary_prod;
300 void __iomem *bdq_secondary_prod; 305 void __iomem *bdq_secondary_prod;
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index aa96bccb5a96..cc8e64dc65ad 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -1346,23 +1346,26 @@ exit_setup_int:
1346 1346
1347static void qedi_free_nvm_iscsi_cfg(struct qedi_ctx *qedi) 1347static void qedi_free_nvm_iscsi_cfg(struct qedi_ctx *qedi)
1348{ 1348{
1349 if (qedi->iscsi_cfg) 1349 if (qedi->iscsi_image)
1350 dma_free_coherent(&qedi->pdev->dev, 1350 dma_free_coherent(&qedi->pdev->dev,
1351 sizeof(struct nvm_iscsi_cfg), 1351 sizeof(struct qedi_nvm_iscsi_image),
1352 qedi->iscsi_cfg, qedi->nvm_buf_dma); 1352 qedi->iscsi_image, qedi->nvm_buf_dma);
1353} 1353}
1354 1354
1355static int qedi_alloc_nvm_iscsi_cfg(struct qedi_ctx *qedi) 1355static int qedi_alloc_nvm_iscsi_cfg(struct qedi_ctx *qedi)
1356{ 1356{
1357 qedi->iscsi_cfg = dma_zalloc_coherent(&qedi->pdev->dev, 1357 struct qedi_nvm_iscsi_image nvm_image;
1358 sizeof(struct nvm_iscsi_cfg), 1358
1359 &qedi->nvm_buf_dma, GFP_KERNEL); 1359 qedi->iscsi_image = dma_zalloc_coherent(&qedi->pdev->dev,
1360 if (!qedi->iscsi_cfg) { 1360 sizeof(nvm_image),
1361 &qedi->nvm_buf_dma,
1362 GFP_KERNEL);
1363 if (!qedi->iscsi_image) {
1361 QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n"); 1364 QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n");
1362 return -ENOMEM; 1365 return -ENOMEM;
1363 } 1366 }
1364 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, 1367 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
1365 "NVM BUF addr=0x%p dma=0x%llx.\n", qedi->iscsi_cfg, 1368 "NVM BUF addr=0x%p dma=0x%llx.\n", qedi->iscsi_image,
1366 qedi->nvm_buf_dma); 1369 qedi->nvm_buf_dma);
1367 1370
1368 return 0; 1371 return 0;
@@ -1905,7 +1908,7 @@ qedi_get_nvram_block(struct qedi_ctx *qedi)
1905 struct nvm_iscsi_block *block; 1908 struct nvm_iscsi_block *block;
1906 1909
1907 pf = qedi->dev_info.common.abs_pf_id; 1910 pf = qedi->dev_info.common.abs_pf_id;
1908 block = &qedi->iscsi_cfg->block[0]; 1911 block = &qedi->iscsi_image->iscsi_cfg.block[0];
1909 for (i = 0; i < NUM_OF_ISCSI_PF_SUPPORTED; i++, block++) { 1912 for (i = 0; i < NUM_OF_ISCSI_PF_SUPPORTED; i++, block++) {
1910 flags = ((block->id) & NVM_ISCSI_CFG_BLK_CTRL_FLAG_MASK) >> 1913 flags = ((block->id) & NVM_ISCSI_CFG_BLK_CTRL_FLAG_MASK) >>
1911 NVM_ISCSI_CFG_BLK_CTRL_FLAG_OFFSET; 1914 NVM_ISCSI_CFG_BLK_CTRL_FLAG_OFFSET;
@@ -2194,15 +2197,14 @@ static void qedi_boot_release(void *data)
2194static int qedi_get_boot_info(struct qedi_ctx *qedi) 2197static int qedi_get_boot_info(struct qedi_ctx *qedi)
2195{ 2198{
2196 int ret = 1; 2199 int ret = 1;
2197 u16 len; 2200 struct qedi_nvm_iscsi_image nvm_image;
2198
2199 len = sizeof(struct nvm_iscsi_cfg);
2200 2201
2201 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, 2202 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
2202 "Get NVM iSCSI CFG image\n"); 2203 "Get NVM iSCSI CFG image\n");
2203 ret = qedi_ops->common->nvm_get_image(qedi->cdev, 2204 ret = qedi_ops->common->nvm_get_image(qedi->cdev,
2204 QED_NVM_IMAGE_ISCSI_CFG, 2205 QED_NVM_IMAGE_ISCSI_CFG,
2205 (char *)qedi->iscsi_cfg, len); 2206 (char *)qedi->iscsi_image,
2207 sizeof(nvm_image));
2206 if (ret) 2208 if (ret)
2207 QEDI_ERR(&qedi->dbg_ctx, 2209 QEDI_ERR(&qedi->dbg_ctx,
2208 "Could not get NVM image. ret = %d\n", ret); 2210 "Could not get NVM image. ret = %d\n", ret);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 0adfb3bce0fd..eb97d2dd3651 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -345,8 +345,7 @@ static void scsi_dec_host_busy(struct Scsi_Host *shost)
345 unsigned long flags; 345 unsigned long flags;
346 346
347 rcu_read_lock(); 347 rcu_read_lock();
348 if (!shost->use_blk_mq) 348 atomic_dec(&shost->host_busy);
349 atomic_dec(&shost->host_busy);
350 if (unlikely(scsi_host_in_recovery(shost))) { 349 if (unlikely(scsi_host_in_recovery(shost))) {
351 spin_lock_irqsave(shost->host_lock, flags); 350 spin_lock_irqsave(shost->host_lock, flags);
352 if (shost->host_failed || shost->host_eh_scheduled) 351 if (shost->host_failed || shost->host_eh_scheduled)
@@ -445,12 +444,7 @@ static inline bool scsi_target_is_busy(struct scsi_target *starget)
445 444
446static inline bool scsi_host_is_busy(struct Scsi_Host *shost) 445static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
447{ 446{
448 /* 447 if (shost->can_queue > 0 &&
449 * blk-mq can handle host queue busy efficiently via host-wide driver
450 * tag allocation
451 */
452
453 if (!shost->use_blk_mq && shost->can_queue > 0 &&
454 atomic_read(&shost->host_busy) >= shost->can_queue) 448 atomic_read(&shost->host_busy) >= shost->can_queue)
455 return true; 449 return true;
456 if (atomic_read(&shost->host_blocked) > 0) 450 if (atomic_read(&shost->host_blocked) > 0)
@@ -1606,10 +1600,7 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
1606 if (scsi_host_in_recovery(shost)) 1600 if (scsi_host_in_recovery(shost))
1607 return 0; 1601 return 0;
1608 1602
1609 if (!shost->use_blk_mq) 1603 busy = atomic_inc_return(&shost->host_busy) - 1;
1610 busy = atomic_inc_return(&shost->host_busy) - 1;
1611 else
1612 busy = 0;
1613 if (atomic_read(&shost->host_blocked) > 0) { 1604 if (atomic_read(&shost->host_blocked) > 0) {
1614 if (busy) 1605 if (busy)
1615 goto starved; 1606 goto starved;
@@ -1625,7 +1616,7 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
1625 "unblocking host at zero depth\n")); 1616 "unblocking host at zero depth\n"));
1626 } 1617 }
1627 1618
1628 if (!shost->use_blk_mq && shost->can_queue > 0 && busy >= shost->can_queue) 1619 if (shost->can_queue > 0 && busy >= shost->can_queue)
1629 goto starved; 1620 goto starved;
1630 if (shost->host_self_blocked) 1621 if (shost->host_self_blocked)
1631 goto starved; 1622 goto starved;
@@ -1711,9 +1702,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
1711 * with the locks as normal issue path does. 1702 * with the locks as normal issue path does.
1712 */ 1703 */
1713 atomic_inc(&sdev->device_busy); 1704 atomic_inc(&sdev->device_busy);
1714 1705 atomic_inc(&shost->host_busy);
1715 if (!shost->use_blk_mq)
1716 atomic_inc(&shost->host_busy);
1717 if (starget->can_queue > 0) 1706 if (starget->can_queue > 0)
1718 atomic_inc(&starget->target_busy); 1707 atomic_inc(&starget->target_busy);
1719 1708
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 7cb3ab0a35a0..3082e72e4f6c 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -30,7 +30,11 @@
30 30
31#define DRIVER_NAME "fsl-dspi" 31#define DRIVER_NAME "fsl-dspi"
32 32
33#ifdef CONFIG_M5441x
34#define DSPI_FIFO_SIZE 16
35#else
33#define DSPI_FIFO_SIZE 4 36#define DSPI_FIFO_SIZE 4
37#endif
34#define DSPI_DMA_BUFSIZE (DSPI_FIFO_SIZE * 1024) 38#define DSPI_DMA_BUFSIZE (DSPI_FIFO_SIZE * 1024)
35 39
36#define SPI_MCR 0x00 40#define SPI_MCR 0x00
@@ -623,9 +627,11 @@ static void dspi_tcfq_read(struct fsl_dspi *dspi)
623static void dspi_eoq_write(struct fsl_dspi *dspi) 627static void dspi_eoq_write(struct fsl_dspi *dspi)
624{ 628{
625 int fifo_size = DSPI_FIFO_SIZE; 629 int fifo_size = DSPI_FIFO_SIZE;
630 u16 xfer_cmd = dspi->tx_cmd;
626 631
627 /* Fill TX FIFO with as many transfers as possible */ 632 /* Fill TX FIFO with as many transfers as possible */
628 while (dspi->len && fifo_size--) { 633 while (dspi->len && fifo_size--) {
634 dspi->tx_cmd = xfer_cmd;
629 /* Request EOQF for last transfer in FIFO */ 635 /* Request EOQF for last transfer in FIFO */
630 if (dspi->len == dspi->bytes_per_word || fifo_size == 0) 636 if (dspi->len == dspi->bytes_per_word || fifo_size == 0)
631 dspi->tx_cmd |= SPI_PUSHR_CMD_EOQ; 637 dspi->tx_cmd |= SPI_PUSHR_CMD_EOQ;
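The dspi_eoq_write() fix restores the command word from xfer_cmd on every loop iteration: previously, once SPI_PUSHR_CMD_EOQ was ORed into dspi->tx_cmd for the last word of a FIFO fill, the flag stayed set for every later word of the same transfer. A toy model of the before/after behavior:

    #include <stdio.h>
    #include <stdint.h>

    #define CMD_EOQ (1u << 15)   /* illustrative bit position */

    int main(void)
    {
        uint16_t tx_cmd = 0x0001;        /* persistent, like dspi->tx_cmd */
        uint16_t xfer_cmd = tx_cmd;      /* the fix: remember the base value */

        for (int len = 6; len; len--) {
            tx_cmd = xfer_cmd;           /* without this line, EOQ sticks */
            if (len == 1)
                tx_cmd |= CMD_EOQ;       /* flag only the final word */
            printf("push 0x%04x\n", tx_cmd);
        }
        return 0;
    }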
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index ec395a6baf9c..9da0bc5a036c 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -2143,8 +2143,17 @@ int spi_register_controller(struct spi_controller *ctlr)
2143 */ 2143 */
2144 if (ctlr->num_chipselect == 0) 2144 if (ctlr->num_chipselect == 0)
2145 return -EINVAL; 2145 return -EINVAL;
2146 /* allocate dynamic bus number using Linux idr */ 2146 if (ctlr->bus_num >= 0) {
2147 if ((ctlr->bus_num < 0) && ctlr->dev.of_node) { 2147 /* devices with a fixed bus num must check-in with the num */
2148 mutex_lock(&board_lock);
2149 id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
2150 ctlr->bus_num + 1, GFP_KERNEL);
2151 mutex_unlock(&board_lock);
2152 if (WARN(id < 0, "couldn't get idr"))
2153 return id == -ENOSPC ? -EBUSY : id;
2154 ctlr->bus_num = id;
2155 } else if (ctlr->dev.of_node) {
2156 /* allocate dynamic bus number using Linux idr */
2148 id = of_alias_get_id(ctlr->dev.of_node, "spi"); 2157 id = of_alias_get_id(ctlr->dev.of_node, "spi");
2149 if (id >= 0) { 2158 if (id >= 0) {
2150 ctlr->bus_num = id; 2159 ctlr->bus_num = id;
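For controllers with a fixed bus number, the new branch checks the number in with the IDR instead of bypassing it. idr_alloc() allocates from the half-open range [start, end), so [bus_num, bus_num + 1) can only ever return bus_num; if that slot is already taken it fails with -ENOSPC, which the code remaps to -EBUSY for the caller. A toy allocator showing the half-open semantics:

    #include <stdio.h>

    static int ids[8];   /* 0 = slot free */

    /* allocates the first free id in [start, end), like idr_alloc() */
    static int idr_alloc_toy(int start, int end)
    {
        for (int i = start; i < end; i++)
            if (!ids[i]) {
                ids[i] = 1;
                return i;
            }
        return -1;       /* stands in for -ENOSPC */
    }

    int main(void)
    {
        printf("first claim of 3:  %d\n", idr_alloc_toy(3, 4));  /*  3 */
        printf("second claim of 3: %d\n", idr_alloc_toy(3, 4));  /* -1 */
        return 0;
    }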
diff --git a/drivers/staging/erofs/Kconfig b/drivers/staging/erofs/Kconfig
index 96f614934df1..663b755bf2fb 100644
--- a/drivers/staging/erofs/Kconfig
+++ b/drivers/staging/erofs/Kconfig
@@ -2,7 +2,7 @@
2 2
3config EROFS_FS 3config EROFS_FS
4 tristate "EROFS filesystem support" 4 tristate "EROFS filesystem support"
5 depends on BROKEN 5 depends on BLOCK
6 help 6 help
7 EROFS(Enhanced Read-Only File System) is a lightweight 7 EROFS(Enhanced Read-Only File System) is a lightweight
8 read-only file system with modern designs (eg. page-sized 8 read-only file system with modern designs (eg. page-sized
diff --git a/drivers/staging/erofs/super.c b/drivers/staging/erofs/super.c
index 1aec509c805f..2df9768edac9 100644
--- a/drivers/staging/erofs/super.c
+++ b/drivers/staging/erofs/super.c
@@ -340,7 +340,7 @@ static int erofs_read_super(struct super_block *sb,
340 goto err_sbread; 340 goto err_sbread;
341 341
342 sb->s_magic = EROFS_SUPER_MAGIC; 342 sb->s_magic = EROFS_SUPER_MAGIC;
343 sb->s_flags |= MS_RDONLY | MS_NOATIME; 343 sb->s_flags |= SB_RDONLY | SB_NOATIME;
344 sb->s_maxbytes = MAX_LFS_FILESIZE; 344 sb->s_maxbytes = MAX_LFS_FILESIZE;
345 sb->s_time_gran = 1; 345 sb->s_time_gran = 1;
346 346
@@ -627,7 +627,7 @@ static int erofs_remount(struct super_block *sb, int *flags, char *data)
627{ 627{
628 BUG_ON(!sb_rdonly(sb)); 628 BUG_ON(!sb_rdonly(sb));
629 629
630 *flags |= MS_RDONLY; 630 *flags |= SB_RDONLY;
631 return 0; 631 return 0;
632} 632}
633 633
diff --git a/drivers/staging/fbtft/TODO b/drivers/staging/fbtft/TODO
index 7e64c7e438f0..a9f4802bb6be 100644
--- a/drivers/staging/fbtft/TODO
+++ b/drivers/staging/fbtft/TODO
@@ -2,3 +2,7 @@
2 GPIO descriptor API in <linux/gpio/consumer.h> and look up GPIO 2 GPIO descriptor API in <linux/gpio/consumer.h> and look up GPIO
3 lines from device tree, ACPI or board files, board files should 3 lines from device tree, ACPI or board files, board files should
4 use <linux/gpio/machine.h> 4 use <linux/gpio/machine.h>
5
6* convert all these over to drm_simple_display_pipe and submit for inclusion
7 into the DRM subsystem under drivers/gpu/drm - fbdev doesn't take any new
8 drivers anymore.
diff --git a/drivers/staging/gasket/TODO b/drivers/staging/gasket/TODO
index 6ff8e01b04cc..5b1865f8af2d 100644
--- a/drivers/staging/gasket/TODO
+++ b/drivers/staging/gasket/TODO
@@ -1,9 +1,22 @@
1This is a list of things that need to be done to get this driver out of the 1This is a list of things that need to be done to get this driver out of the
2staging directory. 2staging directory.
3
4- Implement the gasket framework's functionality through UIO instead of
5 introducing a new user-space drivers framework that is quite similar.
6
7 UIO provides the necessary bits to implement user-space drivers. Meanwhile
8 the gasket APIs adds some extra conveniences like PCI BAR mapping, and
9 MSI interrupts. Add these features to the UIO subsystem, then re-implement
10 the Apex driver as a basic UIO driver instead (include/linux/uio_driver.h)
11
3- Document sysfs files with Documentation/ABI/ entries. 12- Document sysfs files with Documentation/ABI/ entries.
13
4- Use misc interface instead of major number for driver version description. 14- Use misc interface instead of major number for driver version description.
15
5- Add descriptions of module_param's 16- Add descriptions of module_param's
17
6- apex_get_status() should actually check status. 18- apex_get_status() should actually check status.
19
7- "drivers" should never be dealing with "raw" sysfs calls or mess around with 20- "drivers" should never be dealing with "raw" sysfs calls or mess around with
8 kobjects at all. The driver core should handle all of this for you 21 kobjects at all. The driver core should handle all of this for you
9 automatically. There should not be a need for raw attribute macros. 22 automatically.
diff --git a/drivers/staging/vboxvideo/vbox_drv.c b/drivers/staging/vboxvideo/vbox_drv.c
index da92c493f157..69cc508af1bc 100644
--- a/drivers/staging/vboxvideo/vbox_drv.c
+++ b/drivers/staging/vboxvideo/vbox_drv.c
@@ -59,6 +59,11 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
59 ret = PTR_ERR(dev); 59 ret = PTR_ERR(dev);
60 goto err_drv_alloc; 60 goto err_drv_alloc;
61 } 61 }
62
63 ret = pci_enable_device(pdev);
64 if (ret)
65 goto err_pci_enable;
66
62 dev->pdev = pdev; 67 dev->pdev = pdev;
63 pci_set_drvdata(pdev, dev); 68 pci_set_drvdata(pdev, dev);
64 69
@@ -75,6 +80,8 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
75 err_drv_dev_register: 80 err_drv_dev_register:
76 vbox_driver_unload(dev); 81 vbox_driver_unload(dev);
77 err_vbox_driver_load: 82 err_vbox_driver_load:
83 pci_disable_device(pdev);
84 err_pci_enable:
78 drm_dev_put(dev); 85 drm_dev_put(dev);
79 err_drv_alloc: 86 err_drv_alloc:
80 return ret; 87 return ret;
diff --git a/drivers/staging/vboxvideo/vbox_mode.c b/drivers/staging/vboxvideo/vbox_mode.c
index a83eac8668d0..79836c8fb909 100644
--- a/drivers/staging/vboxvideo/vbox_mode.c
+++ b/drivers/staging/vboxvideo/vbox_mode.c
@@ -323,6 +323,11 @@ static int vbox_crtc_page_flip(struct drm_crtc *crtc,
323 if (rc) 323 if (rc)
324 return rc; 324 return rc;
325 325
326 mutex_lock(&vbox->hw_mutex);
327 vbox_set_view(crtc);
328 vbox_do_modeset(crtc, &crtc->mode);
329 mutex_unlock(&vbox->hw_mutex);
330
326 spin_lock_irqsave(&drm->event_lock, flags); 331 spin_lock_irqsave(&drm->event_lock, flags);
327 332
328 if (event) 333 if (event)
diff --git a/drivers/staging/wilc1000/Makefile b/drivers/staging/wilc1000/Makefile
index f7b07c0b5ce2..ee7e26b886a5 100644
--- a/drivers/staging/wilc1000/Makefile
+++ b/drivers/staging/wilc1000/Makefile
@@ -1,4 +1,5 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2obj-$(CONFIG_WILC1000) += wilc1000.o
2 3
3ccflags-y += -DFIRMWARE_1002=\"atmel/wilc1002_firmware.bin\" \ 4ccflags-y += -DFIRMWARE_1002=\"atmel/wilc1002_firmware.bin\" \
4 -DFIRMWARE_1003=\"atmel/wilc1003_firmware.bin\" 5 -DFIRMWARE_1003=\"atmel/wilc1003_firmware.bin\"
@@ -11,9 +12,7 @@ wilc1000-objs := wilc_wfi_cfgoperations.o linux_wlan.o linux_mon.o \
11 wilc_wlan.o 12 wilc_wlan.o
12 13
13obj-$(CONFIG_WILC1000_SDIO) += wilc1000-sdio.o 14obj-$(CONFIG_WILC1000_SDIO) += wilc1000-sdio.o
14wilc1000-sdio-objs += $(wilc1000-objs)
15wilc1000-sdio-objs += wilc_sdio.o 15wilc1000-sdio-objs += wilc_sdio.o
16 16
17obj-$(CONFIG_WILC1000_SPI) += wilc1000-spi.o 17obj-$(CONFIG_WILC1000_SPI) += wilc1000-spi.o
18wilc1000-spi-objs += $(wilc1000-objs)
19wilc1000-spi-objs += wilc_spi.o 18wilc1000-spi-objs += wilc_spi.o
diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c
index 01cf4bd2e192..3b8d237decbf 100644
--- a/drivers/staging/wilc1000/linux_wlan.c
+++ b/drivers/staging/wilc1000/linux_wlan.c
@@ -1038,8 +1038,8 @@ void wilc_netdev_cleanup(struct wilc *wilc)
1038 } 1038 }
1039 1039
1040 kfree(wilc); 1040 kfree(wilc);
1041 wilc_debugfs_remove();
1042} 1041}
1042EXPORT_SYMBOL_GPL(wilc_netdev_cleanup);
1043 1043
1044static const struct net_device_ops wilc_netdev_ops = { 1044static const struct net_device_ops wilc_netdev_ops = {
1045 .ndo_init = mac_init_fn, 1045 .ndo_init = mac_init_fn,
@@ -1062,7 +1062,6 @@ int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type,
1062 if (!wl) 1062 if (!wl)
1063 return -ENOMEM; 1063 return -ENOMEM;
1064 1064
1065 wilc_debugfs_init();
1066 *wilc = wl; 1065 *wilc = wl;
1067 wl->io_type = io_type; 1066 wl->io_type = io_type;
1068 wl->hif_func = ops; 1067 wl->hif_func = ops;
@@ -1124,3 +1123,6 @@ int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type,
1124 1123
1125 return 0; 1124 return 0;
1126} 1125}
1126EXPORT_SYMBOL_GPL(wilc_netdev_init);
1127
1128MODULE_LICENSE("GPL");
diff --git a/drivers/staging/wilc1000/wilc_debugfs.c b/drivers/staging/wilc1000/wilc_debugfs.c
index edc72876458d..8001df66b8c2 100644
--- a/drivers/staging/wilc1000/wilc_debugfs.c
+++ b/drivers/staging/wilc1000/wilc_debugfs.c
@@ -19,6 +19,7 @@ static struct dentry *wilc_dir;
19 19
20#define DBG_LEVEL_ALL (DEBUG | INFO | WRN | ERR) 20#define DBG_LEVEL_ALL (DEBUG | INFO | WRN | ERR)
21static atomic_t WILC_DEBUG_LEVEL = ATOMIC_INIT(ERR); 21static atomic_t WILC_DEBUG_LEVEL = ATOMIC_INIT(ERR);
22EXPORT_SYMBOL_GPL(WILC_DEBUG_LEVEL);
22 23
23static ssize_t wilc_debug_level_read(struct file *file, char __user *userbuf, 24static ssize_t wilc_debug_level_read(struct file *file, char __user *userbuf,
24 size_t count, loff_t *ppos) 25 size_t count, loff_t *ppos)
@@ -87,7 +88,7 @@ static struct wilc_debugfs_info_t debugfs_info[] = {
87 }, 88 },
88}; 89};
89 90
90int wilc_debugfs_init(void) 91static int __init wilc_debugfs_init(void)
91{ 92{
92 int i; 93 int i;
93 struct wilc_debugfs_info_t *info; 94 struct wilc_debugfs_info_t *info;
@@ -103,10 +104,12 @@ int wilc_debugfs_init(void)
103 } 104 }
104 return 0; 105 return 0;
105} 106}
107module_init(wilc_debugfs_init);
106 108
107void wilc_debugfs_remove(void) 109static void __exit wilc_debugfs_remove(void)
108{ 110{
109 debugfs_remove_recursive(wilc_dir); 111 debugfs_remove_recursive(wilc_dir);
110} 112}
113module_exit(wilc_debugfs_remove);
111 114
112#endif 115#endif
diff --git a/drivers/staging/wilc1000/wilc_wlan.c b/drivers/staging/wilc1000/wilc_wlan.c
index 6787b6e9f124..8b184aa30d25 100644
--- a/drivers/staging/wilc1000/wilc_wlan.c
+++ b/drivers/staging/wilc1000/wilc_wlan.c
@@ -417,6 +417,7 @@ void chip_allow_sleep(struct wilc *wilc)
417 wilc->hif_func->hif_write_reg(wilc, 0xf0, reg & ~BIT(0)); 417 wilc->hif_func->hif_write_reg(wilc, 0xf0, reg & ~BIT(0));
418 wilc->hif_func->hif_write_reg(wilc, 0xfa, 0); 418 wilc->hif_func->hif_write_reg(wilc, 0xfa, 0);
419} 419}
420EXPORT_SYMBOL_GPL(chip_allow_sleep);
420 421
421void chip_wakeup(struct wilc *wilc) 422void chip_wakeup(struct wilc *wilc)
422{ 423{
@@ -471,6 +472,7 @@ void chip_wakeup(struct wilc *wilc)
471 } 472 }
472 chip_ps_state = CHIP_WAKEDUP; 473 chip_ps_state = CHIP_WAKEDUP;
473} 474}
475EXPORT_SYMBOL_GPL(chip_wakeup);
474 476
475void wilc_chip_sleep_manually(struct wilc *wilc) 477void wilc_chip_sleep_manually(struct wilc *wilc)
476{ 478{
@@ -484,6 +486,7 @@ void wilc_chip_sleep_manually(struct wilc *wilc)
484 chip_ps_state = CHIP_SLEEPING_MANUAL; 486 chip_ps_state = CHIP_SLEEPING_MANUAL;
485 release_bus(wilc, RELEASE_ONLY); 487 release_bus(wilc, RELEASE_ONLY);
486} 488}
489EXPORT_SYMBOL_GPL(wilc_chip_sleep_manually);
487 490
488void host_wakeup_notify(struct wilc *wilc) 491void host_wakeup_notify(struct wilc *wilc)
489{ 492{
@@ -491,6 +494,7 @@ void host_wakeup_notify(struct wilc *wilc)
491 wilc->hif_func->hif_write_reg(wilc, 0x10b0, 1); 494 wilc->hif_func->hif_write_reg(wilc, 0x10b0, 1);
492 release_bus(wilc, RELEASE_ONLY); 495 release_bus(wilc, RELEASE_ONLY);
493} 496}
497EXPORT_SYMBOL_GPL(host_wakeup_notify);
494 498
495void host_sleep_notify(struct wilc *wilc) 499void host_sleep_notify(struct wilc *wilc)
496{ 500{
@@ -498,6 +502,7 @@ void host_sleep_notify(struct wilc *wilc)
498 wilc->hif_func->hif_write_reg(wilc, 0x10ac, 1); 502 wilc->hif_func->hif_write_reg(wilc, 0x10ac, 1);
499 release_bus(wilc, RELEASE_ONLY); 503 release_bus(wilc, RELEASE_ONLY);
500} 504}
505EXPORT_SYMBOL_GPL(host_sleep_notify);
501 506
502int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count) 507int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count)
503{ 508{
@@ -871,6 +876,7 @@ void wilc_handle_isr(struct wilc *wilc)
871 876
872 release_bus(wilc, RELEASE_ALLOW_SLEEP); 877 release_bus(wilc, RELEASE_ALLOW_SLEEP);
873} 878}
879EXPORT_SYMBOL_GPL(wilc_handle_isr);
874 880
875int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer, 881int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer,
876 u32 buffer_size) 882 u32 buffer_size)
diff --git a/drivers/staging/wilc1000/wilc_wlan_if.h b/drivers/staging/wilc1000/wilc_wlan_if.h
index 00d13b153f80..b81a73b9bd67 100644
--- a/drivers/staging/wilc1000/wilc_wlan_if.h
+++ b/drivers/staging/wilc1000/wilc_wlan_if.h
@@ -831,6 +831,4 @@ struct wilc;
831int wilc_wlan_init(struct net_device *dev); 831int wilc_wlan_init(struct net_device *dev);
832u32 wilc_get_chipid(struct wilc *wilc, bool update); 832u32 wilc_get_chipid(struct wilc *wilc, bool update);
833 833
834int wilc_debugfs_init(void);
835void wilc_debugfs_remove(void);
836#endif 834#endif
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
index 768cce0ccb80..76a262674c8d 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
@@ -207,8 +207,8 @@ cxgbit_ddp_reserve(struct cxgbit_sock *csk, struct cxgbi_task_tag_info *ttinfo,
207 ret = dma_map_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE); 207 ret = dma_map_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
208 sgl->offset = sg_offset; 208 sgl->offset = sg_offset;
209 if (!ret) { 209 if (!ret) {
210 pr_info("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n", 210 pr_debug("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n",
211 __func__, 0, xferlen, sgcnt); 211 __func__, 0, xferlen, sgcnt);
212 goto rel_ppods; 212 goto rel_ppods;
213 } 213 }
214 214
@@ -250,8 +250,8 @@ cxgbit_get_r2t_ttt(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
250 250
251 ret = cxgbit_ddp_reserve(csk, ttinfo, cmd->se_cmd.data_length); 251 ret = cxgbit_ddp_reserve(csk, ttinfo, cmd->se_cmd.data_length);
252 if (ret < 0) { 252 if (ret < 0) {
253 pr_info("csk 0x%p, cmd 0x%p, xfer len %u, sgcnt %u no ddp.\n", 253 pr_debug("csk 0x%p, cmd 0x%p, xfer len %u, sgcnt %u no ddp.\n",
254 csk, cmd, cmd->se_cmd.data_length, ttinfo->nents); 254 csk, cmd, cmd->se_cmd.data_length, ttinfo->nents);
255 255
256 ttinfo->sgl = NULL; 256 ttinfo->sgl = NULL;
257 ttinfo->nents = 0; 257 ttinfo->nents = 0;
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 94bad43c41ff..9cdfccbdd06f 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -4208,22 +4208,15 @@ int iscsit_close_connection(
4208 crypto_free_ahash(tfm); 4208 crypto_free_ahash(tfm);
4209 } 4209 }
4210 4210
4211 free_cpumask_var(conn->conn_cpumask);
4212
4213 kfree(conn->conn_ops);
4214 conn->conn_ops = NULL;
4215
4216 if (conn->sock) 4211 if (conn->sock)
4217 sock_release(conn->sock); 4212 sock_release(conn->sock);
4218 4213
4219 if (conn->conn_transport->iscsit_free_conn) 4214 if (conn->conn_transport->iscsit_free_conn)
4220 conn->conn_transport->iscsit_free_conn(conn); 4215 conn->conn_transport->iscsit_free_conn(conn);
4221 4216
4222 iscsit_put_transport(conn->conn_transport);
4223
4224 pr_debug("Moving to TARG_CONN_STATE_FREE.\n"); 4217 pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
4225 conn->conn_state = TARG_CONN_STATE_FREE; 4218 conn->conn_state = TARG_CONN_STATE_FREE;
4226 kfree(conn); 4219 iscsit_free_conn(conn);
4227 4220
4228 spin_lock_bh(&sess->conn_lock); 4221 spin_lock_bh(&sess->conn_lock);
4229 atomic_dec(&sess->nconn); 4222 atomic_dec(&sess->nconn);
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 9e74f8bc2963..bb90c80ff388 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -67,45 +67,10 @@ static struct iscsi_login *iscsi_login_init_conn(struct iscsi_conn *conn)
67 goto out_req_buf; 67 goto out_req_buf;
68 } 68 }
69 69
70 conn->conn_ops = kzalloc(sizeof(struct iscsi_conn_ops), GFP_KERNEL);
71 if (!conn->conn_ops) {
72 pr_err("Unable to allocate memory for"
73 " struct iscsi_conn_ops.\n");
74 goto out_rsp_buf;
75 }
76
77 init_waitqueue_head(&conn->queues_wq);
78 INIT_LIST_HEAD(&conn->conn_list);
79 INIT_LIST_HEAD(&conn->conn_cmd_list);
80 INIT_LIST_HEAD(&conn->immed_queue_list);
81 INIT_LIST_HEAD(&conn->response_queue_list);
82 init_completion(&conn->conn_post_wait_comp);
83 init_completion(&conn->conn_wait_comp);
84 init_completion(&conn->conn_wait_rcfr_comp);
85 init_completion(&conn->conn_waiting_on_uc_comp);
86 init_completion(&conn->conn_logout_comp);
87 init_completion(&conn->rx_half_close_comp);
88 init_completion(&conn->tx_half_close_comp);
89 init_completion(&conn->rx_login_comp);
90 spin_lock_init(&conn->cmd_lock);
91 spin_lock_init(&conn->conn_usage_lock);
92 spin_lock_init(&conn->immed_queue_lock);
93 spin_lock_init(&conn->nopin_timer_lock);
94 spin_lock_init(&conn->response_queue_lock);
95 spin_lock_init(&conn->state_lock);
96
97 if (!zalloc_cpumask_var(&conn->conn_cpumask, GFP_KERNEL)) {
98 pr_err("Unable to allocate conn->conn_cpumask\n");
99 goto out_conn_ops;
100 }
101 conn->conn_login = login; 70 conn->conn_login = login;
102 71
103 return login; 72 return login;
104 73
105out_conn_ops:
106 kfree(conn->conn_ops);
107out_rsp_buf:
108 kfree(login->rsp_buf);
109out_req_buf: 74out_req_buf:
110 kfree(login->req_buf); 75 kfree(login->req_buf);
111out_login: 76out_login:
@@ -310,11 +275,9 @@ static int iscsi_login_zero_tsih_s1(
310 return -ENOMEM; 275 return -ENOMEM;
311 } 276 }
312 277
313 ret = iscsi_login_set_conn_values(sess, conn, pdu->cid); 278 if (iscsi_login_set_conn_values(sess, conn, pdu->cid))
314 if (unlikely(ret)) { 279 goto free_sess;
315 kfree(sess); 280
316 return ret;
317 }
318 sess->init_task_tag = pdu->itt; 281 sess->init_task_tag = pdu->itt;
319 memcpy(&sess->isid, pdu->isid, 6); 282 memcpy(&sess->isid, pdu->isid, 6);
320 sess->exp_cmd_sn = be32_to_cpu(pdu->cmdsn); 283 sess->exp_cmd_sn = be32_to_cpu(pdu->cmdsn);
@@ -1149,6 +1112,75 @@ iscsit_conn_set_transport(struct iscsi_conn *conn, struct iscsit_transport *t)
1149 return 0; 1112 return 0;
1150} 1113}
1151 1114
1115static struct iscsi_conn *iscsit_alloc_conn(struct iscsi_np *np)
1116{
1117 struct iscsi_conn *conn;
1118
1119 conn = kzalloc(sizeof(struct iscsi_conn), GFP_KERNEL);
1120 if (!conn) {
1121 pr_err("Could not allocate memory for new connection\n");
1122 return NULL;
1123 }
1124 pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
1125 conn->conn_state = TARG_CONN_STATE_FREE;
1126
1127 init_waitqueue_head(&conn->queues_wq);
1128 INIT_LIST_HEAD(&conn->conn_list);
1129 INIT_LIST_HEAD(&conn->conn_cmd_list);
1130 INIT_LIST_HEAD(&conn->immed_queue_list);
1131 INIT_LIST_HEAD(&conn->response_queue_list);
1132 init_completion(&conn->conn_post_wait_comp);
1133 init_completion(&conn->conn_wait_comp);
1134 init_completion(&conn->conn_wait_rcfr_comp);
1135 init_completion(&conn->conn_waiting_on_uc_comp);
1136 init_completion(&conn->conn_logout_comp);
1137 init_completion(&conn->rx_half_close_comp);
1138 init_completion(&conn->tx_half_close_comp);
1139 init_completion(&conn->rx_login_comp);
1140 spin_lock_init(&conn->cmd_lock);
1141 spin_lock_init(&conn->conn_usage_lock);
1142 spin_lock_init(&conn->immed_queue_lock);
1143 spin_lock_init(&conn->nopin_timer_lock);
1144 spin_lock_init(&conn->response_queue_lock);
1145 spin_lock_init(&conn->state_lock);
1146
1147 timer_setup(&conn->nopin_response_timer,
1148 iscsit_handle_nopin_response_timeout, 0);
1149 timer_setup(&conn->nopin_timer, iscsit_handle_nopin_timeout, 0);
1150
1151 if (iscsit_conn_set_transport(conn, np->np_transport) < 0)
1152 goto free_conn;
1153
1154 conn->conn_ops = kzalloc(sizeof(struct iscsi_conn_ops), GFP_KERNEL);
1155 if (!conn->conn_ops) {
1156 pr_err("Unable to allocate memory for struct iscsi_conn_ops.\n");
1157 goto put_transport;
1158 }
1159
1160 if (!zalloc_cpumask_var(&conn->conn_cpumask, GFP_KERNEL)) {
1161 pr_err("Unable to allocate conn->conn_cpumask\n");
1162 goto free_mask;
1163 }
1164
1165 return conn;
1166
1167free_mask:
1168 free_cpumask_var(conn->conn_cpumask);
1169put_transport:
1170 iscsit_put_transport(conn->conn_transport);
1171free_conn:
1172 kfree(conn);
1173 return NULL;
1174}
1175
1176void iscsit_free_conn(struct iscsi_conn *conn)
1177{
1178 free_cpumask_var(conn->conn_cpumask);
1179 kfree(conn->conn_ops);
1180 iscsit_put_transport(conn->conn_transport);
1181 kfree(conn);
1182}
1183
1152void iscsi_target_login_sess_out(struct iscsi_conn *conn, 1184void iscsi_target_login_sess_out(struct iscsi_conn *conn,
1153 struct iscsi_np *np, bool zero_tsih, bool new_sess) 1185 struct iscsi_np *np, bool zero_tsih, bool new_sess)
1154{ 1186{
@@ -1198,10 +1230,6 @@ old_sess_out:
1198 crypto_free_ahash(tfm); 1230 crypto_free_ahash(tfm);
1199 } 1231 }
1200 1232
1201 free_cpumask_var(conn->conn_cpumask);
1202
1203 kfree(conn->conn_ops);
1204
1205 if (conn->param_list) { 1233 if (conn->param_list) {
1206 iscsi_release_param_list(conn->param_list); 1234 iscsi_release_param_list(conn->param_list);
1207 conn->param_list = NULL; 1235 conn->param_list = NULL;
@@ -1219,8 +1247,7 @@ old_sess_out:
1219 if (conn->conn_transport->iscsit_free_conn) 1247 if (conn->conn_transport->iscsit_free_conn)
1220 conn->conn_transport->iscsit_free_conn(conn); 1248 conn->conn_transport->iscsit_free_conn(conn);
1221 1249
1222 iscsit_put_transport(conn->conn_transport); 1250 iscsit_free_conn(conn);
1223 kfree(conn);
1224} 1251}
1225 1252
1226static int __iscsi_target_login_thread(struct iscsi_np *np) 1253static int __iscsi_target_login_thread(struct iscsi_np *np)
@@ -1250,31 +1277,16 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
1250 } 1277 }
1251 spin_unlock_bh(&np->np_thread_lock); 1278 spin_unlock_bh(&np->np_thread_lock);
1252 1279
1253 conn = kzalloc(sizeof(struct iscsi_conn), GFP_KERNEL); 1280 conn = iscsit_alloc_conn(np);
1254 if (!conn) { 1281 if (!conn) {
1255 pr_err("Could not allocate memory for"
1256 " new connection\n");
1257 /* Get another socket */ 1282 /* Get another socket */
1258 return 1; 1283 return 1;
1259 } 1284 }
1260 pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
1261 conn->conn_state = TARG_CONN_STATE_FREE;
1262
1263 timer_setup(&conn->nopin_response_timer,
1264 iscsit_handle_nopin_response_timeout, 0);
1265 timer_setup(&conn->nopin_timer, iscsit_handle_nopin_timeout, 0);
1266
1267 if (iscsit_conn_set_transport(conn, np->np_transport) < 0) {
1268 kfree(conn);
1269 return 1;
1270 }
1271 1285
1272 rc = np->np_transport->iscsit_accept_np(np, conn); 1286 rc = np->np_transport->iscsit_accept_np(np, conn);
1273 if (rc == -ENOSYS) { 1287 if (rc == -ENOSYS) {
1274 complete(&np->np_restart_comp); 1288 complete(&np->np_restart_comp);
1275 iscsit_put_transport(conn->conn_transport); 1289 iscsit_free_conn(conn);
1276 kfree(conn);
1277 conn = NULL;
1278 goto exit; 1290 goto exit;
1279 } else if (rc < 0) { 1291 } else if (rc < 0) {
1280 spin_lock_bh(&np->np_thread_lock); 1292 spin_lock_bh(&np->np_thread_lock);
@@ -1282,17 +1294,13 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
1282 np->np_thread_state = ISCSI_NP_THREAD_ACTIVE; 1294 np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
1283 spin_unlock_bh(&np->np_thread_lock); 1295 spin_unlock_bh(&np->np_thread_lock);
1284 complete(&np->np_restart_comp); 1296 complete(&np->np_restart_comp);
1285 iscsit_put_transport(conn->conn_transport); 1297 iscsit_free_conn(conn);
1286 kfree(conn);
1287 conn = NULL;
1288 /* Get another socket */ 1298 /* Get another socket */
1289 return 1; 1299 return 1;
1290 } 1300 }
1291 spin_unlock_bh(&np->np_thread_lock); 1301 spin_unlock_bh(&np->np_thread_lock);
1292 iscsit_put_transport(conn->conn_transport); 1302 iscsit_free_conn(conn);
1293 kfree(conn); 1303 return 1;
1294 conn = NULL;
1295 goto out;
1296 } 1304 }
1297 /* 1305 /*
1298 * Perform the remaining iSCSI connection initialization items.. 1306 * Perform the remaining iSCSI connection initialization items..
@@ -1442,7 +1450,6 @@ old_sess_out:
1442 tpg_np = NULL; 1450 tpg_np = NULL;
1443 } 1451 }
1444 1452
1445out:
1446 return 1; 1453 return 1;
1447 1454
1448exit: 1455exit:
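The refactor concentrates connection setup in iscsit_alloc_conn() and teardown in iscsit_free_conn(), so the login thread's error paths and iscsit_close_connection() all release the same set of resources. The allocation side uses the usual goto-unwind style, where each label frees what was acquired before the failing step, in reverse order. A compact sketch with invented members:

    #include <stdlib.h>

    struct conn { void *ops; void *mask; };

    /* Each failure label undoes only the steps that succeeded. */
    static struct conn *alloc_conn(void)
    {
        struct conn *c = calloc(1, sizeof(*c));
        if (!c)
            return NULL;
        c->ops = malloc(64);
        if (!c->ops)
            goto free_conn;
        c->mask = malloc(32);
        if (!c->mask)
            goto free_ops;
        return c;

    free_ops:
        free(c->ops);
    free_conn:
        free(c);
        return NULL;
    }

    /* Single teardown path, shared by every caller. */
    static void conn_free(struct conn *c)
    {
        free(c->mask);
        free(c->ops);
        free(c);
    }

    int main(void)
    {
        struct conn *c = alloc_conn();
        if (c)
            conn_free(c);
        return 0;
    }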
diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h
index 74ac3abc44a0..3b8e3639ff5d 100644
--- a/drivers/target/iscsi/iscsi_target_login.h
+++ b/drivers/target/iscsi/iscsi_target_login.h
@@ -19,7 +19,7 @@ extern int iscsi_target_setup_login_socket(struct iscsi_np *,
19extern int iscsit_accept_np(struct iscsi_np *, struct iscsi_conn *); 19extern int iscsit_accept_np(struct iscsi_np *, struct iscsi_conn *);
20extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *); 20extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
21extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32); 21extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32);
22extern void iscsit_free_conn(struct iscsi_np *, struct iscsi_conn *); 22extern void iscsit_free_conn(struct iscsi_conn *);
23extern int iscsit_start_kthreads(struct iscsi_conn *); 23extern int iscsit_start_kthreads(struct iscsi_conn *);
24extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8); 24extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8);
25extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *, 25extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *,
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index 977a8307fbb1..4f2816559205 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -260,10 +260,13 @@ static int of_thermal_set_mode(struct thermal_zone_device *tz,
260 260
261 mutex_lock(&tz->lock); 261 mutex_lock(&tz->lock);
262 262
263 if (mode == THERMAL_DEVICE_ENABLED) 263 if (mode == THERMAL_DEVICE_ENABLED) {
264 tz->polling_delay = data->polling_delay; 264 tz->polling_delay = data->polling_delay;
265 else 265 tz->passive_delay = data->passive_delay;
266 } else {
266 tz->polling_delay = 0; 267 tz->polling_delay = 0;
268 tz->passive_delay = 0;
269 }
267 270
268 mutex_unlock(&tz->lock); 271 mutex_unlock(&tz->lock);
269 272
diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c
index c866cc165960..450ed66edf58 100644
--- a/drivers/thermal/qoriq_thermal.c
+++ b/drivers/thermal/qoriq_thermal.c
@@ -1,16 +1,6 @@
1/* 1// SPDX-License-Identifier: GPL-2.0
2 * Copyright 2016 Freescale Semiconductor, Inc. 2//
3 * 3// Copyright 2016 Freescale Semiconductor, Inc.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 */
14 4
15#include <linux/module.h> 5#include <linux/module.h>
16#include <linux/platform_device.h> 6#include <linux/platform_device.h>
@@ -197,7 +187,7 @@ static int qoriq_tmu_probe(struct platform_device *pdev)
197 int ret; 187 int ret;
198 struct qoriq_tmu_data *data; 188 struct qoriq_tmu_data *data;
199 struct device_node *np = pdev->dev.of_node; 189 struct device_node *np = pdev->dev.of_node;
200 u32 site = 0; 190 u32 site;
201 191
202 if (!np) { 192 if (!np) {
203 dev_err(&pdev->dev, "Device OF-Node is NULL"); 193 dev_err(&pdev->dev, "Device OF-Node is NULL");
@@ -233,8 +223,9 @@ static int qoriq_tmu_probe(struct platform_device *pdev)
233 if (ret < 0) 223 if (ret < 0)
234 goto err_tmu; 224 goto err_tmu;
235 225
236 data->tz = thermal_zone_of_sensor_register(&pdev->dev, data->sensor_id, 226 data->tz = devm_thermal_zone_of_sensor_register(&pdev->dev,
237 data, &tmu_tz_ops); 227 data->sensor_id,
228 data, &tmu_tz_ops);
238 if (IS_ERR(data->tz)) { 229 if (IS_ERR(data->tz)) {
239 ret = PTR_ERR(data->tz); 230 ret = PTR_ERR(data->tz);
240 dev_err(&pdev->dev, 231 dev_err(&pdev->dev,
@@ -243,7 +234,7 @@ static int qoriq_tmu_probe(struct platform_device *pdev)
243 } 234 }
244 235
245 /* Enable monitoring */ 236 /* Enable monitoring */
246 site |= 0x1 << (15 - data->sensor_id); 237 site = 0x1 << (15 - data->sensor_id);
247 tmu_write(data, site | TMR_ME | TMR_ALPF, &data->regs->tmr); 238 tmu_write(data, site | TMR_ME | TMR_ALPF, &data->regs->tmr);
248 239
249 return 0; 240 return 0;
@@ -261,8 +252,6 @@ static int qoriq_tmu_remove(struct platform_device *pdev)
261{ 252{
262 struct qoriq_tmu_data *data = platform_get_drvdata(pdev); 253 struct qoriq_tmu_data *data = platform_get_drvdata(pdev);
263 254
264 thermal_zone_of_sensor_unregister(&pdev->dev, data->tz);
265
266 /* Disable monitoring */ 255 /* Disable monitoring */
267 tmu_write(data, TMR_DISABLE, &data->regs->tmr); 256 tmu_write(data, TMR_DISABLE, &data->regs->tmr);
268 257
diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c
index 766521eb7071..7aed5337bdd3 100644
--- a/drivers/thermal/rcar_gen3_thermal.c
+++ b/drivers/thermal/rcar_gen3_thermal.c
@@ -1,19 +1,10 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * R-Car Gen3 THS thermal sensor driver 3 * R-Car Gen3 THS thermal sensor driver
3 * Based on rcar_thermal.c and work from Hien Dang and Khiem Nguyen. 4 * Based on rcar_thermal.c and work from Hien Dang and Khiem Nguyen.
4 * 5 *
5 * Copyright (C) 2016 Renesas Electronics Corporation. 6 * Copyright (C) 2016 Renesas Electronics Corporation.
6 * Copyright (C) 2016 Sang Engineering 7 * Copyright (C) 2016 Sang Engineering
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; version 2 of the License.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 */ 8 */
18#include <linux/delay.h> 9#include <linux/delay.h>
19#include <linux/err.h> 10#include <linux/err.h>
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index e77e63070e99..78f932822d38 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -1,21 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * R-Car THS/TSC thermal sensor driver 3 * R-Car THS/TSC thermal sensor driver
3 * 4 *
4 * Copyright (C) 2012 Renesas Solutions Corp. 5 * Copyright (C) 2012 Renesas Solutions Corp.
5 * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> 6 * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
19 */ 7 */
20#include <linux/delay.h> 8#include <linux/delay.h>
21#include <linux/err.h> 9#include <linux/err.h>
@@ -660,6 +648,6 @@ static struct platform_driver rcar_thermal_driver = {
660}; 648};
661module_platform_driver(rcar_thermal_driver); 649module_platform_driver(rcar_thermal_driver);
662 650
663MODULE_LICENSE("GPL"); 651MODULE_LICENSE("GPL v2");
664MODULE_DESCRIPTION("R-Car THS/TSC thermal sensor driver"); 652MODULE_DESCRIPTION("R-Car THS/TSC thermal sensor driver");
665MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>"); 653MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
index 5414c4a87bea..27284a2dcd2b 100644
--- a/drivers/tty/hvc/hvc_console.c
+++ b/drivers/tty/hvc/hvc_console.c
@@ -522,6 +522,8 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
522 return -EIO; 522 return -EIO;
523 523
524 while (count > 0) { 524 while (count > 0) {
525 int ret = 0;
526
525 spin_lock_irqsave(&hp->lock, flags); 527 spin_lock_irqsave(&hp->lock, flags);
526 528
527 rsize = hp->outbuf_size - hp->n_outbuf; 529 rsize = hp->outbuf_size - hp->n_outbuf;
@@ -537,10 +539,13 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
537 } 539 }
538 540
539 if (hp->n_outbuf > 0) 541 if (hp->n_outbuf > 0)
540 hvc_push(hp); 542 ret = hvc_push(hp);
541 543
542 spin_unlock_irqrestore(&hp->lock, flags); 544 spin_unlock_irqrestore(&hp->lock, flags);
543 545
546 if (!ret)
547 break;
548
544 if (count) { 549 if (count) {
545 if (hp->n_outbuf > 0) 550 if (hp->n_outbuf > 0)
546 hvc_flush(hp); 551 hvc_flush(hp);
@@ -623,6 +628,15 @@ static int hvc_chars_in_buffer(struct tty_struct *tty)
623#define MAX_TIMEOUT (2000) 628#define MAX_TIMEOUT (2000)
624static u32 timeout = MIN_TIMEOUT; 629static u32 timeout = MIN_TIMEOUT;
625 630
631/*
632 * Maximum number of bytes to get from the console driver if hvc_poll is
633 * called from the driver (and can't sleep). Any more than this and we break
634 * and start polling with khvcd. This value was derived from an OpenBMC
635 * console with the OPAL driver that results in about 0.25ms interrupts-off
636 * latency.
637 */
638#define HVC_ATOMIC_READ_MAX 128
639
626#define HVC_POLL_READ 0x00000001 640#define HVC_POLL_READ 0x00000001
627#define HVC_POLL_WRITE 0x00000002 641#define HVC_POLL_WRITE 0x00000002
628 642
@@ -669,8 +683,8 @@ static int __hvc_poll(struct hvc_struct *hp, bool may_sleep)
669 if (!hp->irq_requested) 683 if (!hp->irq_requested)
670 poll_mask |= HVC_POLL_READ; 684 poll_mask |= HVC_POLL_READ;
671 685
686 read_again:
672 /* Read data if any */ 687 /* Read data if any */
673
674 count = tty_buffer_request_room(&hp->port, N_INBUF); 688 count = tty_buffer_request_room(&hp->port, N_INBUF);
675 689
676 /* If flip is full, just reschedule a later read */ 690 /* If flip is full, just reschedule a later read */
@@ -717,9 +731,23 @@ static int __hvc_poll(struct hvc_struct *hp, bool may_sleep)
717#endif /* CONFIG_MAGIC_SYSRQ */ 731#endif /* CONFIG_MAGIC_SYSRQ */
718 tty_insert_flip_char(&hp->port, buf[i], 0); 732 tty_insert_flip_char(&hp->port, buf[i], 0);
719 } 733 }
720 if (n == count) 734 read_total += n;
721 poll_mask |= HVC_POLL_READ; 735
722 read_total = n; 736 if (may_sleep) {
737 /* Keep going until the flip is full */
738 spin_unlock_irqrestore(&hp->lock, flags);
739 cond_resched();
740 spin_lock_irqsave(&hp->lock, flags);
741 goto read_again;
742 } else if (read_total < HVC_ATOMIC_READ_MAX) {
743 /* In atomic context, keep reading until we hit the cap */
744 goto read_again;
745 }
746
747 /*
748 * Latency break, schedule another poll immediately.
749 */
750 poll_mask |= HVC_POLL_READ;
723 751
724 out: 752 out:
725 /* Wakeup write queue if necessary */ 753 /* Wakeup write queue if necessary */
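
The __hvc_poll() rework above replaces the single capped read with a read_again loop: a sleepable caller keeps draining (with cond_resched() between passes), while an atomic caller stops at HVC_ATOMIC_READ_MAX bytes and schedules another poll. A minimal userspace sketch of that policy, with illustrative names and sizes rather than the hvc ones:

#include <stdbool.h>
#include <stdio.h>

#define ATOMIC_READ_MAX 128	/* cap per poll when we cannot sleep */
#define CHUNK 16		/* bytes fetched per inner read */

/* Drain up to 'avail' bytes; returns how many were consumed and sets
 * *defer when an atomic caller should hand the rest to a worker. */
static size_t drain(size_t avail, bool may_sleep, bool *defer)
{
	size_t total = 0;

	*defer = false;
	while (avail > 0) {
		size_t n = avail < CHUNK ? avail : CHUNK;

		avail -= n;
		total += n;
		if (may_sleep)
			continue;	/* keep going, we can reschedule */
		if (total >= ATOMIC_READ_MAX) {
			*defer = true;	/* latency break: poll again later */
			break;
		}
	}
	return total;
}

int main(void)
{
	bool defer;
	size_t n = drain(1000, false, &defer);

	printf("atomic: consumed %zu, defer=%d\n", n, defer);
	n = drain(1000, true, &defer);
	printf("sleepable: consumed %zu, defer=%d\n", n, defer);
	return 0;
}
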
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 27346d69f393..f9b40a9dc4d3 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -780,20 +780,9 @@ static int acm_tty_write(struct tty_struct *tty,
780 } 780 }
781 781
782 if (acm->susp_count) { 782 if (acm->susp_count) {
783 if (acm->putbuffer) {
784 /* now to preserve order */
785 usb_anchor_urb(acm->putbuffer->urb, &acm->delayed);
786 acm->putbuffer = NULL;
787 }
788 usb_anchor_urb(wb->urb, &acm->delayed); 783 usb_anchor_urb(wb->urb, &acm->delayed);
789 spin_unlock_irqrestore(&acm->write_lock, flags); 784 spin_unlock_irqrestore(&acm->write_lock, flags);
790 return count; 785 return count;
791 } else {
792 if (acm->putbuffer) {
793 /* at this point there is no good way to handle errors */
794 acm_start_wb(acm, acm->putbuffer);
795 acm->putbuffer = NULL;
796 }
797 } 786 }
798 787
799 stat = acm_start_wb(acm, wb); 788 stat = acm_start_wb(acm, wb);
@@ -804,66 +793,6 @@ static int acm_tty_write(struct tty_struct *tty,
804 return count; 793 return count;
805} 794}
806 795
807static void acm_tty_flush_chars(struct tty_struct *tty)
808{
809 struct acm *acm = tty->driver_data;
810 struct acm_wb *cur;
811 int err;
812 unsigned long flags;
813
814 spin_lock_irqsave(&acm->write_lock, flags);
815
816 cur = acm->putbuffer;
817 if (!cur) /* nothing to do */
818 goto out;
819
820 acm->putbuffer = NULL;
821 err = usb_autopm_get_interface_async(acm->control);
822 if (err < 0) {
823 cur->use = 0;
824 acm->putbuffer = cur;
825 goto out;
826 }
827
828 if (acm->susp_count)
829 usb_anchor_urb(cur->urb, &acm->delayed);
830 else
831 acm_start_wb(acm, cur);
832out:
833 spin_unlock_irqrestore(&acm->write_lock, flags);
834 return;
835}
836
837static int acm_tty_put_char(struct tty_struct *tty, unsigned char ch)
838{
839 struct acm *acm = tty->driver_data;
840 struct acm_wb *cur;
841 int wbn;
842 unsigned long flags;
843
844overflow:
845 cur = acm->putbuffer;
846 if (!cur) {
847 spin_lock_irqsave(&acm->write_lock, flags);
848 wbn = acm_wb_alloc(acm);
849 if (wbn >= 0) {
850 cur = &acm->wb[wbn];
851 acm->putbuffer = cur;
852 }
853 spin_unlock_irqrestore(&acm->write_lock, flags);
854 if (!cur)
855 return 0;
856 }
857
858 if (cur->len == acm->writesize) {
859 acm_tty_flush_chars(tty);
860 goto overflow;
861 }
862
863 cur->buf[cur->len++] = ch;
864 return 1;
865}
866
867static int acm_tty_write_room(struct tty_struct *tty) 796static int acm_tty_write_room(struct tty_struct *tty)
868{ 797{
869 struct acm *acm = tty->driver_data; 798 struct acm *acm = tty->driver_data;
@@ -1987,8 +1916,6 @@ static const struct tty_operations acm_ops = {
1987 .cleanup = acm_tty_cleanup, 1916 .cleanup = acm_tty_cleanup,
1988 .hangup = acm_tty_hangup, 1917 .hangup = acm_tty_hangup,
1989 .write = acm_tty_write, 1918 .write = acm_tty_write,
1990 .put_char = acm_tty_put_char,
1991 .flush_chars = acm_tty_flush_chars,
1992 .write_room = acm_tty_write_room, 1919 .write_room = acm_tty_write_room,
1993 .ioctl = acm_tty_ioctl, 1920 .ioctl = acm_tty_ioctl,
1994 .throttle = acm_tty_throttle, 1921 .throttle = acm_tty_throttle,
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index eacc116e83da..ca06b20d7af9 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -96,7 +96,6 @@ struct acm {
96 unsigned long read_urbs_free; 96 unsigned long read_urbs_free;
97 struct urb *read_urbs[ACM_NR]; 97 struct urb *read_urbs[ACM_NR];
98 struct acm_rb read_buffers[ACM_NR]; 98 struct acm_rb read_buffers[ACM_NR];
99 struct acm_wb *putbuffer; /* for acm_tty_put_char() */
100 int rx_buflimit; 99 int rx_buflimit;
101 spinlock_t read_lock; 100 spinlock_t read_lock;
102 u8 *notification_buffer; /* to reassemble fragmented notifications */ 101 u8 *notification_buffer; /* to reassemble fragmented notifications */
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index bec581fb7c63..656d247819c9 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -460,7 +460,7 @@ static int service_outstanding_interrupt(struct wdm_device *desc)
460 460
461 set_bit(WDM_RESPONDING, &desc->flags); 461 set_bit(WDM_RESPONDING, &desc->flags);
462 spin_unlock_irq(&desc->iuspin); 462 spin_unlock_irq(&desc->iuspin);
463 rv = usb_submit_urb(desc->response, GFP_KERNEL); 463 rv = usb_submit_urb(desc->response, GFP_ATOMIC);
464 spin_lock_irq(&desc->iuspin); 464 spin_lock_irq(&desc->iuspin);
465 if (rv) { 465 if (rv) {
466 dev_err(&desc->intf->dev, 466 dev_err(&desc->intf->dev,
diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c
index 50a2362ed3ea..48277bbc15e4 100644
--- a/drivers/usb/common/common.c
+++ b/drivers/usb/common/common.c
@@ -246,6 +246,31 @@ int of_usb_update_otg_caps(struct device_node *np,
246} 246}
247EXPORT_SYMBOL_GPL(of_usb_update_otg_caps); 247EXPORT_SYMBOL_GPL(of_usb_update_otg_caps);
248 248
249/**
250 * usb_of_get_companion_dev - Find the companion device
251 * @dev: the device pointer to find a companion
252 *
253 * Find the companion device from the platform bus.
254 *
255 * Takes a reference to the returned struct device which needs to be dropped
256 * after use.
257 *
258 * Return: On success, a pointer to the companion device, %NULL on failure.
259 */
260struct device *usb_of_get_companion_dev(struct device *dev)
261{
262 struct device_node *node;
263 struct platform_device *pdev = NULL;
264
265 node = of_parse_phandle(dev->of_node, "companion", 0);
266 if (node)
267 pdev = of_find_device_by_node(node);
268
269 of_node_put(node);
270
271 return pdev ? &pdev->dev : NULL;
272}
273EXPORT_SYMBOL_GPL(usb_of_get_companion_dev);
249#endif 274#endif
250 275
251MODULE_LICENSE("GPL"); 276MODULE_LICENSE("GPL");
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index 66fe1b78d952..03432467b05f 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -515,8 +515,6 @@ static int resume_common(struct device *dev, int event)
515 event == PM_EVENT_RESTORE); 515 event == PM_EVENT_RESTORE);
516 if (retval) { 516 if (retval) {
517 dev_err(dev, "PCI post-resume error %d!\n", retval); 517 dev_err(dev, "PCI post-resume error %d!\n", retval);
518 if (hcd->shared_hcd)
519 usb_hc_died(hcd->shared_hcd);
520 usb_hc_died(hcd); 518 usb_hc_died(hcd);
521 } 519 }
522 } 520 }
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 228672f2c4a1..bfa5eda0cc26 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1341,6 +1341,11 @@ void usb_enable_interface(struct usb_device *dev,
1341 * is submitted that needs that bandwidth. Some other operating systems 1341 * is submitted that needs that bandwidth. Some other operating systems
1342 * allocate bandwidth early, when a configuration is chosen. 1342 * allocate bandwidth early, when a configuration is chosen.
1343 * 1343 *
1344 * xHCI reserves bandwidth and configures the alternate setting in
1345 * usb_hcd_alloc_bandwidth(). If it fails, the original interface altsetting
1346 * may be disabled. Drivers cannot rely on any particular alternate
1347 * setting being in effect after a failure.
1348 *
1344 * This call is synchronous, and may not be used in an interrupt context. 1349 * This call is synchronous, and may not be used in an interrupt context.
1345 * Also, drivers must not change altsettings while urbs are scheduled for 1350 * Also, drivers must not change altsettings while urbs are scheduled for
1346 * endpoints in that interface; all such urbs must first be completed 1351 * endpoints in that interface; all such urbs must first be completed
@@ -1376,6 +1381,12 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate)
1376 alternate); 1381 alternate);
1377 return -EINVAL; 1382 return -EINVAL;
1378 } 1383 }
1384 /*
1385 * usb3 hosts configure the interface in usb_hcd_alloc_bandwidth,
1386 * including freeing dropped endpoint ring buffers.
1387 * Make sure the interface endpoints are flushed before that.
1388 */
1389 usb_disable_interface(dev, iface, false);
1379 1390
1380 /* Make sure we have enough bandwidth for this alternate interface. 1391 /* Make sure we have enough bandwidth for this alternate interface.
1381 * Remove the current alt setting and add the new alt setting. 1392 * Remove the current alt setting and add the new alt setting.
diff --git a/drivers/usb/core/of.c b/drivers/usb/core/of.c
index fd77442c2d12..651708d8c908 100644
--- a/drivers/usb/core/of.c
+++ b/drivers/usb/core/of.c
@@ -105,29 +105,3 @@ usb_of_get_interface_node(struct usb_device *udev, u8 config, u8 ifnum)
105 return NULL; 105 return NULL;
106} 106}
107EXPORT_SYMBOL_GPL(usb_of_get_interface_node); 107EXPORT_SYMBOL_GPL(usb_of_get_interface_node);
108
109/**
110 * usb_of_get_companion_dev - Find the companion device
111 * @dev: the device pointer to find a companion
112 *
113 * Find the companion device from platform bus.
114 *
115 * Takes a reference to the returned struct device which needs to be dropped
116 * after use.
117 *
118 * Return: On success, a pointer to the companion device, %NULL on failure.
119 */
120struct device *usb_of_get_companion_dev(struct device *dev)
121{
122 struct device_node *node;
123 struct platform_device *pdev = NULL;
124
125 node = of_parse_phandle(dev->of_node, "companion", 0);
126 if (node)
127 pdev = of_find_device_by_node(node);
128
129 of_node_put(node);
130
131 return pdev ? &pdev->dev : NULL;
132}
133EXPORT_SYMBOL_GPL(usb_of_get_companion_dev);
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 097057d2eacf..e77dfe5ed5ec 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -178,6 +178,10 @@ static const struct usb_device_id usb_quirk_list[] = {
178 /* CBM - Flash disk */ 178 /* CBM - Flash disk */
179 { USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME }, 179 { USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME },
180 180
181 /* WORLDE Controller KS49 or Prodipe MIDI 49C USB controller */
182 { USB_DEVICE(0x0218, 0x0201), .driver_info =
183 USB_QUIRK_CONFIG_INTF_STRINGS },
184
181 /* WORLDE easy key (easykey.25) MIDI controller */ 185 /* WORLDE easy key (easykey.25) MIDI controller */
182 { USB_DEVICE(0x0218, 0x0401), .driver_info = 186 { USB_DEVICE(0x0218, 0x0401), .driver_info =
183 USB_QUIRK_CONFIG_INTF_STRINGS }, 187 USB_QUIRK_CONFIG_INTF_STRINGS },
@@ -406,6 +410,9 @@ static const struct usb_device_id usb_quirk_list[] = {
406 { USB_DEVICE(0x2040, 0x7200), .driver_info = 410 { USB_DEVICE(0x2040, 0x7200), .driver_info =
407 USB_QUIRK_CONFIG_INTF_STRINGS }, 411 USB_QUIRK_CONFIG_INTF_STRINGS },
408 412
413 /* DJI CineSSD */
414 { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
415
409 /* INTEL VALUE SSD */ 416 /* INTEL VALUE SSD */
410 { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME }, 417 { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
411 418
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 9a53a58e676e..577642895b57 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -412,8 +412,6 @@ static int dwc2_driver_probe(struct platform_device *dev)
412 dev_dbg(&dev->dev, "mapped PA %08lx to VA %p\n", 412 dev_dbg(&dev->dev, "mapped PA %08lx to VA %p\n",
413 (unsigned long)res->start, hsotg->regs); 413 (unsigned long)res->start, hsotg->regs);
414 414
415 hsotg->needs_byte_swap = dwc2_check_core_endianness(hsotg);
416
417 retval = dwc2_lowlevel_hw_init(hsotg); 415 retval = dwc2_lowlevel_hw_init(hsotg);
418 if (retval) 416 if (retval)
419 return retval; 417 return retval;
@@ -438,6 +436,8 @@ static int dwc2_driver_probe(struct platform_device *dev)
438 if (retval) 436 if (retval)
439 return retval; 437 return retval;
440 438
439 hsotg->needs_byte_swap = dwc2_check_core_endianness(hsotg);
440
441 retval = dwc2_get_dr_mode(hsotg); 441 retval = dwc2_get_dr_mode(hsotg);
442 if (retval) 442 if (retval)
443 goto error; 443 goto error;
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
index 40bf9e0bbc59..4c2771c5e727 100644
--- a/drivers/usb/dwc3/dwc3-of-simple.c
+++ b/drivers/usb/dwc3/dwc3-of-simple.c
@@ -180,8 +180,7 @@ static int dwc3_of_simple_remove(struct platform_device *pdev)
180 return 0; 180 return 0;
181} 181}
182 182
183#ifdef CONFIG_PM 183static int __maybe_unused dwc3_of_simple_runtime_suspend(struct device *dev)
184static int dwc3_of_simple_runtime_suspend(struct device *dev)
185{ 184{
186 struct dwc3_of_simple *simple = dev_get_drvdata(dev); 185 struct dwc3_of_simple *simple = dev_get_drvdata(dev);
187 int i; 186 int i;
@@ -192,7 +191,7 @@ static int dwc3_of_simple_runtime_suspend(struct device *dev)
192 return 0; 191 return 0;
193} 192}
194 193
195static int dwc3_of_simple_runtime_resume(struct device *dev) 194static int __maybe_unused dwc3_of_simple_runtime_resume(struct device *dev)
196{ 195{
197 struct dwc3_of_simple *simple = dev_get_drvdata(dev); 196 struct dwc3_of_simple *simple = dev_get_drvdata(dev);
198 int ret; 197 int ret;
@@ -210,7 +209,7 @@ static int dwc3_of_simple_runtime_resume(struct device *dev)
210 return 0; 209 return 0;
211} 210}
212 211
213static int dwc3_of_simple_suspend(struct device *dev) 212static int __maybe_unused dwc3_of_simple_suspend(struct device *dev)
214{ 213{
215 struct dwc3_of_simple *simple = dev_get_drvdata(dev); 214 struct dwc3_of_simple *simple = dev_get_drvdata(dev);
216 215
@@ -220,7 +219,7 @@ static int dwc3_of_simple_suspend(struct device *dev)
220 return 0; 219 return 0;
221} 220}
222 221
223static int dwc3_of_simple_resume(struct device *dev) 222static int __maybe_unused dwc3_of_simple_resume(struct device *dev)
224{ 223{
225 struct dwc3_of_simple *simple = dev_get_drvdata(dev); 224 struct dwc3_of_simple *simple = dev_get_drvdata(dev);
226 225
@@ -229,7 +228,6 @@ static int dwc3_of_simple_resume(struct device *dev)
229 228
230 return 0; 229 return 0;
231} 230}
232#endif
233 231
234static const struct dev_pm_ops dwc3_of_simple_dev_pm_ops = { 232static const struct dev_pm_ops dwc3_of_simple_dev_pm_ops = {
235 SET_SYSTEM_SLEEP_PM_OPS(dwc3_of_simple_suspend, dwc3_of_simple_resume) 233 SET_SYSTEM_SLEEP_PM_OPS(dwc3_of_simple_suspend, dwc3_of_simple_resume)
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 5edd79470368..1286076a8890 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -85,8 +85,8 @@ static int dwc3_byt_enable_ulpi_refclock(struct pci_dev *pci)
85 u32 value; 85 u32 value;
86 86
87 reg = pcim_iomap(pci, GP_RWBAR, 0); 87 reg = pcim_iomap(pci, GP_RWBAR, 0);
88 if (IS_ERR(reg)) 88 if (!reg)
89 return PTR_ERR(reg); 89 return -ENOMEM;
90 90
91 value = readl(reg + GP_RWREG1); 91 value = readl(reg + GP_RWREG1);
92 if (!(value & GP_RWREG1_ULPI_REFCLK_DISABLE)) 92 if (!(value & GP_RWREG1_ULPI_REFCLK_DISABLE))
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 032ea7d709ba..2b53194081ba 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -473,7 +473,6 @@ static int dwc3_gadget_set_xfer_resource(struct dwc3_ep *dep)
473 473
474/** 474/**
475 * dwc3_gadget_start_config - configure ep resources 475 * dwc3_gadget_start_config - configure ep resources
476 * @dwc: pointer to our controller context structure
477 * @dep: endpoint that is being enabled 476 * @dep: endpoint that is being enabled
478 * 477 *
479 * Issue a %DWC3_DEPCMD_DEPSTARTCFG command to @dep. After the command's 478 * Issue a %DWC3_DEPCMD_DEPSTARTCFG command to @dep. After the command's
diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c
index 53a48f561458..587c5037ff07 100644
--- a/drivers/usb/gadget/udc/fotg210-udc.c
+++ b/drivers/usb/gadget/udc/fotg210-udc.c
@@ -1063,12 +1063,15 @@ static const struct usb_gadget_ops fotg210_gadget_ops = {
1063static int fotg210_udc_remove(struct platform_device *pdev) 1063static int fotg210_udc_remove(struct platform_device *pdev)
1064{ 1064{
1065 struct fotg210_udc *fotg210 = platform_get_drvdata(pdev); 1065 struct fotg210_udc *fotg210 = platform_get_drvdata(pdev);
1066 int i;
1066 1067
1067 usb_del_gadget_udc(&fotg210->gadget); 1068 usb_del_gadget_udc(&fotg210->gadget);
1068 iounmap(fotg210->reg); 1069 iounmap(fotg210->reg);
1069 free_irq(platform_get_irq(pdev, 0), fotg210); 1070 free_irq(platform_get_irq(pdev, 0), fotg210);
1070 1071
1071 fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req); 1072 fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req);
1073 for (i = 0; i < FOTG210_MAX_NUM_EP; i++)
1074 kfree(fotg210->ep[i]);
1072 kfree(fotg210); 1075 kfree(fotg210);
1073 1076
1074 return 0; 1077 return 0;
@@ -1099,7 +1102,7 @@ static int fotg210_udc_probe(struct platform_device *pdev)
1099 /* initialize udc */ 1102 /* initialize udc */
1100 fotg210 = kzalloc(sizeof(struct fotg210_udc), GFP_KERNEL); 1103 fotg210 = kzalloc(sizeof(struct fotg210_udc), GFP_KERNEL);
1101 if (fotg210 == NULL) 1104 if (fotg210 == NULL)
1102 goto err_alloc; 1105 goto err;
1103 1106
1104 for (i = 0; i < FOTG210_MAX_NUM_EP; i++) { 1107 for (i = 0; i < FOTG210_MAX_NUM_EP; i++) {
1105 _ep[i] = kzalloc(sizeof(struct fotg210_ep), GFP_KERNEL); 1108 _ep[i] = kzalloc(sizeof(struct fotg210_ep), GFP_KERNEL);
@@ -1111,7 +1114,7 @@ static int fotg210_udc_probe(struct platform_device *pdev)
1111 fotg210->reg = ioremap(res->start, resource_size(res)); 1114 fotg210->reg = ioremap(res->start, resource_size(res));
1112 if (fotg210->reg == NULL) { 1115 if (fotg210->reg == NULL) {
1113 pr_err("ioremap error.\n"); 1116 pr_err("ioremap error.\n");
1114 goto err_map; 1117 goto err_alloc;
1115 } 1118 }
1116 1119
1117 spin_lock_init(&fotg210->lock); 1120 spin_lock_init(&fotg210->lock);
@@ -1159,7 +1162,7 @@ static int fotg210_udc_probe(struct platform_device *pdev)
1159 fotg210->ep0_req = fotg210_ep_alloc_request(&fotg210->ep[0]->ep, 1162 fotg210->ep0_req = fotg210_ep_alloc_request(&fotg210->ep[0]->ep,
1160 GFP_KERNEL); 1163 GFP_KERNEL);
1161 if (fotg210->ep0_req == NULL) 1164 if (fotg210->ep0_req == NULL)
1162 goto err_req; 1165 goto err_map;
1163 1166
1164 fotg210_init(fotg210); 1167 fotg210_init(fotg210);
1165 1168
@@ -1187,12 +1190,14 @@ err_req:
1187 fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req); 1190 fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req);
1188 1191
1189err_map: 1192err_map:
1190 if (fotg210->reg) 1193 iounmap(fotg210->reg);
1191 iounmap(fotg210->reg);
1192 1194
1193err_alloc: 1195err_alloc:
1196 for (i = 0; i < FOTG210_MAX_NUM_EP; i++)
1197 kfree(fotg210->ep[i]);
1194 kfree(fotg210); 1198 kfree(fotg210);
1195 1199
1200err:
1196 return ret; 1201 return ret;
1197} 1202}
1198 1203
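
The fotg210 error-path fix works because each goto label undoes exactly the allocations that succeeded before the failure, in reverse order of acquisition. A compact sketch of that unwind discipline, using hypothetical resources rather than the fotg210 ones:

#include <stdio.h>
#include <stdlib.h>

/* Each failure jumps to the label that frees only what was already
 * acquired; labels run in reverse order of acquisition. */
static int setup(void)
{
	char *a = NULL, *b = NULL, *c = NULL;
	int ret = -1;

	a = malloc(16);
	if (!a)
		goto err;
	b = malloc(16);
	if (!b)
		goto err_a;
	c = malloc(16);
	if (!c)
		goto err_b;

	/* success: for this demo we just release everything again */
	free(c);
	free(b);
	free(a);
	return 0;

err_b:
	free(b);
err_a:
	free(a);
err:
	return ret;
}

int main(void)
{
	return setup() ? EXIT_FAILURE : EXIT_SUCCESS;
}
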
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index 318246d8b2e2..b02ab2a8d927 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -1545,11 +1545,14 @@ static int net2280_pullup(struct usb_gadget *_gadget, int is_on)
1545 writel(tmp | BIT(USB_DETECT_ENABLE), &dev->usb->usbctl); 1545 writel(tmp | BIT(USB_DETECT_ENABLE), &dev->usb->usbctl);
1546 } else { 1546 } else {
1547 writel(tmp & ~BIT(USB_DETECT_ENABLE), &dev->usb->usbctl); 1547 writel(tmp & ~BIT(USB_DETECT_ENABLE), &dev->usb->usbctl);
1548 stop_activity(dev, dev->driver); 1548 stop_activity(dev, NULL);
1549 } 1549 }
1550 1550
1551 spin_unlock_irqrestore(&dev->lock, flags); 1551 spin_unlock_irqrestore(&dev->lock, flags);
1552 1552
1553 if (!is_on && dev->driver)
1554 dev->driver->disconnect(&dev->gadget);
1555
1553 return 0; 1556 return 0;
1554} 1557}
1555 1558
@@ -2466,8 +2469,11 @@ static void stop_activity(struct net2280 *dev, struct usb_gadget_driver *driver)
2466 nuke(&dev->ep[i]); 2469 nuke(&dev->ep[i]);
2467 2470
2468 /* report disconnect; the driver is already quiesced */ 2471 /* report disconnect; the driver is already quiesced */
2469 if (driver) 2472 if (driver) {
2473 spin_unlock(&dev->lock);
2470 driver->disconnect(&dev->gadget); 2474 driver->disconnect(&dev->gadget);
2475 spin_lock(&dev->lock);
2476 }
2471 2477
2472 usb_reinit(dev); 2478 usb_reinit(dev);
2473} 2479}
@@ -3341,6 +3347,8 @@ next_endpoints:
3341 BIT(PCI_RETRY_ABORT_INTERRUPT)) 3347 BIT(PCI_RETRY_ABORT_INTERRUPT))
3342 3348
3343static void handle_stat1_irqs(struct net2280 *dev, u32 stat) 3349static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
3350__releases(dev->lock)
3351__acquires(dev->lock)
3344{ 3352{
3345 struct net2280_ep *ep; 3353 struct net2280_ep *ep;
3346 u32 tmp, num, mask, scratch; 3354 u32 tmp, num, mask, scratch;
@@ -3381,12 +3389,14 @@ static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
3381 if (disconnect || reset) { 3389 if (disconnect || reset) {
3382 stop_activity(dev, dev->driver); 3390 stop_activity(dev, dev->driver);
3383 ep0_start(dev); 3391 ep0_start(dev);
3392 spin_unlock(&dev->lock);
3384 if (reset) 3393 if (reset)
3385 usb_gadget_udc_reset 3394 usb_gadget_udc_reset
3386 (&dev->gadget, dev->driver); 3395 (&dev->gadget, dev->driver);
3387 else 3396 else
3388 (dev->driver->disconnect) 3397 (dev->driver->disconnect)
3389 (&dev->gadget); 3398 (&dev->gadget);
3399 spin_lock(&dev->lock);
3390 return; 3400 return;
3391 } 3401 }
3392 } 3402 }
@@ -3405,6 +3415,7 @@ static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
3405 tmp = BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT); 3415 tmp = BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT);
3406 if (stat & tmp) { 3416 if (stat & tmp) {
3407 writel(tmp, &dev->regs->irqstat1); 3417 writel(tmp, &dev->regs->irqstat1);
3418 spin_unlock(&dev->lock);
3408 if (stat & BIT(SUSPEND_REQUEST_INTERRUPT)) { 3419 if (stat & BIT(SUSPEND_REQUEST_INTERRUPT)) {
3409 if (dev->driver->suspend) 3420 if (dev->driver->suspend)
3410 dev->driver->suspend(&dev->gadget); 3421 dev->driver->suspend(&dev->gadget);
@@ -3415,6 +3426,7 @@ static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
3415 dev->driver->resume(&dev->gadget); 3426 dev->driver->resume(&dev->gadget);
3416 /* at high speed, note erratum 0133 */ 3427 /* at high speed, note erratum 0133 */
3417 } 3428 }
3429 spin_lock(&dev->lock);
3418 stat &= ~tmp; 3430 stat &= ~tmp;
3419 } 3431 }
3420 3432
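
The net2280 hunks all follow one rule: drop dev->lock before calling into the gadget driver (disconnect/suspend/resume may sleep or retake the lock) and retake it afterwards. A small pthread sketch of the same shape, with made-up names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;

static void disconnect_cb(void)
{
	/* may take dev_lock itself or sleep -- must not run under it */
	printf("disconnect\n");
}

/* Drop the lock around the driver callback and retake it afterwards,
 * the pattern the net2280 hunks add around disconnect/suspend/resume. */
static void handle_event(void (*cb)(void))
{
	pthread_mutex_lock(&dev_lock);
	/* ... hardware state updates under the lock ... */
	pthread_mutex_unlock(&dev_lock);
	cb();
	pthread_mutex_lock(&dev_lock);
	/* ... continue under the lock ... */
	pthread_mutex_unlock(&dev_lock);
}

int main(void)
{
	handle_event(disconnect_cb);
	return 0;
}
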
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index 1f879b3f2c96..e1656f361e08 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -812,12 +812,15 @@ static void usb3_irq_epc_int_1_speed(struct renesas_usb3 *usb3)
812 switch (speed) { 812 switch (speed) {
813 case USB_STA_SPEED_SS: 813 case USB_STA_SPEED_SS:
814 usb3->gadget.speed = USB_SPEED_SUPER; 814 usb3->gadget.speed = USB_SPEED_SUPER;
815 usb3->gadget.ep0->maxpacket = USB3_EP0_SS_MAX_PACKET_SIZE;
815 break; 816 break;
816 case USB_STA_SPEED_HS: 817 case USB_STA_SPEED_HS:
817 usb3->gadget.speed = USB_SPEED_HIGH; 818 usb3->gadget.speed = USB_SPEED_HIGH;
819 usb3->gadget.ep0->maxpacket = USB3_EP0_HSFS_MAX_PACKET_SIZE;
818 break; 820 break;
819 case USB_STA_SPEED_FS: 821 case USB_STA_SPEED_FS:
820 usb3->gadget.speed = USB_SPEED_FULL; 822 usb3->gadget.speed = USB_SPEED_FULL;
823 usb3->gadget.ep0->maxpacket = USB3_EP0_HSFS_MAX_PACKET_SIZE;
821 break; 824 break;
822 default: 825 default:
823 usb3->gadget.speed = USB_SPEED_UNKNOWN; 826 usb3->gadget.speed = USB_SPEED_UNKNOWN;
@@ -2513,7 +2516,7 @@ static int renesas_usb3_init_ep(struct renesas_usb3 *usb3, struct device *dev,
2513 /* for control pipe */ 2516 /* for control pipe */
2514 usb3->gadget.ep0 = &usb3_ep->ep; 2517 usb3->gadget.ep0 = &usb3_ep->ep;
2515 usb_ep_set_maxpacket_limit(&usb3_ep->ep, 2518 usb_ep_set_maxpacket_limit(&usb3_ep->ep,
2516 USB3_EP0_HSFS_MAX_PACKET_SIZE); 2519 USB3_EP0_SS_MAX_PACKET_SIZE);
2517 usb3_ep->ep.caps.type_control = true; 2520 usb3_ep->ep.caps.type_control = true;
2518 usb3_ep->ep.caps.dir_in = true; 2521 usb3_ep->ep.caps.dir_in = true;
2519 usb3_ep->ep.caps.dir_out = true; 2522 usb3_ep->ep.caps.dir_out = true;
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index 072bd5d5738e..5b8a3d9530c4 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -2555,7 +2555,7 @@ static int u132_get_frame(struct usb_hcd *hcd)
2555 } else { 2555 } else {
2556 int frame = 0; 2556 int frame = 0;
2557 dev_err(&u132->platform_dev->dev, "TODO: u132_get_frame\n"); 2557 dev_err(&u132->platform_dev->dev, "TODO: u132_get_frame\n");
2558 msleep(100); 2558 mdelay(100);
2559 return frame; 2559 return frame;
2560 } 2560 }
2561} 2561}
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index ef350c33dc4a..b1f27aa38b10 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1613,6 +1613,10 @@ void xhci_endpoint_copy(struct xhci_hcd *xhci,
1613 in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2; 1613 in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
1614 in_ep_ctx->deq = out_ep_ctx->deq; 1614 in_ep_ctx->deq = out_ep_ctx->deq;
1615 in_ep_ctx->tx_info = out_ep_ctx->tx_info; 1615 in_ep_ctx->tx_info = out_ep_ctx->tx_info;
1616 if (xhci->quirks & XHCI_MTK_HOST) {
1617 in_ep_ctx->reserved[0] = out_ep_ctx->reserved[0];
1618 in_ep_ctx->reserved[1] = out_ep_ctx->reserved[1];
1619 }
1616} 1620}
1617 1621
1618/* Copy output xhci_slot_ctx to the input xhci_slot_ctx. 1622/* Copy output xhci_slot_ctx to the input xhci_slot_ctx.
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 8dc77e34a859..94e939249b2b 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -153,7 +153,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
153{ 153{
154 const struct xhci_plat_priv *priv_match; 154 const struct xhci_plat_priv *priv_match;
155 const struct hc_driver *driver; 155 const struct hc_driver *driver;
156 struct device *sysdev; 156 struct device *sysdev, *tmpdev;
157 struct xhci_hcd *xhci; 157 struct xhci_hcd *xhci;
158 struct resource *res; 158 struct resource *res;
159 struct usb_hcd *hcd; 159 struct usb_hcd *hcd;
@@ -273,19 +273,24 @@ static int xhci_plat_probe(struct platform_device *pdev)
273 goto disable_clk; 273 goto disable_clk;
274 } 274 }
275 275
276 if (device_property_read_bool(sysdev, "usb2-lpm-disable")) 276 /* imod_interval is the interrupt moderation value in nanoseconds. */
277 xhci->quirks |= XHCI_HW_LPM_DISABLE; 277 xhci->imod_interval = 40000;
278 278
279 if (device_property_read_bool(sysdev, "usb3-lpm-capable")) 279 /* Iterate over all parent nodes for finding quirks */
280 xhci->quirks |= XHCI_LPM_SUPPORT; 280 for (tmpdev = &pdev->dev; tmpdev; tmpdev = tmpdev->parent) {
281 281
282 if (device_property_read_bool(&pdev->dev, "quirk-broken-port-ped")) 282 if (device_property_read_bool(tmpdev, "usb2-lpm-disable"))
283 xhci->quirks |= XHCI_BROKEN_PORT_PED; 283 xhci->quirks |= XHCI_HW_LPM_DISABLE;
284 284
285 /* imod_interval is the interrupt moderation value in nanoseconds. */ 285 if (device_property_read_bool(tmpdev, "usb3-lpm-capable"))
286 xhci->imod_interval = 40000; 286 xhci->quirks |= XHCI_LPM_SUPPORT;
287 device_property_read_u32(sysdev, "imod-interval-ns", 287
288 &xhci->imod_interval); 288 if (device_property_read_bool(tmpdev, "quirk-broken-port-ped"))
289 xhci->quirks |= XHCI_BROKEN_PORT_PED;
290
291 device_property_read_u32(tmpdev, "imod-interval-ns",
292 &xhci->imod_interval);
293 }
289 294
290 hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev, "usb-phy", 0); 295 hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev, "usb-phy", 0);
291 if (IS_ERR(hcd->usb_phy)) { 296 if (IS_ERR(hcd->usb_phy)) {
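
The xhci-plat change moves quirk detection from the single sysdev to a walk over every parent device, so a property set on any ancestor node takes effect. A rough standalone sketch of that walk; the struct and property here are stand-ins, not the kernel's:

#include <stdbool.h>
#include <stdio.h>

struct device {
	const char *name;
	bool usb2_lpm_disable;		/* stand-in for a DT property */
	const struct device *parent;
};

#define QUIRK_HW_LPM_DISABLE 0x1

/* Walk from the device up through its parents, OR-ing in quirks found
 * anywhere along the chain, as the xhci-plat hunk now does. */
static unsigned int gather_quirks(const struct device *dev)
{
	unsigned int quirks = 0;
	const struct device *d;

	for (d = dev; d; d = d->parent)
		if (d->usb2_lpm_disable)
			quirks |= QUIRK_HW_LPM_DISABLE;
	return quirks;
}

int main(void)
{
	struct device root = { "root", true, NULL };
	struct device child = { "xhci", false, &root };

	printf("quirks=%#x\n", gather_quirks(&child));
	return 0;
}
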
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 61f48b17e57b..0420eefa647a 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -37,6 +37,21 @@ static unsigned long long quirks;
37module_param(quirks, ullong, S_IRUGO); 37module_param(quirks, ullong, S_IRUGO);
38MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default"); 38MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");
39 39
40static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring)
41{
42 struct xhci_segment *seg = ring->first_seg;
43
44 if (!td || !td->start_seg)
45 return false;
46 do {
47 if (seg == td->start_seg)
48 return true;
49 seg = seg->next;
50 } while (seg && seg != ring->first_seg);
51
52 return false;
53}
54
40/* TODO: copied from ehci-hcd.c - can this be refactored? */ 55/* TODO: copied from ehci-hcd.c - can this be refactored? */
41/* 56/*
42 * xhci_handshake - spin reading hc until handshake completes or fails 57 * xhci_handshake - spin reading hc until handshake completes or fails
@@ -1571,6 +1586,21 @@ static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1571 goto done; 1586 goto done;
1572 } 1587 }
1573 1588
1589 /*
1590 * Check that the ring was not re-allocated since the URB was enqueued;
1591 * if it was, make sure none of the ring-related pointers in this URB's
1592 * private data are touched, such as td_list, or we overwrite freed data.
1593 */
1594 if (!td_on_ring(&urb_priv->td[0], ep_ring)) {
1595 xhci_err(xhci, "Canceled URB td not found on endpoint ring");
1596 for (i = urb_priv->num_tds_done; i < urb_priv->num_tds; i++) {
1597 td = &urb_priv->td[i];
1598 if (!list_empty(&td->cancelled_td_list))
1599 list_del_init(&td->cancelled_td_list);
1600 }
1601 goto err_giveback;
1602 }
1603
1574 if (xhci->xhc_state & XHCI_STATE_HALTED) { 1604 if (xhci->xhc_state & XHCI_STATE_HALTED) {
1575 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, 1605 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1576 "HC halted, freeing TD manually."); 1606 "HC halted, freeing TD manually.");
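
td_on_ring() above is a one-pass membership test over the ring's circular segment list; if the TD's start segment is not found, the URB's private data points into a freed ring and must not be touched. A standalone sketch of the same check, with simplified types:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct segment {
	struct segment *next;	/* circular: last segment points back */
};

struct ring {
	struct segment *first_seg;
};

/* Walk the circular segment list once; true iff 'seg' belongs to it.
 * Mirrors the shape of the new td_on_ring() check. */
static bool seg_on_ring(const struct segment *seg, const struct ring *ring)
{
	const struct segment *s = ring->first_seg;

	if (!seg || !s)
		return false;
	do {
		if (s == seg)
			return true;
		s = s->next;
	} while (s && s != ring->first_seg);
	return false;
}

int main(void)
{
	struct segment a, b, stale;
	struct ring ring = { &a };

	a.next = &b;
	b.next = &a;		/* close the ring */
	stale.next = NULL;	/* segment from a freed ring */

	printf("%d %d\n", seg_on_ring(&b, &ring), seg_on_ring(&stale, &ring));
	return 0;
}
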
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
index 82f220631bd7..b5d661644263 100644
--- a/drivers/usb/misc/uss720.c
+++ b/drivers/usb/misc/uss720.c
@@ -369,7 +369,7 @@ static unsigned char parport_uss720_frob_control(struct parport *pp, unsigned ch
369 mask &= 0x0f; 369 mask &= 0x0f;
370 val &= 0x0f; 370 val &= 0x0f;
371 d = (priv->reg[1] & (~mask)) ^ val; 371 d = (priv->reg[1] & (~mask)) ^ val;
372 if (set_1284_register(pp, 2, d, GFP_KERNEL)) 372 if (set_1284_register(pp, 2, d, GFP_ATOMIC))
373 return 0; 373 return 0;
374 priv->reg[1] = d; 374 priv->reg[1] = d;
375 return d & 0xf; 375 return d & 0xf;
@@ -379,7 +379,7 @@ static unsigned char parport_uss720_read_status(struct parport *pp)
379{ 379{
380 unsigned char ret; 380 unsigned char ret;
381 381
382 if (get_1284_register(pp, 1, &ret, GFP_KERNEL)) 382 if (get_1284_register(pp, 1, &ret, GFP_ATOMIC))
383 return 0; 383 return 0;
384 return ret & 0xf8; 384 return ret & 0xf8;
385} 385}
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index 3be40eaa1ac9..6d9fd5f64903 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -413,6 +413,9 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
413 spin_unlock_irqrestore(&dev->lock, flags); 413 spin_unlock_irqrestore(&dev->lock, flags);
414 mutex_unlock(&dev->io_mutex); 414 mutex_unlock(&dev->io_mutex);
415 415
416 if (WARN_ON_ONCE(len >= sizeof(in_buffer)))
417 return -EIO;
418
416 return simple_read_from_buffer(buffer, count, ppos, in_buffer, len); 419 return simple_read_from_buffer(buffer, count, ppos, in_buffer, len);
417} 420}
418 421
@@ -421,13 +424,13 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
421{ 424{
422 struct usb_yurex *dev; 425 struct usb_yurex *dev;
423 int i, set = 0, retval = 0; 426 int i, set = 0, retval = 0;
424 char buffer[16]; 427 char buffer[16 + 1];
425 char *data = buffer; 428 char *data = buffer;
426 unsigned long long c, c2 = 0; 429 unsigned long long c, c2 = 0;
427 signed long timeout = 0; 430 signed long timeout = 0;
428 DEFINE_WAIT(wait); 431 DEFINE_WAIT(wait);
429 432
430 count = min(sizeof(buffer), count); 433 count = min(sizeof(buffer) - 1, count);
431 dev = file->private_data; 434 dev = file->private_data;
432 435
433 /* verify that we actually have some data to write */ 436 /* verify that we actually have some data to write */
@@ -446,6 +449,7 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
446 retval = -EFAULT; 449 retval = -EFAULT;
447 goto error; 450 goto error;
448 } 451 }
452 buffer[count] = 0;
449 memset(dev->cntl_buffer, CMD_PADDING, YUREX_BUF_SIZE); 453 memset(dev->cntl_buffer, CMD_PADDING, YUREX_BUF_SIZE);
450 454
451 switch (buffer[0]) { 455 switch (buffer[0]) {
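
The yurex_write fix is the classic bounded-copy pattern: reserve one byte for the terminator, clamp the copy length, then NUL-terminate before parsing. A minimal userspace sketch (memcpy stands in for copy_from_user):

#include <stdio.h>
#include <string.h>

/* Copy at most sizeof(buf)-1 bytes of untrusted input and always
 * NUL-terminate, as the yurex_write fix does with its 16+1 buffer. */
static void take_input(const char *user, size_t count)
{
	char buf[16 + 1];

	if (count > sizeof(buf) - 1)
		count = sizeof(buf) - 1;
	memcpy(buf, user, count);
	buf[count] = '\0';	/* safe to parse as a string now */
	printf("got \"%s\"\n", buf);
}

int main(void)
{
	take_input("123456789012345678901234", 24);
	return 0;
}
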
diff --git a/drivers/usb/mtu3/mtu3_core.c b/drivers/usb/mtu3/mtu3_core.c
index eecfd0671362..d045d8458f81 100644
--- a/drivers/usb/mtu3/mtu3_core.c
+++ b/drivers/usb/mtu3/mtu3_core.c
@@ -107,8 +107,12 @@ static int mtu3_device_enable(struct mtu3 *mtu)
107 (SSUSB_U2_PORT_DIS | SSUSB_U2_PORT_PDN | 107 (SSUSB_U2_PORT_DIS | SSUSB_U2_PORT_PDN |
108 SSUSB_U2_PORT_HOST_SEL)); 108 SSUSB_U2_PORT_HOST_SEL));
109 109
110 if (mtu->ssusb->dr_mode == USB_DR_MODE_OTG) 110 if (mtu->ssusb->dr_mode == USB_DR_MODE_OTG) {
111 mtu3_setbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_OTG_SEL); 111 mtu3_setbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_OTG_SEL);
112 if (mtu->is_u3_ip)
113 mtu3_setbits(ibase, SSUSB_U3_CTRL(0),
114 SSUSB_U3_PORT_DUAL_MODE);
115 }
112 116
113 return ssusb_check_clocks(mtu->ssusb, check_clk); 117 return ssusb_check_clocks(mtu->ssusb, check_clk);
114} 118}
diff --git a/drivers/usb/mtu3/mtu3_hw_regs.h b/drivers/usb/mtu3/mtu3_hw_regs.h
index 6ee371478d89..a45bb253939f 100644
--- a/drivers/usb/mtu3/mtu3_hw_regs.h
+++ b/drivers/usb/mtu3/mtu3_hw_regs.h
@@ -459,6 +459,7 @@
459 459
460/* U3D_SSUSB_U3_CTRL_0P */ 460/* U3D_SSUSB_U3_CTRL_0P */
461#define SSUSB_U3_PORT_SSP_SPEED BIT(9) 461#define SSUSB_U3_PORT_SSP_SPEED BIT(9)
462#define SSUSB_U3_PORT_DUAL_MODE BIT(7)
462#define SSUSB_U3_PORT_HOST_SEL BIT(2) 463#define SSUSB_U3_PORT_HOST_SEL BIT(2)
463#define SSUSB_U3_PORT_PDN BIT(1) 464#define SSUSB_U3_PORT_PDN BIT(1)
464#define SSUSB_U3_PORT_DIS BIT(0) 465#define SSUSB_U3_PORT_DIS BIT(0)
diff --git a/drivers/usb/serial/io_ti.h b/drivers/usb/serial/io_ti.h
index e53c68261017..9bbcee37524e 100644
--- a/drivers/usb/serial/io_ti.h
+++ b/drivers/usb/serial/io_ti.h
@@ -173,7 +173,7 @@ struct ump_interrupt {
173} __attribute__((packed)); 173} __attribute__((packed));
174 174
175 175
176#define TIUMP_GET_PORT_FROM_CODE(c) (((c) >> 4) - 3) 176#define TIUMP_GET_PORT_FROM_CODE(c) (((c) >> 6) & 0x01)
177#define TIUMP_GET_FUNC_FROM_CODE(c) ((c) & 0x0f) 177#define TIUMP_GET_FUNC_FROM_CODE(c) ((c) & 0x0f)
178#define TIUMP_INTERRUPT_CODE_LSR 0x03 178#define TIUMP_INTERRUPT_CODE_LSR 0x03
179#define TIUMP_INTERRUPT_CODE_MSR 0x04 179#define TIUMP_INTERRUPT_CODE_MSR 0x04
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 3010878f7f8e..e3c5832337e0 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -1119,7 +1119,7 @@ static void ti_break(struct tty_struct *tty, int break_state)
1119 1119
1120static int ti_get_port_from_code(unsigned char code) 1120static int ti_get_port_from_code(unsigned char code)
1121{ 1121{
1122 return (code >> 4) - 3; 1122 return (code >> 6) & 0x01;
1123} 1123}
1124 1124
1125static int ti_get_func_from_code(unsigned char code) 1125static int ti_get_func_from_code(unsigned char code)
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index c267f2812a04..e227bb5b794f 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -376,6 +376,15 @@ static int queuecommand_lck(struct scsi_cmnd *srb,
376 return 0; 376 return 0;
377 } 377 }
378 378
379 if ((us->fflags & US_FL_NO_ATA_1X) &&
380 (srb->cmnd[0] == ATA_12 || srb->cmnd[0] == ATA_16)) {
381 memcpy(srb->sense_buffer, usb_stor_sense_invalidCDB,
382 sizeof(usb_stor_sense_invalidCDB));
383 srb->result = SAM_STAT_CHECK_CONDITION;
384 done(srb);
385 return 0;
386 }
387
379 /* enqueue the command and wake up the control thread */ 388 /* enqueue the command and wake up the control thread */
380 srb->scsi_done = done; 389 srb->scsi_done = done;
381 us->srb = srb; 390 us->srb = srb;
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 9e9de5452860..1f7b401c4d04 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -842,6 +842,27 @@ static int uas_slave_configure(struct scsi_device *sdev)
842 sdev->skip_ms_page_8 = 1; 842 sdev->skip_ms_page_8 = 1;
843 sdev->wce_default_on = 1; 843 sdev->wce_default_on = 1;
844 } 844 }
845
846 /*
847 * Some disks return the total number of blocks in response
848 * to READ CAPACITY rather than the highest block number.
849 * If this device makes that mistake, tell the sd driver.
850 */
851 if (devinfo->flags & US_FL_FIX_CAPACITY)
852 sdev->fix_capacity = 1;
853
854 /*
855 * Some devices don't like MODE SENSE with page=0x3f,
856 * which is the command used for checking if a device
857 * is write-protected. Now that we tell the sd driver
858 * to do a 192-byte transfer with this command, the
859 * majority of devices work fine, but a few still can't
860 * handle it. The sd driver will simply assume those
861 * devices are write-enabled.
862 */
863 if (devinfo->flags & US_FL_NO_WP_DETECT)
864 sdev->skip_ms_page_3f = 1;
865
845 scsi_change_queue_depth(sdev, devinfo->qdepth - 2); 866 scsi_change_queue_depth(sdev, devinfo->qdepth - 2);
846 return 0; 867 return 0;
847} 868}
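
The fix_capacity comment above describes an off-by-one: READ CAPACITY should report the highest addressable LBA, but some bridges report the total block count, which is one higher. A tiny sketch of the adjustment the sd driver applies when the flag is set; the numbers are illustrative:

#include <stdint.h>
#include <stdio.h>

/* READ CAPACITY(10) is defined to return the highest addressable LBA.
 * Devices with the FIX_CAPACITY quirk return the total block count
 * instead, so the adjustment is a single decrement. */
static uint32_t last_lba(uint32_t reported, int fix_capacity)
{
	return fix_capacity ? reported - 1 : reported;
}

int main(void)
{
	/* a 1000-block disk: the correct answer is LBA 999 */
	printf("%u %u\n", last_lba(999, 0), last_lba(1000, 1));
	return 0;
}
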
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 22fcfccf453a..f7f83b21dc74 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -2288,6 +2288,13 @@ UNUSUAL_DEV( 0x2735, 0x100b, 0x0000, 0x9999,
2288 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 2288 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2289 US_FL_GO_SLOW ), 2289 US_FL_GO_SLOW ),
2290 2290
2291/* Reported-by: Tim Anderson <tsa@biglakesoftware.com> */
2292UNUSUAL_DEV( 0x2ca3, 0x0031, 0x0000, 0x9999,
2293 "DJI",
2294 "CineSSD",
2295 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2296 US_FL_NO_ATA_1X),
2297
2291/* 2298/*
2292 * Reported by Frederic Marchal <frederic.marchal@wowcompany.com> 2299 * Reported by Frederic Marchal <frederic.marchal@wowcompany.com>
2293 * Mio Moov 330 2300 * Mio Moov 330
diff --git a/drivers/usb/typec/bus.c b/drivers/usb/typec/bus.c
index 95a2b10127db..76299b6ff06d 100644
--- a/drivers/usb/typec/bus.c
+++ b/drivers/usb/typec/bus.c
@@ -255,12 +255,13 @@ EXPORT_SYMBOL_GPL(typec_altmode_unregister_driver);
255/* API for the port drivers */ 255/* API for the port drivers */
256 256
257/** 257/**
258 * typec_match_altmode - Match SVID to an array of alternate modes 258 * typec_match_altmode - Match SVID and mode to an array of alternate modes
259 * @altmodes: Array of alternate modes 259 * @altmodes: Array of alternate modes
260 * @n: Number of elements in the array, or -1 for NULL termiated arrays 260 * @n: Number of elements in the array, or -1 for NULL terminated arrays
261 * @svid: Standard or Vendor ID to match with 261 * @svid: Standard or Vendor ID to match with
262 * @mode: Mode to match with
262 * 263 *
263 * Return pointer to an alternate mode with SVID mathing @svid, or NULL when no 264 * Return pointer to an alternate mode with SVID matching @svid, or NULL when no
264 * match is found. 265 * match is found.
265 */ 266 */
266struct typec_altmode *typec_match_altmode(struct typec_altmode **altmodes, 267struct typec_altmode *typec_match_altmode(struct typec_altmode **altmodes,
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index c202975f8097..e61dffb27a0c 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -1484,7 +1484,6 @@ EXPORT_SYMBOL_GPL(typec_set_mode);
1484 * typec_port_register_altmode - Register USB Type-C Port Alternate Mode 1484 * typec_port_register_altmode - Register USB Type-C Port Alternate Mode
1485 * @port: USB Type-C Port that supports the alternate mode 1485 * @port: USB Type-C Port that supports the alternate mode
1486 * @desc: Description of the alternate mode 1486 * @desc: Description of the alternate mode
1487 * @drvdata: Private pointer to driver specific info
1488 * 1487 *
1489 * This routine is used to register an alternate mode that @port is capable of 1488 * This routine is used to register an alternate mode that @port is capable of
1490 * supporting. 1489 * supporting.
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 96c1d8400822..b13c6b4b2c66 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -952,7 +952,7 @@ static void vhost_iotlb_notify_vq(struct vhost_dev *d,
952 list_for_each_entry_safe(node, n, &d->pending_list, node) { 952 list_for_each_entry_safe(node, n, &d->pending_list, node) {
953 struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb; 953 struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;
954 if (msg->iova <= vq_msg->iova && 954 if (msg->iova <= vq_msg->iova &&
955 msg->iova + msg->size - 1 > vq_msg->iova && 955 msg->iova + msg->size - 1 >= vq_msg->iova &&
956 vq_msg->type == VHOST_IOTLB_MISS) { 956 vq_msg->type == VHOST_IOTLB_MISS) {
957 vhost_poll_queue(&node->vq->poll); 957 vhost_poll_queue(&node->vq->poll);
958 list_del(&node->node); 958 list_del(&node->node);
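
The vhost fix turns the interval test into a proper closed-interval overlap check; with '>' a one-byte update whose address equals the waiter's IOVA never triggered the wakeup. A minimal sketch of the corrected predicate:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* A message covers the closed interval [iova, iova + size - 1].
 * The fix replaces '>' with '>=' so a single-byte update (size == 1)
 * that lands exactly on a waiter's iova still wakes it. */
static bool msg_covers(uint64_t iova, uint64_t size, uint64_t miss_iova)
{
	return iova <= miss_iova && iova + size - 1 >= miss_iova;
}

int main(void)
{
	/* with '>' this case was missed */
	printf("%d\n", msg_covers(0x1000, 1, 0x1000));	/* prints 1 */
	return 0;
}
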
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index b459edfacff3..90d387b50ab7 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -79,15 +79,19 @@ config XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
79 This value is used to allocate enough space in internal 79 This value is used to allocate enough space in internal
80 tables needed for physical memory administration. 80 tables needed for physical memory administration.
81 81
82config XEN_SCRUB_PAGES 82config XEN_SCRUB_PAGES_DEFAULT
83 bool "Scrub pages before returning them to system" 83 bool "Scrub pages before returning them to system by default"
84 depends on XEN_BALLOON 84 depends on XEN_BALLOON
85 default y 85 default y
86 help 86 help
87 Scrub pages before returning them to the system for reuse by 87 Scrub pages before returning them to the system for reuse by
88 other domains. This makes sure that any confidential data 88 other domains. This makes sure that any confidential data
89 is not accidentally visible to other domains. It is more 89 is not accidentally visible to other domains. It is more
90 secure, but slightly less efficient. 90 secure, but slightly less efficient. This can be controlled with
91 the xen_scrub_pages=0 parameter and
92 /sys/devices/system/xen_memory/xen_memory0/scrub_pages.
93 This option only sets the default value.
94
91 If in doubt, say yes. 95 If in doubt, say yes.
92 96
93config XEN_DEV_EVTCHN 97config XEN_DEV_EVTCHN
diff --git a/drivers/xen/cpu_hotplug.c b/drivers/xen/cpu_hotplug.c
index d4265c8ebb22..b1357aa4bc55 100644
--- a/drivers/xen/cpu_hotplug.c
+++ b/drivers/xen/cpu_hotplug.c
@@ -19,15 +19,16 @@ static void enable_hotplug_cpu(int cpu)
19 19
20static void disable_hotplug_cpu(int cpu) 20static void disable_hotplug_cpu(int cpu)
21{ 21{
22 if (cpu_online(cpu)) { 22 if (!cpu_is_hotpluggable(cpu))
23 lock_device_hotplug(); 23 return;
24 lock_device_hotplug();
25 if (cpu_online(cpu))
24 device_offline(get_cpu_device(cpu)); 26 device_offline(get_cpu_device(cpu));
25 unlock_device_hotplug(); 27 if (!cpu_online(cpu) && cpu_present(cpu)) {
26 }
27 if (cpu_present(cpu))
28 xen_arch_unregister_cpu(cpu); 28 xen_arch_unregister_cpu(cpu);
29 29 set_cpu_present(cpu, false);
30 set_cpu_present(cpu, false); 30 }
31 unlock_device_hotplug();
31} 32}
32 33
33static int vcpu_online(unsigned int cpu) 34static int vcpu_online(unsigned int cpu)
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 08e4af04d6f2..e6c1934734b7 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -138,7 +138,7 @@ static int set_evtchn_to_irq(unsigned evtchn, unsigned irq)
138 clear_evtchn_to_irq_row(row); 138 clear_evtchn_to_irq_row(row);
139 } 139 }
140 140
141 evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)] = irq; 141 evtchn_to_irq[row][col] = irq;
142 return 0; 142 return 0;
143} 143}
144 144
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 57390c7666e5..b0b02a501167 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -492,12 +492,19 @@ static bool in_range(struct gntdev_grant_map *map,
492 return true; 492 return true;
493} 493}
494 494
495static void unmap_if_in_range(struct gntdev_grant_map *map, 495static int unmap_if_in_range(struct gntdev_grant_map *map,
496 unsigned long start, unsigned long end) 496 unsigned long start, unsigned long end,
497 bool blockable)
497{ 498{
498 unsigned long mstart, mend; 499 unsigned long mstart, mend;
499 int err; 500 int err;
500 501
502 if (!in_range(map, start, end))
503 return 0;
504
505 if (!blockable)
506 return -EAGAIN;
507
501 mstart = max(start, map->vma->vm_start); 508 mstart = max(start, map->vma->vm_start);
502 mend = min(end, map->vma->vm_end); 509 mend = min(end, map->vma->vm_end);
503 pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n", 510 pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
@@ -508,6 +515,8 @@ static void unmap_if_in_range(struct gntdev_grant_map *map,
508 (mstart - map->vma->vm_start) >> PAGE_SHIFT, 515 (mstart - map->vma->vm_start) >> PAGE_SHIFT,
509 (mend - mstart) >> PAGE_SHIFT); 516 (mend - mstart) >> PAGE_SHIFT);
510 WARN_ON(err); 517 WARN_ON(err);
518
519 return 0;
511} 520}
512 521
513static int mn_invl_range_start(struct mmu_notifier *mn, 522static int mn_invl_range_start(struct mmu_notifier *mn,
@@ -519,25 +528,20 @@ static int mn_invl_range_start(struct mmu_notifier *mn,
 	struct gntdev_grant_map *map;
 	int ret = 0;
 
-	/* TODO do we really need a mutex here? */
 	if (blockable)
 		mutex_lock(&priv->lock);
 	else if (!mutex_trylock(&priv->lock))
 		return -EAGAIN;
 
 	list_for_each_entry(map, &priv->maps, next) {
-		if (in_range(map, start, end)) {
-			ret = -EAGAIN;
-			goto out_unlock;
-		}
-		unmap_if_in_range(map, start, end);
+		ret = unmap_if_in_range(map, start, end, blockable);
+		if (ret)
+			goto out_unlock;
 	}
 	list_for_each_entry(map, &priv->freeable_maps, next) {
-		if (in_range(map, start, end)) {
-			ret = -EAGAIN;
-			goto out_unlock;
-		}
-		unmap_if_in_range(map, start, end);
+		ret = unmap_if_in_range(map, start, end, blockable);
+		if (ret)
+			goto out_unlock;
 	}
 
 out_unlock:
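The refactor folds the in_range() test into unmap_if_in_range() and gives it a return value, so mn_invl_range_start() no longer refuses every overlapping range with -EAGAIN: when the notifier may block, the range is actually unmapped, and only a non-blockable call that hits an overlapping map bails out. A stand-alone sketch of that may-not-block pattern, with illustrative names (process_range is not a kernel function):

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int process_range(bool blockable)
{
	/* Non-blockable callers must not sleep, so only try the lock. */
	if (blockable)
		pthread_mutex_lock(&lock);
	else if (pthread_mutex_trylock(&lock) != 0)
		return -EAGAIN;

	/* ... walk the maps; each helper itself returns -EAGAIN when it
	 * would have to do blocking work it is not allowed to do ... */

	pthread_mutex_unlock(&lock);
	return 0;
}

int main(void)
{
	printf("blockable: %d\n", process_range(true));
	printf("non-blockable (uncontended): %d\n", process_range(false));
	return 0;
}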
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index c93d8ef8df34..5bb01a62f214 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -280,9 +280,11 @@ static void sysrq_handler(struct xenbus_watch *watch, const char *path,
 	/*
 	 * The Xenstore watch fires directly after registering it and
 	 * after a suspend/resume cycle. So ENOENT is no error but
-	 * might happen in those cases.
+	 * might happen in those cases. ERANGE is observed when we get
+	 * an empty value (''), this happens when we acknowledge the
+	 * request by writing '\0' below.
 	 */
-	if (err != -ENOENT)
+	if (err != -ENOENT && err != -ERANGE)
 		pr_err("Error %d reading sysrq code in control/sysrq\n",
 		       err);
 	xenbus_transaction_end(xbt, 1);
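The fix simply widens the set of read results treated as expected: ENOENT (node not there yet) and now ERANGE (node holds the empty value written back as the acknowledgement). If further benign cases accumulate, the test could be factored out; a hypothetical helper, purely illustrative and not part of the patch:

/* Hypothetical helper: read errors that are part of normal sysrq-watch
 * operation and should not be logged. */
static bool sysrq_read_err_is_benign(int err)
{
	return err == -ENOENT || err == -ERANGE;
}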
diff --git a/drivers/xen/mem-reservation.c b/drivers/xen/mem-reservation.c
index 084799c6180e..3782cf070338 100644
--- a/drivers/xen/mem-reservation.c
+++ b/drivers/xen/mem-reservation.c
@@ -14,6 +14,10 @@
 
 #include <xen/interface/memory.h>
 #include <xen/mem-reservation.h>
+#include <linux/moduleparam.h>
+
+bool __read_mostly xen_scrub_pages = IS_ENABLED(CONFIG_XEN_SCRUB_PAGES_DEFAULT);
+core_param(xen_scrub_pages, xen_scrub_pages, bool, 0);
 
 /*
  * Use one extent per PAGE_SIZE to avoid to break down the page into
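core_param() registers a bare (unprefixed) kernel command-line parameter, so scrubbing can be toggled at boot with xen_scrub_pages=0 or xen_scrub_pages=1 regardless of the CONFIG_XEN_SCRUB_PAGES_DEFAULT build default. A user-space sketch of how such a flag is typically consumed when pages are handed back to the hypervisor; the function name and shape here are illustrative only (the kernel's inline helper lives in xen/mem-reservation.h):

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* Mirrors the new global: true when ballooned-out pages must be zeroed. */
bool xen_scrub_pages = true;

/* Illustrative stand-in for the kernel's scrub helper: wipe a page's
 * previous contents before the hypervisor may hand it to another guest. */
static void scrub_page(void *page, size_t size)
{
	if (xen_scrub_pages)
		memset(page, 0, size);
}

int main(void)
{
	char page[4096] = "secret";

	scrub_page(page, sizeof(page));
	return page[0];	/* 0 when scrubbing is enabled */
}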
diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
index 294f35ce9e46..63c1494a8d73 100644
--- a/drivers/xen/xen-balloon.c
+++ b/drivers/xen/xen-balloon.c
@@ -44,6 +44,7 @@
 #include <xen/xenbus.h>
 #include <xen/features.h>
 #include <xen/page.h>
+#include <xen/mem-reservation.h>
 
 #define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10))
 
@@ -137,6 +138,7 @@ static DEVICE_ULONG_ATTR(schedule_delay, 0444, balloon_stats.schedule_delay);
 static DEVICE_ULONG_ATTR(max_schedule_delay, 0644, balloon_stats.max_schedule_delay);
 static DEVICE_ULONG_ATTR(retry_count, 0444, balloon_stats.retry_count);
 static DEVICE_ULONG_ATTR(max_retry_count, 0644, balloon_stats.max_retry_count);
+static DEVICE_BOOL_ATTR(scrub_pages, 0644, xen_scrub_pages);
 
 static ssize_t show_target_kb(struct device *dev, struct device_attribute *attr,
 			      char *buf)
@@ -203,6 +205,7 @@ static struct attribute *balloon_attrs[] = {
 	&dev_attr_max_schedule_delay.attr.attr,
 	&dev_attr_retry_count.attr.attr,
 	&dev_attr_max_retry_count.attr.attr,
+	&dev_attr_scrub_pages.attr.attr,
 	NULL
 };
 
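DEVICE_BOOL_ATTR() wires the same xen_scrub_pages variable up as a writable sysfs attribute, so the policy can also be flipped at runtime. On a typical build the balloon device sits at /sys/devices/system/xen_memory/xen_memory0/, but that path is an assumption about the running system, not something this hunk establishes:

#include <stdio.h>

int main(void)
{
	/* Assumed sysfs location of the xen-balloon device. */
	const char *path =
		"/sys/devices/system/xen_memory/xen_memory0/scrub_pages";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fputs("1\n", f);	/* enable scrubbing of ballooned-out pages */
	fclose(f);
	return 0;
}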
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index f2088838f690..5b471889d723 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -402,10 +402,19 @@ static ssize_t modalias_show(struct device *dev,
 }
 static DEVICE_ATTR_RO(modalias);
 
+static ssize_t state_show(struct device *dev,
+			  struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%s\n",
+		       xenbus_strstate(to_xenbus_device(dev)->state));
+}
+static DEVICE_ATTR_RO(state);
+
 static struct attribute *xenbus_dev_attrs[] = {
 	&dev_attr_nodename.attr,
 	&dev_attr_devtype.attr,
 	&dev_attr_modalias.attr,
+	&dev_attr_state.attr,
 	NULL,
 };
 
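The new attribute exposes each xenbus device's connection state (via xenbus_strstate(), e.g. "Connected" or "Closed") so user space no longer has to parse xenstore for it. A small sketch that walks /sys/bus/xen/devices and prints every state file; that directory layout is the usual one for bus devices, assumed here rather than shown by the patch:

#include <dirent.h>
#include <stdio.h>

int main(void)
{
	const char *base = "/sys/bus/xen/devices";	/* assumed layout */
	DIR *d = opendir(base);
	struct dirent *e;
	char path[512], state[64];

	if (!d) {
		perror("opendir");
		return 1;
	}
	while ((e = readdir(d)) != NULL) {
		FILE *f;

		if (e->d_name[0] == '.')
			continue;
		snprintf(path, sizeof(path), "%s/%s/state", base, e->d_name);
		f = fopen(path, "r");
		if (!f)
			continue;
		if (fgets(state, sizeof(state), f))
			printf("%-24s %s", e->d_name, state);	/* e.g. "Connected" */
		fclose(f);
	}
	closedir(d);
	return 0;
}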