author		Ingo Molnar <mingo@kernel.org>	2016-01-06 05:02:29 -0500
committer	Ingo Molnar <mingo@kernel.org>	2016-01-06 05:02:29 -0500
commit		567bee2803cb46caeb6011de5b738fde33dc3896 (patch)
tree		05bab01377bffa356bfbe06c4b6193b23b7c24ca /drivers
parent		aa0b7ae06387d40a988ce16a189082dee6e570bc (diff)
parent		093e5840ae76f1082633503964d035f40ed0216d (diff)
Merge branch 'sched/urgent' into sched/core, to pick up fixes before merging new patches
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Makefile | 2
-rw-r--r--  drivers/acpi/Kconfig | 4
-rw-r--r--  drivers/acpi/nfit.c | 67
-rw-r--r--  drivers/acpi/nfit.h | 3
-rw-r--r--  drivers/acpi/pci_root.c | 7
-rw-r--r--  drivers/acpi/processor_driver.c | 3
-rw-r--r--  drivers/ata/ahci.c | 22
-rw-r--r--  drivers/ata/ahci_mvebu.c | 5
-rw-r--r--  drivers/ata/libahci.c | 9
-rw-r--r--  drivers/ata/libata-eh.c | 8
-rw-r--r--  drivers/ata/sata_fsl.c | 3
-rw-r--r--  drivers/ata/sata_sil.c | 3
-rw-r--r--  drivers/base/memory.c | 4
-rw-r--r--  drivers/base/power/domain.c | 36
-rw-r--r--  drivers/base/power/domain_governor.c | 3
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.c | 6
-rw-r--r--  drivers/block/null_blk.c | 314
-rw-r--r--  drivers/block/rbd.c | 1
-rw-r--r--  drivers/block/xen-blkback/blkback.c | 15
-rw-r--r--  drivers/block/xen-blkback/common.h | 8
-rw-r--r--  drivers/bus/omap-ocp2scp.c | 2
-rw-r--r--  drivers/bus/sunxi-rsb.c | 8
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c | 8
-rw-r--r--  drivers/clk/clk-gpio.c | 33
-rw-r--r--  drivers/clk/clk-qoriq.c | 4
-rw-r--r--  drivers/clk/clk-scpi.c | 1
-rw-r--r--  drivers/clk/imx/clk-pllv1.c | 14
-rw-r--r--  drivers/clk/imx/clk-pllv2.c | 9
-rw-r--r--  drivers/clk/imx/clk-vf610.c | 8
-rw-r--r--  drivers/clk/mmp/clk-mmp2.c | 1
-rw-r--r--  drivers/clk/mmp/clk-pxa168.c | 1
-rw-r--r--  drivers/clk/mmp/clk-pxa910.c | 1
-rw-r--r--  drivers/clk/sunxi/clk-a10-pll2.c | 23
-rw-r--r--  drivers/clk/ti/clk-816x.c | 2
-rw-r--r--  drivers/clk/ti/clkt_dpll.c | 4
-rw-r--r--  drivers/clk/ti/divider.c | 16
-rw-r--r--  drivers/clk/ti/fapll.c | 4
-rw-r--r--  drivers/clk/ti/mux.c | 15
-rw-r--r--  drivers/clocksource/mmio.c | 2
-rw-r--r--  drivers/cpufreq/Kconfig.arm | 4
-rw-r--r--  drivers/cpufreq/cppc_cpufreq.c | 3
-rw-r--r--  drivers/cpufreq/cpufreq.c | 21
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 6
-rw-r--r--  drivers/cpufreq/s3c24xx-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/scpi-cpufreq.c | 2
-rw-r--r--  drivers/crypto/nx/nx-aes-ccm.c | 2
-rw-r--r--  drivers/crypto/nx/nx-aes-gcm.c | 3
-rw-r--r--  drivers/crypto/talitos.c | 2
-rw-r--r--  drivers/dma/at_xdmac.c | 9
-rw-r--r--  drivers/dma/bcm2835-dma.c | 78
-rw-r--r--  drivers/dma/edma.c | 53
-rw-r--r--  drivers/dma/mic_x100_dma.c | 15
-rw-r--r--  drivers/fpga/fpga-mgr.c | 13
-rw-r--r--  drivers/gpio/gpio-74xx-mmio.c | 7
-rw-r--r--  drivers/gpio/gpio-ath79.c | 2
-rw-r--r--  drivers/gpio/gpio-generic.c | 4
-rw-r--r--  drivers/gpio/gpio-omap.c | 2
-rw-r--r--  drivers/gpio/gpio-palmas.c | 2
-rw-r--r--  drivers/gpio/gpio-syscon.c | 6
-rw-r--r--  drivers/gpio/gpio-tegra.c | 105
-rw-r--r--  drivers/gpio/gpiolib.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 67
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 108
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 27
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 48
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 40
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 24
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 127
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.h | 5
-rw-r--r--  drivers/gpu/drm/amd/scheduler/sched_fence.c | 13
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 5
-rw-r--r--  drivers/gpu/drm/drm_fops.c | 84
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 54
-rw-r--r--  drivers/gpu/drm/drm_probe_helper.c | 3
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 29
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 123
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_fence.c | 36
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 135
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 51
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 19
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 34
-rw-r--r--  drivers/gpu/drm/imx/imx-drm-core.c | 7
-rw-r--r--  drivers/gpu/drm/imx/imx-drm.h | 3
-rw-r--r--  drivers/gpu/drm/imx/imx-tve.c | 1
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-crtc.c | 63
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-plane.c | 9
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-plane.h | 2
-rw-r--r--  drivers/gpu/drm/imx/parallel-display.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/device.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 19
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_usif.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c | 19
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h | 344
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h | 344
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h | 344
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h | 308
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h | 474
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_fbdev.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/cik.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_agp.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 21
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 106
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq_kms.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 50
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vce.c | 100
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/rv730_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/rv770_dpm.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 5
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_gem.c | 1
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 43
-rw-r--r--  drivers/gpu/drm/ttm/ttm_lock.c | 2
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_display.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 64
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 7
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | 2
-rw-r--r--  drivers/gpu/ipu-v3/ipu-common.c | 69
-rw-r--r--  drivers/gpu/vga/vgaarb.c | 6
-rw-r--r--  drivers/hid/hid-ids.h | 6
-rw-r--r--  drivers/hid/hid-lg.c | 5
-rw-r--r--  drivers/hid/usbhid/hid-quirks.c | 10
-rw-r--r--  drivers/hwmon/Kconfig | 1
-rw-r--r--  drivers/hwmon/tmp102.c | 16
-rw-r--r--  drivers/i2c/busses/i2c-davinci.c | 11
-rw-r--r--  drivers/i2c/busses/i2c-designware-core.c | 6
-rw-r--r--  drivers/i2c/busses/i2c-designware-core.h | 1
-rw-r--r--  drivers/i2c/busses/i2c-designware-platdrv.c | 16
-rw-r--r--  drivers/i2c/busses/i2c-imx.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-mv64xxx.c | 27
-rw-r--r--  drivers/i2c/busses/i2c-rcar.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-rk3x.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-st.c | 2
-rw-r--r--  drivers/iio/adc/qcom-spmi-vadc.c | 4
-rw-r--r--  drivers/iio/industrialio-buffer.c | 2
-rw-r--r--  drivers/iio/industrialio-core.c | 2
-rw-r--r--  drivers/iio/light/apds9960.c | 1
-rw-r--r--  drivers/iio/proximity/pulsedlight-lidar-lite-v2.c | 6
-rw-r--r--  drivers/infiniband/core/cma.c | 21
-rw-r--r--  drivers/infiniband/core/mad.c | 5
-rw-r--r--  drivers/infiniband/core/sa_query.c | 32
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c | 27
-rw-r--r--  drivers/infiniband/core/verbs.c | 43
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c | 19
-rw-r--r--  drivers/infiniband/hw/mlx4/srq.c | 13
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c | 14
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma.h | 10
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_hw.c | 49
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_hw.h | 4
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_main.c | 57
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_sli.h | 49
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 2
-rw-r--r--  drivers/infiniband/hw/qib/qib_qsfp.c | 4
-rw-r--r--  drivers/infiniband/hw/qib/qib_verbs.h | 2
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c | 2
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c | 13
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c | 48
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.h | 5
-rw-r--r--  drivers/input/joystick/db9.c | 1
-rw-r--r--  drivers/input/joystick/gamecon.c | 1
-rw-r--r--  drivers/input/joystick/turbografx.c | 1
-rw-r--r--  drivers/input/joystick/walkera0701.c | 1
-rw-r--r--  drivers/input/misc/arizona-haptics.c | 3
-rw-r--r--  drivers/input/mouse/elan_i2c_core.c | 3
-rw-r--r--  drivers/input/serio/parkbd.c | 1
-rw-r--r--  drivers/input/tablet/aiptek.c | 9
-rw-r--r--  drivers/input/touchscreen/atmel_mxt_ts.c | 34
-rw-r--r--  drivers/input/touchscreen/elants_i2c.c | 21
-rw-r--r--  drivers/iommu/amd_iommu_v2.c | 20
-rw-r--r--  drivers/iommu/intel-iommu.c | 4
-rw-r--r--  drivers/iommu/intel-svm.c | 20
-rw-r--r--  drivers/iommu/iommu.c | 2
-rw-r--r--  drivers/irqchip/irq-versatile-fpga.c | 5
-rw-r--r--  drivers/isdn/gigaset/ser-gigaset.c | 23
-rw-r--r--  drivers/isdn/hardware/mISDN/mISDNipac.c | 7
-rw-r--r--  drivers/isdn/hisax/config.c | 2
-rw-r--r--  drivers/isdn/hisax/hfc_pci.c | 2
-rw-r--r--  drivers/isdn/hisax/hfc_sx.c | 2
-rw-r--r--  drivers/isdn/hisax/q931.c | 6
-rw-r--r--  drivers/lightnvm/Kconfig | 1
-rw-r--r--  drivers/lightnvm/core.c | 158
-rw-r--r--  drivers/lightnvm/gennvm.c | 105
-rw-r--r--  drivers/lightnvm/gennvm.h | 2
-rw-r--r--  drivers/lightnvm/rrpc.c | 57
-rw-r--r--  drivers/md/dm-crypt.c | 22
-rw-r--r--  drivers/md/dm-mpath.c | 30
-rw-r--r--  drivers/md/dm-thin-metadata.c | 34
-rw-r--r--  drivers/md/dm-thin.c | 6
-rw-r--r--  drivers/md/dm.c | 7
-rw-r--r--  drivers/md/md.c | 33
-rw-r--r--  drivers/md/md.h | 8
-rw-r--r--  drivers/md/persistent-data/dm-btree.c | 101
-rw-r--r--  drivers/md/persistent-data/dm-btree.h | 14
-rw-r--r--  drivers/md/persistent-data/dm-space-map-metadata.c | 32
-rw-r--r--  drivers/md/raid10.c | 4
-rw-r--r--  drivers/media/pci/ivtv/ivtv-driver.c | 4
-rw-r--r--  drivers/media/usb/airspy/airspy.c | 2
-rw-r--r--  drivers/media/usb/hackrf/hackrf.c | 13
-rw-r--r--  drivers/memory/fsl_ifc.c | 1
-rw-r--r--  drivers/misc/cxl/native.c | 2
-rw-r--r--  drivers/mtd/ofpart.c | 12
-rw-r--r--  drivers/mtd/ubi/debug.c | 2
-rw-r--r--  drivers/mtd/ubi/io.c | 2
-rw-r--r--  drivers/mtd/ubi/wl.c | 53
-rw-r--r--  drivers/net/can/bfin_can.c | 2
-rw-r--r--  drivers/net/can/c_can/c_can.c | 7
-rw-r--r--  drivers/net/can/cc770/cc770.c | 2
-rw-r--r--  drivers/net/can/flexcan.c | 4
-rw-r--r--  drivers/net/can/janz-ican3.c | 1
-rw-r--r--  drivers/net/can/m_can/m_can.c | 7
-rw-r--r--  drivers/net/can/pch_can.c | 3
-rw-r--r--  drivers/net/can/rcar_can.c | 11
-rw-r--r--  drivers/net/can/sja1000/sja1000.c | 4
-rw-r--r--  drivers/net/can/sun4i_can.c | 1
-rw-r--r--  drivers/net/can/ti_hecc.c | 7
-rw-r--r--  drivers/net/can/usb/ems_usb.c | 1
-rw-r--r--  drivers/net/can/usb/esd_usb2.c | 1
-rw-r--r--  drivers/net/can/usb/kvaser_usb.c | 5
-rw-r--r--  drivers/net/can/usb/usb_8dev.c | 4
-rw-r--r--  drivers/net/can/xilinx_can.c | 9
-rw-r--r--  drivers/net/ethernet/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/Makefile | 1
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 4
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 69
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.h | 4
-rw-r--r--  drivers/net/ethernet/atheros/alx/main.c | 2
-rw-r--r--  drivers/net/ethernet/atheros/alx/reg.h | 1
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 7
-rw-r--r--  drivers/net/ethernet/aurora/Kconfig | 21
-rw-r--r--  drivers/net/ethernet/aurora/Makefile | 1
-rw-r--r--  drivers/net/ethernet/aurora/nb8800.c | 1552
-rw-r--r--  drivers/net/ethernet/aurora/nb8800.h | 316
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 22
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 90
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 9
-rw-r--r--  drivers/net/ethernet/cadence/macb.c | 4
-rw-r--r--  drivers/net/ethernet/cadence/macb.h | 5
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic.h | 5
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic_main.c | 22
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c | 16
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c | 4
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 2
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.h | 2
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 28
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.h | 2
-rw-r--r--  drivers/net/ethernet/dec/tulip/tulip_core.c | 9
-rw-r--r--  drivers/net/ethernet/dec/tulip/winbond-840.c | 2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h | 2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 8
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_roce.c | 36
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_roce.h | 4
-rw-r--r--  drivers/net/ethernet/ezchip/nps_enet.c | 30
-rw-r--r--  drivers/net/ethernet/freescale/Kconfig | 3
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/mac-fcc.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/fsl_pq_mdio.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 14
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.h | 1
-rw-r--r--  drivers/net/ethernet/freescale/gianfar_ptp.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | 49
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h | 11
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.c | 6
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 11
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq.c | 6
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_main.c | 10
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 3
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 33
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2.c | 52
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cmd.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_clock.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_main.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 5
-rw-r--r--  drivers/net/ethernet/natsemi/natsemi.c | 12
-rw-r--r--  drivers/net/ethernet/nxp/lpc_eth.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed.h | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dev.c | 53
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_int.c | 33
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_int.h | 15
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_main.c | 56
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sp.h | 8
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_spq.c | 63
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c | 5
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_main.c | 5
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_spi.c | 5
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c | 11
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 80
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.h | 58
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c | 24
-rw-r--r--  drivers/net/ethernet/sfc/efx.h | 5
-rw-r--r--  drivers/net/ethernet/sfc/farch.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/txc43128_phy.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c | 13
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c | 6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 18
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 28
-rw-r--r--  drivers/net/ethernet/ti/cpsw-common.c | 3
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 63
-rw-r--r--  drivers/net/geneve.c | 12
-rw-r--r--  drivers/net/hamradio/6pack.c | 8
-rw-r--r--  drivers/net/hamradio/mkiss.c | 4
-rw-r--r--  drivers/net/macvtap.c | 4
-rw-r--r--  drivers/net/phy/broadcom.c | 2
-rw-r--r--  drivers/net/phy/mdio-mux.c | 7
-rw-r--r--  drivers/net/phy/micrel.c | 13
-rw-r--r--  drivers/net/phy/phy.c | 3
-rw-r--r--  drivers/net/ppp/pppoe.c | 14
-rw-r--r--  drivers/net/ppp/pptp.c | 6
-rw-r--r--  drivers/net/tun.c | 4
-rw-r--r--  drivers/net/usb/cdc_mbim.c | 28
-rw-r--r--  drivers/net/usb/cdc_ncm.c | 67
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 1
-rw-r--r--  drivers/net/usb/r8152.c | 21
-rw-r--r--  drivers/net/veth.c | 6
-rw-r--r--  drivers/net/virtio_net.c | 34
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c | 71
-rw-r--r--  drivers/net/vrf.c | 11
-rw-r--r--  drivers/net/vxlan.c | 75
-rw-r--r--  drivers/net/wan/hdlc_fr.c | 10
-rw-r--r--  drivers/net/wan/x25_asy.c | 6
-rw-r--r--  drivers/net/wireless/ath/ath10k/core.c | 49
-rw-r--r--  drivers/net/wireless/ath/ath10k/core.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath10k/hw.h | 17
-rw-r--r--  drivers/net/wireless/ath/ath10k/mac.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath10k/pci.c | 53
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-7000.c | 47
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-8000.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/d3.c | 8
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mac80211.c | 11
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/sta.c | 97
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/sta.h | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/drv.c | 19
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c | 2
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c | 2
-rw-r--r--  drivers/net/xen-netback/netback.c | 34
-rw-r--r--  drivers/nvme/host/Makefile | 3
-rw-r--r--  drivers/nvme/host/lightnvm.c | 187
-rw-r--r--  drivers/nvme/host/nvme.h | 14
-rw-r--r--  drivers/nvme/host/pci.c | 71
-rw-r--r--  drivers/of/address.c | 5
-rw-r--r--  drivers/of/fdt.c | 7
-rw-r--r--  drivers/of/irq.c | 3
-rw-r--r--  drivers/of/of_reserved_mem.c | 8
-rw-r--r--  drivers/parisc/iommu-helpers.h | 15
-rw-r--r--  drivers/pci/host/pcie-altera.c | 23
-rw-r--r--  drivers/pci/host/pcie-designware.c | 1
-rw-r--r--  drivers/pci/host/pcie-hisi.c | 8
-rw-r--r--  drivers/pci/msi.c | 4
-rw-r--r--  drivers/pci/pci-driver.c | 16
-rw-r--r--  drivers/pci/pci-sysfs.c | 5
-rw-r--r--  drivers/pci/pci.h | 2
-rw-r--r--  drivers/phy/Kconfig | 1
-rw-r--r--  drivers/phy/phy-bcm-cygnus-pcie.c | 16
-rw-r--r--  drivers/phy/phy-berlin-sata.c | 20
-rw-r--r--  drivers/phy/phy-brcmstb-sata.c | 17
-rw-r--r--  drivers/phy/phy-core.c | 21
-rw-r--r--  drivers/phy/phy-miphy28lp.c | 16
-rw-r--r--  drivers/phy/phy-miphy365x.c | 16
-rw-r--r--  drivers/phy/phy-mt65xx-usb3.c | 20
-rw-r--r--  drivers/phy/phy-rockchip-usb.c | 17
-rw-r--r--  drivers/pinctrl/Kconfig | 4
-rw-r--r--  drivers/pinctrl/bcm/pinctrl-bcm2835.c | 13
-rw-r--r--  drivers/pinctrl/freescale/pinctrl-imx1-core.c | 8
-rw-r--r--  drivers/pinctrl/freescale/pinctrl-vf610.c | 2
-rw-r--r--  drivers/pinctrl/intel/pinctrl-broxton.c | 1
-rw-r--r--  drivers/pinctrl/intel/pinctrl-intel.c | 41
-rw-r--r--  drivers/pinctrl/intel/pinctrl-intel.h | 3
-rw-r--r--  drivers/pinctrl/intel/pinctrl-sunrisepoint.c | 1
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-mtk-common.c | 11
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c | 2
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c | 2
-rw-r--r--  drivers/pinctrl/sh-pfc/pfc-sh7734.c | 6
-rw-r--r--  drivers/powercap/intel_rapl.c | 7
-rw-r--r--  drivers/remoteproc/remoteproc_core.c | 2
-rw-r--r--  drivers/remoteproc/remoteproc_debugfs.c | 2
-rw-r--r--  drivers/rtc/rtc-da9063.c | 19
-rw-r--r--  drivers/rtc/rtc-ds1307.c | 44
-rw-r--r--  drivers/rtc/rtc-rk808.c | 48
-rw-r--r--  drivers/s390/crypto/ap_bus.c | 4
-rw-r--r--  drivers/s390/virtio/virtio_ccw.c | 62
-rw-r--r--  drivers/scsi/Kconfig | 2
-rw-r--r--  drivers/scsi/advansys.c | 2
-rw-r--r--  drivers/scsi/hosts.c | 11
-rw-r--r--  drivers/scsi/hpsa.c | 2
-rw-r--r--  drivers/scsi/mpt3sas/Kconfig | 9
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_scsih.c | 3
-rw-r--r--  drivers/scsi/mvsas/mv_init.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.c | 3
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.c | 2
-rw-r--r--  drivers/scsi/scsi_debug.c | 9
-rw-r--r--  drivers/scsi/scsi_pm.c | 20
-rw-r--r--  drivers/scsi/scsi_scan.c | 9
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 22
-rw-r--r--  drivers/scsi/sd.c | 69
-rw-r--r--  drivers/scsi/sd.h | 1
-rw-r--r--  drivers/scsi/ses.c | 30
-rw-r--r--  drivers/scsi/st.c | 5
-rw-r--r--  drivers/soc/mediatek/Kconfig | 1
-rw-r--r--  drivers/soc/ti/knav_qmss_queue.c | 8
-rw-r--r--  drivers/spi/spi-bcm63xx.c | 4
-rw-r--r--  drivers/spi/spi-fsl-dspi.c | 12
-rw-r--r--  drivers/spi/spi-mt65xx.c | 26
-rw-r--r--  drivers/spi/spi-pl022.c | 28
-rw-r--r--  drivers/spi/spi.c | 4
-rw-r--r--  drivers/spi/spidev.c | 2
-rw-r--r--  drivers/staging/android/ion/ion_chunk_heap.c | 4
-rw-r--r--  drivers/staging/iio/iio_simple_dummy_events.c | 2
-rw-r--r--  drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h | 1
-rw-r--r--  drivers/staging/lustre/lustre/libcfs/module.c | 17
-rw-r--r--  drivers/staging/lustre/lustre/obdecho/echo_client.c | 20
-rw-r--r--  drivers/target/iscsi/iscsi_target.c | 13
-rw-r--r--  drivers/target/iscsi/iscsi_target_nego.c | 1
-rw-r--r--  drivers/target/iscsi/iscsi_target_parameters.c | 10
-rw-r--r--  drivers/target/target_core_sbc.c | 17
-rw-r--r--  drivers/target/target_core_stat.c | 2
-rw-r--r--  drivers/target/target_core_tmr.c | 7
-rw-r--r--  drivers/target/target_core_transport.c | 26
-rw-r--r--  drivers/target/target_core_user.c | 4
-rw-r--r--  drivers/thermal/Kconfig | 2
-rw-r--r--  drivers/thermal/imx_thermal.c | 56
-rw-r--r--  drivers/thermal/of-thermal.c | 2
-rw-r--r--  drivers/thermal/power_allocator.c | 24
-rw-r--r--  drivers/thermal/rcar_thermal.c | 49
-rw-r--r--  drivers/thermal/rockchip_thermal.c | 328
-rw-r--r--  drivers/tty/n_tty.c | 22
-rw-r--r--  drivers/tty/serial/8250/8250_uniphier.c | 8
-rw-r--r--  drivers/tty/serial/earlycon.c | 2
-rw-r--r--  drivers/tty/serial/sh-sci.c | 2
-rw-r--r--  drivers/tty/serial/sunhv.c | 12
-rw-r--r--  drivers/tty/tty_buffer.c | 2
-rw-r--r--  drivers/usb/class/cdc-acm.c | 5
-rw-r--r--  drivers/usb/core/config.c | 3
-rw-r--r--  drivers/usb/core/hub.c | 44
-rw-r--r--  drivers/usb/core/port.c | 4
-rw-r--r--  drivers/usb/core/quirks.c | 9
-rw-r--r--  drivers/usb/dwc2/platform.c | 81
-rw-r--r--  drivers/usb/dwc3/gadget.c | 1
-rw-r--r--  drivers/usb/gadget/function/f_fs.c | 6
-rw-r--r--  drivers/usb/gadget/function/f_midi.c | 3
-rw-r--r--  drivers/usb/gadget/function/uvc_configfs.c | 2
-rw-r--r--  drivers/usb/gadget/udc/pxa27x_udc.c | 3
-rw-r--r--  drivers/usb/host/ohci-at91.c | 11
-rw-r--r--  drivers/usb/host/whci/qset.c | 4
-rw-r--r--  drivers/usb/host/xhci-hub.c | 47
-rw-r--r--  drivers/usb/host/xhci-pci.c | 8
-rw-r--r--  drivers/usb/host/xhci-ring.c | 3
-rw-r--r--  drivers/usb/host/xhci.c | 8
-rw-r--r--  drivers/usb/musb/Kconfig | 2
-rw-r--r--  drivers/usb/musb/musb_core.c | 8
-rw-r--r--  drivers/usb/phy/phy-msm-usb.c | 6
-rw-r--r--  drivers/usb/phy/phy-mxs-usb.c | 5
-rw-r--r--  drivers/usb/renesas_usbhs/mod_gadget.c | 11
-rw-r--r--  drivers/usb/serial/cp210x.c | 1
-rw-r--r--  drivers/usb/serial/ipaq.c | 3
-rw-r--r--  drivers/usb/serial/usb-serial-simple.c | 1
-rw-r--r--  drivers/usb/storage/uas.c | 4
-rw-r--r--  drivers/usb/storage/unusual_devs.h | 2
-rw-r--r--  drivers/usb/storage/unusual_uas.h | 2
-rw-r--r--  drivers/vfio/Kconfig | 15
-rw-r--r--  drivers/vfio/pci/vfio_pci.c | 10
-rw-r--r--  drivers/vfio/platform/vfio_platform.c | 1
-rw-r--r--  drivers/vfio/platform/vfio_platform_common.c | 5
-rw-r--r--  drivers/vfio/vfio.c | 188
-rw-r--r--  drivers/vhost/vhost.c | 8
-rw-r--r--  drivers/video/fbdev/fsl-diu-fb.c | 13
-rw-r--r--  drivers/video/fbdev/omap2/dss/venc.c | 12
-rw-r--r--  drivers/virtio/virtio.c | 1
-rw-r--r--  drivers/virtio/virtio_ring.c | 48
-rw-r--r--  drivers/watchdog/Kconfig | 2
-rw-r--r--  drivers/watchdog/mtk_wdt.c | 1
-rw-r--r--  drivers/watchdog/omap_wdt.c | 2
-rw-r--r--  drivers/watchdog/pnx4008_wdt.c | 8
-rw-r--r--  drivers/watchdog/tegra_wdt.c | 4
-rw-r--r--  drivers/watchdog/w83977f_wdt.c | 2
-rw-r--r--  drivers/xen/events/events_base.c | 5
-rw-r--r--  drivers/xen/events/events_fifo.c | 23
-rw-r--r--  drivers/xen/evtchn.c | 123
-rw-r--r--  drivers/xen/gntdev.c | 2
-rw-r--r--  drivers/xen/xen-pciback/pciback.h | 1
-rw-r--r--  drivers/xen/xen-pciback/pciback_ops.c | 75
-rw-r--r--  drivers/xen/xen-pciback/xenbus.c | 4
-rw-r--r--  drivers/xen/xen-scsiback.c | 2
526 files changed, 8581 insertions(+), 4024 deletions(-)
diff --git a/drivers/Makefile b/drivers/Makefile
index 73d039156ea7..795d0ca714bf 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -63,6 +63,7 @@ obj-$(CONFIG_FB_I810) += video/fbdev/i810/
 obj-$(CONFIG_FB_INTEL)          += video/fbdev/intelfb/
 
 obj-$(CONFIG_PARPORT)           += parport/
+obj-$(CONFIG_NVM)               += lightnvm/
 obj-y                           += base/ block/ misc/ mfd/ nfc/
 obj-$(CONFIG_LIBNVDIMM)         += nvdimm/
 obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf/
@@ -70,7 +71,6 @@ obj-$(CONFIG_NUBUS) += nubus/
 obj-y                           += macintosh/
 obj-$(CONFIG_IDE)               += ide/
 obj-$(CONFIG_SCSI)              += scsi/
-obj-$(CONFIG_NVM)               += lightnvm/
 obj-y                           += nvme/
 obj-$(CONFIG_ATA)               += ata/
 obj-$(CONFIG_TARGET_CORE)       += target/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 25dbb76c02cc..5eef4cb4f70e 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -58,10 +58,10 @@ config ACPI_CCA_REQUIRED
 	bool
 
 config ACPI_DEBUGGER
-	bool "In-kernel debugger (EXPERIMENTAL)"
+	bool "AML debugger interface (EXPERIMENTAL)"
 	select ACPI_DEBUG
 	help
-	  Enable in-kernel debugging facilities: statistics, internal
+	  Enable in-kernel debugging of AML facilities: statistics, internal
 	  object dump, single step control method execution.
 	  This is still under development, currently enabling this only
 	  results in the compilation of the ACPICA debugger files.
diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
index f7dab53b352a..aa45d4802707 100644
--- a/drivers/acpi/nfit.c
+++ b/drivers/acpi/nfit.c
@@ -233,11 +233,12 @@ static bool add_spa(struct acpi_nfit_desc *acpi_desc,
 		struct nfit_table_prev *prev,
 		struct acpi_nfit_system_address *spa)
 {
+	size_t length = min_t(size_t, sizeof(*spa), spa->header.length);
 	struct device *dev = acpi_desc->dev;
 	struct nfit_spa *nfit_spa;
 
 	list_for_each_entry(nfit_spa, &prev->spas, list) {
-		if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) {
+		if (memcmp(nfit_spa->spa, spa, length) == 0) {
 			list_move_tail(&nfit_spa->list, &acpi_desc->spas);
 			return true;
 		}
@@ -259,11 +260,12 @@ static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
 		struct nfit_table_prev *prev,
 		struct acpi_nfit_memory_map *memdev)
 {
+	size_t length = min_t(size_t, sizeof(*memdev), memdev->header.length);
 	struct device *dev = acpi_desc->dev;
 	struct nfit_memdev *nfit_memdev;
 
 	list_for_each_entry(nfit_memdev, &prev->memdevs, list)
-		if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) {
+		if (memcmp(nfit_memdev->memdev, memdev, length) == 0) {
 			list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs);
 			return true;
 		}
@@ -284,11 +286,12 @@ static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
 		struct nfit_table_prev *prev,
 		struct acpi_nfit_control_region *dcr)
 {
+	size_t length = min_t(size_t, sizeof(*dcr), dcr->header.length);
 	struct device *dev = acpi_desc->dev;
 	struct nfit_dcr *nfit_dcr;
 
 	list_for_each_entry(nfit_dcr, &prev->dcrs, list)
-		if (memcmp(nfit_dcr->dcr, dcr, sizeof(*dcr)) == 0) {
+		if (memcmp(nfit_dcr->dcr, dcr, length) == 0) {
 			list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs);
 			return true;
 		}
@@ -308,11 +311,12 @@ static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
 		struct nfit_table_prev *prev,
 		struct acpi_nfit_data_region *bdw)
 {
+	size_t length = min_t(size_t, sizeof(*bdw), bdw->header.length);
 	struct device *dev = acpi_desc->dev;
 	struct nfit_bdw *nfit_bdw;
 
 	list_for_each_entry(nfit_bdw, &prev->bdws, list)
-		if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) {
+		if (memcmp(nfit_bdw->bdw, bdw, length) == 0) {
 			list_move_tail(&nfit_bdw->list, &acpi_desc->bdws);
 			return true;
 		}
@@ -332,11 +336,12 @@ static bool add_idt(struct acpi_nfit_desc *acpi_desc,
 		struct nfit_table_prev *prev,
 		struct acpi_nfit_interleave *idt)
 {
+	size_t length = min_t(size_t, sizeof(*idt), idt->header.length);
 	struct device *dev = acpi_desc->dev;
 	struct nfit_idt *nfit_idt;
 
 	list_for_each_entry(nfit_idt, &prev->idts, list)
-		if (memcmp(nfit_idt->idt, idt, sizeof(*idt)) == 0) {
+		if (memcmp(nfit_idt->idt, idt, length) == 0) {
 			list_move_tail(&nfit_idt->list, &acpi_desc->idts);
 			return true;
 		}
@@ -356,11 +361,12 @@ static bool add_flush(struct acpi_nfit_desc *acpi_desc,
 		struct nfit_table_prev *prev,
 		struct acpi_nfit_flush_address *flush)
 {
+	size_t length = min_t(size_t, sizeof(*flush), flush->header.length);
 	struct device *dev = acpi_desc->dev;
 	struct nfit_flush *nfit_flush;
 
 	list_for_each_entry(nfit_flush, &prev->flushes, list)
-		if (memcmp(nfit_flush->flush, flush, sizeof(*flush)) == 0) {
+		if (memcmp(nfit_flush->flush, flush, length) == 0) {
 			list_move_tail(&nfit_flush->list, &acpi_desc->flushes);
 			return true;
 		}
@@ -655,7 +661,7 @@ static ssize_t revision_show(struct device *dev,
 	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
 	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
 
-	return sprintf(buf, "%d\n", acpi_desc->nfit->header.revision);
+	return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision);
 }
 static DEVICE_ATTR_RO(revision);
 
@@ -1652,7 +1658,6 @@ int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz)
 
 	data = (u8 *) acpi_desc->nfit;
 	end = data + sz;
-	data += sizeof(struct acpi_table_nfit);
 	while (!IS_ERR_OR_NULL(data))
 		data = add_table(acpi_desc, &prev, data, end);
 
@@ -1748,13 +1753,29 @@ static int acpi_nfit_add(struct acpi_device *adev)
 		return PTR_ERR(acpi_desc);
 	}
 
-	acpi_desc->nfit = (struct acpi_table_nfit *) tbl;
+	/*
+	 * Save the acpi header for later and then skip it,
+	 * making nfit point to the first nfit table header.
+	 */
+	acpi_desc->acpi_header = *tbl;
+	acpi_desc->nfit = (void *) tbl + sizeof(struct acpi_table_nfit);
+	sz -= sizeof(struct acpi_table_nfit);
 
 	/* Evaluate _FIT and override with that if present */
 	status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
 	if (ACPI_SUCCESS(status) && buf.length > 0) {
-		acpi_desc->nfit = (struct acpi_table_nfit *)buf.pointer;
-		sz = buf.length;
+		union acpi_object *obj;
+		/*
+		 * Adjust for the acpi_object header of the _FIT
+		 */
+		obj = buf.pointer;
+		if (obj->type == ACPI_TYPE_BUFFER) {
+			acpi_desc->nfit =
+				(struct acpi_nfit_header *)obj->buffer.pointer;
+			sz = obj->buffer.length;
+		} else
+			dev_dbg(dev, "%s invalid type %d, ignoring _FIT\n",
+				__func__, (int) obj->type);
 	}
 
 	rc = acpi_nfit_init(acpi_desc, sz);
@@ -1777,7 +1798,8 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
 {
 	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev);
 	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
-	struct acpi_table_nfit *nfit_saved;
+	struct acpi_nfit_header *nfit_saved;
+	union acpi_object *obj;
 	struct device *dev = &adev->dev;
 	acpi_status status;
 	int ret;
@@ -1788,7 +1810,7 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
 	if (!dev->driver) {
 		/* dev->driver may be null if we're being removed */
 		dev_dbg(dev, "%s: no driver found for dev\n", __func__);
-		return;
+		goto out_unlock;
 	}
 
 	if (!acpi_desc) {
@@ -1808,12 +1830,19 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
 	}
 
 	nfit_saved = acpi_desc->nfit;
-	acpi_desc->nfit = (struct acpi_table_nfit *)buf.pointer;
-	ret = acpi_nfit_init(acpi_desc, buf.length);
-	if (!ret) {
-		/* Merge failed, restore old nfit, and exit */
-		acpi_desc->nfit = nfit_saved;
-		dev_err(dev, "failed to merge updated NFIT\n");
+	obj = buf.pointer;
+	if (obj->type == ACPI_TYPE_BUFFER) {
+		acpi_desc->nfit =
+			(struct acpi_nfit_header *)obj->buffer.pointer;
+		ret = acpi_nfit_init(acpi_desc, obj->buffer.length);
+		if (ret) {
+			/* Merge failed, restore old nfit, and exit */
+			acpi_desc->nfit = nfit_saved;
+			dev_err(dev, "failed to merge updated NFIT\n");
+		}
+	} else {
+		/* Bad _FIT, restore old nfit */
+		dev_err(dev, "Invalid _FIT\n");
 	}
 	kfree(buf.pointer);
 
diff --git a/drivers/acpi/nfit.h b/drivers/acpi/nfit.h
index 2ea5c0797c8f..3d549a383659 100644
--- a/drivers/acpi/nfit.h
+++ b/drivers/acpi/nfit.h
@@ -96,7 +96,8 @@ struct nfit_mem {
 
 struct acpi_nfit_desc {
 	struct nvdimm_bus_descriptor nd_desc;
-	struct acpi_table_nfit *nfit;
+	struct acpi_table_header acpi_header;
+	struct acpi_nfit_header *nfit;
 	struct mutex spa_map_mutex;
 	struct mutex init_mutex;
 	struct list_head spa_maps;
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 850d7bf0c873..ae3fe4e64203 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -768,6 +768,13 @@ static void pci_acpi_root_add_resources(struct acpi_pci_root_info *info)
 		else
 			continue;
 
+		/*
+		 * Some legacy x86 host bridge drivers use iomem_resource and
+		 * ioport_resource as default resource pool, skip it.
+		 */
+		if (res == root)
+			continue;
+
 		conflict = insert_resource_conflict(root, res);
 		if (conflict) {
 			dev_info(&info->bridge->dev,
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index f4e02ae93f58..11154a330f07 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -200,7 +200,8 @@ static int acpi_pss_perf_init(struct acpi_processor *pr,
 		goto err_remove_sysfs_thermal;
 	}
 
-	sysfs_remove_link(&pr->cdev->device.kobj, "device");
+	return 0;
+
 err_remove_sysfs_thermal:
 	sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
 err_thermal_unregister:
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index ff02bb4218fc..cdfbcc54821f 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -314,16 +314,6 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	{ PCI_VDEVICE(INTEL, 0x1f37), board_ahci_avn }, /* Avoton RAID */
 	{ PCI_VDEVICE(INTEL, 0x1f3e), board_ahci_avn }, /* Avoton RAID */
 	{ PCI_VDEVICE(INTEL, 0x1f3f), board_ahci_avn }, /* Avoton RAID */
-	{ PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
-	{ PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/
-	{ PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/
-	{ PCI_VDEVICE(INTEL, 0xa204), board_ahci }, /* Lewisburg RAID*/
-	{ PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
-	{ PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
-	{ PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
-	{ PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/
-	{ PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/
-	{ PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/
 	{ PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg RAID */
 	{ PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Wellsburg RAID */
 	{ PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */
@@ -350,10 +340,22 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	{ PCI_VDEVICE(INTEL, 0x9d03), board_ahci }, /* Sunrise Point-LP AHCI */
 	{ PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */
 	{ PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */
+	{ PCI_VDEVICE(INTEL, 0xa102), board_ahci }, /* Sunrise Point-H AHCI */
 	{ PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */
 	{ PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
+	{ PCI_VDEVICE(INTEL, 0xa106), board_ahci }, /* Sunrise Point-H RAID */
 	{ PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
 	{ PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
+	{ PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
+	{ PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/
+	{ PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
+	{ PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/
+	{ PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
+	{ PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/
+	{ PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/
+	{ PCI_VDEVICE(INTEL, 0xa204), board_ahci }, /* Lewisburg RAID*/
+	{ PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
+	{ PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/
 
 	/* JMicron 360/1/3/5/6, match class to avoid IDE function */
 	{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c
index 8490d37aee2a..f7a7fa81740e 100644
--- a/drivers/ata/ahci_mvebu.c
+++ b/drivers/ata/ahci_mvebu.c
@@ -62,6 +62,7 @@ static void ahci_mvebu_regret_option(struct ahci_host_priv *hpriv)
 	writel(0x80, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA);
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int ahci_mvebu_suspend(struct platform_device *pdev, pm_message_t state)
 {
 	return ahci_platform_suspend_host(&pdev->dev);
@@ -81,6 +82,10 @@ static int ahci_mvebu_resume(struct platform_device *pdev)
 
 	return ahci_platform_resume_host(&pdev->dev);
 }
+#else
+#define ahci_mvebu_suspend NULL
+#define ahci_mvebu_resume NULL
+#endif
 
 static const struct ata_port_info ahci_mvebu_port_info = {
 	.flags = AHCI_FLAG_COMMON,
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 096064cd6c52..4665512dae44 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -1273,6 +1273,15 @@ static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
 	ata_tf_to_fis(tf, pmp, is_cmd, fis);
 	ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
 
+	/* set port value for softreset of Port Multiplier */
+	if (pp->fbs_enabled && pp->fbs_last_dev != pmp) {
+		tmp = readl(port_mmio + PORT_FBS);
+		tmp &= ~(PORT_FBS_DEV_MASK | PORT_FBS_DEC);
+		tmp |= pmp << PORT_FBS_DEV_OFFSET;
+		writel(tmp, port_mmio + PORT_FBS);
+		pp->fbs_last_dev = pmp;
+	}
+
 	/* issue & wait */
 	writel(1, port_mmio + PORT_CMD_ISSUE);
 
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index cb0508af1459..961acc788f44 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1505,12 +1505,20 @@ static const char *ata_err_string(unsigned int err_mask)
 unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
 			       u8 page, void *buf, unsigned int sectors)
 {
+	unsigned long ap_flags = dev->link->ap->flags;
 	struct ata_taskfile tf;
 	unsigned int err_mask;
 	bool dma = false;
 
 	DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page);
 
+	/*
+	 * Return error without actually issuing the command on controllers
+	 * which e.g. lockup on a read log page.
+	 */
+	if (ap_flags & ATA_FLAG_NO_LOG_PAGE)
+		return AC_ERR_DEV;
+
 retry:
 	ata_tf_init(dev, &tf);
 	if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id) &&
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 5389579c5120..a723ae929783 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -45,7 +45,8 @@ enum {
 	SATA_FSL_MAX_PRD_DIRECT = 16,	/* Direct PRDT entries */
 
 	SATA_FSL_HOST_FLAGS	= (ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
-				ATA_FLAG_PMP | ATA_FLAG_NCQ | ATA_FLAG_AN),
+				ATA_FLAG_PMP | ATA_FLAG_NCQ |
+				ATA_FLAG_AN | ATA_FLAG_NO_LOG_PAGE),
 
 	SATA_FSL_MAX_CMDS	= SATA_FSL_QUEUE_DEPTH,
 	SATA_FSL_CMD_HDR_SIZE	= 16,	/* 4 DWORDS */
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index dea6edcbf145..29bcff086bce 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -630,6 +630,9 @@ static void sil_dev_config(struct ata_device *dev)
 	unsigned int n, quirks = 0;
 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
 
+	/* This controller doesn't support trim */
+	dev->horkage |= ATA_HORKAGE_NOTRIM;
+
 	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
 
 	for (n = 0; sil_blacklist[n].product; n++)
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 2804aed3f416..25425d3f2575 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -303,6 +303,10 @@ static int memory_subsys_offline(struct device *dev)
 	if (mem->state == MEM_OFFLINE)
 		return 0;
 
+	/* Can't offline block with non-present sections */
+	if (mem->section_count != sections_per_block)
+		return -EINVAL;
+
 	return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
 }
 
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index e03b1ad25a90..65f50eccd49b 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -390,6 +390,7 @@ static int pm_genpd_runtime_suspend(struct device *dev)
 	struct generic_pm_domain *genpd;
 	bool (*stop_ok)(struct device *__dev);
 	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
+	bool runtime_pm = pm_runtime_enabled(dev);
 	ktime_t time_start;
 	s64 elapsed_ns;
 	int ret;
@@ -400,12 +401,19 @@ static int pm_genpd_runtime_suspend(struct device *dev)
 	if (IS_ERR(genpd))
 		return -EINVAL;
 
+	/*
+	 * A runtime PM centric subsystem/driver may re-use the runtime PM
+	 * callbacks for other purposes than runtime PM. In those scenarios
+	 * runtime PM is disabled. Under these circumstances, we shall skip
+	 * validating/measuring the PM QoS latency.
+	 */
 	stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
-	if (stop_ok && !stop_ok(dev))
+	if (runtime_pm && stop_ok && !stop_ok(dev))
 		return -EBUSY;
 
 	/* Measure suspend latency. */
-	time_start = ktime_get();
+	if (runtime_pm)
+		time_start = ktime_get();
 
 	ret = genpd_save_dev(genpd, dev);
 	if (ret)
@@ -418,13 +426,15 @@ static int pm_genpd_runtime_suspend(struct device *dev)
 	}
 
 	/* Update suspend latency value if the measured time exceeds it. */
-	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
-	if (elapsed_ns > td->suspend_latency_ns) {
-		td->suspend_latency_ns = elapsed_ns;
-		dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
-			elapsed_ns);
-		genpd->max_off_time_changed = true;
-		td->constraint_changed = true;
+	if (runtime_pm) {
+		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
+		if (elapsed_ns > td->suspend_latency_ns) {
+			td->suspend_latency_ns = elapsed_ns;
+			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
+				elapsed_ns);
+			genpd->max_off_time_changed = true;
+			td->constraint_changed = true;
+		}
 	}
 
 	/*
@@ -453,6 +463,7 @@ static int pm_genpd_runtime_resume(struct device *dev)
 {
 	struct generic_pm_domain *genpd;
 	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
+	bool runtime_pm = pm_runtime_enabled(dev);
 	ktime_t time_start;
 	s64 elapsed_ns;
 	int ret;
@@ -479,14 +490,14 @@ static int pm_genpd_runtime_resume(struct device *dev)
 
  out:
 	/* Measure resume latency. */
-	if (timed)
+	if (timed && runtime_pm)
 		time_start = ktime_get();
 
 	genpd_start_dev(genpd, dev);
 	genpd_restore_dev(genpd, dev);
 
 	/* Update resume latency value if the measured time exceeds it. */
-	if (timed) {
+	if (timed && runtime_pm) {
 		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
 		if (elapsed_ns > td->resume_latency_ns) {
 			td->resume_latency_ns = elapsed_ns;
@@ -1775,10 +1786,10 @@ int genpd_dev_pm_attach(struct device *dev)
 	}
 
 	pd = of_genpd_get_from_provider(&pd_args);
+	of_node_put(pd_args.np);
 	if (IS_ERR(pd)) {
 		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
 			__func__, PTR_ERR(pd));
-		of_node_put(dev->of_node);
 		return -EPROBE_DEFER;
 	}
 
@@ -1796,7 +1807,6 @@ int genpd_dev_pm_attach(struct device *dev)
 	if (ret < 0) {
 		dev_err(dev, "failed to add to PM domain %s: %d",
 			pd->name, ret);
-		of_node_put(dev->of_node);
 		goto out;
 	}
 
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index e60dd12e23aa..1e937ac5f456 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -160,9 +160,6 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
 		struct gpd_timing_data *td;
 		s64 constraint_ns;
 
-		if (!pdd->dev->driver)
-			continue;
-
 		/*
 		 * Check if the device is allowed to be off long enough for the
 		 * domain to turn off and on (that's how much time it will
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index a28a562f7b7f..3457ac8c03e2 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3810,7 +3810,6 @@ static int mtip_block_initialize(struct driver_data *dd)
 	sector_t capacity;
 	unsigned int index = 0;
 	struct kobject *kobj;
-	unsigned char thd_name[16];
 
 	if (dd->disk)
 		goto skip_create_disk; /* hw init done, before rebuild */
@@ -3958,10 +3957,9 @@ skip_create_disk:
 	}
 
 start_service_thread:
-	sprintf(thd_name, "mtip_svc_thd_%02d", index);
 	dd->mtip_svc_handler = kthread_create_on_node(mtip_service_thread,
-						dd, dd->numa_node, "%s",
-						thd_name);
+						dd, dd->numa_node,
+						"mtip_svc_thd_%02d", index);
 
 	if (IS_ERR(dd->mtip_svc_handler)) {
 		dev_err(&dd->pdev->dev, "service thread failed to start\n");
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 6255d1c4bba4..09e3c0d87ecc 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -8,6 +8,7 @@
 #include <linux/slab.h>
 #include <linux/blk-mq.h>
 #include <linux/hrtimer.h>
+#include <linux/lightnvm.h>
 
 struct nullb_cmd {
 	struct list_head list;
@@ -17,6 +18,7 @@ struct nullb_cmd {
 	struct bio *bio;
 	unsigned int tag;
 	struct nullb_queue *nq;
+	struct hrtimer timer;
 };
 
 struct nullb_queue {
@@ -39,23 +41,14 @@ struct nullb {
 
 	struct nullb_queue *queues;
 	unsigned int nr_queues;
+	char disk_name[DISK_NAME_LEN];
 };
 
 static LIST_HEAD(nullb_list);
 static struct mutex lock;
 static int null_major;
 static int nullb_indexes;
-
-struct completion_queue {
-	struct llist_head list;
-	struct hrtimer timer;
-};
-
-/*
- * These are per-cpu for now, they will need to be configured by the
- * complete_queues parameter and appropriately mapped.
- */
-static DEFINE_PER_CPU(struct completion_queue, completion_queues);
+static struct kmem_cache *ppa_cache;
 
 enum {
 	NULL_IRQ_NONE	= 0,
@@ -119,6 +112,10 @@ static int nr_devices = 2;
 module_param(nr_devices, int, S_IRUGO);
 MODULE_PARM_DESC(nr_devices, "Number of devices to register");
 
+static bool use_lightnvm;
+module_param(use_lightnvm, bool, S_IRUGO);
+MODULE_PARM_DESC(use_lightnvm, "Register as a LightNVM device");
+
 static int irqmode = NULL_IRQ_SOFTIRQ;
 
 static int null_set_irqmode(const char *str, const struct kernel_param *kp)
@@ -135,8 +132,8 @@ static const struct kernel_param_ops null_irqmode_param_ops = {
 device_param_cb(irqmode, &null_irqmode_param_ops, &irqmode, S_IRUGO);
 MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");
 
-static int completion_nsec = 10000;
-module_param(completion_nsec, int, S_IRUGO);
+static unsigned long completion_nsec = 10000;
+module_param(completion_nsec, ulong, S_IRUGO);
 MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");
 
 static int hw_queue_depth = 64;
@@ -173,6 +170,8 @@ static void free_cmd(struct nullb_cmd *cmd)
 	put_tag(cmd->nq, cmd->tag);
 }
 
+static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer);
+
 static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
 {
 	struct nullb_cmd *cmd;
@@ -183,6 +182,11 @@ static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
 		cmd = &nq->cmds[tag];
 		cmd->tag = tag;
 		cmd->nq = nq;
+		if (irqmode == NULL_IRQ_TIMER) {
+			hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
+				     HRTIMER_MODE_REL);
+			cmd->timer.function = null_cmd_timer_expired;
+		}
 		return cmd;
 	}
 
@@ -213,6 +217,11 @@ static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
 
 static void end_cmd(struct nullb_cmd *cmd)
 {
+	struct request_queue *q = NULL;
+
+	if (cmd->rq)
+		q = cmd->rq->q;
+
 	switch (queue_mode) {
 	case NULL_Q_MQ:
 		blk_mq_end_request(cmd->rq, 0);
@@ -227,51 +236,29 @@ static void end_cmd(struct nullb_cmd *cmd)
 	}
 
 	free_cmd(cmd);
+
+	/* Restart queue if needed, as we are freeing a tag */
+	if (queue_mode == NULL_Q_RQ && blk_queue_stopped(q)) {
+		unsigned long flags;
+
+		spin_lock_irqsave(q->queue_lock, flags);
+		blk_start_queue_async(q);
+		spin_unlock_irqrestore(q->queue_lock, flags);
+	}
 }
 
 static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
 {
-	struct completion_queue *cq;
-	struct llist_node *entry;
-	struct nullb_cmd *cmd;
-
-	cq = &per_cpu(completion_queues, smp_processor_id());
-
-	while ((entry = llist_del_all(&cq->list)) != NULL) {
-		entry = llist_reverse_order(entry);
-		do {
-			struct request_queue *q = NULL;
-
-			cmd = container_of(entry, struct nullb_cmd, ll_list);
-			entry = entry->next;
-			if (cmd->rq)
-				q = cmd->rq->q;
-			end_cmd(cmd);
-
-			if (q && !q->mq_ops && blk_queue_stopped(q)) {
-				spin_lock(q->queue_lock);
-				if (blk_queue_stopped(q))
-					blk_start_queue(q);
-				spin_unlock(q->queue_lock);
-			}
-		} while (entry);
-	}
+	end_cmd(container_of(timer, struct nullb_cmd, timer));
 
 	return HRTIMER_NORESTART;
 }
 
 static void null_cmd_end_timer(struct nullb_cmd *cmd)
 {
-	struct completion_queue *cq = &per_cpu(completion_queues, get_cpu());
+	ktime_t kt = ktime_set(0, completion_nsec);
 
-	cmd->ll_list.next = NULL;
-	if (llist_add(&cmd->ll_list, &cq->list)) {
-		ktime_t kt = ktime_set(0, completion_nsec);
-
-		hrtimer_start(&cq->timer, kt, HRTIMER_MODE_REL_PINNED);
-	}
-
-	put_cpu();
+	hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
 }
 
 static void null_softirq_done_fn(struct request *rq)
@@ -369,6 +356,10 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx,
 {
 	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
 
+	if (irqmode == NULL_IRQ_TIMER) {
+		hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+		cmd->timer.function = null_cmd_timer_expired;
+	}
 	cmd->rq = bd->rq;
 	cmd->nq = hctx->driver_data;
 
@@ -427,15 +418,157 @@ static void null_del_dev(struct nullb *nullb)
 {
 	list_del_init(&nullb->list);
 
-	del_gendisk(nullb->disk);
+	if (use_lightnvm)
+		nvm_unregister(nullb->disk_name);
+	else
+		del_gendisk(nullb->disk);
 	blk_cleanup_queue(nullb->q);
 	if (queue_mode == NULL_Q_MQ)
 		blk_mq_free_tag_set(&nullb->tag_set);
-	put_disk(nullb->disk);
+	if (!use_lightnvm)
+		put_disk(nullb->disk);
 	cleanup_queues(nullb);
 	kfree(nullb);
 }
 
+#ifdef CONFIG_NVM
+
+static void null_lnvm_end_io(struct request *rq, int error)
+{
+	struct nvm_rq *rqd = rq->end_io_data;
+	struct nvm_dev *dev = rqd->dev;
+
+	dev->mt->end_io(rqd, error);
+
+	blk_put_request(rq);
+}
+
+static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
+{
+	struct request_queue *q = dev->q;
+	struct request *rq;
+	struct bio *bio = rqd->bio;
+
+	rq = blk_mq_alloc_request(q, bio_rw(bio), GFP_KERNEL, 0);
+	if (IS_ERR(rq))
+		return -ENOMEM;
+
+	rq->cmd_type = REQ_TYPE_DRV_PRIV;
+	rq->__sector = bio->bi_iter.bi_sector;
+	rq->ioprio = bio_prio(bio);
+
+	if (bio_has_data(bio))
+		rq->nr_phys_segments = bio_phys_segments(q, bio);
+
+	rq->__data_len = bio->bi_iter.bi_size;
+	rq->bio = rq->biotail = bio;
+
+	rq->end_io_data = rqd;
+
+	blk_execute_rq_nowait(q, NULL, rq, 0, null_lnvm_end_io);
+
+	return 0;
+}
+
+static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
+{
+	sector_t size = gb * 1024 * 1024 * 1024ULL;
+	sector_t blksize;
+	struct nvm_id_group *grp;
+
+	id->ver_id = 0x1;
+	id->vmnt = 0;
+	id->cgrps = 1;
+	id->cap = 0x3;
+	id->dom = 0x1;
+
+	id->ppaf.blk_offset = 0;
+	id->ppaf.blk_len = 16;
+	id->ppaf.pg_offset = 16;
+	id->ppaf.pg_len = 16;
+	id->ppaf.sect_offset = 32;
+	id->ppaf.sect_len = 8;
+	id->ppaf.pln_offset = 40;
+	id->ppaf.pln_len = 8;
+	id->ppaf.lun_offset = 48;
+	id->ppaf.lun_len = 8;
+	id->ppaf.ch_offset = 56;
+	id->ppaf.ch_len = 8;
+
+	do_div(size, bs); /* convert size to pages */
+	do_div(size, 256); /* convert size to pgs pr blk */
+	grp = &id->groups[0];
+	grp->mtype = 0;
+	grp->fmtype = 0;
+	grp->num_ch = 1;
+	grp->num_pg = 256;
+	blksize = size;
+	do_div(size, (1 << 16));
+	grp->num_lun = size + 1;
+	do_div(blksize, grp->num_lun);
+	grp->num_blk = blksize;
+	grp->num_pln = 1;
+
+	grp->fpg_sz = bs;
+	grp->csecs = bs;
+	grp->trdt = 25000;
+	grp->trdm = 25000;
+	grp->tprt = 500000;
+	grp->tprm = 500000;
+	grp->tbet = 1500000;
+	grp->tbem = 1500000;
+	grp->mpos = 0x010101; /* single plane rwe */
+	grp->cpar = hw_queue_depth;
+
+	return 0;
+}
+
+static void *null_lnvm_create_dma_pool(struct nvm_dev *dev, char *name)
+{
+	mempool_t *virtmem_pool;
+
+	virtmem_pool = mempool_create_slab_pool(64, ppa_cache);
+	if (!virtmem_pool) {
+		pr_err("null_blk: Unable to create virtual memory pool\n");
+		return NULL;
+	}
+
+	return virtmem_pool;
+}
+
+static void null_lnvm_destroy_dma_pool(void *pool)
+{
+	mempool_destroy(pool);
+}
+
+static void *null_lnvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
+				gfp_t mem_flags, dma_addr_t *dma_handler)
+{
+	return mempool_alloc(pool, mem_flags);
+}
+
+static void null_lnvm_dev_dma_free(void *pool, void *entry,
+							dma_addr_t dma_handler)
+{
+	mempool_free(entry, pool);
+}
+
+static struct nvm_dev_ops null_lnvm_dev_ops = {
+	.identity		= null_lnvm_id,
+	.submit_io		= null_lnvm_submit_io,
+
+	.create_dma_pool	= null_lnvm_create_dma_pool,
+	.destroy_dma_pool	= null_lnvm_destroy_dma_pool,
+	.dev_dma_alloc		= null_lnvm_dev_dma_alloc,
+	.dev_dma_free		= null_lnvm_dev_dma_free,
+
+	/* Simulate nvme protocol restriction */
+	.max_phys_sect		= 64,
+};
+#else
+static struct nvm_dev_ops null_lnvm_dev_ops;
+#endif /* CONFIG_NVM */
+
 static int null_open(struct block_device *bdev, fmode_t mode)
 {
 	return 0;
@@ -575,11 +708,6 @@ static int null_add_dev(void)
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
 	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);
 
-	disk = nullb->disk = alloc_disk_node(1, home_node);
-	if (!disk) {
-		rv = -ENOMEM;
-		goto out_cleanup_blk_queue;
-	}
 
 	mutex_lock(&lock);
 	list_add_tail(&nullb->list, &nullb_list);
@@ -589,6 +717,21 @@ static int null_add_dev(void)
 	blk_queue_logical_block_size(nullb->q, bs);
 	blk_queue_physical_block_size(nullb->q, bs);
 
+	sprintf(nullb->disk_name, "nullb%d", nullb->index);
+
+	if (use_lightnvm) {
+		rv = nvm_register(nullb->q, nullb->disk_name,
+							&null_lnvm_dev_ops);
+		if (rv)
+			goto out_cleanup_blk_queue;
+		goto done;
+	}
+
+	disk = nullb->disk = alloc_disk_node(1, home_node);
+	if (!disk) {
+		rv = -ENOMEM;
+		goto out_cleanup_lightnvm;
+	}
 	size = gb * 1024 * 1024 * 1024ULL;
 	set_capacity(disk, size >> 9);
 
@@ -598,10 +741,15 @@ static int null_add_dev(void)
 	disk->fops		= &null_fops;
 	disk->private_data	= nullb;
 	disk->queue		= nullb->q;
-	sprintf(disk->disk_name, "nullb%d", nullb->index);
+	strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
+
 	add_disk(disk);
+done:
 	return 0;
 
+out_cleanup_lightnvm:
+	if (use_lightnvm)
+		nvm_unregister(nullb->disk_name);
 out_cleanup_blk_queue:
 	blk_cleanup_queue(nullb->q);
 out_cleanup_tags:
@@ -617,7 +765,9 @@ out:
 
 static int __init null_init(void)
 {
+	int ret = 0;
 	unsigned int i;
+	struct nullb *nullb;
 
 	if (bs > PAGE_SIZE) {
 		pr_warn("null_blk: invalid block size\n");
@@ -625,6 +775,18 @@ static int __init null_init(void)
 		bs = PAGE_SIZE;
 	}
 
+	if (use_lightnvm && bs != 4096) {
+		pr_warn("null_blk: LightNVM only supports 4k block size\n");
+		pr_warn("null_blk: defaults block size to 4k\n");
+		bs = 4096;
+	}
+
+	if (use_lightnvm && queue_mode != NULL_Q_MQ) {
+		pr_warn("null_blk: LightNVM only supported for blk-mq\n");
+		pr_warn("null_blk: defaults queue mode to blk-mq\n");
+		queue_mode = NULL_Q_MQ;
+	}
+
 	if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
 		if (submit_queues < nr_online_nodes) {
 			pr_warn("null_blk: submit_queues param is set to %u.",
@@ -638,32 +800,38 @@ static int __init null_init(void)
 
 	mutex_init(&lock);
 
-	/* Initialize a separate list for each CPU for issuing softirqs */
-	for_each_possible_cpu(i) {
-		struct completion_queue *cq = &per_cpu(completion_queues, i);
-
-		init_llist_head(&cq->list);
-
-		if (irqmode != NULL_IRQ_TIMER)
-			continue;
-
-		hrtimer_init(&cq->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-		cq->timer.function = null_cmd_timer_expired;
-	}
-
 	null_major = register_blkdev(0, "nullb");
 	if (null_major < 0)
 		return null_major;
 
-	for (i = 0; i < nr_devices; i++) {
-		if (null_add_dev()) {
-			unregister_blkdev(null_major, "nullb");
-			return -EINVAL;
+	if (use_lightnvm) {
+		ppa_cache = kmem_cache_create("ppa_cache", 64 * sizeof(u64),
+								0, 0, NULL);
+		if (!ppa_cache) {
+			pr_err("null_blk: unable to create ppa cache\n");
+			ret = -ENOMEM;
+			goto err_ppa;
 		}
 	}
 
+	for (i = 0; i < nr_devices; i++) {
+		ret = null_add_dev();
+		if (ret)
+			goto err_dev;
+	}
+
	pr_info("null: module loaded\n");
 	return 0;
+
+err_dev:
+	while (!list_empty(&nullb_list)) {
+		nullb = list_entry(nullb_list.next, struct nullb, list);
+		null_del_dev(nullb);
+	}
+	kmem_cache_destroy(ppa_cache);
+err_ppa:
+	unregister_blkdev(null_major, "nullb");
+	return ret;
 }
 
 static void __exit null_exit(void)
@@ -678,6 +846,8 @@ static void __exit null_exit(void)
 		null_del_dev(nullb);
 	}
 	mutex_unlock(&lock);
+
+	kmem_cache_destroy(ppa_cache);
 }
 
 module_init(null_init);
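The null_blk rework above replaces the shared per-CPU completion machinery (one hrtimer draining an llist per CPU) with an hrtimer embedded in each command; the callback recovers the command with container_of(). A minimal sketch of that idiom, with names shortened for illustration (cmd, complete_cmd and delay_ns are hypothetical):

	struct cmd {
		struct hrtimer timer;	/* embedded, one per command */
		/* ... */
	};

	static enum hrtimer_restart cmd_timer_fn(struct hrtimer *t)
	{
		struct cmd *cmd = container_of(t, struct cmd, timer);

		complete_cmd(cmd);	/* hypothetical completion helper */
		return HRTIMER_NORESTART;
	}

	/* arming: one relative timer per command, no shared state */
	hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	cmd->timer.function = cmd_timer_fn;
	hrtimer_start(&cmd->timer, ktime_set(0, delay_ns), HRTIMER_MODE_REL);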
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 235708c7c46e..81ea69fee7ca 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3442,6 +3442,7 @@ static void rbd_queue_workfn(struct work_struct *work)
 		goto err_rq;
 	}
 	img_request->rq = rq;
+	snapc = NULL; /* img_request consumes a ref */
 
 	if (op_type == OBJ_OP_DISCARD)
 		result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA,
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index f9099940c272..41fb1a917b17 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -950,6 +950,8 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
 		goto unmap;
 
 	for (n = 0, i = 0; n < nseg; n++) {
+		uint8_t first_sect, last_sect;
+
 		if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
 			/* Map indirect segments */
 			if (segments)
@@ -957,15 +959,18 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
 			segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
 		}
 		i = n % SEGS_PER_INDIRECT_FRAME;
+
 		pending_req->segments[n]->gref = segments[i].gref;
-		seg[n].nsec = segments[i].last_sect -
-			segments[i].first_sect + 1;
-		seg[n].offset = (segments[i].first_sect << 9);
-		if ((segments[i].last_sect >= (XEN_PAGE_SIZE >> 9)) ||
-		    (segments[i].last_sect < segments[i].first_sect)) {
+
+		first_sect = READ_ONCE(segments[i].first_sect);
+		last_sect = READ_ONCE(segments[i].last_sect);
+		if (last_sect >= (XEN_PAGE_SIZE >> 9) || last_sect < first_sect) {
 			rc = -EINVAL;
 			goto unmap;
 		}
+
+		seg[n].nsec = last_sect - first_sect + 1;
+		seg[n].offset = first_sect << 9;
 		preq->nr_sects += seg[n].nsec;
 	}
 
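The blkback fix reads first_sect and last_sect from the shared ring exactly once before validating them; without READ_ONCE() the compiler may re-load the values after the bounds check, letting a malicious frontend change them in between (a classic double-fetch). A minimal sketch of the pattern:

	/* BAD: two loads of guest-writable memory; the second may differ. */
	if (shared->last_sect < shared->first_sect)
		return -EINVAL;
	nsec = shared->last_sect - shared->first_sect + 1;

	/* GOOD: snapshot once, then validate and use only the snapshot. */
	first = READ_ONCE(shared->first_sect);
	last = READ_ONCE(shared->last_sect);
	if (last >= (XEN_PAGE_SIZE >> 9) || last < first)
		return -EINVAL;
	nsec = last - first + 1;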
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 68e87a037b99..c929ae22764c 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -408,8 +408,8 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
 					struct blkif_x86_32_request *src)
 {
 	int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
-	dst->operation = src->operation;
-	switch (src->operation) {
+	dst->operation = READ_ONCE(src->operation);
+	switch (dst->operation) {
 	case BLKIF_OP_READ:
 	case BLKIF_OP_WRITE:
 	case BLKIF_OP_WRITE_BARRIER:
@@ -456,8 +456,8 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
 					struct blkif_x86_64_request *src)
 {
 	int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
-	dst->operation = src->operation;
-	switch (src->operation) {
+	dst->operation = READ_ONCE(src->operation);
+	switch (dst->operation) {
 	case BLKIF_OP_READ:
 	case BLKIF_OP_WRITE:
 	case BLKIF_OP_WRITE_BARRIER:
diff --git a/drivers/bus/omap-ocp2scp.c b/drivers/bus/omap-ocp2scp.c
index 9f1856948758..bf500e0e7362 100644
--- a/drivers/bus/omap-ocp2scp.c
+++ b/drivers/bus/omap-ocp2scp.c
@@ -117,7 +117,7 @@ static struct platform_driver omap_ocp2scp_driver = {
 
 module_platform_driver(omap_ocp2scp_driver);
 
-MODULE_ALIAS("platform: omap-ocp2scp");
+MODULE_ALIAS("platform:omap-ocp2scp");
 MODULE_AUTHOR("Texas Instruments Inc.");
 MODULE_DESCRIPTION("OMAP OCP2SCP driver");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
index 846bc29c157d..25996e256110 100644
--- a/drivers/bus/sunxi-rsb.c
+++ b/drivers/bus/sunxi-rsb.c
@@ -342,13 +342,13 @@ static int sunxi_rsb_read(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr,
 
 	ret = _sunxi_rsb_run_xfer(rsb);
 	if (ret)
-		goto out;
+		goto unlock;
 
 	*buf = readl(rsb->regs + RSB_DATA);
 
+unlock:
 	mutex_unlock(&rsb->lock);
 
-out:
 	return ret;
 }
 
@@ -527,9 +527,9 @@ static int sunxi_rsb_init_device_mode(struct sunxi_rsb *rsb)
  */
 
 static const struct sunxi_rsb_addr_map sunxi_rsb_addr_maps[] = {
-	{ 0x3e3, 0x2d }, /* Primary PMIC: AXP223, AXP809, AXP81X, ... */
+	{ 0x3a3, 0x2d }, /* Primary PMIC: AXP223, AXP809, AXP81X, ... */
 	{ 0x745, 0x3a }, /* Secondary PMIC: AXP806, ... */
-	{ 0xe89, 0x45 }, /* Peripheral IC: AC100, ... */
+	{ 0xe89, 0x4e }, /* Peripheral IC: AC100, ... */
 };
 
 static u8 sunxi_rsb_get_rtaddr(u16 hwaddr)
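The first sunxi-rsb hunk is a lock-balance fix: the failure path previously jumped past mutex_unlock(), leaving rsb->lock held forever. Ordering the labels so every exit passes through the unlock is the usual cure; a minimal sketch (do_transfer and read_result are hypothetical helpers):

	mutex_lock(&dev->lock);

	ret = do_transfer(dev);
	if (ret)
		goto unlock;		/* error path still unlocks */

	*buf = read_result(dev);

	unlock:
	mutex_unlock(&dev->lock);
	return ret;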
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 55fe9020459f..4cc72fa017c7 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -1230,14 +1230,14 @@ static int smi_start_processing(void *send_info,
 
 	new_smi->intf = intf;
 
-	/* Try to claim any interrupts. */
-	if (new_smi->irq_setup)
-		new_smi->irq_setup(new_smi);
-
 	/* Set up the timer that drives the interface. */
 	setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
 	smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
 
+	/* Try to claim any interrupts. */
+	if (new_smi->irq_setup)
+		new_smi->irq_setup(new_smi);
+
 	/*
 	 * Check if the user forcefully enabled the daemon.
 	 */
diff --git a/drivers/clk/clk-gpio.c b/drivers/clk/clk-gpio.c
index 10819e248414..335322dc403f 100644
--- a/drivers/clk/clk-gpio.c
+++ b/drivers/clk/clk-gpio.c
@@ -209,6 +209,8 @@ EXPORT_SYMBOL_GPL(clk_register_gpio_mux);
 
 struct clk_gpio_delayed_register_data {
 	const char *gpio_name;
+	int num_parents;
+	const char **parent_names;
 	struct device_node *node;
 	struct mutex lock;
 	struct clk *clk;
@@ -222,8 +224,6 @@ static struct clk *of_clk_gpio_delayed_register_get(
 {
 	struct clk_gpio_delayed_register_data *data = _data;
 	struct clk *clk;
-	const char **parent_names;
-	int i, num_parents;
 	int gpio;
 	enum of_gpio_flags of_flags;
 
@@ -248,26 +248,14 @@ static struct clk *of_clk_gpio_delayed_register_get(
 		return ERR_PTR(gpio);
 	}
 
-	num_parents = of_clk_get_parent_count(data->node);
-
-	parent_names = kcalloc(num_parents, sizeof(char *), GFP_KERNEL);
-	if (!parent_names) {
-		clk = ERR_PTR(-ENOMEM);
-		goto out;
-	}
-
-	for (i = 0; i < num_parents; i++)
-		parent_names[i] = of_clk_get_parent_name(data->node, i);
-
-	clk = data->clk_register_get(data->node->name, parent_names,
-			num_parents, gpio, of_flags & OF_GPIO_ACTIVE_LOW);
+	clk = data->clk_register_get(data->node->name, data->parent_names,
+			data->num_parents, gpio, of_flags & OF_GPIO_ACTIVE_LOW);
 	if (IS_ERR(clk))
 		goto out;
 
 	data->clk = clk;
 out:
 	mutex_unlock(&data->lock);
-	kfree(parent_names);
 
 	return clk;
 }
@@ -296,11 +284,24 @@ static void __init of_gpio_clk_setup(struct device_node *node,
 			unsigned gpio, bool active_low))
 {
 	struct clk_gpio_delayed_register_data *data;
+	const char **parent_names;
+	int i, num_parents;
 
 	data = kzalloc(sizeof(*data), GFP_KERNEL);
 	if (!data)
 		return;
 
+	num_parents = of_clk_get_parent_count(node);
+
+	parent_names = kcalloc(num_parents, sizeof(char *), GFP_KERNEL);
+	if (!parent_names)
+		return;
+
+	for (i = 0; i < num_parents; i++)
+		parent_names[i] = of_clk_get_parent_name(node, i);
+
+	data->num_parents = num_parents;
+	data->parent_names = parent_names;
 	data->node = node;
 	data->gpio_name = gpio_name;
 	data->clk_register_get = clk_register_get;
diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
index 1ab0fb81c6a0..7bc1c4527ae4 100644
--- a/drivers/clk/clk-qoriq.c
+++ b/drivers/clk/clk-qoriq.c
@@ -778,8 +778,10 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
 	 */
 	clksel = (cg_in(cg, hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT;
 	div = get_pll_div(cg, hwc, clksel);
-	if (!div)
+	if (!div) {
+		kfree(hwc);
 		return NULL;
+	}
 
 	pct80_rate = clk_get_rate(div->clk);
 	pct80_rate *= 8;
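The clk-qoriq hunk plugs a leak: hwc was allocated earlier in create_one_cmux() and was simply dropped when get_pll_div() failed. Every allocation made before an early return needs a matching free on that path; a minimal sketch:

	hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
	if (!hwc)
		return NULL;

	div = get_pll_div(cg, hwc, clksel);
	if (!div) {
		kfree(hwc);		/* don't leak on the failure path */
		return NULL;
	}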
diff --git a/drivers/clk/clk-scpi.c b/drivers/clk/clk-scpi.c
index 0b501a9fef92..cd0f2726f5e0 100644
--- a/drivers/clk/clk-scpi.c
+++ b/drivers/clk/clk-scpi.c
@@ -292,6 +292,7 @@ static int scpi_clocks_probe(struct platform_device *pdev)
 		ret = scpi_clk_add(dev, child, match);
 		if (ret) {
 			scpi_clocks_remove(pdev);
+			of_node_put(child);
 			return ret;
 		}
 	}
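The device-tree child iterators take a reference on each child node and drop it when advancing; breaking out of the loop early leaves the current child's refcount elevated, so the error path must drop it by hand. A minimal sketch of the idiom (add_one is a hypothetical helper):

	struct device_node *child;

	for_each_available_child_of_node(np, child) {
		ret = add_one(dev, child);
		if (ret) {
			of_node_put(child);	/* balance the iterator's ref */
			return ret;
		}
	}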
diff --git a/drivers/clk/imx/clk-pllv1.c b/drivers/clk/imx/clk-pllv1.c
index 8564e4342c7d..82fe3662b5f6 100644
--- a/drivers/clk/imx/clk-pllv1.c
+++ b/drivers/clk/imx/clk-pllv1.c
@@ -52,7 +52,7 @@ static unsigned long clk_pllv1_recalc_rate(struct clk_hw *hw,
 		unsigned long parent_rate)
 {
 	struct clk_pllv1 *pll = to_clk_pllv1(hw);
-	long long ll;
+	unsigned long long ull;
 	int mfn_abs;
 	unsigned int mfi, mfn, mfd, pd;
 	u32 reg;
@@ -94,16 +94,16 @@ static unsigned long clk_pllv1_recalc_rate(struct clk_hw *hw,
 	rate = parent_rate * 2;
 	rate /= pd + 1;
 
-	ll = (unsigned long long)rate * mfn_abs;
+	ull = (unsigned long long)rate * mfn_abs;
 
-	do_div(ll, mfd + 1);
+	do_div(ull, mfd + 1);
 
 	if (mfn_is_negative(pll, mfn))
-		ll = -ll;
+		ull = (rate * mfi) - ull;
+	else
+		ull = (rate * mfi) + ull;
 
-	ll = (rate * mfi) + ll;
-
-	return ll;
+	return ull;
 }
 
 static struct clk_ops clk_pllv1_ops = {
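The pllv1 conversion matters because do_div() is only defined for unsigned 64-bit dividends; handing it a negative long long misbehaves on 32-bit targets. Keeping the accumulator unsigned and folding the sign into an explicit add or subtract avoids that. A minimal sketch of the shape of the computation (variable names are illustrative):

	u64 acc;

	acc = (u64)rate * mfn_abs;	/* magnitude only, always >= 0 */
	do_div(acc, mfd + 1);		/* do_div requires an unsigned u64 */

	if (mfn_negative)
		acc = (u64)rate * mfi - acc;	/* apply the sign explicitly */
	else
		acc = (u64)rate * mfi + acc;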
diff --git a/drivers/clk/imx/clk-pllv2.c b/drivers/clk/imx/clk-pllv2.c
index b18f875eac6a..4aeda56ce372 100644
--- a/drivers/clk/imx/clk-pllv2.c
+++ b/drivers/clk/imx/clk-pllv2.c
@@ -79,7 +79,7 @@ static unsigned long __clk_pllv2_recalc_rate(unsigned long parent_rate,
 {
 	long mfi, mfn, mfd, pdf, ref_clk;
 	unsigned long dbl;
-	s64 temp;
+	u64 temp;
 
 	dbl = dp_ctl & MXC_PLL_DP_CTL_DPDCK0_2_EN;
 
@@ -98,8 +98,9 @@ static unsigned long __clk_pllv2_recalc_rate(unsigned long parent_rate,
 	temp = (u64) ref_clk * abs(mfn);
 	do_div(temp, mfd + 1);
 	if (mfn < 0)
-		temp = -temp;
-	temp = (ref_clk * mfi) + temp;
+		temp = (ref_clk * mfi) - temp;
+	else
+		temp = (ref_clk * mfi) + temp;
 
 	return temp;
 }
@@ -126,7 +127,7 @@ static int __clk_pllv2_set_rate(unsigned long rate, unsigned long parent_rate,
 {
 	u32 reg;
 	long mfi, pdf, mfn, mfd = 999999;
-	s64 temp64;
+	u64 temp64;
 	unsigned long quad_parent_rate;
 
 	quad_parent_rate = 4 * parent_rate;
diff --git a/drivers/clk/imx/clk-vf610.c b/drivers/clk/imx/clk-vf610.c
index d1b1c95177bb..0a94d9661d91 100644
--- a/drivers/clk/imx/clk-vf610.c
+++ b/drivers/clk/imx/clk-vf610.c
@@ -335,22 +335,22 @@ static void __init vf610_clocks_init(struct device_node *ccm_node)
 	clk[VF610_CLK_SAI0_SEL] = imx_clk_mux("sai0_sel", CCM_CSCMR1, 0, 2, sai_sels, 4);
 	clk[VF610_CLK_SAI0_EN] = imx_clk_gate("sai0_en", "sai0_sel", CCM_CSCDR1, 16);
 	clk[VF610_CLK_SAI0_DIV] = imx_clk_divider("sai0_div", "sai0_en", CCM_CSCDR1, 0, 4);
-	clk[VF610_CLK_SAI0] = imx_clk_gate2("sai0", "sai0_div", CCM_CCGR0, CCM_CCGRx_CGn(15));
+	clk[VF610_CLK_SAI0] = imx_clk_gate2("sai0", "ipg_bus", CCM_CCGR0, CCM_CCGRx_CGn(15));
 
 	clk[VF610_CLK_SAI1_SEL] = imx_clk_mux("sai1_sel", CCM_CSCMR1, 2, 2, sai_sels, 4);
 	clk[VF610_CLK_SAI1_EN] = imx_clk_gate("sai1_en", "sai1_sel", CCM_CSCDR1, 17);
 	clk[VF610_CLK_SAI1_DIV] = imx_clk_divider("sai1_div", "sai1_en", CCM_CSCDR1, 4, 4);
-	clk[VF610_CLK_SAI1] = imx_clk_gate2("sai1", "sai1_div", CCM_CCGR1, CCM_CCGRx_CGn(0));
+	clk[VF610_CLK_SAI1] = imx_clk_gate2("sai1", "ipg_bus", CCM_CCGR1, CCM_CCGRx_CGn(0));
 
 	clk[VF610_CLK_SAI2_SEL] = imx_clk_mux("sai2_sel", CCM_CSCMR1, 4, 2, sai_sels, 4);
 	clk[VF610_CLK_SAI2_EN] = imx_clk_gate("sai2_en", "sai2_sel", CCM_CSCDR1, 18);
 	clk[VF610_CLK_SAI2_DIV] = imx_clk_divider("sai2_div", "sai2_en", CCM_CSCDR1, 8, 4);
-	clk[VF610_CLK_SAI2] = imx_clk_gate2("sai2", "sai2_div", CCM_CCGR1, CCM_CCGRx_CGn(1));
+	clk[VF610_CLK_SAI2] = imx_clk_gate2("sai2", "ipg_bus", CCM_CCGR1, CCM_CCGRx_CGn(1));
 
 	clk[VF610_CLK_SAI3_SEL] = imx_clk_mux("sai3_sel", CCM_CSCMR1, 6, 2, sai_sels, 4);
 	clk[VF610_CLK_SAI3_EN] = imx_clk_gate("sai3_en", "sai3_sel", CCM_CSCDR1, 19);
 	clk[VF610_CLK_SAI3_DIV] = imx_clk_divider("sai3_div", "sai3_en", CCM_CSCDR1, 12, 4);
-	clk[VF610_CLK_SAI3] = imx_clk_gate2("sai3", "sai3_div", CCM_CCGR1, CCM_CCGRx_CGn(2));
+	clk[VF610_CLK_SAI3] = imx_clk_gate2("sai3", "ipg_bus", CCM_CCGR1, CCM_CCGRx_CGn(2));
 
 	clk[VF610_CLK_NFC_SEL] = imx_clk_mux("nfc_sel", CCM_CSCMR1, 12, 2, nfc_sels, 4);
 	clk[VF610_CLK_NFC_EN] = imx_clk_gate("nfc_en", "nfc_sel", CCM_CSCDR2, 9);
diff --git a/drivers/clk/mmp/clk-mmp2.c b/drivers/clk/mmp/clk-mmp2.c
index 09d2832fbd78..71fd29348f28 100644
--- a/drivers/clk/mmp/clk-mmp2.c
+++ b/drivers/clk/mmp/clk-mmp2.c
@@ -9,6 +9,7 @@
  * warranty of any kind, whether express or implied.
  */
 
+#include <linux/clk.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/spinlock.h>
diff --git a/drivers/clk/mmp/clk-pxa168.c b/drivers/clk/mmp/clk-pxa168.c
index 93e967c0f972..75244915df05 100644
--- a/drivers/clk/mmp/clk-pxa168.c
+++ b/drivers/clk/mmp/clk-pxa168.c
@@ -9,6 +9,7 @@
  * warranty of any kind, whether express or implied.
  */
 
+#include <linux/clk.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/spinlock.h>
diff --git a/drivers/clk/mmp/clk-pxa910.c b/drivers/clk/mmp/clk-pxa910.c
index 993abcdb32cc..37ba04ba1368 100644
--- a/drivers/clk/mmp/clk-pxa910.c
+++ b/drivers/clk/mmp/clk-pxa910.c
@@ -9,6 +9,7 @@
  * warranty of any kind, whether express or implied.
  */
 
+#include <linux/clk.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/spinlock.h>
diff --git a/drivers/clk/sunxi/clk-a10-pll2.c b/drivers/clk/sunxi/clk-a10-pll2.c
index 5484c31ec568..0ee1f363e4be 100644
--- a/drivers/clk/sunxi/clk-a10-pll2.c
+++ b/drivers/clk/sunxi/clk-a10-pll2.c
@@ -41,15 +41,10 @@
 
 #define SUN4I_PLL2_OUTPUTS		4
 
-struct sun4i_pll2_data {
-	u32	post_div_offset;
-	u32	pre_div_flags;
-};
-
 static DEFINE_SPINLOCK(sun4i_a10_pll2_lock);
 
 static void __init sun4i_pll2_setup(struct device_node *node,
-				    struct sun4i_pll2_data *data)
+				    int post_div_offset)
 {
 	const char *clk_name = node->name, *parent;
 	struct clk **clks, *base_clk, *prediv_clk;
@@ -76,7 +71,7 @@ static void __init sun4i_pll2_setup(struct device_node *node,
 					  parent, 0, reg,
 					  SUN4I_PLL2_PRE_DIV_SHIFT,
 					  SUN4I_PLL2_PRE_DIV_WIDTH,
-					  data->pre_div_flags,
+					  CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
 					  &sun4i_a10_pll2_lock);
 	if (!prediv_clk) {
 		pr_err("Couldn't register the prediv clock\n");
@@ -127,7 +122,7 @@ static void __init sun4i_pll2_setup(struct device_node *node,
 	 */
 	val = readl(reg);
 	val &= ~(SUN4I_PLL2_POST_DIV_MASK << SUN4I_PLL2_POST_DIV_SHIFT);
-	val |= (SUN4I_PLL2_POST_DIV_VALUE - data->post_div_offset) << SUN4I_PLL2_POST_DIV_SHIFT;
+	val |= (SUN4I_PLL2_POST_DIV_VALUE - post_div_offset) << SUN4I_PLL2_POST_DIV_SHIFT;
 	writel(val, reg);
 
 	of_property_read_string_index(node, "clock-output-names",
@@ -191,25 +186,17 @@ err_unmap:
 	iounmap(reg);
 }
 
-static struct sun4i_pll2_data sun4i_a10_pll2_data = {
-	.pre_div_flags	= CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
-};
-
 static void __init sun4i_a10_pll2_setup(struct device_node *node)
 {
-	sun4i_pll2_setup(node, &sun4i_a10_pll2_data);
+	sun4i_pll2_setup(node, 0);
 }
 
 CLK_OF_DECLARE(sun4i_a10_pll2, "allwinner,sun4i-a10-pll2-clk",
 	       sun4i_a10_pll2_setup);
 
-static struct sun4i_pll2_data sun5i_a13_pll2_data = {
-	.post_div_offset	= 1,
-};
-
 static void __init sun5i_a13_pll2_setup(struct device_node *node)
 {
-	sun4i_pll2_setup(node, &sun5i_a13_pll2_data);
+	sun4i_pll2_setup(node, 1);
 }
 
 CLK_OF_DECLARE(sun5i_a13_pll2, "allwinner,sun5i-a13-pll2-clk",
diff --git a/drivers/clk/ti/clk-816x.c b/drivers/clk/ti/clk-816x.c
index 1dfad0c712cd..2a5d84fdddc5 100644
--- a/drivers/clk/ti/clk-816x.c
+++ b/drivers/clk/ti/clk-816x.c
@@ -20,6 +20,8 @@ static struct ti_dt_clk dm816x_clks[] = {
 	DT_CLK(NULL, "sys_clkin", "sys_clkin_ck"),
 	DT_CLK(NULL, "timer_sys_ck", "sys_clkin_ck"),
 	DT_CLK(NULL, "sys_32k_ck", "sys_32k_ck"),
+	DT_CLK(NULL, "timer_32k_ck", "sysclk18_ck"),
+	DT_CLK(NULL, "timer_ext_ck", "tclkin_ck"),
 	DT_CLK(NULL, "mpu_ck", "mpu_ck"),
 	DT_CLK(NULL, "timer1_fck", "timer1_fck"),
 	DT_CLK(NULL, "timer2_fck", "timer2_fck"),
diff --git a/drivers/clk/ti/clkt_dpll.c b/drivers/clk/ti/clkt_dpll.c
index 9023ca9caf84..b5cc6f66ae5d 100644
--- a/drivers/clk/ti/clkt_dpll.c
+++ b/drivers/clk/ti/clkt_dpll.c
@@ -240,7 +240,7 @@ u8 omap2_init_dpll_parent(struct clk_hw *hw)
  */
 unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk)
 {
-	long long dpll_clk;
+	u64 dpll_clk;
 	u32 dpll_mult, dpll_div, v;
 	struct dpll_data *dd;
 
@@ -262,7 +262,7 @@ unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk)
 	dpll_div = v & dd->div1_mask;
 	dpll_div >>= __ffs(dd->div1_mask);
 
-	dpll_clk = (long long)clk_get_rate(dd->clk_ref) * dpll_mult;
+	dpll_clk = (u64)clk_get_rate(dd->clk_ref) * dpll_mult;
 	do_div(dpll_clk, dpll_div + 1);
 
 	return dpll_clk;
diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
index 5b1726829e6d..df2558350fc1 100644
--- a/drivers/clk/ti/divider.c
+++ b/drivers/clk/ti/divider.c
@@ -214,7 +214,6 @@ static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
 {
 	struct clk_divider *divider;
 	unsigned int div, value;
-	unsigned long flags = 0;
 	u32 val;
 
 	if (!hw || !rate)
@@ -228,9 +227,6 @@ static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
 	if (value > div_mask(divider))
 		value = div_mask(divider);
 
-	if (divider->lock)
-		spin_lock_irqsave(divider->lock, flags);
-
 	if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
 		val = div_mask(divider) << (divider->shift + 16);
 	} else {
@@ -240,9 +236,6 @@ static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
 	val |= value << divider->shift;
 	ti_clk_ll_ops->clk_writel(val, divider->reg);
 
-	if (divider->lock)
-		spin_unlock_irqrestore(divider->lock, flags);
-
 	return 0;
 }
 
@@ -256,8 +249,7 @@ static struct clk *_register_divider(struct device *dev, const char *name,
 				     const char *parent_name,
 				     unsigned long flags, void __iomem *reg,
 				     u8 shift, u8 width, u8 clk_divider_flags,
-				     const struct clk_div_table *table,
-				     spinlock_t *lock)
+				     const struct clk_div_table *table)
 {
 	struct clk_divider *div;
 	struct clk *clk;
@@ -288,7 +280,6 @@ static struct clk *_register_divider(struct device *dev, const char *name,
 	div->shift = shift;
 	div->width = width;
 	div->flags = clk_divider_flags;
-	div->lock = lock;
 	div->hw.init = &init;
 	div->table = table;
 
@@ -421,7 +412,7 @@ struct clk *ti_clk_register_divider(struct ti_clk *setup)
 
 	clk = _register_divider(NULL, setup->name, div->parent,
 				flags, (void __iomem *)reg, div->bit_shift,
-				width, div_flags, table, NULL);
+				width, div_flags, table);
 
 	if (IS_ERR(clk))
 		kfree(table);
@@ -584,8 +575,7 @@ static void __init of_ti_divider_clk_setup(struct device_node *node)
 		goto cleanup;
 
 	clk = _register_divider(NULL, node->name, parent_name, flags, reg,
-				shift, width, clk_divider_flags, table,
-				NULL);
+				shift, width, clk_divider_flags, table);
 
 	if (!IS_ERR(clk)) {
 		of_clk_add_provider(node, of_clk_src_simple_get, clk);
diff --git a/drivers/clk/ti/fapll.c b/drivers/clk/ti/fapll.c
index f4b2e9888bdf..66a0d0ed8b55 100644
--- a/drivers/clk/ti/fapll.c
+++ b/drivers/clk/ti/fapll.c
@@ -168,7 +168,7 @@ static unsigned long ti_fapll_recalc_rate(struct clk_hw *hw,
 {
 	struct fapll_data *fd = to_fapll(hw);
 	u32 fapll_n, fapll_p, v;
-	long long rate;
+	u64 rate;
 
 	if (ti_fapll_clock_is_bypass(fd))
 		return parent_rate;
@@ -314,7 +314,7 @@ static unsigned long ti_fapll_synth_recalc_rate(struct clk_hw *hw,
 {
 	struct fapll_synth *synth = to_synth(hw);
 	u32 synth_div_m;
-	long long rate;
+	u64 rate;
 
 	/* The audio_pll_clk1 is hardwired to produce 32.768KiHz clock */
 	if (!synth->div)
diff --git a/drivers/clk/ti/mux.c b/drivers/clk/ti/mux.c
index 69f08a1d047d..dab9ba88b9d6 100644
--- a/drivers/clk/ti/mux.c
+++ b/drivers/clk/ti/mux.c
@@ -69,7 +69,6 @@ static int ti_clk_mux_set_parent(struct clk_hw *hw, u8 index)
 {
 	struct clk_mux *mux = to_clk_mux(hw);
 	u32 val;
-	unsigned long flags = 0;
 
 	if (mux->table) {
 		index = mux->table[index];
@@ -81,9 +80,6 @@ static int ti_clk_mux_set_parent(struct clk_hw *hw, u8 index)
 			index++;
 	}
 
-	if (mux->lock)
-		spin_lock_irqsave(mux->lock, flags);
-
 	if (mux->flags & CLK_MUX_HIWORD_MASK) {
 		val = mux->mask << (mux->shift + 16);
 	} else {
@@ -93,9 +89,6 @@ static int ti_clk_mux_set_parent(struct clk_hw *hw, u8 index)
 	val |= index << mux->shift;
 	ti_clk_ll_ops->clk_writel(val, mux->reg);
 
-	if (mux->lock)
-		spin_unlock_irqrestore(mux->lock, flags);
-
 	return 0;
 }
 
@@ -109,7 +102,7 @@ static struct clk *_register_mux(struct device *dev, const char *name,
 				 const char **parent_names, u8 num_parents,
 				 unsigned long flags, void __iomem *reg,
 				 u8 shift, u32 mask, u8 clk_mux_flags,
-				 u32 *table, spinlock_t *lock)
+				 u32 *table)
 {
 	struct clk_mux *mux;
 	struct clk *clk;
@@ -133,7 +126,6 @@ static struct clk *_register_mux(struct device *dev, const char *name,
 	mux->shift = shift;
 	mux->mask = mask;
 	mux->flags = clk_mux_flags;
-	mux->lock = lock;
 	mux->table = table;
 	mux->hw.init = &init;
 
@@ -175,7 +167,7 @@ struct clk *ti_clk_register_mux(struct ti_clk *setup)
 
 	return _register_mux(NULL, setup->name, mux->parents, mux->num_parents,
 			     flags, (void __iomem *)reg, mux->bit_shift, mask,
-			     mux_flags, NULL, NULL);
+			     mux_flags, NULL);
 }
 
 /**
@@ -227,8 +219,7 @@ static void of_mux_clk_setup(struct device_node *node)
 	mask = (1 << fls(mask)) - 1;
 
 	clk = _register_mux(NULL, node->name, parent_names, num_parents,
-			    flags, reg, shift, mask, clk_mux_flags, NULL,
-			    NULL);
+			    flags, reg, shift, mask, clk_mux_flags, NULL);
 
 	if (!IS_ERR(clk))
 		of_clk_add_provider(node, of_clk_src_simple_get, clk);
diff --git a/drivers/clocksource/mmio.c b/drivers/clocksource/mmio.c
index 1593ade2a815..c4f7d7a9b689 100644
--- a/drivers/clocksource/mmio.c
+++ b/drivers/clocksource/mmio.c
@@ -55,7 +55,7 @@ int __init clocksource_mmio_init(void __iomem *base, const char *name,
 {
 	struct clocksource_mmio *cs;
 
-	if (bits > 32 || bits < 16)
+	if (bits > 64 || bits < 16)
 		return -EINVAL;
 
 	cs = kzalloc(sizeof(struct clocksource_mmio), GFP_KERNEL);
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 8014c2307332..b1f8a73e5a94 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -202,7 +202,7 @@ config ARM_SA1110_CPUFREQ
 
 config ARM_SCPI_CPUFREQ
 	tristate "SCPI based CPUfreq driver"
-	depends on ARM_BIG_LITTLE_CPUFREQ && ARM_SCPI_PROTOCOL
+	depends on ARM_BIG_LITTLE_CPUFREQ && ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI
	help
	  This adds the CPUfreq driver support for ARM big.LITTLE platforms
	  using SCPI protocol for CPU power management.
@@ -226,7 +226,7 @@ config ARM_TEGRA20_CPUFREQ
 
 config ARM_TEGRA124_CPUFREQ
 	tristate "Tegra124 CPUFreq support"
-	depends on ARCH_TEGRA && CPUFREQ_DT
+	depends on ARCH_TEGRA && CPUFREQ_DT && REGULATOR
 	default y
 	help
 	  This adds the CPUFreq driver support for Tegra124 SOCs.
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index e8cb334094b0..7c0bdfb1a2ca 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -98,10 +98,11 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	policy->max = cpu->perf_caps.highest_perf;
 	policy->cpuinfo.min_freq = policy->min;
 	policy->cpuinfo.max_freq = policy->max;
+	policy->shared_type = cpu->shared_type;
 
 	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
 		cpumask_copy(policy->cpus, cpu->shared_cpu_map);
-	else {
+	else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) {
 		/* Support only SW_ANY for now. */
 		pr_debug("Unsupported CPU co-ord type\n");
 		return -EFAULT;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 7c48e7316d91..8412ce5f93a7 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -976,10 +976,14 @@ static int cpufreq_init_policy(struct cpufreq_policy *policy)
 
 	new_policy.governor = gov;
 
-	/* Use the default policy if its valid. */
-	if (cpufreq_driver->setpolicy)
-		cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);
-
+	/* Use the default policy if there is no last_policy. */
+	if (cpufreq_driver->setpolicy) {
+		if (policy->last_policy)
+			new_policy.policy = policy->last_policy;
+		else
+			cpufreq_parse_governor(gov->name, &new_policy.policy,
+					       NULL);
+	}
 	/* set default policy */
 	return cpufreq_set_policy(policy, &new_policy);
 }
@@ -1330,6 +1334,8 @@ static void cpufreq_offline_prepare(unsigned int cpu)
 		if (has_target())
 			strncpy(policy->last_governor, policy->governor->name,
 				CPUFREQ_NAME_LEN);
+		else
+			policy->last_policy = policy->policy;
 	} else if (cpu == policy->cpu) {
 		/* Nominate new CPU */
 		policy->cpu = cpumask_any(policy->cpus);
@@ -1401,13 +1407,10 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
 	}
 
 	cpumask_clear_cpu(cpu, policy->real_cpus);
+	remove_cpu_dev_symlink(policy, cpu);
 
-	if (cpumask_empty(policy->real_cpus)) {
+	if (cpumask_empty(policy->real_cpus))
 		cpufreq_policy_free(policy, true);
-		return;
-	}
-
-	remove_cpu_dev_symlink(policy, cpu);
 }
 
 static void handle_update(struct work_struct *work)
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 001a532e342e..98fb8821382d 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1101,6 +1101,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 	    policy->max >= policy->cpuinfo.max_freq) {
 		pr_debug("intel_pstate: set performance\n");
 		limits = &performance_limits;
+		if (hwp_active)
+			intel_pstate_hwp_set();
 		return 0;
 	}
 
@@ -1108,7 +1110,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 	limits = &powersave_limits;
 	limits->min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
 	limits->min_policy_pct = clamp_t(int, limits->min_policy_pct, 0 , 100);
-	limits->max_policy_pct = (policy->max * 100) / policy->cpuinfo.max_freq;
+	limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
+					      policy->cpuinfo.max_freq);
 	limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0 , 100);
 
 	/* Normalize user input to [min_policy_pct, max_policy_pct] */
@@ -1120,6 +1123,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 				   limits->max_sysfs_pct);
 	limits->max_perf_pct = max(limits->min_policy_pct,
 				   limits->max_perf_pct);
+	limits->max_perf = round_up(limits->max_perf, FRAC_BITS);
 
 	/* Make sure min_perf_pct <= max_perf_pct */
 	limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);
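Switching max_policy_pct to DIV_ROUND_UP() rounds the percentage up instead of truncating, so a requested maximum frequency is never silently lowered by integer division. DIV_ROUND_UP(n, d) expands to (n + d - 1) / d; a worked example with illustrative frequencies:

	/* policy->max = 3301000 kHz, cpuinfo.max_freq = 3400000 kHz */
	(3301000 * 100) / 3400000;		/* truncates to 97 */
	DIV_ROUND_UP(3301000 * 100, 3400000);	/* rounds up to 98 */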
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
index 733aa5153e74..68ef8fd9482f 100644
--- a/drivers/cpufreq/s3c24xx-cpufreq.c
+++ b/drivers/cpufreq/s3c24xx-cpufreq.c
@@ -648,7 +648,7 @@ late_initcall(s3c_cpufreq_initcall);
  *
  * Register the given set of PLLs with the system.
  */
-int __init s3c_plltab_register(struct cpufreq_frequency_table *plls,
+int s3c_plltab_register(struct cpufreq_frequency_table *plls,
 			       unsigned int plls_no)
 {
 	struct cpufreq_frequency_table *vals;
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index 2c3b16fd3a01..de5e89b2eaaa 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -31,7 +31,7 @@ static struct scpi_ops *scpi_ops;
 
 static struct scpi_dvfs_info *scpi_get_dvfs_info(struct device *cpu_dev)
 {
-	u8 domain = topology_physical_package_id(cpu_dev->id);
+	int domain = topology_physical_package_id(cpu_dev->id);
 
 	if (domain < 0)
 		return ERR_PTR(-EINVAL);
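The scpi-cpufreq fix is a type bug: topology_physical_package_id() can return -1, but storing that in a u8 wraps it to 255, so the domain < 0 check could never fire (most compilers warn that the comparison is always false). A minimal standalone illustration:

	unsigned char u = -1;	/* wraps to 255 */
	int i = -1;

	/* (u < 0) is always false: the error is silently lost */
	/* (i < 0) is true:         the error is caught       */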
diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c
index 73ef49922788..7038f364acb5 100644
--- a/drivers/crypto/nx/nx-aes-ccm.c
+++ b/drivers/crypto/nx/nx-aes-ccm.c
@@ -409,7 +409,7 @@ static int ccm_nx_decrypt(struct aead_request *req,
 		processed += to_process;
 	} while (processed < nbytes);
 
-	rc = memcmp(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag,
+	rc = crypto_memneq(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag,
 		    authsize) ? -EBADMSG : 0;
 out:
 	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
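This memcmp-to-crypto_memneq conversion (repeated in the gcm and talitos hunks below) closes a timing side channel: memcmp() bails out at the first differing byte, so its runtime leaks how many leading tag bytes matched, while crypto_memneq() examines every byte and only reports equal or not-equal. A minimal constant-time comparison in the spirit of crypto_memneq(), not the kernel's exact implementation:

	static int tag_neq(const unsigned char *a, const unsigned char *b,
			   size_t len)
	{
		unsigned char diff = 0;
		size_t i;

		for (i = 0; i < len; i++)
			diff |= a[i] ^ b[i];	/* no early exit */

		return diff != 0;		/* 0 iff all bytes equal */
	}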
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c
index eee624f589b6..abd465f479c4 100644
--- a/drivers/crypto/nx/nx-aes-gcm.c
+++ b/drivers/crypto/nx/nx-aes-gcm.c
@@ -21,6 +21,7 @@
 
 #include <crypto/internal/aead.h>
 #include <crypto/aes.h>
+#include <crypto/algapi.h>
 #include <crypto/scatterwalk.h>
 #include <linux/module.h>
 #include <linux/types.h>
@@ -418,7 +419,7 @@ mac:
 		itag, req->src, req->assoclen + nbytes,
 		crypto_aead_authsize(crypto_aead_reqtfm(req)),
 		SCATTERWALK_FROM_SG);
-	rc = memcmp(itag, otag,
+	rc = crypto_memneq(itag, otag,
 		    crypto_aead_authsize(crypto_aead_reqtfm(req))) ?
 		    -EBADMSG : 0;
 	}
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 46f531e19ccf..b6f9f42e2985 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -977,7 +977,7 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev,
 		} else
 			oicv = (char *)&edesc->link_tbl[0];
 
-		err = memcmp(oicv, icv, authsize) ? -EBADMSG : 0;
+		err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
 	}
 
 	kfree(edesc);
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 7f039de143f0..370c661c7d7b 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -156,7 +156,7 @@
156#define AT_XDMAC_CC_WRIP (0x1 << 23) /* Write in Progress (read only) */ 156#define AT_XDMAC_CC_WRIP (0x1 << 23) /* Write in Progress (read only) */
157#define AT_XDMAC_CC_WRIP_DONE (0x0 << 23) 157#define AT_XDMAC_CC_WRIP_DONE (0x0 << 23)
158#define AT_XDMAC_CC_WRIP_IN_PROGRESS (0x1 << 23) 158#define AT_XDMAC_CC_WRIP_IN_PROGRESS (0x1 << 23)
159#define AT_XDMAC_CC_PERID(i) (0x7f & (h) << 24) /* Channel Peripheral Identifier */ 159#define AT_XDMAC_CC_PERID(i) (0x7f & (i) << 24) /* Channel Peripheral Identifier */
160#define AT_XDMAC_CDS_MSP 0x2C /* Channel Data Stride Memory Set Pattern */ 160#define AT_XDMAC_CDS_MSP 0x2C /* Channel Data Stride Memory Set Pattern */
161#define AT_XDMAC_CSUS 0x30 /* Channel Source Microblock Stride */ 161#define AT_XDMAC_CSUS 0x30 /* Channel Source Microblock Stride */
162#define AT_XDMAC_CDUS 0x34 /* Channel Destination Microblock Stride */ 162#define AT_XDMAC_CDUS 0x34 /* Channel Destination Microblock Stride */
@@ -965,7 +965,9 @@ at_xdmac_prep_interleaved(struct dma_chan *chan,
 					       NULL,
 					       src_addr, dst_addr,
 					       xt, xt->sgl);
-		for (i = 0; i < xt->numf; i++)
+
+		/* Length of the block is (BLEN+1) microblocks. */
+		for (i = 0; i < xt->numf - 1; i++)
 			at_xdmac_increment_block_count(chan, first);
 
 		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
@@ -1086,6 +1088,7 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 		/* Check remaining length and change data width if needed. */
 		dwidth = at_xdmac_align_width(chan,
 					      src_addr | dst_addr | xfer_size);
+		chan_cc &= ~AT_XDMAC_CC_DWIDTH_MASK;
 		chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);
 
 		ublen = xfer_size >> dwidth;
@@ -1333,7 +1336,7 @@ at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		 * since we don't care about the stride anymore.
 		 */
 		if ((i == (sg_len - 1)) &&
-		    sg_dma_len(ppsg) == sg_dma_len(psg)) {
+		    sg_dma_len(psg) == sg_dma_len(sg)) {
 			dev_dbg(chan2dev(chan),
 				"%s: desc 0x%p can be merged with desc 0x%p\n",
 				__func__, desc, pdesc);
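
Two of the at_xdmac hunks are classic register-hygiene fixes: the PERID macro used the wrong parameter name, and the memcpy path OR-ed a new data width into chan_cc without first clearing the old one, so switching to a narrower width left stale bits set. A generic sketch of the clear-then-set idiom (mask and shift values hypothetical):

#include <stdint.h>

#define CFG_DWIDTH_MASK	(0x3u << 11)		/* hypothetical field */
#define CFG_DWIDTH(w)	(((uint32_t)(w) & 0x3u) << 11)

/* Replace one bitfield in a cached register value without disturbing
 * the other bits: clear the field first, then OR in the new value. */
static uint32_t cfg_set_dwidth(uint32_t cfg, unsigned int width)
{
	cfg &= ~CFG_DWIDTH_MASK;	/* drop stale field contents */
	cfg |= CFG_DWIDTH(width);	/* install the new width */
	return cfg;
}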
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index c92d6a70ccf3..996c4b00d323 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -31,6 +31,7 @@
  */
 #include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
@@ -62,6 +63,11 @@ struct bcm2835_dma_cb {
 	uint32_t pad[2];
 };
 
+struct bcm2835_cb_entry {
+	struct bcm2835_dma_cb *cb;
+	dma_addr_t paddr;
+};
+
 struct bcm2835_chan {
 	struct virt_dma_chan vc;
 	struct list_head node;
@@ -72,18 +78,18 @@ struct bcm2835_chan {
 
 	int ch;
 	struct bcm2835_desc *desc;
+	struct dma_pool *cb_pool;
 
 	void __iomem *chan_base;
 	int irq_number;
 };
 
 struct bcm2835_desc {
+	struct bcm2835_chan *c;
 	struct virt_dma_desc vd;
 	enum dma_transfer_direction dir;
 
-	unsigned int control_block_size;
-	struct bcm2835_dma_cb *control_block_base;
-	dma_addr_t control_block_base_phys;
+	struct bcm2835_cb_entry *cb_list;
 
 	unsigned int frames;
 	size_t size;
@@ -143,10 +149,13 @@ static inline struct bcm2835_desc *to_bcm2835_dma_desc(
 static void bcm2835_dma_desc_free(struct virt_dma_desc *vd)
 {
 	struct bcm2835_desc *desc = container_of(vd, struct bcm2835_desc, vd);
-	dma_free_coherent(desc->vd.tx.chan->device->dev,
-			desc->control_block_size,
-			desc->control_block_base,
-			desc->control_block_base_phys);
+	int i;
+
+	for (i = 0; i < desc->frames; i++)
+		dma_pool_free(desc->c->cb_pool, desc->cb_list[i].cb,
+			      desc->cb_list[i].paddr);
+
+	kfree(desc->cb_list);
 	kfree(desc);
 }
 
@@ -199,7 +208,7 @@ static void bcm2835_dma_start_desc(struct bcm2835_chan *c)
 
 	c->desc = d = to_bcm2835_dma_desc(&vd->tx);
 
-	writel(d->control_block_base_phys, c->chan_base + BCM2835_DMA_ADDR);
+	writel(d->cb_list[0].paddr, c->chan_base + BCM2835_DMA_ADDR);
 	writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS);
 }
 
@@ -232,9 +241,16 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data)
 static int bcm2835_dma_alloc_chan_resources(struct dma_chan *chan)
 {
 	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+	struct device *dev = c->vc.chan.device->dev;
+
+	dev_dbg(dev, "Allocating DMA channel %d\n", c->ch);
 
-	dev_dbg(c->vc.chan.device->dev,
-			"Allocating DMA channel %d\n", c->ch);
+	c->cb_pool = dma_pool_create(dev_name(dev), dev,
+				     sizeof(struct bcm2835_dma_cb), 0, 0);
+	if (!c->cb_pool) {
+		dev_err(dev, "unable to allocate descriptor pool\n");
+		return -ENOMEM;
+	}
 
 	return request_irq(c->irq_number,
 			bcm2835_dma_callback, 0, "DMA IRQ", c);
@@ -246,6 +262,7 @@ static void bcm2835_dma_free_chan_resources(struct dma_chan *chan)
 
 	vchan_free_chan_resources(&c->vc);
 	free_irq(c->irq_number, c);
+	dma_pool_destroy(c->cb_pool);
 
 	dev_dbg(c->vc.chan.device->dev, "Freeing DMA channel %u\n", c->ch);
 }
@@ -261,8 +278,7 @@ static size_t bcm2835_dma_desc_size_pos(struct bcm2835_desc *d, dma_addr_t addr)
 	size_t size;
 
 	for (size = i = 0; i < d->frames; i++) {
-		struct bcm2835_dma_cb *control_block =
-			&d->control_block_base[i];
+		struct bcm2835_dma_cb *control_block = d->cb_list[i].cb;
 		size_t this_size = control_block->length;
 		dma_addr_t dma;
 
@@ -343,6 +359,7 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
 	dma_addr_t dev_addr;
 	unsigned int es, sync_type;
 	unsigned int frame;
+	int i;
 
 	/* Grab configuration */
 	if (!is_slave_direction(direction)) {
@@ -374,27 +391,31 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
 	if (!d)
 		return NULL;
 
+	d->c = c;
 	d->dir = direction;
 	d->frames = buf_len / period_len;
 
-	/* Allocate memory for control blocks */
-	d->control_block_size = d->frames * sizeof(struct bcm2835_dma_cb);
-	d->control_block_base = dma_zalloc_coherent(chan->device->dev,
-			d->control_block_size, &d->control_block_base_phys,
-			GFP_NOWAIT);
-
-	if (!d->control_block_base) {
+	d->cb_list = kcalloc(d->frames, sizeof(*d->cb_list), GFP_KERNEL);
+	if (!d->cb_list) {
 		kfree(d);
 		return NULL;
 	}
+	/* Allocate memory for control blocks */
+	for (i = 0; i < d->frames; i++) {
+		struct bcm2835_cb_entry *cb_entry = &d->cb_list[i];
+
+		cb_entry->cb = dma_pool_zalloc(c->cb_pool, GFP_ATOMIC,
+					       &cb_entry->paddr);
+		if (!cb_entry->cb)
+			goto error_cb;
+	}
 
 	/*
 	 * Iterate over all frames, create a control block
 	 * for each frame and link them together.
 	 */
 	for (frame = 0; frame < d->frames; frame++) {
-		struct bcm2835_dma_cb *control_block =
-			&d->control_block_base[frame];
+		struct bcm2835_dma_cb *control_block = d->cb_list[frame].cb;
 
 		/* Setup adresses */
 		if (d->dir == DMA_DEV_TO_MEM) {
@@ -428,12 +449,21 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
 		 * This DMA engine driver currently only supports cyclic DMA.
 		 * Therefore, wrap around at number of frames.
 		 */
-		control_block->next = d->control_block_base_phys +
-			sizeof(struct bcm2835_dma_cb)
-			* ((frame + 1) % d->frames);
+		control_block->next = d->cb_list[((frame + 1) % d->frames)].paddr;
 	}
 
 	return vchan_tx_prep(&c->vc, &d->vd, flags);
+error_cb:
+	i--;
+	for (; i >= 0; i--) {
+		struct bcm2835_cb_entry *cb_entry = &d->cb_list[i];
+
+		dma_pool_free(c->cb_pool, cb_entry->cb, cb_entry->paddr);
+	}
+
+	kfree(d->cb_list);
+	kfree(d);
+	return NULL;
 }
 
 static int bcm2835_dma_slave_config(struct dma_chan *chan,
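
The bcm2835 conversion replaces one large dma_zalloc_coherent() region with a per-channel struct dma_pool, so each control block becomes an individually allocatable, fixed-size DMA object whose bus address can be chained into the next block. The lifecycle the patch follows, condensed into a sketch (variable names shortened, error handling trimmed; the calls are the ones used in the hunks above):

	/* alloc_chan_resources: one pool of control-block-sized objects */
	pool = dma_pool_create(dev_name(dev), dev,
			       sizeof(struct bcm2835_dma_cb), 0, 0);

	/* per frame: a zeroed block plus its DMA (bus) address */
	cb = dma_pool_zalloc(pool, GFP_ATOMIC, &paddr);
	cb->next = next_paddr;		/* link control blocks by bus address */

	/* teardown, strictly in reverse order */
	dma_pool_free(pool, cb, paddr);
	dma_pool_destroy(pool);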
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 0675e268d577..16fe773fb846 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -1752,16 +1752,14 @@ static enum dma_status edma_tx_status(struct dma_chan *chan,
 	return ret;
 }
 
-static bool edma_is_memcpy_channel(int ch_num, u16 *memcpy_channels)
+static bool edma_is_memcpy_channel(int ch_num, s32 *memcpy_channels)
 {
-	s16 *memcpy_ch = memcpy_channels;
-
 	if (!memcpy_channels)
 		return false;
-	while (*memcpy_ch != -1) {
-		if (*memcpy_ch == ch_num)
+	while (*memcpy_channels != -1) {
+		if (*memcpy_channels == ch_num)
 			return true;
-		memcpy_ch++;
+		memcpy_channels++;
 	}
 	return false;
 }
@@ -1775,7 +1773,7 @@ static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
 {
 	struct dma_device *s_ddev = &ecc->dma_slave;
 	struct dma_device *m_ddev = NULL;
-	s16 *memcpy_channels = ecc->info->memcpy_channels;
+	s32 *memcpy_channels = ecc->info->memcpy_channels;
 	int i, j;
 
 	dma_cap_zero(s_ddev->cap_mask);
@@ -1996,16 +1994,16 @@ static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
 	prop = of_find_property(dev->of_node, "ti,edma-memcpy-channels", &sz);
 	if (prop) {
 		const char pname[] = "ti,edma-memcpy-channels";
-		size_t nelm = sz / sizeof(s16);
-		s16 *memcpy_ch;
+		size_t nelm = sz / sizeof(s32);
+		s32 *memcpy_ch;
 
-		memcpy_ch = devm_kcalloc(dev, nelm + 1, sizeof(s16),
+		memcpy_ch = devm_kcalloc(dev, nelm + 1, sizeof(s32),
 					 GFP_KERNEL);
 		if (!memcpy_ch)
 			return ERR_PTR(-ENOMEM);
 
-		ret = of_property_read_u16_array(dev->of_node, pname,
-						 (u16 *)memcpy_ch, nelm);
+		ret = of_property_read_u32_array(dev->of_node, pname,
						 (u32 *)memcpy_ch, nelm);
 		if (ret)
 			return ERR_PTR(ret);
 
@@ -2017,31 +2015,50 @@ static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
 			 &sz);
 	if (prop) {
 		const char pname[] = "ti,edma-reserved-slot-ranges";
+		u32 (*tmp)[2];
 		s16 (*rsv_slots)[2];
-		size_t nelm = sz / sizeof(*rsv_slots);
+		size_t nelm = sz / sizeof(*tmp);
 		struct edma_rsv_info *rsv_info;
+		int i;
 
 		if (!nelm)
 			return info;
 
+		tmp = kcalloc(nelm, sizeof(*tmp), GFP_KERNEL);
+		if (!tmp)
+			return ERR_PTR(-ENOMEM);
+
 		rsv_info = devm_kzalloc(dev, sizeof(*rsv_info), GFP_KERNEL);
-		if (!rsv_info)
+		if (!rsv_info) {
+			kfree(tmp);
 			return ERR_PTR(-ENOMEM);
+		}
 
 		rsv_slots = devm_kcalloc(dev, nelm + 1, sizeof(*rsv_slots),
 					 GFP_KERNEL);
-		if (!rsv_slots)
+		if (!rsv_slots) {
+			kfree(tmp);
 			return ERR_PTR(-ENOMEM);
+		}
 
-		ret = of_property_read_u16_array(dev->of_node, pname,
-						 (u16 *)rsv_slots, nelm * 2);
-		if (ret)
+		ret = of_property_read_u32_array(dev->of_node, pname,
+						 (u32 *)tmp, nelm * 2);
+		if (ret) {
+			kfree(tmp);
 			return ERR_PTR(ret);
+		}
 
+		for (i = 0; i < nelm; i++) {
+			rsv_slots[i][0] = tmp[i][0];
+			rsv_slots[i][1] = tmp[i][1];
+		}
 		rsv_slots[nelm][0] = -1;
 		rsv_slots[nelm][1] = -1;
+
 		info->rsv = rsv_info;
 		info->rsv->rsv_slots = (const s16 (*)[2])rsv_slots;
+
+		kfree(tmp);
 	}
 
 	return info;
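
Both edma properties move from 16-bit to 32-bit handling because a default devicetree cell is 32 bits wide; of_property_read_u16_array() is only correct for properties explicitly declared with /bits/ 16. A hedged sketch of the sizing-and-read pattern the patch adopts (helper name hypothetical; the APIs are the ones used in the hunks above):

/* Read a property of 32-bit cells into a -1 terminated s32 list. */
static s32 *read_s32_list(struct device *dev, struct device_node *np,
			  const char *propname)
{
	int sz;
	size_t nelm;
	s32 *list;

	if (!of_find_property(np, propname, &sz))
		return NULL;

	nelm = sz / sizeof(s32);	/* one entry per 32-bit cell */
	list = devm_kcalloc(dev, nelm + 1, sizeof(s32), GFP_KERNEL);
	if (!list)
		return ERR_PTR(-ENOMEM);

	if (of_property_read_u32_array(np, propname, (u32 *)list, nelm))
		return ERR_PTR(-EINVAL);

	list[nelm] = -1;		/* sentinel, as in the patch */
	return list;
}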
diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c
index 068e920ecb68..cddfa8dbf4bd 100644
--- a/drivers/dma/mic_x100_dma.c
+++ b/drivers/dma/mic_x100_dma.c
@@ -317,6 +317,7 @@ mic_dma_prep_memcpy_lock(struct dma_chan *ch, dma_addr_t dma_dest,
 	struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
 	struct device *dev = mic_dma_ch_to_device(mic_ch);
 	int result;
+	struct dma_async_tx_descriptor *tx = NULL;
 
 	if (!len && !flags)
 		return NULL;
@@ -324,10 +325,13 @@ mic_dma_prep_memcpy_lock(struct dma_chan *ch, dma_addr_t dma_dest,
 	spin_lock(&mic_ch->prep_lock);
 	result = mic_dma_do_dma(mic_ch, flags, dma_src, dma_dest, len);
 	if (result >= 0)
-		return allocate_tx(mic_ch);
-	dev_err(dev, "Error enqueueing dma, error=%d\n", result);
+		tx = allocate_tx(mic_ch);
+
+	if (!tx)
+		dev_err(dev, "Error enqueueing dma, error=%d\n", result);
+
 	spin_unlock(&mic_ch->prep_lock);
-	return NULL;
+	return tx;
 }
 
 static struct dma_async_tx_descriptor *
@@ -335,13 +339,14 @@ mic_dma_prep_interrupt_lock(struct dma_chan *ch, unsigned long flags)
 {
 	struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
 	int ret;
+	struct dma_async_tx_descriptor *tx = NULL;
 
 	spin_lock(&mic_ch->prep_lock);
 	ret = mic_dma_do_dma(mic_ch, flags, 0, 0, 0);
 	if (!ret)
-		return allocate_tx(mic_ch);
+		tx = allocate_tx(mic_ch);
 	spin_unlock(&mic_ch->prep_lock);
-	return NULL;
+	return tx;
 }
 
 /* Return the status of the transaction */
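
Both mic_x100 hunks cure the same bug: returning from inside a spin_lock()ed section leaves prep_lock held forever and deadlocks the next submitter. Routing every outcome through a single unlock-and-return keeps acquire and release balanced. The shape the patch converges on, in outline (src/dst/len stand in for the real arguments):

	struct dma_async_tx_descriptor *tx = NULL;

	spin_lock(&mic_ch->prep_lock);
	if (mic_dma_do_dma(mic_ch, flags, src, dst, len) >= 0)
		tx = allocate_tx(mic_ch);	/* may still be NULL */
	spin_unlock(&mic_ch->prep_lock);	/* single exit: lock always dropped */
	return tx;				/* NULL signals failure */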
diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c
index a24f5cb877e0..953dc9195937 100644
--- a/drivers/fpga/fpga-mgr.c
+++ b/drivers/fpga/fpga-mgr.c
@@ -122,12 +122,10 @@ int fpga_mgr_firmware_load(struct fpga_manager *mgr, u32 flags,
 	}
 
 	ret = fpga_mgr_buf_load(mgr, flags, fw->data, fw->size);
-	if (ret)
-		return ret;
 
 	release_firmware(fw);
 
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(fpga_mgr_firmware_load);
 
@@ -256,7 +254,6 @@ int fpga_mgr_register(struct device *dev, const char *name,
 		      void *priv)
 {
 	struct fpga_manager *mgr;
-	const char *dt_label;
 	int id, ret;
 
 	if (!mops || !mops->write_init || !mops->write ||
@@ -300,11 +297,9 @@ int fpga_mgr_register(struct device *dev, const char *name,
 	mgr->dev.id = id;
 	dev_set_drvdata(dev, mgr);
 
-	dt_label = of_get_property(mgr->dev.of_node, "label", NULL);
-	if (dt_label)
-		ret = dev_set_name(&mgr->dev, "%s", dt_label);
-	else
-		ret = dev_set_name(&mgr->dev, "fpga%d", id);
+	ret = dev_set_name(&mgr->dev, "fpga%d", id);
+	if (ret)
+		goto error_device;
 
 	ret = device_add(&mgr->dev);
 	if (ret)
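
The fpga_mgr_firmware_load() hunk is a resource-leak repair: the old early `return ret` skipped release_firmware(), leaking the blob on every failed load. Capturing the status and releasing unconditionally is the usual cure; a sketch of the repaired shape (image_name is a placeholder for the caller's firmware name):

	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, image_name, dev);
	if (ret)
		return ret;			/* nothing acquired yet */

	ret = fpga_mgr_buf_load(mgr, flags, fw->data, fw->size);

	release_firmware(fw);			/* on success and failure alike */
	return ret;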
diff --git a/drivers/gpio/gpio-74xx-mmio.c b/drivers/gpio/gpio-74xx-mmio.c
index 6ed7c0fb3378..6b186829087c 100644
--- a/drivers/gpio/gpio-74xx-mmio.c
+++ b/drivers/gpio/gpio-74xx-mmio.c
@@ -113,13 +113,16 @@ static int mmio_74xx_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
 
 static int mmio_74xx_gpio_probe(struct platform_device *pdev)
 {
-	const struct of_device_id *of_id =
-			of_match_device(mmio_74xx_gpio_ids, &pdev->dev);
+	const struct of_device_id *of_id;
 	struct mmio_74xx_gpio_priv *priv;
 	struct resource *res;
 	void __iomem *dat;
 	int err;
 
+	of_id = of_match_device(mmio_74xx_gpio_ids, &pdev->dev);
+	if (!of_id)
+		return -ENODEV;
+
 	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;
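
of_match_device() returns NULL when the device was bound by some means other than the listed OF match table, so blindly dereferencing its result can oops; probing now fails cleanly with -ENODEV instead. The same guard is added to gpio-palmas and gpio-syscon below. The pattern in isolation:

	const struct of_device_id *of_id;

	of_id = of_match_device(mmio_74xx_gpio_ids, &pdev->dev);
	if (!of_id)
		return -ENODEV;		/* no OF match: refuse to probe */
	/* only now is of_id (and of_id->data) safe to use */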
diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c
index e5827a56ff3b..5eaea8b812cf 100644
--- a/drivers/gpio/gpio-ath79.c
+++ b/drivers/gpio/gpio-ath79.c
@@ -113,7 +113,7 @@ static int ar934x_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
 	__raw_writel(BIT(offset), ctrl->base + AR71XX_GPIO_REG_CLEAR);
 
 	__raw_writel(
-		__raw_readl(ctrl->base + AR71XX_GPIO_REG_OE) & BIT(offset),
+		__raw_readl(ctrl->base + AR71XX_GPIO_REG_OE) & ~BIT(offset),
 		ctrl->base + AR71XX_GPIO_REG_OE);
 
 	spin_unlock_irqrestore(&ctrl->lock, flags);
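
A one-character fix with a large blast radius: with this controller's polarity, making a line an output means clearing its OE bit, so the read-modify-write must AND with ~BIT(offset); ANDing with BIT(offset) instead zeroed the OE bits of every other line. The two idioms side by side, as a standalone sketch:

#include <stdint.h>

#define BIT(n)	(1u << (n))

static uint32_t oe_mark_output(uint32_t oe, unsigned int offset)
{
	return oe & ~BIT(offset);	/* clear one bit, preserve the rest */
}

static uint32_t oe_mark_input(uint32_t oe, unsigned int offset)
{
	return oe | BIT(offset);	/* set one bit, preserve the rest */
}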
diff --git a/drivers/gpio/gpio-generic.c b/drivers/gpio/gpio-generic.c
index bd5193c67a9c..88ae70ddb127 100644
--- a/drivers/gpio/gpio-generic.c
+++ b/drivers/gpio/gpio-generic.c
@@ -141,9 +141,9 @@ static int bgpio_get_set(struct gpio_chip *gc, unsigned int gpio)
 	unsigned long pinmask = bgc->pin2mask(bgc, gpio);
 
 	if (bgc->dir & pinmask)
-		return bgc->read_reg(bgc->reg_set) & pinmask;
+		return !!(bgc->read_reg(bgc->reg_set) & pinmask);
 	else
-		return bgc->read_reg(bgc->reg_dat) & pinmask;
+		return !!(bgc->read_reg(bgc->reg_dat) & pinmask);
 }
 
 static int bgpio_get(struct gpio_chip *gc, unsigned int gpio)
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 56d2d026e62e..f7fbb46d5d79 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -1122,8 +1122,6 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
 	/* MPUIO is a bit different, reading IRQ status clears it */
 	if (bank->is_mpuio) {
 		irqc->irq_ack = dummy_irq_chip.irq_ack;
-		irqc->irq_mask = irq_gc_mask_set_bit;
-		irqc->irq_unmask = irq_gc_mask_clr_bit;
 		if (!bank->regs->wkup_en)
 			irqc->irq_set_wake = NULL;
 	}
diff --git a/drivers/gpio/gpio-palmas.c b/drivers/gpio/gpio-palmas.c
index 171a6389f9ce..52b447c071cb 100644
--- a/drivers/gpio/gpio-palmas.c
+++ b/drivers/gpio/gpio-palmas.c
@@ -167,6 +167,8 @@ static int palmas_gpio_probe(struct platform_device *pdev)
 	const struct palmas_device_data *dev_data;
 
 	match = of_match_device(of_palmas_gpio_match, &pdev->dev);
+	if (!match)
+		return -ENODEV;
 	dev_data = match->data;
 	if (!dev_data)
 		dev_data = &palmas_dev_data;
diff --git a/drivers/gpio/gpio-syscon.c b/drivers/gpio/gpio-syscon.c
index 045a952576c7..7b25fdf64802 100644
--- a/drivers/gpio/gpio-syscon.c
+++ b/drivers/gpio/gpio-syscon.c
@@ -187,11 +187,15 @@ MODULE_DEVICE_TABLE(of, syscon_gpio_ids);
 static int syscon_gpio_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
-	const struct of_device_id *of_id = of_match_device(syscon_gpio_ids, dev);
+	const struct of_device_id *of_id;
 	struct syscon_gpio_priv *priv;
 	struct device_node *np = dev->of_node;
 	int ret;
 
+	of_id = of_match_device(syscon_gpio_ids, dev);
+	if (!of_id)
+		return -ENODEV;
+
 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 027e5f47dd28..896bf29776b0 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -375,6 +375,60 @@ static int tegra_gpio_irq_set_wake(struct irq_data *d, unsigned int enable)
 }
 #endif
 
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+static int dbg_gpio_show(struct seq_file *s, void *unused)
+{
+	int i;
+	int j;
+
+	for (i = 0; i < tegra_gpio_bank_count; i++) {
+		for (j = 0; j < 4; j++) {
+			int gpio = tegra_gpio_compose(i, j, 0);
+			seq_printf(s,
+				"%d:%d %02x %02x %02x %02x %02x %02x %06x\n",
+				i, j,
+				tegra_gpio_readl(GPIO_CNF(gpio)),
+				tegra_gpio_readl(GPIO_OE(gpio)),
+				tegra_gpio_readl(GPIO_OUT(gpio)),
+				tegra_gpio_readl(GPIO_IN(gpio)),
+				tegra_gpio_readl(GPIO_INT_STA(gpio)),
+				tegra_gpio_readl(GPIO_INT_ENB(gpio)),
+				tegra_gpio_readl(GPIO_INT_LVL(gpio)));
+		}
+	}
+	return 0;
+}
+
+static int dbg_gpio_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, dbg_gpio_show, &inode->i_private);
+}
+
+static const struct file_operations debug_fops = {
+	.open		= dbg_gpio_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static void tegra_gpio_debuginit(void)
+{
+	(void) debugfs_create_file("tegra_gpio", S_IRUGO,
+					NULL, NULL, &debug_fops);
+}
+
+#else
+
+static inline void tegra_gpio_debuginit(void)
+{
+}
+
+#endif
+
 static struct irq_chip tegra_gpio_irq_chip = {
 	.name			= "GPIO",
 	.irq_ack		= tegra_gpio_irq_ack,
@@ -519,6 +573,8 @@ static int tegra_gpio_probe(struct platform_device *pdev)
 			spin_lock_init(&bank->lvl_lock[j]);
 	}
 
+	tegra_gpio_debuginit();
+
 	return 0;
 }
 
@@ -536,52 +592,3 @@ static int __init tegra_gpio_init(void)
 	return platform_driver_register(&tegra_gpio_driver);
 }
 postcore_initcall(tegra_gpio_init);
-
-#ifdef CONFIG_DEBUG_FS
-
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-
-static int dbg_gpio_show(struct seq_file *s, void *unused)
-{
-	int i;
-	int j;
-
-	for (i = 0; i < tegra_gpio_bank_count; i++) {
-		for (j = 0; j < 4; j++) {
-			int gpio = tegra_gpio_compose(i, j, 0);
-			seq_printf(s,
-				"%d:%d %02x %02x %02x %02x %02x %02x %06x\n",
-				i, j,
-				tegra_gpio_readl(GPIO_CNF(gpio)),
-				tegra_gpio_readl(GPIO_OE(gpio)),
-				tegra_gpio_readl(GPIO_OUT(gpio)),
-				tegra_gpio_readl(GPIO_IN(gpio)),
-				tegra_gpio_readl(GPIO_INT_STA(gpio)),
-				tegra_gpio_readl(GPIO_INT_ENB(gpio)),
-				tegra_gpio_readl(GPIO_INT_LVL(gpio)));
-		}
-	}
-	return 0;
-}
-
-static int dbg_gpio_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, dbg_gpio_show, &inode->i_private);
-}
-
-static const struct file_operations debug_fops = {
-	.open		= dbg_gpio_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
-
-static int __init tegra_gpio_debuginit(void)
-{
-	(void) debugfs_create_file("tegra_gpio", S_IRUGO,
-					NULL, NULL, &debug_fops);
-	return 0;
-}
-late_initcall(tegra_gpio_debuginit);
-#endif
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index a18f00fc1bb8..4e4c3083ae56 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -233,7 +233,7 @@ static struct gpio_desc *gpio_name_to_desc(const char * const name)
 	for (i = 0; i != chip->ngpio; ++i) {
 		struct gpio_desc *gpio = &chip->desc[i];
 
-		if (!gpio->name)
+		if (!gpio->name || !name)
 			continue;
 
 		if (!strcmp(gpio->name, name)) {
@@ -1279,7 +1279,13 @@ static int _gpiod_get_raw_value(const struct gpio_desc *desc)
 	chip = desc->chip;
 	offset = gpio_chip_hwgpio(desc);
 	value = chip->get ? chip->get(chip, offset) : -EIO;
-	value = value < 0 ? value : !!value;
+	/*
+	 * FIXME: fix all drivers to clamp to [0,1] or return negative,
+	 * then change this to:
+	 * value = value < 0 ? value : !!value;
+	 * so we can properly propagate error codes.
+	 */
+	value = !!value;
 	trace_gpio_value(desc_to_gpio(desc), 1, value);
 	return value;
 }
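
The bgpio_get_set() change above and this gpiolib clamp attack the same mismatch from both ends: a masked register read such as `reg & BIT(5)` evaluates to 0 or 32, but gpiod callers expect exactly 0 or 1. Double negation is the standard normalization:

#include <stdint.h>

/* Collapse any nonzero masked read to exactly 1. */
static int gpio_level(uint32_t reg, unsigned int bit)
{
	return !!(reg & (1u << bit));	/* 0 stays 0; e.g. 32 becomes 1 */
}

The FIXME records the cost of the interim clamp: until every driver clamps its own result (or returns negatives only for errors), gpiolib cannot pass error codes through this path.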
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 306f75700bf8..048cfe073dae 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -496,6 +496,7 @@ struct amdgpu_bo_va_mapping {
 
 /* bo virtual addresses in a specific vm */
 struct amdgpu_bo_va {
+	struct mutex			mutex;
 	/* protected by bo being reserved */
 	struct list_head		bo_list;
 	struct fence			*last_pt_update;
@@ -538,6 +539,7 @@ struct amdgpu_bo {
 	/* Constant after initialization */
 	struct amdgpu_device		*adev;
 	struct drm_gem_object		gem_base;
+	struct amdgpu_bo		*parent;
 
 	struct ttm_bo_kmap_obj		dma_buf_vmap;
 	pid_t				pid;
@@ -928,8 +930,6 @@ struct amdgpu_vm_id {
 };
 
 struct amdgpu_vm {
-	struct mutex		mutex;
-
 	struct rb_root		va;
 
 	/* protecting invalidated */
@@ -956,6 +956,8 @@ struct amdgpu_vm {
 	struct amdgpu_vm_id	ids[AMDGPU_MAX_RINGS];
 	/* for interval tree */
 	spinlock_t		it_lock;
+	/* protecting freed */
+	spinlock_t		freed_lock;
 };
 
 struct amdgpu_vm_manager {
@@ -1262,7 +1264,8 @@ struct amdgpu_cs_parser {
 	struct ww_acquire_ctx	ticket;
 
 	/* user fence */
-	struct amdgpu_user_fence uf;
+	struct amdgpu_user_fence	uf;
+	struct amdgpu_bo_list_entry	uf_entry;
 };
 
 struct amdgpu_job {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 3afcf0237c25..25a3e2485cc2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -127,6 +127,37 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
 	return 0;
 }
 
+static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
+				      struct drm_amdgpu_cs_chunk_fence *fence_data)
+{
+	struct drm_gem_object *gobj;
+	uint32_t handle;
+
+	handle = fence_data->handle;
+	gobj = drm_gem_object_lookup(p->adev->ddev, p->filp,
+				     fence_data->handle);
+	if (gobj == NULL)
+		return -EINVAL;
+
+	p->uf.bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
+	p->uf.offset = fence_data->offset;
+
+	if (amdgpu_ttm_tt_has_userptr(p->uf.bo->tbo.ttm)) {
+		drm_gem_object_unreference_unlocked(gobj);
+		return -EINVAL;
+	}
+
+	p->uf_entry.robj = amdgpu_bo_ref(p->uf.bo);
+	p->uf_entry.prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
+	p->uf_entry.allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
+	p->uf_entry.priority = 0;
+	p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
+	p->uf_entry.tv.shared = true;
+
+	drm_gem_object_unreference_unlocked(gobj);
+	return 0;
+}
+
 int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 {
 	union drm_amdgpu_cs *cs = data;
@@ -207,26 +238,15 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 
 		case AMDGPU_CHUNK_ID_FENCE:
 			size = sizeof(struct drm_amdgpu_cs_chunk_fence);
-			if (p->chunks[i].length_dw * sizeof(uint32_t) >= size) {
-				uint32_t handle;
-				struct drm_gem_object *gobj;
-				struct drm_amdgpu_cs_chunk_fence *fence_data;
-
-				fence_data = (void *)p->chunks[i].kdata;
-				handle = fence_data->handle;
-				gobj = drm_gem_object_lookup(p->adev->ddev,
-							     p->filp, handle);
-				if (gobj == NULL) {
-					ret = -EINVAL;
-					goto free_partial_kdata;
-				}
-
-				p->uf.bo = gem_to_amdgpu_bo(gobj);
-				p->uf.offset = fence_data->offset;
-			} else {
+			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
 				ret = -EINVAL;
 				goto free_partial_kdata;
 			}
+
+			ret = amdgpu_cs_user_fence_chunk(p, (void *)p->chunks[i].kdata);
+			if (ret)
+				goto free_partial_kdata;
+
 			break;
 
 		case AMDGPU_CHUNK_ID_DEPENDENCIES:
@@ -389,6 +409,9 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p)
 	p->vm_bos = amdgpu_vm_get_bos(p->adev, &fpriv->vm,
 				      &p->validated);
 
+	if (p->uf.bo)
+		list_add(&p->uf_entry.tv.head, &p->validated);
+
 	if (need_mmap_lock)
 		down_read(&current->mm->mmap_sem);
 
@@ -486,8 +509,8 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
 	for (i = 0; i < parser->num_ibs; i++)
 		amdgpu_ib_free(parser->adev, &parser->ibs[i]);
 	kfree(parser->ibs);
-	if (parser->uf.bo)
-		drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
+	amdgpu_bo_unref(&parser->uf.bo);
+	amdgpu_bo_unref(&parser->uf_entry.robj);
 }
 
 static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
@@ -776,7 +799,7 @@ static int amdgpu_cs_free_job(struct amdgpu_job *job)
 		amdgpu_ib_free(job->adev, &job->ibs[i]);
 	kfree(job->ibs);
 	if (job->uf.bo)
-		drm_gem_object_unreference_unlocked(&job->uf.bo->gem_base);
+		amdgpu_bo_unref(&job->uf.bo);
 	return 0;
 }
 
@@ -784,8 +807,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 {
 	struct amdgpu_device *adev = dev->dev_private;
 	union drm_amdgpu_cs *cs = data;
-	struct amdgpu_fpriv *fpriv = filp->driver_priv;
-	struct amdgpu_vm *vm = &fpriv->vm;
 	struct amdgpu_cs_parser parser = {};
 	bool reserved_buffers = false;
 	int i, r;
@@ -803,7 +824,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		r = amdgpu_cs_handle_lockup(adev, r);
 		return r;
 	}
-	mutex_lock(&vm->mutex);
 	r = amdgpu_cs_parser_relocs(&parser);
 	if (r == -ENOMEM)
 		DRM_ERROR("Not enough memory for command submission!\n");
@@ -888,7 +908,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 
 out:
 	amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
-	mutex_unlock(&vm->mutex);
 	r = amdgpu_cs_handle_lockup(adev, r);
 	return r;
 }
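
The amdgpu_cs changes move the user-fence BO to proper buffer-object reference counting: every stored pointer is taken with amdgpu_bo_ref() and dropped in the common fini path with amdgpu_bo_unref(), which also NULLs the caller's pointer so a repeated unref is harmless. The pairing at a glance (lines drawn from the hunks above):

	/* acquire: one ref per stored pointer */
	p->uf.bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
	p->uf_entry.robj = amdgpu_bo_ref(p->uf.bo);

	/* release: same count, on every teardown path */
	amdgpu_bo_unref(&p->uf.bo);
	amdgpu_bo_unref(&p->uf_entry.robj);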
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index e173a5a02f0d..5580d3420c3a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -73,6 +73,8 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
 	struct drm_crtc *crtc = &amdgpuCrtc->base;
 	unsigned long flags;
 	unsigned i;
+	int vpos, hpos, stat, min_udelay;
+	struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
 
 	amdgpu_flip_wait_fence(adev, &work->excl);
 	for (i = 0; i < work->shared_count; ++i)
@@ -81,6 +83,41 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
 	/* We borrow the event spin lock for protecting flip_status */
 	spin_lock_irqsave(&crtc->dev->event_lock, flags);
 
+	/* If this happens to execute within the "virtually extended" vblank
+	 * interval before the start of the real vblank interval then it needs
+	 * to delay programming the mmio flip until the real vblank is entered.
+	 * This prevents completing a flip too early due to the way we fudge
+	 * our vblank counter and vblank timestamps in order to work around the
+	 * problem that the hw fires vblank interrupts before actual start of
+	 * vblank (when line buffer refilling is done for a frame). It
+	 * complements the fudging logic in amdgpu_get_crtc_scanoutpos() for
+	 * timestamping and amdgpu_get_vblank_counter_kms() for vblank counts.
+	 *
+	 * In practice this won't execute very often unless on very fast
+	 * machines because the time window for this to happen is very small.
+	 */
+	for (;;) {
+		/* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
+		 * start in hpos, and to the "fudged earlier" vblank start in
+		 * vpos.
+		 */
+		stat = amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id,
+						  GET_DISTANCE_TO_VBLANKSTART,
+						  &vpos, &hpos, NULL, NULL,
+						  &crtc->hwmode);
+
+		if ((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
+		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE) ||
+		    !(vpos >= 0 && hpos <= 0))
+			break;
+
+		/* Sleep at least until estimated real start of hw vblank */
+		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+		min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
+		usleep_range(min_udelay, 2 * min_udelay);
+		spin_lock_irqsave(&crtc->dev->event_lock, flags);
+	};
+
 	/* do the flip (mmio) */
 	adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base);
 	/* set the flip status */
@@ -109,7 +146,7 @@ static void amdgpu_unpin_work_func(struct work_struct *__work)
 	} else
 		DRM_ERROR("failed to reserve buffer after flip\n");
 
-	drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
+	amdgpu_bo_unref(&work->old_rbo);
 	kfree(work->shared);
 	kfree(work);
 }
@@ -148,8 +185,8 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
 	obj = old_amdgpu_fb->obj;
 
 	/* take a reference to the old object */
-	drm_gem_object_reference(obj);
 	work->old_rbo = gem_to_amdgpu_bo(obj);
+	amdgpu_bo_ref(work->old_rbo);
 
 	new_amdgpu_fb = to_amdgpu_framebuffer(fb);
 	obj = new_amdgpu_fb->obj;
@@ -222,7 +259,7 @@ pflip_cleanup:
 	amdgpu_bo_unreserve(new_rbo);
 
 cleanup:
-	drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
+	amdgpu_bo_unref(&work->old_rbo);
 	fence_put(work->excl);
 	for (i = 0; i < work->shared_count; ++i)
 		fence_put(work->shared[i]);
@@ -712,6 +749,15 @@ bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
  * \param dev Device to query.
  * \param pipe Crtc to query.
  * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
+ *              For driver internal use only also supports these flags:
+ *
+ *              USE_REAL_VBLANKSTART to use the real start of vblank instead
+ *              of a fudged earlier start of vblank.
+ *
+ *              GET_DISTANCE_TO_VBLANKSTART to return distance to the
+ *              fudged earlier start of vblank in *vpos and the distance
+ *              to true start of vblank in *hpos.
+ *
  * \param *vpos Location where vertical scanout position should be stored.
  * \param *hpos Location where horizontal scanout position should go.
  * \param *stime Target location for timestamp taken immediately before
@@ -776,10 +822,40 @@ int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
 		vbl_end = 0;
 	}
 
+	/* Called from driver internal vblank counter query code? */
+	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
+		/* Caller wants distance from real vbl_start in *hpos */
+		*hpos = *vpos - vbl_start;
+	}
+
+	/* Fudge vblank to start a few scanlines earlier to handle the
+	 * problem that vblank irqs fire a few scanlines before start
+	 * of vblank. Some driver internal callers need the true vblank
+	 * start to be used and signal this via the USE_REAL_VBLANKSTART flag.
+	 *
+	 * The cause of the "early" vblank irq is that the irq is triggered
+	 * by the line buffer logic when the line buffer read position enters
+	 * the vblank, whereas our crtc scanout position naturally lags the
+	 * line buffer read position.
+	 */
+	if (!(flags & USE_REAL_VBLANKSTART))
+		vbl_start -= adev->mode_info.crtcs[pipe]->lb_vblank_lead_lines;
+
 	/* Test scanout position against vblank region. */
 	if ((*vpos < vbl_start) && (*vpos >= vbl_end))
 		in_vbl = false;
 
+	/* In vblank? */
+	if (in_vbl)
+		ret |= DRM_SCANOUTPOS_IN_VBLANK;
+
+	/* Called from driver internal vblank counter query code? */
+	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
+		/* Caller wants distance from fudged earlier vbl_start */
+		*vpos -= vbl_start;
+		return ret;
+	}
+
 	/* Check if inside vblank area and apply corrective offsets:
 	 * vpos will then be >=0 in video scanout area, but negative
 	 * within vblank area, counting down the number of lines until
@@ -795,32 +871,6 @@ int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
 	/* Correct for shifted end of vbl at vbl_end. */
 	*vpos = *vpos - vbl_end;
 
-	/* In vblank? */
-	if (in_vbl)
-		ret |= DRM_SCANOUTPOS_IN_VBLANK;
-
-	/* Is vpos outside nominal vblank area, but less than
-	 * 1/100 of a frame height away from start of vblank?
-	 * If so, assume this isn't a massively delayed vblank
-	 * interrupt, but a vblank interrupt that fired a few
-	 * microseconds before true start of vblank. Compensate
-	 * by adding a full frame duration to the final timestamp.
-	 * Happens, e.g., on ATI R500, R600.
-	 *
-	 * We only do this if DRM_CALLED_FROM_VBLIRQ.
-	 */
-	if ((flags & DRM_CALLED_FROM_VBLIRQ) && !in_vbl) {
-		vbl_start = mode->crtc_vdisplay;
-		vtotal = mode->crtc_vtotal;
-
-		if (vbl_start - *vpos < vtotal / 100) {
-			*vpos -= vtotal;
-
-			/* Signal this correction as "applied". */
-			ret |= 0x8;
-		}
-	}
-
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 00c5b580f56c..9c253c535d26 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -115,12 +115,9 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri
 	struct amdgpu_vm *vm = &fpriv->vm;
 	struct amdgpu_bo_va *bo_va;
 	int r;
-	mutex_lock(&vm->mutex);
 	r = amdgpu_bo_reserve(rbo, false);
-	if (r) {
-		mutex_unlock(&vm->mutex);
+	if (r)
 		return r;
-	}
 
 	bo_va = amdgpu_vm_bo_find(vm, rbo);
 	if (!bo_va) {
@@ -129,7 +126,6 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri
 		++bo_va->ref_count;
 	}
 	amdgpu_bo_unreserve(rbo);
-	mutex_unlock(&vm->mutex);
 	return 0;
 }
 
@@ -142,10 +138,8 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
 	struct amdgpu_vm *vm = &fpriv->vm;
 	struct amdgpu_bo_va *bo_va;
 	int r;
-	mutex_lock(&vm->mutex);
 	r = amdgpu_bo_reserve(rbo, true);
 	if (r) {
-		mutex_unlock(&vm->mutex);
 		dev_err(adev->dev, "leaking bo va because "
 			"we fail to reserve bo (%d)\n", r);
 		return;
@@ -157,7 +151,6 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
 		}
 	}
 	amdgpu_bo_unreserve(rbo);
-	mutex_unlock(&vm->mutex);
 }
 
 static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
@@ -242,8 +235,9 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 			AMDGPU_GEM_USERPTR_REGISTER))
 		return -EINVAL;
 
-	if (!(args->flags & AMDGPU_GEM_USERPTR_ANONONLY) ||
-	    !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {
+	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) && (
+	    !(args->flags & AMDGPU_GEM_USERPTR_ANONONLY) ||
+	    !(args->flags & AMDGPU_GEM_USERPTR_REGISTER))) {
 
 		/* if we want to write to it we must require anonymous
 		   memory and install a MMU notifier */
@@ -483,6 +477,14 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 		if (domain == AMDGPU_GEM_DOMAIN_CPU)
 			goto error_unreserve;
 	}
+	list_for_each_entry(entry, &duplicates, head) {
+		domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
+		/* if anything is swapped out don't swap it in here,
+		   just abort and wait for the next CS */
+		if (domain == AMDGPU_GEM_DOMAIN_CPU)
+			goto error_unreserve;
+	}
+
 	r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
 	if (r)
 		goto error_unreserve;
@@ -553,7 +555,6 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	gobj = drm_gem_object_lookup(dev, filp, args->handle);
 	if (gobj == NULL)
 		return -ENOENT;
-	mutex_lock(&fpriv->vm.mutex);
 	rbo = gem_to_amdgpu_bo(gobj);
 	INIT_LIST_HEAD(&list);
 	INIT_LIST_HEAD(&duplicates);
@@ -568,7 +569,6 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	}
 	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
 	if (r) {
-		mutex_unlock(&fpriv->vm.mutex);
 		drm_gem_object_unreference_unlocked(gobj);
 		return r;
 	}
@@ -577,7 +577,6 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	if (!bo_va) {
 		ttm_eu_backoff_reservation(&ticket, &list);
 		drm_gem_object_unreference_unlocked(gobj);
-		mutex_unlock(&fpriv->vm.mutex);
 		return -ENOENT;
 	}
 
@@ -602,7 +601,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	ttm_eu_backoff_reservation(&ticket, &list);
 	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
 		amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
-	mutex_unlock(&fpriv->vm.mutex);
+
 	drm_gem_object_unreference_unlocked(gobj);
 	return r;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 1618e2294a16..e23843f4d877 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -611,13 +611,59 @@ void amdgpu_driver_preclose_kms(struct drm_device *dev,
 u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
 {
 	struct amdgpu_device *adev = dev->dev_private;
+	int vpos, hpos, stat;
+	u32 count;
 
 	if (pipe >= adev->mode_info.num_crtc) {
 		DRM_ERROR("Invalid crtc %u\n", pipe);
 		return -EINVAL;
 	}
 
-	return amdgpu_display_vblank_get_counter(adev, pipe);
+	/* The hw increments its frame counter at start of vsync, not at start
+	 * of vblank, as is required by DRM core vblank counter handling.
+	 * Cook the hw count here to make it appear to the caller as if it
+	 * incremented at start of vblank. We measure distance to start of
+	 * vblank in vpos. vpos therefore will be >= 0 between start of vblank
+	 * and start of vsync, so vpos >= 0 means to bump the hw frame counter
+	 * result by 1 to give the proper appearance to caller.
+	 */
+	if (adev->mode_info.crtcs[pipe]) {
+		/* Repeat readout if needed to provide stable result if
+		 * we cross start of vsync during the queries.
+		 */
+		do {
+			count = amdgpu_display_vblank_get_counter(adev, pipe);
+			/* Ask amdgpu_get_crtc_scanoutpos to return vpos as
+			 * distance to start of vblank, instead of regular
+			 * vertical scanout pos.
+			 */
+			stat = amdgpu_get_crtc_scanoutpos(
+				dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
+				&vpos, &hpos, NULL, NULL,
+				&adev->mode_info.crtcs[pipe]->base.hwmode);
+		} while (count != amdgpu_display_vblank_get_counter(adev, pipe));
+
+		if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
+		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
+			DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
+		} else {
+			DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n",
+				      pipe, vpos);
+
+			/* Bump counter if we are at >= leading edge of vblank,
+			 * but before vsync where vpos would turn negative and
+			 * the hw counter really increments.
+			 */
+			if (vpos >= 0)
+				count++;
+		}
+	} else {
+		/* Fallback to use value as is. */
+		count = amdgpu_display_vblank_get_counter(adev, pipe);
+		DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
+	}
+
+	return count;
 }
 
 /**
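
The cooked counter is only correct if the hardware count and the scanout position describe the same frame, yet the two reads can straddle a vsync. Re-reading the counter and retrying until it matches is a lock-free, seqlock-style stabilization; the skeleton, extracted from the hunk above:

	do {
		count = amdgpu_display_vblank_get_counter(adev, pipe);
		stat = amdgpu_get_crtc_scanoutpos(dev, pipe,
				GET_DISTANCE_TO_VBLANKSTART,
				&vpos, &hpos, NULL, NULL,
				&adev->mode_info.crtcs[pipe]->base.hwmode);
	} while (count != amdgpu_display_vblank_get_counter(adev, pipe));

	if (vpos >= 0)	/* past vblank start, before vsync: hw count lags by one */
		count++;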
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index b62c1710cab6..064ebb347074 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -407,6 +407,7 @@ struct amdgpu_crtc {
 	u32 line_time;
 	u32 wm_low;
 	u32 wm_high;
+	u32 lb_vblank_lead_lines;
 	struct drm_display_mode hw_mode;
 };
 
@@ -528,6 +529,10 @@ struct amdgpu_framebuffer {
 #define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \
 				((em) == ATOM_ENCODER_MODE_DP_MST))
 
+/* Driver internal use only flags of amdgpu_get_crtc_scanoutpos() */
+#define USE_REAL_VBLANKSTART		(1 << 30)
+#define GET_DISTANCE_TO_VBLANKSTART	(1 << 31)
+
 void amdgpu_link_encoder_connector(struct drm_device *dev);
 
 struct drm_connector *
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 0d524384ff79..c3ce103b6a33 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -100,6 +100,7 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 	list_del_init(&bo->list);
 	mutex_unlock(&bo->adev->gem.mutex);
 	drm_gem_object_release(&bo->gem_base);
+	amdgpu_bo_unref(&bo->parent);
 	kfree(bo->metadata);
 	kfree(bo);
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index d4bac5f49939..8a1752ff3d8e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -587,9 +587,13 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
 	uint32_t flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
 	int r;
 
-	if (gtt->userptr)
-		amdgpu_ttm_tt_pin_userptr(ttm);
-
+	if (gtt->userptr) {
+		r = amdgpu_ttm_tt_pin_userptr(ttm);
+		if (r) {
+			DRM_ERROR("failed to pin userptr\n");
+			return r;
+		}
+	}
 	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
 	if (!ttm->num_pages) {
 		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
@@ -797,11 +801,12 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
 	if (mem && mem->mem_type != TTM_PL_SYSTEM)
 		flags |= AMDGPU_PTE_VALID;
 
-	if (mem && mem->mem_type == TTM_PL_TT)
+	if (mem && mem->mem_type == TTM_PL_TT) {
 		flags |= AMDGPU_PTE_SYSTEM;
 
-	if (!ttm || ttm->caching_state == tt_cached)
-		flags |= AMDGPU_PTE_SNOOPED;
+		if (ttm->caching_state == tt_cached)
+			flags |= AMDGPU_PTE_SNOOPED;
+	}
 
 	if (adev->asic_type >= CHIP_TOPAZ)
 		flags |= AMDGPU_PTE_EXECUTABLE;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 03f0c3bae516..a745eeeb5d82 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -392,7 +392,10 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
 	ib->ptr[ib->length_dw++] = handle;
 
-	ib->ptr[ib->length_dw++] = 0x00000030; /* len */
+	if ((ring->adev->vce.fw_version >> 24) >= 52)
+		ib->ptr[ib->length_dw++] = 0x00000040; /* len */
+	else
+		ib->ptr[ib->length_dw++] = 0x00000030; /* len */
 	ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */
 	ib->ptr[ib->length_dw++] = 0x00000000;
 	ib->ptr[ib->length_dw++] = 0x00000042;
@@ -404,6 +407,12 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 	ib->ptr[ib->length_dw++] = 0x00000100;
 	ib->ptr[ib->length_dw++] = 0x0000000c;
 	ib->ptr[ib->length_dw++] = 0x00000000;
+	if ((ring->adev->vce.fw_version >> 24) >= 52) {
+		ib->ptr[ib->length_dw++] = 0x00000000;
+		ib->ptr[ib->length_dw++] = 0x00000000;
+		ib->ptr[ib->length_dw++] = 0x00000000;
+		ib->ptr[ib->length_dw++] = 0x00000000;
+	}
 
 	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
 	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 159ce54bbd8d..b53d273eb7a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -885,17 +885,21 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
 	struct amdgpu_bo_va_mapping *mapping;
 	int r;
 
+	spin_lock(&vm->freed_lock);
 	while (!list_empty(&vm->freed)) {
 		mapping = list_first_entry(&vm->freed,
 			struct amdgpu_bo_va_mapping, list);
 		list_del(&mapping->list);
-
+		spin_unlock(&vm->freed_lock);
 		r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, 0, 0, NULL);
 		kfree(mapping);
 		if (r)
 			return r;
 
+		spin_lock(&vm->freed_lock);
 	}
+	spin_unlock(&vm->freed_lock);
+
 	return 0;
 
 }
@@ -922,8 +926,9 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
 		bo_va = list_first_entry(&vm->invalidated,
 			struct amdgpu_bo_va, vm_status);
 		spin_unlock(&vm->status_lock);
-
+		mutex_lock(&bo_va->mutex);
 		r = amdgpu_vm_bo_update(adev, bo_va, NULL);
+		mutex_unlock(&bo_va->mutex);
 		if (r)
 			return r;
 
@@ -967,7 +972,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
 	INIT_LIST_HEAD(&bo_va->valids);
 	INIT_LIST_HEAD(&bo_va->invalids);
 	INIT_LIST_HEAD(&bo_va->vm_status);
-
+	mutex_init(&bo_va->mutex);
 	list_add_tail(&bo_va->bo_list, &bo->va);
 
 	return bo_va;
@@ -1045,7 +1050,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 	mapping->offset = offset;
 	mapping->flags = flags;
 
+	mutex_lock(&bo_va->mutex);
 	list_add(&mapping->list, &bo_va->invalids);
+	mutex_unlock(&bo_va->mutex);
 	spin_lock(&vm->it_lock);
 	interval_tree_insert(&mapping->it, &vm->va);
 	spin_unlock(&vm->it_lock);
@@ -1076,6 +1083,11 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 	if (r)
 		goto error_free;
 
+	/* Keep a reference to the page table to avoid freeing
+	 * them up in the wrong order.
+	 */
+	pt->parent = amdgpu_bo_ref(vm->page_directory);
+
 	r = amdgpu_vm_clear_bo(adev, pt);
 	if (r) {
 		amdgpu_bo_unref(&pt);
@@ -1121,7 +1133,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
 	bool valid = true;
 
 	saddr /= AMDGPU_GPU_PAGE_SIZE;
-
+	mutex_lock(&bo_va->mutex);
 	list_for_each_entry(mapping, &bo_va->valids, list) {
 		if (mapping->it.start == saddr)
 			break;
@@ -1135,20 +1147,25 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
 				break;
 		}
 
-		if (&mapping->list == &bo_va->invalids)
+		if (&mapping->list == &bo_va->invalids) {
+			mutex_unlock(&bo_va->mutex);
 			return -ENOENT;
+		}
 	}
-
+	mutex_unlock(&bo_va->mutex);
 	list_del(&mapping->list);
 	spin_lock(&vm->it_lock);
 	interval_tree_remove(&mapping->it, &vm->va);
 	spin_unlock(&vm->it_lock);
 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
 
-	if (valid)
+	if (valid) {
+		spin_lock(&vm->freed_lock);
 		list_add(&mapping->list, &vm->freed);
-	else
+		spin_unlock(&vm->freed_lock);
+	} else {
 		kfree(mapping);
+	}
 
 	return 0;
 }
@@ -1181,7 +1198,9 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 		interval_tree_remove(&mapping->it, &vm->va);
 		spin_unlock(&vm->it_lock);
 		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
+		spin_lock(&vm->freed_lock);
 		list_add(&mapping->list, &vm->freed);
+		spin_unlock(&vm->freed_lock);
 	}
 	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
 		list_del(&mapping->list);
@@ -1190,8 +1209,8 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 		spin_unlock(&vm->it_lock);
 		kfree(mapping);
 	}
-
 	fence_put(bo_va->last_pt_update);
+	mutex_destroy(&bo_va->mutex);
 	kfree(bo_va);
 }
 
@@ -1236,13 +1255,13 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 		vm->ids[i].id = 0;
 		vm->ids[i].flushed_updates = NULL;
 	}
-	mutex_init(&vm->mutex);
 	vm->va = RB_ROOT;
 	spin_lock_init(&vm->status_lock);
 	INIT_LIST_HEAD(&vm->invalidated);
 	INIT_LIST_HEAD(&vm->cleared);
 	INIT_LIST_HEAD(&vm->freed);
 	spin_lock_init(&vm->it_lock);
+	spin_lock_init(&vm->freed_lock);
 	pd_size = amdgpu_vm_directory_size(adev);
 	pd_entries = amdgpu_vm_num_pdes(adev);
 
@@ -1320,7 +1339,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 		fence_put(vm->ids[i].flushed_updates);
 	}
 
-	mutex_destroy(&vm->mutex);
 }
 
 /**
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index cb0f7747e3dc..4dcc8fba5792 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -1250,7 +1250,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
 	u32 pixel_period;
 	u32 line_time = 0;
 	u32 latency_watermark_a = 0, latency_watermark_b = 0;
-	u32 tmp, wm_mask;
+	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
 
 	if (amdgpu_crtc->base.enabled && num_heads && mode) {
 		pixel_period = 1000000 / (u32)mode->clock;
@@ -1333,6 +1333,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
 		    (adev->mode_info.disp_priority == 2)) {
 			DRM_DEBUG_KMS("force priority to high\n");
 		}
+		lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
 	}
 
 	/* select wm A */
@@ -1357,6 +1358,8 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
 	amdgpu_crtc->line_time = line_time;
 	amdgpu_crtc->wm_high = latency_watermark_a;
 	amdgpu_crtc->wm_low = latency_watermark_b;
+	/* Save number of lines the linebuffer leads before the scanout */
+	amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
 }
 
 /**
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 5af3721851d6..8f1e51128b33 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -1238,7 +1238,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
 	u32 pixel_period;
 	u32 line_time = 0;
 	u32 latency_watermark_a = 0, latency_watermark_b = 0;
-	u32 tmp, wm_mask;
+	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
 
 	if (amdgpu_crtc->base.enabled && num_heads && mode) {
 		pixel_period = 1000000 / (u32)mode->clock;
@@ -1321,6 +1321,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
 		    (adev->mode_info.disp_priority == 2)) {
 			DRM_DEBUG_KMS("force priority to high\n");
 		}
+		lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
 	}
 
 	/* select wm A */
@@ -1345,6 +1346,8 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
 	amdgpu_crtc->line_time = line_time;
 	amdgpu_crtc->wm_high = latency_watermark_a;
 	amdgpu_crtc->wm_low = latency_watermark_b;
+	/* Save number of lines the linebuffer leads before the scanout */
+	amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
 }
 
 /**
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 4f7b49a6dc50..42d954dc436d 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -1193,7 +1193,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
 	u32 pixel_period;
 	u32 line_time = 0;
 	u32 latency_watermark_a = 0, latency_watermark_b = 0;
-	u32 tmp, wm_mask;
+	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
 
 	if (amdgpu_crtc->base.enabled && num_heads && mode) {
 		pixel_period = 1000000 / (u32)mode->clock;
@@ -1276,6 +1276,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
 		    (adev->mode_info.disp_priority == 2)) {
 			DRM_DEBUG_KMS("force priority to high\n");
 		}
+		lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
 	}
 
 	/* select wm A */
@@ -1302,6 +1303,8 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
 	amdgpu_crtc->line_time = line_time;
 	amdgpu_crtc->wm_high = latency_watermark_a;
 	amdgpu_crtc->wm_low = latency_watermark_b;
+	/* Save number of lines the linebuffer leads before the scanout */
+	amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
 }
 
 /**
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 7427d8cd4c43..ed8abb58a785 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -513,7 +513,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
 	WREG32(mmVM_L2_CNTL3, tmp);
 	/* setup context0 */
 	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
-	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, (adev->mc.gtt_end >> 12) - 1);
+	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
 	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
 	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
 			(u32)(adev->dummy_page.addr >> 12));
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index cb0e50ebb528..d39028440814 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -657,7 +657,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
 	WREG32(mmVM_L2_CNTL4, tmp);
 	/* setup context0 */
 	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
-	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, (adev->mc.gtt_end >> 12) - 1);
+	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
 	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
 	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
 			(u32)(adev->dummy_page.addr >> 12));
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 6a52db6ad8d7..370c6c9d81c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -40,6 +40,9 @@
 
 #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT	0x04
 #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK	0x10
+#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0	0x8616
+#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1	0x8617
+#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2	0x8618
 
 #define VCE_V3_0_FW_SIZE	(384 * 1024)
 #define VCE_V3_0_STACK_SIZE	(64 * 1024)
@@ -130,9 +133,11 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
 
 	/* set BUSY flag */
 	WREG32_P(mmVCE_STATUS, 1, ~1);
-
-	WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK,
-		~VCE_VCPU_CNTL__CLK_EN_MASK);
+	if (adev->asic_type >= CHIP_STONEY)
+		WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001);
+	else
+		WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK,
+			~VCE_VCPU_CNTL__CLK_EN_MASK);
 
 	WREG32_P(mmVCE_SOFT_RESET,
 		 VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
@@ -391,8 +396,12 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
 	WREG32(mmVCE_LMI_SWAP_CNTL, 0);
 	WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
 	WREG32(mmVCE_LMI_VM_CTRL, 0);
-
-	WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8));
+	if (adev->asic_type >= CHIP_STONEY) {
+		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR0, (adev->vce.gpu_addr >> 8));
+		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR1, (adev->vce.gpu_addr >> 8));
+		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR2, (adev->vce.gpu_addr >> 8));
+	} else
+		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8));
 	offset = AMDGPU_VCE_FIRMWARE_OFFSET;
 	size = VCE_V3_0_FW_SIZE;
 	WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff);
@@ -576,6 +585,11 @@ static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
 				      struct amdgpu_iv_entry *entry)
 {
 	DRM_DEBUG("IH: VCE\n");
+
+	WREG32_P(mmVCE_SYS_INT_STATUS,
+		VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK,
+		~VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK);
+
 	switch (entry->src_data) {
 	case 0:
 		amdgpu_fence_process(&adev->vce.ring[0]);
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index ea30d6ad4c13..3a4820e863ec 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -30,8 +30,7 @@
 #define CREATE_TRACE_POINTS
 #include "gpu_sched_trace.h"
 
-static struct amd_sched_job *
-amd_sched_entity_pop_job(struct amd_sched_entity *entity);
+static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
 static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
 
 struct kmem_cache *sched_fence_slab;
@@ -64,36 +63,36 @@ static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
 }
 
 /**
- * Select next job from a specified run queue with round robin policy.
- * Return NULL if nothing available.
+ * Select an entity which could provide a job to run
+ *
+ * @rq The run queue to check.
+ *
+ * Try to find a ready entity, returns NULL if none found.
  */
-static struct amd_sched_job *
-amd_sched_rq_select_job(struct amd_sched_rq *rq)
+static struct amd_sched_entity *
+amd_sched_rq_select_entity(struct amd_sched_rq *rq)
 {
 	struct amd_sched_entity *entity;
-	struct amd_sched_job *sched_job;
 
 	spin_lock(&rq->lock);
 
 	entity = rq->current_entity;
 	if (entity) {
 		list_for_each_entry_continue(entity, &rq->entities, list) {
-			sched_job = amd_sched_entity_pop_job(entity);
-			if (sched_job) {
+			if (amd_sched_entity_is_ready(entity)) {
 				rq->current_entity = entity;
 				spin_unlock(&rq->lock);
-				return sched_job;
+				return entity;
 			}
 		}
 	}
 
 	list_for_each_entry(entity, &rq->entities, list) {
 
-		sched_job = amd_sched_entity_pop_job(entity);
-		if (sched_job) {
+		if (amd_sched_entity_is_ready(entity)) {
 			rq->current_entity = entity;
 			spin_unlock(&rq->lock);
-			return sched_job;
+			return entity;
 		}
 
 		if (entity == rq->current_entity)
@@ -177,6 +176,24 @@ static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
 }
 
 /**
+ * Check if entity is ready
+ *
+ * @entity	The pointer to a valid scheduler entity
+ *
+ * Return true if entity could provide a job.
+ */
+static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
+{
+	if (kfifo_is_empty(&entity->job_queue))
+		return false;
+
+	if (ACCESS_ONCE(entity->dependency))
+		return false;
+
+	return true;
+}
+
+/**
  * Destroy a context entity
  *
  * @sched       Pointer to scheduler instance
@@ -211,32 +228,53 @@ static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
 	amd_sched_wakeup(entity->sched);
 }
 
+static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
+{
+	struct amd_gpu_scheduler *sched = entity->sched;
+	struct fence * fence = entity->dependency;
+	struct amd_sched_fence *s_fence;
+
+	if (fence->context == entity->fence_context) {
+		/* We can ignore fences from ourself */
+		fence_put(entity->dependency);
+		return false;
+	}
+
+	s_fence = to_amd_sched_fence(fence);
+	if (s_fence && s_fence->sched == sched) {
+		/* Fence is from the same scheduler */
+		if (test_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &fence->flags)) {
+			/* Ignore it when it is already scheduled */
+			fence_put(entity->dependency);
+			return false;
+		}
+
+		/* Wait for fence to be scheduled */
+		entity->cb.func = amd_sched_entity_wakeup;
+		list_add_tail(&entity->cb.node, &s_fence->scheduled_cb);
+		return true;
+	}
+
+	if (!fence_add_callback(entity->dependency, &entity->cb,
+				amd_sched_entity_wakeup))
+		return true;
+
+	fence_put(entity->dependency);
+	return false;
+}
+
 static struct amd_sched_job *
 amd_sched_entity_pop_job(struct amd_sched_entity *entity)
 {
 	struct amd_gpu_scheduler *sched = entity->sched;
 	struct amd_sched_job *sched_job;
 
-	if (ACCESS_ONCE(entity->dependency))
-		return NULL;
-
 	if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
 		return NULL;
 
-	while ((entity->dependency = sched->ops->dependency(sched_job))) {
-
-		if (entity->dependency->context == entity->fence_context) {
-			/* We can ignore fences from ourself */
-			fence_put(entity->dependency);
-			continue;
-		}
-
-		if (fence_add_callback(entity->dependency, &entity->cb,
-				       amd_sched_entity_wakeup))
-			fence_put(entity->dependency);
-		else
+	while ((entity->dependency = sched->ops->dependency(sched_job)))
+		if (amd_sched_entity_add_dependency_cb(entity))
 			return NULL;
-	}
 
 	return sched_job;
 }
@@ -250,6 +288,7 @@ amd_sched_entity_pop_job(struct amd_sched_entity *entity)
  */
 static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
 {
+	struct amd_gpu_scheduler *sched = sched_job->sched;
 	struct amd_sched_entity *entity = sched_job->s_entity;
 	bool added, first = false;
 
@@ -264,7 +303,7 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
 
 	/* first job wakes up scheduler */
 	if (first)
-		amd_sched_wakeup(sched_job->sched);
+		amd_sched_wakeup(sched);
 
 	return added;
 }
@@ -280,9 +319,9 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
 {
 	struct amd_sched_entity *entity = sched_job->s_entity;
 
+	trace_amd_sched_job(sched_job);
 	wait_event(entity->sched->job_scheduled,
 		   amd_sched_entity_in(sched_job));
-	trace_amd_sched_job(sched_job);
 }
 
 /**
288/** 327/**
@@ -304,22 +343,22 @@ static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
 }
 
 /**
- * Select next to run
+ * Select next entity to process
 */
-static struct amd_sched_job *
-amd_sched_select_job(struct amd_gpu_scheduler *sched)
+static struct amd_sched_entity *
+amd_sched_select_entity(struct amd_gpu_scheduler *sched)
 {
-	struct amd_sched_job *sched_job;
+	struct amd_sched_entity *entity;
 
 	if (!amd_sched_ready(sched))
 		return NULL;
 
 	/* Kernel run queue has higher priority than normal run queue*/
-	sched_job = amd_sched_rq_select_job(&sched->kernel_rq);
-	if (sched_job == NULL)
-		sched_job = amd_sched_rq_select_job(&sched->sched_rq);
+	entity = amd_sched_rq_select_entity(&sched->kernel_rq);
+	if (entity == NULL)
+		entity = amd_sched_rq_select_entity(&sched->sched_rq);
 
-	return sched_job;
+	return entity;
 }
 
 static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
@@ -381,13 +420,16 @@ static int amd_sched_main(void *param)
 		unsigned long flags;
 
 		wait_event_interruptible(sched->wake_up_worker,
-			kthread_should_stop() ||
-			(sched_job = amd_sched_select_job(sched)));
+			(entity = amd_sched_select_entity(sched)) ||
+			kthread_should_stop());
 
+		if (!entity)
+			continue;
+
+		sched_job = amd_sched_entity_pop_job(entity);
 		if (!sched_job)
 			continue;
 
-		entity = sched_job->s_entity;
 		s_fence = sched_job->s_fence;
 
 		if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
@@ -400,6 +442,7 @@ static int amd_sched_main(void *param)
 
 		atomic_inc(&sched->hw_rq_count);
 		fence = sched->ops->run_job(sched_job);
+		amd_sched_fence_scheduled(s_fence);
 		if (fence) {
 			r = fence_add_callback(fence, &s_fence->cb,
 					       amd_sched_process_job);
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index 939692b14f4b..a0f0ae53aacd 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -27,6 +27,8 @@
 #include <linux/kfifo.h>
 #include <linux/fence.h>
 
+#define AMD_SCHED_FENCE_SCHEDULED_BIT	FENCE_FLAG_USER_BITS
+
 struct amd_gpu_scheduler;
 struct amd_sched_rq;
 
@@ -68,6 +70,7 @@ struct amd_sched_rq {
 struct amd_sched_fence {
 	struct fence base;
 	struct fence_cb cb;
+	struct list_head scheduled_cb;
 	struct amd_gpu_scheduler *sched;
 	spinlock_t lock;
 	void *owner;
@@ -134,7 +137,7 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job);
 
 struct amd_sched_fence *amd_sched_fence_create(
 	struct amd_sched_entity *s_entity, void *owner);
+void amd_sched_fence_scheduled(struct amd_sched_fence *fence);
 void amd_sched_fence_signal(struct amd_sched_fence *fence);
 
-
 #endif
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
index 8d2130b9ff05..87c78eecea64 100644
--- a/drivers/gpu/drm/amd/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
@@ -35,6 +35,8 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity
 	fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
 	if (fence == NULL)
 		return NULL;
+
+	INIT_LIST_HEAD(&fence->scheduled_cb);
 	fence->owner = owner;
 	fence->sched = s_entity->sched;
 	spin_lock_init(&fence->lock);
@@ -55,6 +57,17 @@ void amd_sched_fence_signal(struct amd_sched_fence *fence)
 		FENCE_TRACE(&fence->base, "was already signaled\n");
 }
 
+void amd_sched_fence_scheduled(struct amd_sched_fence *s_fence)
+{
+	struct fence_cb *cur, *tmp;
+
+	set_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &s_fence->base.flags);
+	list_for_each_entry_safe(cur, tmp, &s_fence->scheduled_cb, node) {
+		list_del_init(&cur->node);
+		cur->func(&s_fence->base, cur);
+	}
+}
+
 static const char *amd_sched_fence_get_driver_name(struct fence *fence)
 {
 	return "amd_sched";
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 9362609df38a..7dd6728dd092 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -160,6 +160,11 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
 		goto out_unlock;
 	}
 
+	if (!file_priv->allowed_master) {
+		ret = drm_new_set_master(dev, file_priv);
+		goto out_unlock;
+	}
+
 	file_priv->minor->master = drm_master_get(file_priv->master);
 	file_priv->is_master = 1;
 	if (dev->driver->master_set) {
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index c59ce4d0ef75..6b5625e66119 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -126,6 +126,60 @@ static int drm_cpu_valid(void)
 }
 
 /**
+ * drm_new_set_master - Allocate a new master object and become master for the
+ * associated master realm.
+ *
+ * @dev: The associated device.
+ * @fpriv: File private identifying the client.
+ *
+ * This function must be called with dev::struct_mutex held.
+ * Returns negative error code on failure. Zero on success.
+ */
+int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv)
+{
+	struct drm_master *old_master;
+	int ret;
+
+	lockdep_assert_held_once(&dev->master_mutex);
+
+	/* create a new master */
+	fpriv->minor->master = drm_master_create(fpriv->minor);
+	if (!fpriv->minor->master)
+		return -ENOMEM;
+
+	/* take another reference for the copy in the local file priv */
+	old_master = fpriv->master;
+	fpriv->master = drm_master_get(fpriv->minor->master);
+
+	if (dev->driver->master_create) {
+		ret = dev->driver->master_create(dev, fpriv->master);
+		if (ret)
+			goto out_err;
+	}
+	if (dev->driver->master_set) {
+		ret = dev->driver->master_set(dev, fpriv, true);
+		if (ret)
+			goto out_err;
+	}
+
+	fpriv->is_master = 1;
+	fpriv->allowed_master = 1;
+	fpriv->authenticated = 1;
+	if (old_master)
+		drm_master_put(&old_master);
+
+	return 0;
+
+out_err:
+	/* drop both references and restore old master on failure */
+	drm_master_put(&fpriv->minor->master);
+	drm_master_put(&fpriv->master);
+	fpriv->master = old_master;
+
+	return ret;
+}
+
+/**
  * Called whenever a process opens /dev/drm.
  *
  * \param filp file pointer.
@@ -189,35 +243,9 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
 	mutex_lock(&dev->master_mutex);
 	if (drm_is_primary_client(priv) && !priv->minor->master) {
 		/* create a new master */
-		priv->minor->master = drm_master_create(priv->minor);
-		if (!priv->minor->master) {
-			ret = -ENOMEM;
+		ret = drm_new_set_master(dev, priv);
+		if (ret)
 			goto out_close;
-		}
-
-		priv->is_master = 1;
-		/* take another reference for the copy in the local file priv */
-		priv->master = drm_master_get(priv->minor->master);
-		priv->authenticated = 1;
-
-		if (dev->driver->master_create) {
-			ret = dev->driver->master_create(dev, priv->master);
-			if (ret) {
-				/* drop both references if this fails */
-				drm_master_put(&priv->minor->master);
-				drm_master_put(&priv->master);
-				goto out_close;
-			}
-		}
-		if (dev->driver->master_set) {
-			ret = dev->driver->master_set(dev, priv, true);
-			if (ret) {
-				/* drop both references if this fails */
-				drm_master_put(&priv->minor->master);
-				drm_master_put(&priv->master);
-				goto out_close;
-			}
-		}
 	} else if (drm_is_primary_client(priv)) {
 		/* get a reference to the master */
 		priv->master = drm_master_get(priv->minor->master);
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 2151ea551d3b..607f493ae801 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -980,7 +980,8 @@ static void send_vblank_event(struct drm_device *dev,
980 struct drm_pending_vblank_event *e, 980 struct drm_pending_vblank_event *e,
981 unsigned long seq, struct timeval *now) 981 unsigned long seq, struct timeval *now)
982{ 982{
983 WARN_ON_SMP(!spin_is_locked(&dev->event_lock)); 983 assert_spin_locked(&dev->event_lock);
984
984 e->event.sequence = seq; 985 e->event.sequence = seq;
985 e->event.tv_sec = now->tv_sec; 986 e->event.tv_sec = now->tv_sec;
986 e->event.tv_usec = now->tv_usec; 987 e->event.tv_usec = now->tv_usec;
@@ -993,6 +994,57 @@ static void send_vblank_event(struct drm_device *dev,
 }
 
 /**
+ * drm_arm_vblank_event - arm vblank event after pageflip
+ * @dev: DRM device
+ * @pipe: CRTC index
+ * @e: the event to prepare to send
+ *
+ * A lot of drivers need to generate vblank events for the very next vblank
+ * interrupt. For example when the page flip interrupt happens when the page
+ * flip gets armed, but not when it actually executes within the next vblank
+ * period. This helper function implements exactly the required vblank arming
+ * behaviour.
+ *
+ * Caller must hold event lock. Caller must also hold a vblank reference for
+ * the event @e, which will be dropped when the next vblank arrives.
+ *
+ * This is the legacy version of drm_crtc_arm_vblank_event().
+ */
+void drm_arm_vblank_event(struct drm_device *dev, unsigned int pipe,
+			  struct drm_pending_vblank_event *e)
+{
+	assert_spin_locked(&dev->event_lock);
+
+	e->pipe = pipe;
+	e->event.sequence = drm_vblank_count(dev, pipe);
+	list_add_tail(&e->base.link, &dev->vblank_event_list);
+}
+EXPORT_SYMBOL(drm_arm_vblank_event);
+
+/**
+ * drm_crtc_arm_vblank_event - arm vblank event after pageflip
+ * @crtc: the source CRTC of the vblank event
+ * @e: the event to send
+ *
+ * A lot of drivers need to generate vblank events for the very next vblank
+ * interrupt. For example when the page flip interrupt happens when the page
+ * flip gets armed, but not when it actually executes within the next vblank
+ * period. This helper function implements exactly the required vblank arming
+ * behaviour.
+ *
+ * Caller must hold event lock. Caller must also hold a vblank reference for
+ * the event @e, which will be dropped when the next vblank arrives.
+ *
+ * This is the native KMS version of drm_arm_vblank_event().
+ */
+void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
+			       struct drm_pending_vblank_event *e)
+{
+	drm_arm_vblank_event(crtc->dev, drm_crtc_index(crtc), e);
+}
+EXPORT_SYMBOL(drm_crtc_arm_vblank_event);
+
+/**
  * drm_send_vblank_event - helper to send vblank event after pageflip
  * @dev: DRM device
  * @pipe: CRTC index
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index a18164f2f6d2..f8b5fcfa91a2 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -229,7 +229,8 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
 		mode_flags |= DRM_MODE_FLAG_3D_MASK;
 
 	list_for_each_entry(mode, &connector->modes, head) {
-		mode->status = drm_mode_validate_basic(mode);
+		if (mode->status == MODE_OK)
+			mode->status = drm_mode_validate_basic(mode);
 
 		if (mode->status == MODE_OK)
 			mode->status = drm_mode_validate_size(mode, maxX, maxY);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index b3ba27fd9a6b..e69357172ffb 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -55,6 +55,9 @@ static int exynos_crtc_atomic_check(struct drm_crtc *crtc,
 {
 	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
 
+	if (!state->enable)
+		return 0;
+
 	if (exynos_crtc->ops->atomic_check)
 		return exynos_crtc->ops->atomic_check(exynos_crtc, state);
 
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index a3b22bdacd44..8aab974b0564 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -2734,6 +2734,8 @@ static const char *power_domain_str(enum intel_display_power_domain domain)
 		return "AUX_C";
 	case POWER_DOMAIN_AUX_D:
 		return "AUX_D";
+	case POWER_DOMAIN_GMBUS:
+		return "GMBUS";
 	case POWER_DOMAIN_INIT:
 		return "INIT";
 	default:
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 95bb27de774f..f4af19a0d569 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -199,6 +199,7 @@ enum intel_display_power_domain {
 	POWER_DOMAIN_AUX_B,
 	POWER_DOMAIN_AUX_C,
 	POWER_DOMAIN_AUX_D,
+	POWER_DOMAIN_GMBUS,
 	POWER_DOMAIN_INIT,
 
 	POWER_DOMAIN_NUM,
@@ -2192,8 +2193,17 @@ struct drm_i915_gem_request {
 	struct drm_i915_private *i915;
 	struct intel_engine_cs *ring;
 
-	/** GEM sequence number associated with this request. */
-	uint32_t seqno;
+	/** GEM sequence number associated with the previous request,
+	 * when the HWS breadcrumb is equal to this the GPU is processing
+	 * this request.
+	 */
+	u32 previous_seqno;
+
+	/** GEM sequence number associated with this request,
+	 * when the HWS breadcrumb is equal or greater than this the GPU
+	 * has finished processing this request.
+	 */
+	u32 seqno;
 
 	/** Position in the ringbuffer of the start of the request */
 	u32 head;
@@ -2838,6 +2848,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 
 int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
 		  u32 flags);
+void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
 int __must_check i915_vma_unbind(struct i915_vma *vma);
 /*
  * BEWARE: Do not use the function below unless you can _absolutely_
@@ -2909,15 +2920,17 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
 	return (int32_t)(seq1 - seq2) >= 0;
 }
 
+static inline bool i915_gem_request_started(struct drm_i915_gem_request *req,
+					    bool lazy_coherency)
+{
+	u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency);
+	return i915_seqno_passed(seqno, req->previous_seqno);
+}
+
 static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
 					      bool lazy_coherency)
 {
-	u32 seqno;
-
-	BUG_ON(req == NULL);
-
-	seqno = req->ring->get_seqno(req->ring, lazy_coherency);
-
+	u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency);
 	return i915_seqno_passed(seqno, req->seqno);
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 91bb1fc27420..f56af0aaafde 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1146,23 +1146,74 @@ static bool missed_irq(struct drm_i915_private *dev_priv,
 	return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
 }
 
-static int __i915_spin_request(struct drm_i915_gem_request *req)
+static unsigned long local_clock_us(unsigned *cpu)
+{
+	unsigned long t;
+
+	/* Cheaply and approximately convert from nanoseconds to microseconds.
+	 * The result and subsequent calculations are also defined in the same
+	 * approximate microseconds units. The principal source of timing
+	 * error here is from the simple truncation.
+	 *
+	 * Note that local_clock() is only defined wrt to the current CPU;
+	 * the comparisons are no longer valid if we switch CPUs. Instead of
+	 * blocking preemption for the entire busywait, we can detect the CPU
+	 * switch and use that as indicator of system load and a reason to
+	 * stop busywaiting, see busywait_stop().
+	 */
+	*cpu = get_cpu();
+	t = local_clock() >> 10;
+	put_cpu();
+
+	return t;
+}
+
+static bool busywait_stop(unsigned long timeout, unsigned cpu)
+{
+	unsigned this_cpu;
+
+	if (time_after(local_clock_us(&this_cpu), timeout))
+		return true;
+
+	return this_cpu != cpu;
+}
+
+static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
 {
 	unsigned long timeout;
+	unsigned cpu;
+
+	/* When waiting for high frequency requests, e.g. during synchronous
+	 * rendering split between the CPU and GPU, the finite amount of time
+	 * required to set up the irq and wait upon it limits the response
+	 * rate. By busywaiting on the request completion for a short while we
+	 * can service the high frequency waits as quick as possible. However,
+	 * if it is a slow request, we want to sleep as quickly as possible.
+	 * The tradeoff between waiting and sleeping is roughly the time it
+	 * takes to sleep on a request, on the order of a microsecond.
+	 */
 
-	if (i915_gem_request_get_ring(req)->irq_refcount)
+	if (req->ring->irq_refcount)
 		return -EBUSY;
 
-	timeout = jiffies + 1;
+	/* Only spin if we know the GPU is processing this request */
+	if (!i915_gem_request_started(req, true))
+		return -EAGAIN;
+
+	timeout = local_clock_us(&cpu) + 5;
 	while (!need_resched()) {
 		if (i915_gem_request_completed(req, true))
 			return 0;
 
-		if (time_after_eq(jiffies, timeout))
+		if (signal_pending_state(state, current))
+			break;
+
+		if (busywait_stop(timeout, cpu))
 			break;
 
 		cpu_relax_lowlatency();
 	}
+
 	if (i915_gem_request_completed(req, false))
 		return 0;
 
@@ -1197,6 +1248,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	const bool irq_test_in_progress =
 		ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
+	int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
 	DEFINE_WAIT(wait);
 	unsigned long timeout_expire;
 	s64 before, now;
@@ -1210,8 +1262,16 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 	if (i915_gem_request_completed(req, true))
 		return 0;
 
-	timeout_expire = timeout ?
-		jiffies + nsecs_to_jiffies_timeout((u64)*timeout) : 0;
+	timeout_expire = 0;
+	if (timeout) {
+		if (WARN_ON(*timeout < 0))
+			return -EINVAL;
+
+		if (*timeout == 0)
+			return -ETIME;
+
+		timeout_expire = jiffies + nsecs_to_jiffies_timeout(*timeout);
+	}
 
 	if (INTEL_INFO(dev_priv)->gen >= 6)
 		gen6_rps_boost(dev_priv, rps, req->emitted_jiffies);
@@ -1221,7 +1281,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 	before = ktime_get_raw_ns();
 
 	/* Optimistic spin for the next jiffie before touching IRQs */
-	ret = __i915_spin_request(req);
+	ret = __i915_spin_request(req, state);
 	if (ret == 0)
 		goto out;
 
@@ -1233,8 +1293,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 	for (;;) {
 		struct timer_list timer;
 
-		prepare_to_wait(&ring->irq_queue, &wait,
-				interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
+		prepare_to_wait(&ring->irq_queue, &wait, state);
 
 		/* We need to check whether any gpu reset happened in between
 		 * the caller grabbing the seqno and now ... */
@@ -1252,7 +1311,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 			break;
 		}
 
-		if (interruptible && signal_pending(current)) {
+		if (signal_pending_state(state, current)) {
 			ret = -ERESTARTSYS;
 			break;
 		}
@@ -2546,6 +2605,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 	request->batch_obj = obj;
 
 	request->emitted_jiffies = jiffies;
+	request->previous_seqno = ring->last_submitted_seqno;
 	ring->last_submitted_seqno = request->seqno;
 	list_add_tail(&request->list, &ring->request_list);
 
@@ -4072,6 +4132,29 @@ i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
 	return false;
 }
 
+void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
+{
+	struct drm_i915_gem_object *obj = vma->obj;
+	bool mappable, fenceable;
+	u32 fence_size, fence_alignment;
+
+	fence_size = i915_gem_get_gtt_size(obj->base.dev,
+					   obj->base.size,
+					   obj->tiling_mode);
+	fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
+						     obj->base.size,
+						     obj->tiling_mode,
+						     true);
+
+	fenceable = (vma->node.size == fence_size &&
+		     (vma->node.start & (fence_alignment - 1)) == 0);
+
+	mappable = (vma->node.start + fence_size <=
+		    to_i915(obj->base.dev)->gtt.mappable_end);
+
+	obj->map_and_fenceable = mappable && fenceable;
+}
+
 static int
 i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
 		       struct i915_address_space *vm,
@@ -4139,25 +4222,7 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
 
 	if (ggtt_view && ggtt_view->type == I915_GGTT_VIEW_NORMAL &&
 	    (bound ^ vma->bound) & GLOBAL_BIND) {
-		bool mappable, fenceable;
-		u32 fence_size, fence_alignment;
-
-		fence_size = i915_gem_get_gtt_size(obj->base.dev,
-						   obj->base.size,
-						   obj->tiling_mode);
-		fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
-							     obj->base.size,
-							     obj->tiling_mode,
-							     true);
-
-		fenceable = (vma->node.size == fence_size &&
-			     (vma->node.start & (fence_alignment - 1)) == 0);
-
-		mappable = (vma->node.start + fence_size <=
-			    dev_priv->gtt.mappable_end);
-
-		obj->map_and_fenceable = mappable && fenceable;
-
+		__i915_vma_set_map_and_fenceable(vma);
 		WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
 	}
 
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 8c688a5f1589..02ceb7a4b481 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -141,8 +141,6 @@ static void i915_gem_context_clean(struct intel_context *ctx)
 	if (!ppgtt)
 		return;
 
-	WARN_ON(!list_empty(&ppgtt->base.active_list));
-
 	list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list,
 				 mm_list) {
 		if (WARN_ON(__i915_vma_unbind_no_wait(vma)))
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
index 40a10b25956c..f010391b87f5 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence.c
@@ -642,11 +642,10 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
 	}
 
 	/* check for L-shaped memory aka modified enhanced addressing */
-	if (IS_GEN4(dev)) {
-		uint32_t ddc2 = I915_READ(DCC2);
-
-		if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE))
-			dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
+	if (IS_GEN4(dev) &&
+	    !(I915_READ(DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) {
+		swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+		swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
 	}
 
 	if (dcc == 0xffffffff) {
@@ -675,16 +674,35 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
 		 * matching, which was the case for the swizzling required in
 		 * the table above, or from the 1-ch value being less than
 		 * the minimum size of a rank.
+		 *
+		 * Reports indicate that the swizzling actually
+		 * varies depending upon page placement inside the
+		 * channels, i.e. we see swizzled pages where the
+		 * banks of memory are paired and unswizzled on the
+		 * uneven portion, so leave that as unknown.
 		 */
-		if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) {
-			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
-			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
-		} else {
+		if (I915_READ16(C0DRB3) == I915_READ16(C1DRB3)) {
 			swizzle_x = I915_BIT_6_SWIZZLE_9_10;
 			swizzle_y = I915_BIT_6_SWIZZLE_9;
 		}
 	}
 
+	if (swizzle_x == I915_BIT_6_SWIZZLE_UNKNOWN ||
+	    swizzle_y == I915_BIT_6_SWIZZLE_UNKNOWN) {
+		/* Userspace likes to explode if it sees unknown swizzling,
+		 * so lie. We will finish the lie when reporting through
+		 * the get-tiling-ioctl by reporting the physical swizzle
+		 * mode as unknown instead.
+		 *
+		 * As we don't strictly know what the swizzling is, it may be
+		 * bit17 dependent, and so we need to also prevent the pages
+		 * from being moved.
+		 */
+		dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
+		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+	}
+
 	dev_priv->mm.bit_6_swizzle_x = swizzle_x;
 	dev_priv->mm.bit_6_swizzle_y = swizzle_y;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 43f35d12b677..86c7500454b4 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2676,6 +2676,7 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
 			return ret;
 		}
 		vma->bound |= GLOBAL_BIND;
+		__i915_vma_set_map_and_fenceable(vma);
 		list_add_tail(&vma->mm_list, &ggtt_vm->inactive_list);
 	}
 
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index cdacf3f5b77a..87e919a06b27 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -687,6 +687,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 	}
 
 	vma->bound |= GLOBAL_BIND;
+	__i915_vma_set_map_and_fenceable(vma);
 	list_add_tail(&vma->mm_list, &ggtt->inactive_list);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 71860f8680f9..32cf97346978 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -116,6 +116,7 @@ static void skylake_pfit_enable(struct intel_crtc *crtc);
 static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
 static void ironlake_pfit_enable(struct intel_crtc *crtc);
 static void intel_modeset_setup_hw_state(struct drm_device *dev);
+static void intel_pre_disable_primary(struct drm_crtc *crtc);
 
 typedef struct {
 	int min, max;
@@ -2607,6 +2608,8 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
 	struct drm_i915_gem_object *obj;
 	struct drm_plane *primary = intel_crtc->base.primary;
 	struct drm_plane_state *plane_state = primary->state;
+	struct drm_crtc_state *crtc_state = intel_crtc->base.state;
+	struct intel_plane *intel_plane = to_intel_plane(primary);
 	struct drm_framebuffer *fb;
 
 	if (!plane_config->fb)
@@ -2643,6 +2646,18 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
 		}
 	}
 
+	/*
+	 * We've failed to reconstruct the BIOS FB. Current display state
+	 * indicates that the primary plane is visible, but has a NULL FB,
+	 * which will lead to problems later if we don't fix it up. The
+	 * simplest solution is to just disable the primary plane now and
+	 * pretend the BIOS never had it enabled.
+	 */
+	to_intel_plane_state(plane_state)->visible = false;
+	crtc_state->plane_mask &= ~(1 << drm_plane_index(primary));
+	intel_pre_disable_primary(&intel_crtc->base);
+	intel_plane->disable_plane(primary, &intel_crtc->base);
+
 	return;
 
 valid_fb:
@@ -5194,11 +5209,31 @@ static enum intel_display_power_domain port_to_power_domain(enum port port)
 	case PORT_E:
 		return POWER_DOMAIN_PORT_DDI_E_2_LANES;
 	default:
-		WARN_ON_ONCE(1);
+		MISSING_CASE(port);
 		return POWER_DOMAIN_PORT_OTHER;
 	}
 }
 
+static enum intel_display_power_domain port_to_aux_power_domain(enum port port)
+{
+	switch (port) {
+	case PORT_A:
+		return POWER_DOMAIN_AUX_A;
+	case PORT_B:
+		return POWER_DOMAIN_AUX_B;
+	case PORT_C:
+		return POWER_DOMAIN_AUX_C;
+	case PORT_D:
+		return POWER_DOMAIN_AUX_D;
+	case PORT_E:
+		/* FIXME: Check VBT for actual wiring of PORT E */
+		return POWER_DOMAIN_AUX_D;
+	default:
+		MISSING_CASE(port);
+		return POWER_DOMAIN_AUX_A;
+	}
+}
+
 #define for_each_power_domain(domain, mask) \
 	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
 		if ((1 << (domain)) & (mask))
@@ -5230,6 +5265,36 @@ intel_display_port_power_domain(struct intel_encoder *intel_encoder)
 	}
 }
 
+enum intel_display_power_domain
+intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
+{
+	struct drm_device *dev = intel_encoder->base.dev;
+	struct intel_digital_port *intel_dig_port;
+
+	switch (intel_encoder->type) {
+	case INTEL_OUTPUT_UNKNOWN:
+	case INTEL_OUTPUT_HDMI:
+		/*
+		 * Only DDI platforms should ever use these output types.
+		 * We can get here after the HDMI detect code has already set
+		 * the type of the shared encoder. Since we can't be sure
+		 * what's the status of the given connectors, play safe and
+		 * run the DP detection too.
+		 */
+		WARN_ON_ONCE(!HAS_DDI(dev));
+	case INTEL_OUTPUT_DISPLAYPORT:
+	case INTEL_OUTPUT_EDP:
+		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
+		return port_to_aux_power_domain(intel_dig_port->port);
+	case INTEL_OUTPUT_DP_MST:
+		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
+		return port_to_aux_power_domain(intel_dig_port->port);
+	default:
+		MISSING_CASE(intel_encoder->type);
+		return POWER_DOMAIN_AUX_A;
+	}
+}
+
 static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
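port_to_aux_power_domain() above is a plain enum-to-enum switch whose default arm both logs and returns a safe value. A compilable sketch of the same shape, with the kernel's MISSING_CASE() macro replaced by a plain fprintf() since it isn't available outside the tree:

#include <stdio.h>

enum port { PORT_A, PORT_B, PORT_C, PORT_D, PORT_E };
enum aux_domain { AUX_A, AUX_B, AUX_C, AUX_D };

/* Map a port to its AUX power domain; fall back loudly on unknown input. */
static enum aux_domain port_to_aux(enum port port)
{
        switch (port) {
        case PORT_A: return AUX_A;
        case PORT_B: return AUX_B;
        case PORT_C: return AUX_C;
        case PORT_D: return AUX_D;
        case PORT_E: return AUX_D;      /* shared wiring, as in the patch's FIXME */
        default:
                fprintf(stderr, "missing case: port %d\n", (int)port);
                return AUX_A;           /* safe default rather than undefined behavior */
        }
}

int main(void)
{
        printf("PORT_C -> AUX_%c\n", 'A' + (int)port_to_aux(PORT_C));
        return 0;
}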
@@ -6259,9 +6324,11 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
 	if (to_intel_plane_state(crtc->primary->state)->visible) {
 		intel_crtc_wait_for_pending_flips(crtc);
 		intel_pre_disable_primary(crtc);
+
+		intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
+		to_intel_plane_state(crtc->primary->state)->visible = false;
 	}
 
-	intel_crtc_disable_planes(crtc, crtc->state->plane_mask);
 	dev_priv->display.crtc_disable(crtc);
 	intel_crtc->active = false;
 	intel_update_watermarks(crtc);
@@ -9858,14 +9925,14 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
 	return true;
 }
 
-static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
+static void i845_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t cntl = 0, size = 0;
 
-	if (base) {
+	if (on) {
 		unsigned int width = intel_crtc->base.cursor->state->crtc_w;
 		unsigned int height = intel_crtc->base.cursor->state->crtc_h;
 		unsigned int stride = roundup_pow_of_two(width) * 4;
@@ -9920,16 +9987,15 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
 	}
 }
 
-static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
+static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_crtc->pipe;
-	uint32_t cntl;
+	uint32_t cntl = 0;
 
-	cntl = 0;
-	if (base) {
+	if (on) {
 		cntl = MCURSOR_GAMMA_ENABLE;
 		switch (intel_crtc->base.cursor->state->crtc_w) {
 		case 64:
@@ -9980,18 +10046,17 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
 	int y = cursor_state->crtc_y;
 	u32 base = 0, pos = 0;
 
-	if (on)
-		base = intel_crtc->cursor_addr;
+	base = intel_crtc->cursor_addr;
 
 	if (x >= intel_crtc->config->pipe_src_w)
-		base = 0;
+		on = false;
 
 	if (y >= intel_crtc->config->pipe_src_h)
-		base = 0;
+		on = false;
 
 	if (x < 0) {
 		if (x + cursor_state->crtc_w <= 0)
-			base = 0;
+			on = false;
 
 		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
 		x = -x;
@@ -10000,16 +10065,13 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
 
 	if (y < 0) {
 		if (y + cursor_state->crtc_h <= 0)
-			base = 0;
+			on = false;
 
 		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
 		y = -y;
 	}
 	pos |= y << CURSOR_Y_SHIFT;
 
-	if (base == 0 && intel_crtc->cursor_base == 0)
-		return;
-
 	I915_WRITE(CURPOS(pipe), pos);
 
 	/* ILK+ do this automagically */
@@ -10020,9 +10082,9 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
 	}
 
 	if (IS_845G(dev) || IS_I865G(dev))
-		i845_update_cursor(crtc, base);
+		i845_update_cursor(crtc, base, on);
 	else
-		i9xx_update_cursor(crtc, base);
+		i9xx_update_cursor(crtc, base, on);
 }
 
 static bool cursor_size_ok(struct drm_device *dev,
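The cursor hunks above replace the `base == 0` sentinel with an explicit `on` flag, so "cursor disabled" and "buffer happens to live at address 0" are no longer conflated. A small userspace sketch of the same separation (hypothetical helper, not the driver function):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Visibility is an explicit flag; the address is never overloaded as one. */
static void update_cursor(uint32_t base, bool on, int x, int width, int screen_w)
{
        if (x >= screen_w || x + width <= 0)
                on = false;     /* fully off screen: disable, keep base intact */

        if (on)
                printf("enable cursor at %#x\n", (unsigned)base);
        else
                printf("disable cursor (base %#x preserved)\n", (unsigned)base);
}

int main(void)
{
        update_cursor(0x0, true, 10, 64, 1920); /* address 0 is still a valid buffer */
        update_cursor(0x1000, true, -100, 64, 1920);
        return 0;
}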
@@ -12061,18 +12123,22 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
 static bool check_digital_port_conflicts(struct drm_atomic_state *state)
 {
 	struct drm_device *dev = state->dev;
-	struct intel_encoder *encoder;
 	struct drm_connector *connector;
-	struct drm_connector_state *connector_state;
 	unsigned int used_ports = 0;
-	int i;
 
 	/*
 	 * Walk the connector list instead of the encoder
 	 * list to detect the problem on ddi platforms
 	 * where there's just one encoder per digital port.
 	 */
-	for_each_connector_in_state(state, connector, connector_state, i) {
+	drm_for_each_connector(connector, dev) {
+		struct drm_connector_state *connector_state;
+		struct intel_encoder *encoder;
+
+		connector_state = drm_atomic_get_existing_connector_state(state, connector);
+		if (!connector_state)
+			connector_state = connector->state;
+
 		if (!connector_state->best_encoder)
 			continue;
 
@@ -12460,7 +12526,6 @@ intel_pipe_config_compare(struct drm_device *dev,
 	if (INTEL_INFO(dev)->gen < 8) {
 		PIPE_CONF_CHECK_M_N(dp_m_n);
 
-		PIPE_CONF_CHECK_I(has_drrs);
 		if (current_config->has_drrs)
 			PIPE_CONF_CHECK_M_N(dp_m2_n2);
 	} else
@@ -13667,6 +13732,7 @@ intel_check_cursor_plane(struct drm_plane *plane,
 	struct drm_crtc *crtc = crtc_state->base.crtc;
 	struct drm_framebuffer *fb = state->base.fb;
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+	enum pipe pipe = to_intel_plane(plane)->pipe;
 	unsigned stride;
 	int ret;
 
@@ -13700,6 +13766,22 @@ intel_check_cursor_plane(struct drm_plane *plane,
 		return -EINVAL;
 	}
 
+	/*
+	 * There's something wrong with the cursor on CHV pipe C.
+	 * If it straddles the left edge of the screen then
+	 * moving it away from the edge or disabling it often
+	 * results in a pipe underrun, and often that can lead to
+	 * dead pipe (constant underrun reported, and it scans
+	 * out just a solid color). To recover from that, the
+	 * display power well must be turned off and on again.
+	 * Refuse to put the cursor into that compromised position.
+	 */
+	if (IS_CHERRYVIEW(plane->dev) && pipe == PIPE_C &&
+	    state->visible && state->base.crtc_x < 0) {
+		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
+		return -EINVAL;
+	}
+
 	return 0;
 }
 
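The new check rejects a configuration known to wedge the pipe instead of trying to recover afterwards. A minimal sketch of that validate-early pattern (stub arguments stand in for the plane state):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Reject a known-bad configuration up front rather than recovering later. */
static int check_cursor(bool is_affected_pipe, bool visible, int crtc_x)
{
        if (is_affected_pipe && visible && crtc_x < 0) {
                fprintf(stderr, "cursor may not straddle the left edge here\n");
                return -EINVAL;
        }
        return 0;
}

int main(void)
{
        printf("check: %d\n", check_cursor(true, true, -8));    /* -> -EINVAL */
        return 0;
}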
@@ -13723,9 +13805,6 @@ intel_commit_cursor_plane(struct drm_plane *plane,
 	crtc = crtc ? crtc : plane->crtc;
 	intel_crtc = to_intel_crtc(crtc);
 
-	if (intel_crtc->cursor_bo == obj)
-		goto update;
-
 	if (!obj)
 		addr = 0;
 	else if (!INTEL_INFO(dev)->cursor_needs_physical)
@@ -13734,9 +13813,7 @@ intel_commit_cursor_plane(struct drm_plane *plane,
 		addr = obj->phys_handle->busaddr;
 
 	intel_crtc->cursor_addr = addr;
-	intel_crtc->cursor_bo = obj;
 
-update:
 	if (crtc->state->active)
 		intel_crtc_update_cursor(crtc, state->visible);
 }
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 09bdd94ca3ba..78b8ec84d576 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -277,7 +277,7 @@ static void pps_lock(struct intel_dp *intel_dp)
 	 * See vlv_power_sequencer_reset() why we need
 	 * a power domain reference here.
 	 */
-	power_domain = intel_display_port_power_domain(encoder);
+	power_domain = intel_display_port_aux_power_domain(encoder);
 	intel_display_power_get(dev_priv, power_domain);
 
 	mutex_lock(&dev_priv->pps_mutex);
@@ -293,7 +293,7 @@ static void pps_unlock(struct intel_dp *intel_dp)
 
 	mutex_unlock(&dev_priv->pps_mutex);
 
-	power_domain = intel_display_port_power_domain(encoder);
+	power_domain = intel_display_port_aux_power_domain(encoder);
 	intel_display_power_put(dev_priv, power_domain);
 }
 
@@ -816,8 +816,6 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
 
 	intel_dp_check_edp(intel_dp);
 
-	intel_aux_display_runtime_get(dev_priv);
-
 	/* Try to wait for any previous AUX channel activity */
 	for (try = 0; try < 3; try++) {
 		status = I915_READ_NOTRACE(ch_ctl);
@@ -926,7 +924,6 @@ done:
 	ret = recv_bytes;
 out:
 	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
-	intel_aux_display_runtime_put(dev_priv);
 
 	if (vdd)
 		edp_panel_vdd_off(intel_dp, false);
@@ -1784,7 +1781,7 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
 	if (edp_have_panel_vdd(intel_dp))
 		return need_to_disable;
 
-	power_domain = intel_display_port_power_domain(intel_encoder);
+	power_domain = intel_display_port_aux_power_domain(intel_encoder);
 	intel_display_power_get(dev_priv, power_domain);
 
 	DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
@@ -1874,7 +1871,7 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
 	if ((pp & POWER_TARGET_ON) == 0)
 		intel_dp->last_power_cycle = jiffies;
 
-	power_domain = intel_display_port_power_domain(intel_encoder);
+	power_domain = intel_display_port_aux_power_domain(intel_encoder);
 	intel_display_power_put(dev_priv, power_domain);
 }
 
@@ -2025,7 +2022,7 @@ static void edp_panel_off(struct intel_dp *intel_dp)
 	wait_panel_off(intel_dp);
 
 	/* We got a reference when we enabled the VDD. */
-	power_domain = intel_display_port_power_domain(intel_encoder);
+	power_domain = intel_display_port_aux_power_domain(intel_encoder);
 	intel_display_power_put(dev_priv, power_domain);
 }
 
@@ -4765,26 +4762,6 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
 	intel_dp->has_audio = false;
 }
 
-static enum intel_display_power_domain
-intel_dp_power_get(struct intel_dp *dp)
-{
-	struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
-	enum intel_display_power_domain power_domain;
-
-	power_domain = intel_display_port_power_domain(encoder);
-	intel_display_power_get(to_i915(encoder->base.dev), power_domain);
-
-	return power_domain;
-}
-
-static void
-intel_dp_power_put(struct intel_dp *dp,
-		   enum intel_display_power_domain power_domain)
-{
-	struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
-	intel_display_power_put(to_i915(encoder->base.dev), power_domain);
-}
-
 static enum drm_connector_status
 intel_dp_detect(struct drm_connector *connector, bool force)
 {
@@ -4808,7 +4785,8 @@ intel_dp_detect(struct drm_connector *connector, bool force)
 		return connector_status_disconnected;
 	}
 
-	power_domain = intel_dp_power_get(intel_dp);
+	power_domain = intel_display_port_aux_power_domain(intel_encoder);
+	intel_display_power_get(to_i915(dev), power_domain);
 
 	/* Can't disconnect eDP, but you can close the lid... */
 	if (is_edp(intel_dp))
@@ -4853,7 +4831,7 @@ intel_dp_detect(struct drm_connector *connector, bool force)
 	}
 
 out:
-	intel_dp_power_put(intel_dp, power_domain);
+	intel_display_power_put(to_i915(dev), power_domain);
 	return status;
 }
 
@@ -4862,6 +4840,7 @@ intel_dp_force(struct drm_connector *connector)
 {
 	struct intel_dp *intel_dp = intel_attached_dp(connector);
 	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
+	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
 	enum intel_display_power_domain power_domain;
 
 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
@@ -4871,11 +4850,12 @@ intel_dp_force(struct drm_connector *connector)
 	if (connector->status != connector_status_connected)
 		return;
 
-	power_domain = intel_dp_power_get(intel_dp);
+	power_domain = intel_display_port_aux_power_domain(intel_encoder);
+	intel_display_power_get(dev_priv, power_domain);
 
 	intel_dp_set_edid(intel_dp);
 
-	intel_dp_power_put(intel_dp, power_domain);
+	intel_display_power_put(dev_priv, power_domain);
 
 	if (intel_encoder->type != INTEL_OUTPUT_EDP)
 		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
@@ -5091,7 +5071,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
 	 * indefinitely.
 	 */
 	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
-	power_domain = intel_display_port_power_domain(&intel_dig_port->base);
+	power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base);
 	intel_display_power_get(dev_priv, power_domain);
 
 	edp_panel_vdd_schedule_off(intel_dp);
@@ -5153,7 +5133,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
 	enum intel_display_power_domain power_domain;
 	enum irqreturn ret = IRQ_NONE;
 
-	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
+	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
+	    intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
 		intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
 
 	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
@@ -5172,7 +5153,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
 		      port_name(intel_dig_port->port),
 		      long_hpd ? "long" : "short");
 
-	power_domain = intel_display_port_power_domain(intel_encoder);
+	power_domain = intel_display_port_aux_power_domain(intel_encoder);
 	intel_display_power_get(dev_priv, power_domain);
 
 	if (long_hpd) {
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 0598932ce623..0d00f07b7163 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -550,7 +550,6 @@ struct intel_crtc {
 	int adjusted_x;
 	int adjusted_y;
 
-	struct drm_i915_gem_object *cursor_bo;
 	uint32_t cursor_addr;
 	uint32_t cursor_cntl;
 	uint32_t cursor_size;
@@ -1169,6 +1168,8 @@ void hsw_enable_ips(struct intel_crtc *crtc);
 void hsw_disable_ips(struct intel_crtc *crtc);
 enum intel_display_power_domain
 intel_display_port_power_domain(struct intel_encoder *intel_encoder);
+enum intel_display_power_domain
+intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder);
 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
 				 struct intel_crtc_state *pipe_config);
 void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc);
@@ -1377,8 +1378,6 @@ void intel_display_power_get(struct drm_i915_private *dev_priv,
 			     enum intel_display_power_domain domain);
 void intel_display_power_put(struct drm_i915_private *dev_priv,
 			     enum intel_display_power_domain domain);
-void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
-void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
 void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
 void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
 void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 9eafa191cee2..e6c035b0fc1c 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -1335,21 +1335,17 @@ intel_hdmi_set_edid(struct drm_connector *connector, bool force)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
 	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
-	struct intel_encoder *intel_encoder =
-		&hdmi_to_dig_port(intel_hdmi)->base;
-	enum intel_display_power_domain power_domain;
 	struct edid *edid = NULL;
 	bool connected = false;
 
-	power_domain = intel_display_port_power_domain(intel_encoder);
-	intel_display_power_get(dev_priv, power_domain);
+	intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
 
 	if (force)
 		edid = drm_get_edid(connector,
 				    intel_gmbus_get_adapter(dev_priv,
 				    intel_hdmi->ddc_bus));
 
-	intel_display_power_put(dev_priv, power_domain);
+	intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
 
 	to_intel_connector(connector)->detect_edid = edid;
 	if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
@@ -1378,15 +1374,18 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
 	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
 	bool live_status = false;
-	unsigned int retry = 3;
+	unsigned int try;
 
 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
 		      connector->base.id, connector->name);
 
-	while (!live_status && --retry) {
+	intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
+
+	for (try = 0; !live_status && try < 9; try++) {
+		if (try)
+			msleep(10);
 		live_status = intel_digital_port_connected(dev_priv,
 				hdmi_to_dig_port(intel_hdmi));
-		mdelay(10);
 	}
 
 	if (!live_status)
@@ -1402,6 +1401,8 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
 	} else
 		status = connector_status_disconnected;
 
+	intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
+
 	return status;
 }
 
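The detect path above replaces an mdelay() busy-wait after every probe with an msleep() taken only between retries, bounded at nine attempts. A standalone sketch of that loop shape (port_connected() is a stub, not the i915 helper):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static bool port_connected(void)
{
        static int calls;
        return ++calls >= 3;    /* stub: reports success on the third poll */
}

int main(void)
{
        bool live = false;
        unsigned int try;

        /* Poll up to 9 times, sleeping only between attempts, never after the last. */
        for (try = 0; !live && try < 9; try++) {
                if (try)
                        usleep(10 * 1000);      /* 10 ms; sleeping, not spinning */
                live = port_connected();
        }
        printf("live=%d after %u tries\n", live, try);
        return 0;
}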
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 1369fc41d039..8324654037b6 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -483,7 +483,7 @@ gmbus_xfer(struct i2c_adapter *adapter,
 	int i = 0, inc, try = 0;
 	int ret = 0;
 
-	intel_aux_display_runtime_get(dev_priv);
+	intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
 	mutex_lock(&dev_priv->gmbus_mutex);
 
 	if (bus->force_bit) {
@@ -595,7 +595,9 @@ timeout:
 
 out:
 	mutex_unlock(&dev_priv->gmbus_mutex);
-	intel_aux_display_runtime_put(dev_priv);
+
+	intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
+
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 071a76b9ac52..f091ad12d694 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4782,8 +4782,7 @@ static void gen9_enable_rc6(struct drm_device *dev)
 	/* 2b: Program RC6 thresholds.*/
 
 	/* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */
-	if (IS_SKYLAKE(dev) && !((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) &&
-				 (INTEL_REVID(dev) <= SKL_REVID_E0)))
+	if (IS_SKYLAKE(dev))
 		I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
 	else
 		I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
@@ -4825,7 +4824,7 @@ static void gen9_enable_rc6(struct drm_device *dev)
 	 * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
 	 */
 	if ((IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) ||
-	    ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && (INTEL_REVID(dev) <= SKL_REVID_E0)))
+	    ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && (INTEL_REVID(dev) <= SKL_REVID_F0)))
 		I915_WRITE(GEN9_PG_ENABLE, 0);
 	else
 		I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index d89c1d0aa1b7..7e23d65c9b24 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -362,6 +362,7 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
 	BIT(POWER_DOMAIN_AUX_C) |			\
 	BIT(POWER_DOMAIN_AUDIO) |			\
 	BIT(POWER_DOMAIN_VGA) |				\
+	BIT(POWER_DOMAIN_GMBUS) |			\
 	BIT(POWER_DOMAIN_INIT))
 #define BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS (		\
 	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
@@ -1483,6 +1484,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
 	BIT(POWER_DOMAIN_AUX_B) |			\
 	BIT(POWER_DOMAIN_AUX_C) |			\
 	BIT(POWER_DOMAIN_AUX_D) |			\
+	BIT(POWER_DOMAIN_GMBUS) |			\
 	BIT(POWER_DOMAIN_INIT))
 #define HSW_DISPLAY_POWER_DOMAINS (			\
 	(POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) |	\
@@ -1845,6 +1847,8 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
 	i915.disable_power_well = sanitize_disable_power_well_option(dev_priv,
 						     i915.disable_power_well);
 
+	BUILD_BUG_ON(POWER_DOMAIN_NUM > 31);
+
 	mutex_init(&power_domains->lock);
 
 	/*
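BUILD_BUG_ON() turns the "power domain numbers must fit in a 32-bit mask" assumption into a compile-time failure instead of a runtime surprise. Outside the kernel, C11's _Static_assert gives the same guarantee; a minimal sketch:

#include <stdio.h>

#define POWER_DOMAIN_NUM 30

/* C11 equivalent of the kernel's BUILD_BUG_ON(): fail the build, not the boot. */
_Static_assert(POWER_DOMAIN_NUM <= 31,
               "power domain bits must fit in a 32-bit mask");

int main(void)
{
        printf("POWER_DOMAIN_NUM=%d fits in an unsigned 32-bit mask\n",
               POWER_DOMAIN_NUM);
        return 0;
}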
@@ -2064,36 +2068,6 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
 }
 
 /**
- * intel_aux_display_runtime_get - grab an auxiliary power domain reference
- * @dev_priv: i915 device instance
- *
- * This function grabs a power domain reference for the auxiliary power domain
- * (for access to the GMBUS and DP AUX blocks) and ensures that it and all its
- * parents are powered up. Therefore users should only grab a reference to the
- * innermost power domain they need.
- *
- * Any power domain reference obtained by this function must have a symmetric
- * call to intel_aux_display_runtime_put() to release the reference again.
- */
-void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
-{
-	intel_runtime_pm_get(dev_priv);
-}
-
-/**
- * intel_aux_display_runtime_put - release an auxiliary power domain reference
- * @dev_priv: i915 device instance
- *
- * This function drops the auxiliary power domain reference obtained by
- * intel_aux_display_runtime_get() and might power down the corresponding
- * hardware block right away if this is the last reference.
- */
-void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
-{
-	intel_runtime_pm_put(dev_priv);
-}
-
-/**
  * intel_runtime_pm_get - grab a runtime pm reference
  * @dev_priv: i915 device instance
  *
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 64f16ea779ef..7b990b4e96d2 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -63,8 +63,7 @@ static void imx_drm_driver_lastclose(struct drm_device *drm)
 #if IS_ENABLED(CONFIG_DRM_IMX_FB_HELPER)
 	struct imx_drm_device *imxdrm = drm->dev_private;
 
-	if (imxdrm->fbhelper)
-		drm_fbdev_cma_restore_mode(imxdrm->fbhelper);
+	drm_fbdev_cma_restore_mode(imxdrm->fbhelper);
 #endif
 }
 
@@ -340,7 +339,7 @@ err_kms:
  * imx_drm_add_crtc - add a new crtc
  */
 int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
-		struct imx_drm_crtc **new_crtc,
+		struct imx_drm_crtc **new_crtc, struct drm_plane *primary_plane,
 		const struct imx_drm_crtc_helper_funcs *imx_drm_helper_funcs,
 		struct device_node *port)
 {
@@ -379,7 +378,7 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
 	drm_crtc_helper_add(crtc,
 			imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs);
 
-	drm_crtc_init(drm, crtc,
+	drm_crtc_init_with_planes(drm, crtc, primary_plane, NULL,
 			imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs);
 
 	return 0;
diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h
index 28e776d8d9d2..83284b4d4be1 100644
--- a/drivers/gpu/drm/imx/imx-drm.h
+++ b/drivers/gpu/drm/imx/imx-drm.h
@@ -9,6 +9,7 @@ struct drm_display_mode;
 struct drm_encoder;
 struct drm_fbdev_cma;
 struct drm_framebuffer;
+struct drm_plane;
 struct imx_drm_crtc;
 struct platform_device;
 
@@ -24,7 +25,7 @@ struct imx_drm_crtc_helper_funcs {
 };
 
 int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
-		struct imx_drm_crtc **new_crtc,
+		struct imx_drm_crtc **new_crtc, struct drm_plane *primary_plane,
 		const struct imx_drm_crtc_helper_funcs *imx_helper_funcs,
 		struct device_node *port);
 int imx_drm_remove_crtc(struct imx_drm_crtc *);
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index e671ad369416..f9597146dc67 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -721,6 +721,7 @@ static const struct of_device_id imx_tve_dt_ids[] = {
 	{ .compatible = "fsl,imx53-tve", },
 	{ /* sentinel */ }
 };
+MODULE_DEVICE_TABLE(of, imx_tve_dt_ids);
 
 static struct platform_driver imx_tve_driver = {
 	.probe = imx_tve_probe,
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 7bc8301fafff..4ab841eebee1 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -212,7 +212,8 @@ static void ipu_crtc_handle_pageflip(struct ipu_crtc *ipu_crtc)
 
 	spin_lock_irqsave(&drm->event_lock, flags);
 	if (ipu_crtc->page_flip_event)
-		drm_send_vblank_event(drm, -1, ipu_crtc->page_flip_event);
+		drm_crtc_send_vblank_event(&ipu_crtc->base,
+					   ipu_crtc->page_flip_event);
 	ipu_crtc->page_flip_event = NULL;
 	imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc);
 	spin_unlock_irqrestore(&drm->event_lock, flags);
@@ -349,7 +350,6 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
 	struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
 	int dp = -EINVAL;
 	int ret;
-	int id;
 
 	ret = ipu_get_resources(ipu_crtc, pdata);
 	if (ret) {
@@ -358,18 +358,23 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
 		return ret;
 	}
 
+	if (pdata->dp >= 0)
+		dp = IPU_DP_FLOW_SYNC_BG;
+	ipu_crtc->plane[0] = ipu_plane_init(drm, ipu, pdata->dma[0], dp, 0,
+					    DRM_PLANE_TYPE_PRIMARY);
+	if (IS_ERR(ipu_crtc->plane[0])) {
+		ret = PTR_ERR(ipu_crtc->plane[0]);
+		goto err_put_resources;
+	}
+
 	ret = imx_drm_add_crtc(drm, &ipu_crtc->base, &ipu_crtc->imx_crtc,
-			&ipu_crtc_helper_funcs, ipu_crtc->dev->of_node);
+			&ipu_crtc->plane[0]->base, &ipu_crtc_helper_funcs,
+			ipu_crtc->dev->of_node);
 	if (ret) {
 		dev_err(ipu_crtc->dev, "adding crtc failed with %d.\n", ret);
 		goto err_put_resources;
 	}
 
-	if (pdata->dp >= 0)
-		dp = IPU_DP_FLOW_SYNC_BG;
-	id = imx_drm_crtc_id(ipu_crtc->imx_crtc);
-	ipu_crtc->plane[0] = ipu_plane_init(ipu_crtc->base.dev, ipu,
-					    pdata->dma[0], dp, BIT(id), true);
 	ret = ipu_plane_get_resources(ipu_crtc->plane[0]);
 	if (ret) {
 		dev_err(ipu_crtc->dev, "getting plane 0 resources failed with %d.\n",
@@ -379,10 +384,10 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
 
 	/* If this crtc is using the DP, add an overlay plane */
 	if (pdata->dp >= 0 && pdata->dma[1] > 0) {
-		ipu_crtc->plane[1] = ipu_plane_init(ipu_crtc->base.dev, ipu,
-						    pdata->dma[1],
-						    IPU_DP_FLOW_SYNC_FG,
-						    BIT(id), false);
+		ipu_crtc->plane[1] = ipu_plane_init(drm, ipu, pdata->dma[1],
+						    IPU_DP_FLOW_SYNC_FG,
+						    drm_crtc_mask(&ipu_crtc->base),
+						    DRM_PLANE_TYPE_OVERLAY);
 		if (IS_ERR(ipu_crtc->plane[1]))
 			ipu_crtc->plane[1] = NULL;
 	}
@@ -407,28 +412,6 @@ err_put_resources:
 	return ret;
 }
 
-static struct device_node *ipu_drm_get_port_by_id(struct device_node *parent,
-						  int port_id)
-{
-	struct device_node *port;
-	int id, ret;
-
-	port = of_get_child_by_name(parent, "port");
-	while (port) {
-		ret = of_property_read_u32(port, "reg", &id);
-		if (!ret && id == port_id)
-			return port;
-
-		do {
-			port = of_get_next_child(parent, port);
-			if (!port)
-				return NULL;
-		} while (of_node_cmp(port->name, "port"));
-	}
-
-	return NULL;
-}
-
 static int ipu_drm_bind(struct device *dev, struct device *master, void *data)
 {
 	struct ipu_client_platformdata *pdata = dev->platform_data;
@@ -470,23 +453,11 @@ static const struct component_ops ipu_crtc_ops = {
 static int ipu_drm_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
-	struct ipu_client_platformdata *pdata = dev->platform_data;
 	int ret;
 
 	if (!dev->platform_data)
 		return -EINVAL;
 
-	if (!dev->of_node) {
-		/* Associate crtc device with the corresponding DI port node */
-		dev->of_node = ipu_drm_get_port_by_id(dev->parent->of_node,
-						      pdata->di + 2);
-		if (!dev->of_node) {
-			dev_err(dev, "missing port@%d node in %s\n",
-				pdata->di + 2, dev->parent->of_node->full_name);
-			return -ENODEV;
-		}
-	}
-
 	ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 575f4c84388f..e2ff410bab74 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -381,7 +381,7 @@ static struct drm_plane_funcs ipu_plane_funcs = {
 
 struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
 				 int dma, int dp, unsigned int possible_crtcs,
-				 bool priv)
+				 enum drm_plane_type type)
 {
 	struct ipu_plane *ipu_plane;
 	int ret;
@@ -399,10 +399,9 @@ struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
 	ipu_plane->dma = dma;
 	ipu_plane->dp_flow = dp;
 
-	ret = drm_plane_init(dev, &ipu_plane->base, possible_crtcs,
-			     &ipu_plane_funcs, ipu_plane_formats,
-			     ARRAY_SIZE(ipu_plane_formats),
-			     priv);
+	ret = drm_universal_plane_init(dev, &ipu_plane->base, possible_crtcs,
+				       &ipu_plane_funcs, ipu_plane_formats,
+				       ARRAY_SIZE(ipu_plane_formats), type);
 	if (ret) {
 		DRM_ERROR("failed to initialize plane\n");
 		kfree(ipu_plane);
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.h b/drivers/gpu/drm/imx/ipuv3-plane.h
index 9b5eff18f5b8..3a443b413c60 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.h
+++ b/drivers/gpu/drm/imx/ipuv3-plane.h
@@ -34,7 +34,7 @@ struct ipu_plane {
 
 struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
 				 int dma, int dp, unsigned int possible_crtcs,
-				 bool priv);
+				 enum drm_plane_type type);
 
 /* Init IDMAC, DMFC, DP */
 int ipu_plane_mode_set(struct ipu_plane *plane, struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index b4deb9cf9d71..2e9b9f1b5cd2 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -54,7 +54,11 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
 
 	if (imxpd->panel && imxpd->panel->funcs &&
 	    imxpd->panel->funcs->get_modes) {
+		struct drm_display_info *di = &connector->display_info;
+
 		num_modes = imxpd->panel->funcs->get_modes(imxpd->panel);
+		if (!imxpd->bus_format && di->num_bus_formats)
+			imxpd->bus_format = di->bus_formats[0];
 		if (num_modes > 0)
 			return num_modes;
 	}
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index 8f760002e401..913192c94876 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -159,7 +159,6 @@ struct nvkm_device_func {
 struct nvkm_device_quirk {
 	u8 tv_pin_mask;
 	u8 tv_gpio;
-	bool War00C800_0;
 };
 
 struct nvkm_device_chip {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
index 28bc202f9753..40f845e31272 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
@@ -7,6 +7,7 @@ struct nvkm_instmem {
 	const struct nvkm_instmem_func *func;
 	struct nvkm_subdev subdev;
 
+	spinlock_t lock;
 	struct list_head list;
 	u32 reserved;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 8b8332e46f24..d5e6938cc6bc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -367,6 +367,7 @@ static int nouveau_rom_call(acpi_handle rom_handle, uint8_t *bios,
 		return -ENODEV;
 	}
 	obj = (union acpi_object *)buffer.pointer;
+	len = min(len, (int)obj->buffer.length);
 	memcpy(bios+offset, obj->buffer.pointer, len);
 	kfree(buffer.pointer);
 	return len;
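The one-line nouveau fix clamps the copy length to what the ACPI buffer actually holds, so a short ROM reply can no longer cause an overread. The same guard as a standalone sketch (names hypothetical):

#include <stdio.h>
#include <string.h>

/* Never copy more than the source object actually holds. */
static size_t safe_copy(void *dst, size_t want, const void *src, size_t src_len)
{
        size_t n = want < src_len ? want : src_len;     /* min(), as in the fix */

        memcpy(dst, src, n);
        return n;
}

int main(void)
{
        char src[8] = "ROMDATA";
        char dst[64];
        size_t n = safe_copy(dst, sizeof(dst), src, sizeof(src));

        printf("copied %zu bytes\n", n);        /* 8, not 64 */
        return 0;
}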
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index db6bc6760545..64c8d932d5f1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -829,7 +829,6 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
 	struct drm_device *dev = drm->dev;
 	struct nouveau_page_flip_state *s;
 	unsigned long flags;
-	int crtcid = -1;
 
 	spin_lock_irqsave(&dev->event_lock, flags);
 
@@ -841,15 +840,19 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
 
 	s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head);
 	if (s->event) {
-		/* Vblank timestamps/counts are only correct on >= NV-50 */
-		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
-			crtcid = s->crtc;
+		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
+			drm_arm_vblank_event(dev, s->crtc, s->event);
+		} else {
+			drm_send_vblank_event(dev, s->crtc, s->event);
 
-		drm_send_vblank_event(dev, crtcid, s->event);
+			/* Give up ownership of vblank for page-flipped crtc */
+			drm_vblank_put(dev, s->crtc);
+		}
+	}
+	else {
+		/* Give up ownership of vblank for page-flipped crtc */
+		drm_vblank_put(dev, s->crtc);
 	}
-
-	/* Give up ownership of vblank for page-flipped crtc */
-	drm_vblank_put(dev, s->crtc);
 
 	list_del(&s->head);
 	if (ps)
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index 3050042e6c6d..a02813e994ec 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -39,6 +39,7 @@
 
 #include <nvif/client.h>
 #include <nvif/device.h>
+#include <nvif/ioctl.h>
 
 #include <drmP.h>
 
@@ -65,9 +66,10 @@ struct nouveau_drm_tile {
 };
 
 enum nouveau_drm_object_route {
-	NVDRM_OBJECT_NVIF = 0,
+	NVDRM_OBJECT_NVIF = NVIF_IOCTL_V0_OWNER_NVIF,
 	NVDRM_OBJECT_USIF,
 	NVDRM_OBJECT_ABI16,
+	NVDRM_OBJECT_ANY = NVIF_IOCTL_V0_OWNER_ANY,
 };
 
 enum nouveau_drm_notify_route {
diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c
index 89dc4ce63490..6ae1b3494bcd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_usif.c
+++ b/drivers/gpu/drm/nouveau/nouveau_usif.c
@@ -313,7 +313,10 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc)
 	if (nvif_unpack(argv->v0, 0, 0, true)) {
 		/* block access to objects not created via this interface */
 		owner = argv->v0.owner;
-		argv->v0.owner = NVDRM_OBJECT_USIF;
+		if (argv->v0.object == 0ULL)
+			argv->v0.owner = NVDRM_OBJECT_ANY; /* except client */
+		else
+			argv->v0.owner = NVDRM_OBJECT_USIF;
 	} else
 		goto done;
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
index e3c783d0e2ab..62ad0300cfa5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
@@ -259,12 +259,6 @@ nvkm_device_pci_10de_0df4[] = {
 };
 
 static const struct nvkm_device_pci_vendor
-nvkm_device_pci_10de_0fcd[] = {
-	{ 0x17aa, 0x3801, NULL, { .War00C800_0 = true } }, /* Lenovo Y510P */
-	{}
-};
-
-static const struct nvkm_device_pci_vendor
 nvkm_device_pci_10de_0fd2[] = {
 	{ 0x1028, 0x0595, "GeForce GT 640M LE" },
 	{ 0x1028, 0x05b2, "GeForce GT 640M LE" },
@@ -684,7 +678,6 @@ nvkm_device_pci_10de_1189[] = {
 static const struct nvkm_device_pci_vendor
 nvkm_device_pci_10de_1199[] = {
 	{ 0x1458, 0xd001, "GeForce GTX 760" },
-	{ 0x1462, 0x1106, "GeForce GTX 780M", { .War00C800_0 = true } }, /* Medion Erazer X7827 */
 	{}
 };
 
@@ -695,14 +688,6 @@ nvkm_device_pci_10de_11e3[] = {
 };
 
 static const struct nvkm_device_pci_vendor
-nvkm_device_pci_10de_11fc[] = {
-	{ 0x1179, 0x0001, NULL, { .War00C800_0 = true } }, /* Toshiba Tecra W50 */
-	{ 0x17aa, 0x2211, NULL, { .War00C800_0 = true } }, /* Lenovo W541 */
-	{ 0x17aa, 0x221e, NULL, { .War00C800_0 = true } }, /* Lenovo W541 */
-	{}
-};
-
-static const struct nvkm_device_pci_vendor
 nvkm_device_pci_10de_1247[] = {
 	{ 0x1043, 0x212a, "GeForce GT 635M" },
 	{ 0x1043, 0x212b, "GeForce GT 635M" },
@@ -1356,7 +1341,7 @@ nvkm_device_pci_10de[] = {
 	{ 0x0fc6, "GeForce GTX 650" },
 	{ 0x0fc8, "GeForce GT 740" },
 	{ 0x0fc9, "GeForce GT 730" },
-	{ 0x0fcd, "GeForce GT 755M", nvkm_device_pci_10de_0fcd },
+	{ 0x0fcd, "GeForce GT 755M" },
 	{ 0x0fce, "GeForce GT 640M LE" },
 	{ 0x0fd1, "GeForce GT 650M" },
 	{ 0x0fd2, "GeForce GT 640M", nvkm_device_pci_10de_0fd2 },
@@ -1490,7 +1475,7 @@ nvkm_device_pci_10de[] = {
 	{ 0x11e2, "GeForce GTX 765M" },
 	{ 0x11e3, "GeForce GTX 760M", nvkm_device_pci_10de_11e3 },
 	{ 0x11fa, "Quadro K4000" },
-	{ 0x11fc, "Quadro K2100M", nvkm_device_pci_10de_11fc },
+	{ 0x11fc, "Quadro K2100M" },
 	{ 0x1200, "GeForce GTX 560 Ti" },
 	{ 0x1201, "GeForce GTX 560" },
 	{ 0x1203, "GeForce GTX 460 SE v2" },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
index b5b875928aba..74de7a96c22a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
@@ -207,6 +207,8 @@ gf117_grctx_generate_attrib(struct gf100_grctx *info)
 			const u32 b = beta * gr->ppc_tpc_nr[gpc][ppc];
 			const u32 t = timeslice_mode;
 			const u32 o = PPC_UNIT(gpc, ppc, 0);
+			if (!(gr->ppc_mask[gpc] & (1 << ppc)))
+				continue;
 			mmio_skip(info, o + 0xc0, (t << 28) | (b << 16) | ++bo);
 			mmio_wr32(info, o + 0xc0, (t << 28) | (b << 16) | --bo);
 			bo += grctx->attrib_nr_max * gr->ppc_tpc_nr[gpc][ppc];
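The added guard skips PPC units whose bit is clear in ppc_mask before any MMIO is programmed for them. The underlying idiom is just bitmask-gated iteration; a tiny sketch:

#include <stdio.h>

int main(void)
{
        unsigned int ppc_mask = 0x5;    /* units 0 and 2 present, unit 1 fused off */
        unsigned int ppc;

        for (ppc = 0; ppc < 4; ppc++) {
                if (!(ppc_mask & (1u << ppc)))
                        continue;       /* don't program units that don't exist */
                printf("programming unit %u\n", ppc);
        }
        return 0;
}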
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc
index 194afe910d21..7dacb3cc0668 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc
@@ -52,10 +52,12 @@ mmio_list_base:
 #endif
 
 #ifdef INCLUDE_CODE
+#define gpc_addr(reg,addr) /*
+*/	imm32(reg,addr) /*
+*/	or reg NV_PGRAPH_GPCX_GPCCS_MMIO_CTRL_BASE_ENABLE
 #define gpc_wr32(addr,reg) /*
+*/	gpc_addr($r14,addr) /*
 */	mov b32 $r15 reg /*
-*/	imm32($r14, addr) /*
-*/	or $r14 NV_PGRAPH_GPCX_GPCCS_MMIO_CTRL_BASE_ENABLE /*
 */	call(nv_wr32)
 
 // reports an exception to the host
@@ -161,7 +163,7 @@ init:
 
 #if NV_PGRAPH_GPCX_UNK__SIZE > 0
 	// figure out which, and how many, UNKs are actually present
-	imm32($r14, 0x500c30)
+	gpc_addr($r14, 0x500c30)
 	clear b32 $r2
 	clear b32 $r3
 	clear b32 $r4
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h
index 64d07df4b8b1..bb820ff28621 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h
@@ -314,7 +314,7 @@ uint32_t gf117_grgpc_code[] = {
 	0x03f01200,
 	0x0002d000,
 	0x17f104bd,
-	0x10fe0542,
+	0x10fe0545,
 	0x0007f100,
 	0x0003f007,
 	0xbd0000d0,
@@ -338,184 +338,184 @@ uint32_t gf117_grgpc_code[] = {
 	[... 184 lines of machine-generated gf117 microcode hex words regenerated; labels and branch targets shift by a few bytes to match the gpc.fuc change above ...]
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h
index 2f596433c222..911976d20940 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h
@@ -314,7 +314,7 @@ uint32_t gk104_grgpc_code[] = {
 	0x03f01200,
 	0x0002d000,
 	0x17f104bd,
-	0x10fe0542,
+	0x10fe0545,
 	0x0007f100,
 	0x0003f007,
 	0xbd0000d0,
@@ -338,184 +338,184 @@ uint32_t gk104_grgpc_code[] = {
 	[... 184 lines of machine-generated gk104 microcode hex words regenerated; labels and branch targets shift by a few bytes to match the gpc.fuc change above ...]
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h
index ee8e54db8fc9..1c6e11b05df2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h
@@ -314,7 +314,7 @@ uint32_t gk110_grgpc_code[] = {
 	0x03f01200,
 	0x0002d000,
 	0x17f104bd,
-	0x10fe0542,
+	0x10fe0545,
 	0x0007f100,
 	0x0003f007,
 	0xbd0000d0,
@@ -338,184 +338,184 @@ uint32_t gk110_grgpc_code[] = {
 	[... 184 lines of machine-generated gk110 microcode hex words regenerated; labels and branch targets shift by a few bytes to match the gpc.fuc change above ...]
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h
index fbcc342f896f..84af7ec6a78e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h
@@ -276,7 +276,7 @@ uint32_t gk208_grgpc_code[] = {
 	0x02020014,
 	0xf6120040,
 	0x04bd0002,
-	0xfe048141,
+	0xfe048441,
 	0x00400010,
 	0x0000f607,
 	0x040204bd,
@@ -295,165 +295,165 @@ uint32_t gk208_grgpc_code[] = {
 	[... 165 lines of machine-generated gk208 microcode hex words regenerated; labels and branch targets shift by a few bytes to match the gpc.fuc change above ...]
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
index 51f5c3c6e966..11bf363a6ae9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
@@ -289,7 +289,7 @@ uint32_t gm107_grgpc_code[] = {
 	0x020014fe,
 	0x12004002,
 	0xbd0002f6,
-	0x05b04104,
+	0x05b34104,
 	0x400010fe,
 	0x00f60700,
 	0x0204bd00,
@@ -308,259 +308,259 @@ uint32_t gm107_grgpc_code[] = {
 	[... 259 lines of machine-generated gm107 microcode hex words regenerated; labels and branch targets shift by a few bytes to match the gpc.fuc change above ...]
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index dda7a7d224c9..9f5dfc85147a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -143,7 +143,7 @@ gf100_gr_zbc_depth_get(struct gf100_gr *gr, int format,
 static int
 gf100_fermi_mthd_zbc_color(struct nvkm_object *object, void *data, u32 size)
 {
-	struct gf100_gr *gr = (void *)object->engine;
+	struct gf100_gr *gr = gf100_gr(nvkm_gr(object->engine));
 	union {
 		struct fermi_a_zbc_color_v0 v0;
 	} *args = data;
@@ -189,7 +189,7 @@ gf100_fermi_mthd_zbc_color(struct nvkm_object *object, void *data, u32 size)
 static int
 gf100_fermi_mthd_zbc_depth(struct nvkm_object *object, void *data, u32 size)
 {
-	struct gf100_gr *gr = (void *)object->engine;
+	struct gf100_gr *gr = gf100_gr(nvkm_gr(object->engine));
 	union {
 		struct fermi_a_zbc_depth_v0 v0;
 	} *args = data;
@@ -1530,6 +1530,8 @@ gf100_gr_oneinit(struct nvkm_gr *base)
 		gr->ppc_nr[i] = gr->func->ppc_nr;
 		for (j = 0; j < gr->ppc_nr[i]; j++) {
 			u8 mask = nvkm_rd32(device, GPC_UNIT(i, 0x0c30 + (j * 4)));
+			if (mask)
+				gr->ppc_mask[i] |= (1 << j);
 			gr->ppc_tpc_nr[i][j] = hweight8(mask);
 		}
 	}
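
The two nouveau hunks above work as a pair: gf100_gr_oneinit() now records which PPC units actually report TPCs in a per-GPC bitmask, and gf117_grctx_generate_attrib() skips units whose bit is clear instead of emitting mmio writes for them. A minimal standalone sketch of that bookkeeping, with made-up mask values and a local hweight8() stand-in for the kernel helper:

#include <stdio.h>
#include <stdint.h>

static unsigned hweight8(uint8_t v)
{
	unsigned n = 0;                /* population count, as in the kernel */
	for (; v; v >>= 1)
		n += v & 1;
	return n;
}

int main(void)
{
	uint8_t tpc_masks[4] = { 0x3, 0x0, 0x7, 0x1 }; /* hypothetical per-PPC TPC masks */
	uint8_t ppc_mask = 0;
	uint8_t ppc_tpc_nr[4];

	for (int j = 0; j < 4; j++) {
		if (tpc_masks[j])              /* only PPCs with at least one TPC */
			ppc_mask |= 1 << j;
		ppc_tpc_nr[j] = hweight8(tpc_masks[j]);
	}

	for (int j = 0; j < 4; j++) {
		if (!(ppc_mask & (1 << j)))    /* the guard added in ctxgf117.c */
			continue;
		printf("PPC %d: %u TPCs\n", j, ppc_tpc_nr[j]);
	}
	return 0;
}
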
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index 4611961b1187..02e78b8d93f6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -97,6 +97,7 @@ struct gf100_gr {
 	u8 tpc_nr[GPC_MAX];
 	u8 tpc_total;
 	u8 ppc_nr[GPC_MAX];
+	u8 ppc_mask[GPC_MAX];
 	u8 ppc_tpc_nr[GPC_MAX][4];
 
 	struct nvkm_memory *unk4188b4;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c
index 43006db6fd58..80fed7e78dcb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c
@@ -83,6 +83,7 @@ nvbios_fan_parse(struct nvkm_bios *bios, struct nvbios_therm_fan *fan)
 		fan->type = NVBIOS_THERM_FAN_UNK;
 	}
 
+	fan->fan_mode = NVBIOS_THERM_FAN_LINEAR;
 	fan->min_duty = nvbios_rd08(bios, data + 0x02);
 	fan->max_duty = nvbios_rd08(bios, data + 0x03);
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
index 895ba74057d4..1d7dd38292b3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
@@ -97,7 +97,9 @@ static void *
 nvkm_instobj_dtor(struct nvkm_memory *memory)
 {
 	struct nvkm_instobj *iobj = nvkm_instobj(memory);
+	spin_lock(&iobj->imem->lock);
 	list_del(&iobj->head);
+	spin_unlock(&iobj->imem->lock);
 	nvkm_memory_del(&iobj->parent);
 	return iobj;
 }
@@ -190,7 +192,9 @@ nvkm_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero,
 		nvkm_memory_ctor(&nvkm_instobj_func_slow, &iobj->memory);
 		iobj->parent = memory;
 		iobj->imem = imem;
+		spin_lock(&iobj->imem->lock);
 		list_add_tail(&iobj->head, &imem->list);
+		spin_unlock(&iobj->imem->lock);
 		memory = &iobj->memory;
 	}
 
@@ -309,5 +313,6 @@ nvkm_instmem_ctor(const struct nvkm_instmem_func *func,
 {
 	nvkm_subdev_ctor(&nvkm_instmem, device, index, 0, &imem->subdev);
 	imem->func = func;
+	spin_lock_init(&imem->lock);
 	INIT_LIST_HEAD(&imem->list);
 }
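
The instmem fix is a textbook shared-list race repair: constructor and destructor both touch imem->list, so every list_add_tail()/list_del() is now bracketed by the same new spinlock, initialised alongside the list head. A userspace sketch of the identical discipline, using a pthread mutex in place of the kernel spinlock (names are illustrative only, not the driver's):

#include <pthread.h>
#include <stdio.h>

struct node { struct node *next; int id; };

struct owner {
	pthread_mutex_t lock;  /* plays the role of imem->lock */
	struct node *head;     /* plays the role of imem->list */
};

/* ctor path: add under the lock, as nvkm_instobj_new() now does */
static void obj_attach(struct owner *o, struct node *n)
{
	pthread_mutex_lock(&o->lock);
	n->next = o->head;
	o->head = n;
	pthread_mutex_unlock(&o->lock);
}

/* dtor path: unlink under the same lock, as nvkm_instobj_dtor() now does */
static void obj_detach(struct owner *o, struct node *n)
{
	struct node **pp;

	pthread_mutex_lock(&o->lock);
	for (pp = &o->head; *pp && *pp != n; pp = &(*pp)->next)
		;
	if (*pp)
		*pp = n->next;
	pthread_mutex_unlock(&o->lock);
}

int main(void)
{
	struct owner o = { .lock = PTHREAD_MUTEX_INITIALIZER, .head = NULL };
	struct node a = { .id = 1 }, b = { .id = 2 };

	obj_attach(&o, &a);
	obj_attach(&o, &b);
	obj_detach(&o, &a);
	printf("head id: %d\n", o.head ? o.head->id : -1);
	return 0;
}
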
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
index d942fa7b9f18..86f9f3b13f71 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
@@ -81,9 +81,7 @@ gk104_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
 	nvkm_mask(device, 0x000200, 0x00001000, 0x00001000);
 	nvkm_rd32(device, 0x000200);
 
-	if ( nvkm_boolopt(device->cfgopt, "War00C800_0",
-	    device->quirk ? device->quirk->War00C800_0 : false)) {
-		nvkm_info(&pmu->subdev, "hw bug workaround enabled\n");
+	if (nvkm_boolopt(device->cfgopt, "War00C800_0", true)) {
 		switch (device->chipset) {
 		case 0xe4:
 			magic(device, 0x04000000);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c
index b61509e26ec9..b735173a18ff 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c
@@ -59,7 +59,7 @@ gk104_volt_set(struct nvkm_volt *base, u32 uv)
 	duty = (uv - bios->base) * div / bios->pwm_range;
 
 	nvkm_wr32(device, 0x20340, div);
-	nvkm_wr32(device, 0x20344, 0x8000000 | duty);
+	nvkm_wr32(device, 0x20344, 0x80000000 | duty);
 
 	return 0;
 }
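
The one-character gk104_volt_set fix is easy to misread: 0x8000000 sets bit 27, while the intended 0x80000000 sets bit 31, so whatever hardware flag lives in the top bit of register 0x20344 was never being written. A quick demonstration of why a BIT()-style macro is harder to get wrong than counting zeros:

#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1u << (n))

int main(void)
{
	uint32_t duty = 0x1234;

	printf("0x%08x\n", 0x8000000u  | duty);  /* bit 27: 0x08001234 (the bug) */
	printf("0x%08x\n", 0x80000000u | duty);  /* bit 31: 0x80001234 (the fix) */
	printf("0x%08x\n", BIT(31)     | duty);  /* same value, no zero-counting */
	return 0;
}
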
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index b8e4cdec28c3..24f92bea39c7 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -112,11 +112,8 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
 	dma_addr_t paddr;
 	int ret;
 
-	/* only doing ARGB32 since this is what is needed to alpha-blend
-	 * with video overlays:
-	 */
 	sizes->surface_bpp = 32;
-	sizes->surface_depth = 32;
+	sizes->surface_depth = 24;
 
 	DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
 			sizes->surface_height, sizes->surface_bpp,
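
For the omapdrm fbdev, bpp stays 32 but the reported depth drops to 24, which in DRM's legacy bpp/depth mapping selects XRGB8888 (padding byte ignored) rather than ARGB8888; the in-kernel helper for that mapping is drm_mode_legacy_fb_format(). A small userspace re-derivation of just those two cases (the fourcc values are the real DRM codes; the helper here is a simplified stand-in):

#include <stdint.h>
#include <stdio.h>

#define FOURCC(a, b, c, d) ((uint32_t)(a) | ((uint32_t)(b) << 8) | \
			    ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))

/* simplified stand-in for drm_mode_legacy_fb_format() */
static uint32_t legacy_fb_format(unsigned int bpp, unsigned int depth)
{
	if (bpp == 32 && depth == 24)
		return FOURCC('X', 'R', '2', '4'); /* DRM_FORMAT_XRGB8888 */
	if (bpp == 32 && depth == 32)
		return FOURCC('A', 'R', '2', '4'); /* DRM_FORMAT_ARGB8888 */
	return 0;
}

int main(void)
{
	uint32_t xr = legacy_fb_format(32, 24);
	uint32_t ar = legacy_fb_format(32, 32);

	/* prints the fourcc bytes; assumes a little-endian host */
	printf("32bpp/24depth -> %.4s\n", (char *)&xr);
	printf("32bpp/32depth -> %.4s\n", (char *)&ar);
	return 0;
}
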
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 248953d2fdb7..f81fb2641097 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -4173,11 +4173,7 @@ void cik_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 	control |= ib->length_dw | (vm_id << 24);
 
 	radeon_ring_write(ring, header);
-	radeon_ring_write(ring,
-#ifdef __BIG_ENDIAN
-			  (2 << 0) |
-#endif
-			  (ib->gpu_addr & 0xFFFFFFFC));
+	radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFFC));
 	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
 	radeon_ring_write(ring, control);
 }
@@ -8472,7 +8468,7 @@ restart_ih:
 	if (queue_dp)
 		schedule_work(&rdev->dp_work);
 	if (queue_hotplug)
-		schedule_work(&rdev->hotplug_work);
+		schedule_delayed_work(&rdev->hotplug_work, 0);
 	if (queue_reset) {
 		rdev->needs_reset = true;
 		wake_up_all(&rdev->fence_queue);
@@ -9630,6 +9626,9 @@ static void dce8_program_watermarks(struct radeon_device *rdev,
 		    (rdev->disp_priority == 2)) {
 			DRM_DEBUG_KMS("force priority to high\n");
 		}
+
+		/* Save number of lines the linebuffer leads before the scanout */
+		radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
 	}
 
 	/* select wm A */
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 7f33767d7ed6..2ad462896896 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2372,6 +2372,9 @@ static void evergreen_program_watermarks(struct radeon_device *rdev,
 		c.full = dfixed_div(c, a);
 		priority_b_mark = dfixed_trunc(c);
 		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
+
+		/* Save number of lines the linebuffer leads before the scanout */
+		radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
 	}
 
 	/* select wm A */
@@ -5344,7 +5347,7 @@ restart_ih:
 	if (queue_dp)
 		schedule_work(&rdev->dp_work);
 	if (queue_hotplug)
-		schedule_work(&rdev->hotplug_work);
+		schedule_delayed_work(&rdev->hotplug_work, 0);
 	if (queue_hdmi)
 		schedule_work(&rdev->audio_work);
 	if (queue_thermal && rdev->pm.dpm_enabled)
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 238b13f045c1..9e7e2bf03b81 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -806,7 +806,7 @@ int r100_irq_process(struct radeon_device *rdev)
 		status = r100_irq_ack(rdev);
 	}
 	if (queue_hotplug)
-		schedule_work(&rdev->hotplug_work);
+		schedule_delayed_work(&rdev->hotplug_work, 0);
 	if (rdev->msi_enabled) {
 		switch (rdev->family) {
 		case CHIP_RS400:
@@ -3217,6 +3217,9 @@ void r100_bandwidth_update(struct radeon_device *rdev)
 	uint32_t pixel_bytes1 = 0;
 	uint32_t pixel_bytes2 = 0;
 
+	/* Guess line buffer size to be 8192 pixels */
+	u32 lb_size = 8192;
+
 	if (!rdev->mode_info.mode_config_initialized)
 		return;
 
@@ -3631,6 +3634,13 @@ void r100_bandwidth_update(struct radeon_device *rdev)
 		DRM_DEBUG_KMS("GRPH2_BUFFER_CNTL from to %x\n",
 			      (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL));
 	}
+
+	/* Save number of lines the linebuffer leads before the scanout */
+	if (mode1)
+		rdev->mode_info.crtcs[0]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode1->crtc_hdisplay);
+
+	if (mode2)
+		rdev->mode_info.crtcs[1]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode2->crtc_hdisplay);
 }
 
 int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 4ea5b10ff5f4..cc2fdf0be37a 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -4276,7 +4276,7 @@ restart_ih:
 		WREG32(IH_RB_RPTR, rptr);
 	}
 	if (queue_hotplug)
-		schedule_work(&rdev->hotplug_work);
+		schedule_delayed_work(&rdev->hotplug_work, 0);
 	if (queue_hdmi)
 		schedule_work(&rdev->audio_work);
 	if (queue_thermal && rdev->pm.dpm_enabled)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index b6cbd816537e..87db64983ea8 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -2414,7 +2414,7 @@ struct radeon_device {
 	struct r600_ih ih; /* r6/700 interrupt ring */
 	struct radeon_rlc rlc;
 	struct radeon_mec mec;
-	struct work_struct hotplug_work;
+	struct delayed_work hotplug_work;
 	struct work_struct dp_work;
 	struct work_struct audio_work;
 	int num_crtc; /* number of crtcs */
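
Converting hotplug_work from work_struct to delayed_work is what lets radeon_connectors.c (below) re-run detection a second later; the price is that every handler reached through the embedded work_struct must now step back out via the .work member, which is exactly the radeon_irq_kms.c change at the end of this series. A compilable illustration of that container_of walk (the struct names here are toy stand-ins):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };
struct delayed_work { struct work_struct work; int delay; };
struct toy_device { int id; struct delayed_work hotplug_work; };

static void handler(struct work_struct *work)
{
	/* same shape as radeon_hotplug_work_func() after the patch:
	 * note the extra ".work" hop through the delayed_work wrapper */
	struct toy_device *dev =
		container_of(work, struct toy_device, hotplug_work.work);
	printf("hotplug on device %d\n", dev->id);
}

int main(void)
{
	struct toy_device d = { .id = 7 };
	handler(&d.hotplug_work.work);
	return 0;
}
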
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index fe994aac3b04..c77d349c561c 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -54,6 +54,9 @@ static struct radeon_agpmode_quirk radeon_agpmode_quirk_list[] = {
 	/* Intel 82855PM host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (lp #195051) */
 	{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4e50,
 		PCI_VENDOR_ID_IBM, 0x0550, 1},
+	/* Intel 82855PM host bridge / RV250/M9 GL [Mobility FireGL 9000/Radeon 9000] needs AGPMode 1 (Thinkpad T40p) */
+	{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c66,
+		PCI_VENDOR_ID_IBM, 0x054d, 1},
 	/* Intel 82855PM host bridge / Mobility M7 needs AGPMode 1 */
 	{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c57,
 		PCI_VENDOR_ID_IBM, 0x0530, 1},
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 5a2cafb4f1bc..340f3f549f29 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1234,13 +1234,32 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
 	if (r < 0)
 		return connector_status_disconnected;
 
+	if (radeon_connector->detected_hpd_without_ddc) {
+		force = true;
+		radeon_connector->detected_hpd_without_ddc = false;
+	}
+
 	if (!force && radeon_check_hpd_status_unchanged(connector)) {
 		ret = connector->status;
 		goto exit;
 	}
 
-	if (radeon_connector->ddc_bus)
+	if (radeon_connector->ddc_bus) {
 		dret = radeon_ddc_probe(radeon_connector, false);
+
+		/* Sometimes the pins required for the DDC probe on DVI
+		 * connectors don't make contact at the same time that the ones
+		 * for HPD do. If the DDC probe fails even though we had an HPD
+		 * signal, try again later */
+		if (!dret && !force &&
+		    connector->status != connector_status_connected) {
+			DRM_DEBUG_KMS("hpd detected without ddc, retrying in 1 second\n");
+			radeon_connector->detected_hpd_without_ddc = true;
+			schedule_delayed_work(&rdev->hotplug_work,
+					      msecs_to_jiffies(1000));
+			goto exit;
+		}
+	}
 	if (dret) {
 		radeon_connector->detected_by_load = false;
 		radeon_connector_free_edid(connector);
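
The radeon_dvi_detect() change encodes a simple recovery heuristic: when the hotplug pin says a monitor is present but the DDC lines don't answer yet (they can physically mate later than the HPD pin), remember the mismatch and schedule a re-detect instead of declaring the connector disconnected. Stripped of the DRM machinery, the control flow looks like this (read_hpd()/probe_ddc() are invented stand-ins, not real driver calls):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static bool read_hpd(void) { return true; }                 /* invented stand-in */
static bool probe_ddc(int attempt) { return attempt > 0; }  /* succeeds on retry */

int main(void)
{
	for (int attempt = 0; attempt < 2; attempt++) {
		if (!read_hpd()) {
			puts("disconnected");
			return 0;
		}
		if (probe_ddc(attempt)) {
			puts("connected");
			return 0;
		}
		/* HPD says present, DDC says nothing: don't trust either yet */
		puts("hpd without ddc, retrying in 1 second");
		sleep(1);
	}
	puts("still no ddc, falling back to load detection");
	return 0;
}
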
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index a8d9927ed9eb..1eca0acac016 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -322,7 +322,9 @@ void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id)
 	 * to complete in this vblank?
 	 */
 	if (update_pending &&
-	    (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id, 0,
+	    (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev,
+							       crtc_id,
+							       USE_REAL_VBLANKSTART,
 							       &vpos, &hpos, NULL, NULL,
 							       &rdev->mode_info.crtcs[crtc_id]->base.hwmode)) &&
 	    ((vpos >= (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100) ||
@@ -401,6 +403,8 @@ static void radeon_flip_work_func(struct work_struct *__work)
 	struct drm_crtc *crtc = &radeon_crtc->base;
 	unsigned long flags;
 	int r;
+	int vpos, hpos, stat, min_udelay;
+	struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
 
 	down_read(&rdev->exclusive_lock);
 	if (work->fence) {
@@ -437,6 +441,41 @@ static void radeon_flip_work_func(struct work_struct *__work)
 	/* set the proper interrupt */
 	radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id);
 
+	/* If this happens to execute within the "virtually extended" vblank
+	 * interval before the start of the real vblank interval then it needs
+	 * to delay programming the mmio flip until the real vblank is entered.
+	 * This prevents completing a flip too early due to the way we fudge
+	 * our vblank counter and vblank timestamps in order to work around the
+	 * problem that the hw fires vblank interrupts before actual start of
+	 * vblank (when line buffer refilling is done for a frame). It
+	 * complements the fudging logic in radeon_get_crtc_scanoutpos() for
+	 * timestamping and radeon_get_vblank_counter_kms() for vblank counts.
+	 *
+	 * In practice this won't execute very often unless on very fast
+	 * machines because the time window for this to happen is very small.
+	 */
+	for (;;) {
+		/* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
+		 * start in hpos, and to the "fudged earlier" vblank start in
+		 * vpos.
+		 */
+		stat = radeon_get_crtc_scanoutpos(rdev->ddev, work->crtc_id,
+						  GET_DISTANCE_TO_VBLANKSTART,
+						  &vpos, &hpos, NULL, NULL,
+						  &crtc->hwmode);
+
+		if ((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
+		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE) ||
+		    !(vpos >= 0 && hpos <= 0))
+			break;
+
+		/* Sleep at least until estimated real start of hw vblank */
+		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+		min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
+		usleep_range(min_udelay, 2 * min_udelay);
+		spin_lock_irqsave(&crtc->dev->event_lock, flags);
+	};
+
 	/* do the flip (mmio) */
 	radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base);
 
@@ -1768,6 +1807,15 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
  * \param dev Device to query.
  * \param crtc Crtc to query.
  * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
+ *              For driver internal use only also supports these flags:
+ *
+ *              USE_REAL_VBLANKSTART to use the real start of vblank instead
+ *              of a fudged earlier start of vblank.
+ *
+ *              GET_DISTANCE_TO_VBLANKSTART to return distance to the
+ *              fudged earlier start of vblank in *vpos and the distance
+ *              to true start of vblank in *hpos.
+ *
  * \param *vpos Location where vertical scanout position should be stored.
  * \param *hpos Location where horizontal scanout position should go.
  * \param *stime Target location for timestamp taken immediately before
@@ -1911,10 +1959,40 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
 		vbl_end = 0;
 	}
 
+	/* Called from driver internal vblank counter query code? */
+	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
+		/* Caller wants distance from real vbl_start in *hpos */
+		*hpos = *vpos - vbl_start;
+	}
+
+	/* Fudge vblank to start a few scanlines earlier to handle the
+	 * problem that vblank irqs fire a few scanlines before start
+	 * of vblank. Some driver internal callers need the true vblank
+	 * start to be used and signal this via the USE_REAL_VBLANKSTART flag.
+	 *
+	 * The cause of the "early" vblank irq is that the irq is triggered
+	 * by the line buffer logic when the line buffer read position enters
+	 * the vblank, whereas our crtc scanout position naturally lags the
+	 * line buffer read position.
+	 */
+	if (!(flags & USE_REAL_VBLANKSTART))
+		vbl_start -= rdev->mode_info.crtcs[pipe]->lb_vblank_lead_lines;
+
 	/* Test scanout position against vblank region. */
 	if ((*vpos < vbl_start) && (*vpos >= vbl_end))
 		in_vbl = false;
 
+	/* In vblank? */
+	if (in_vbl)
+		ret |= DRM_SCANOUTPOS_IN_VBLANK;
+
+	/* Called from driver internal vblank counter query code? */
+	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
+		/* Caller wants distance from fudged earlier vbl_start */
+		*vpos -= vbl_start;
+		return ret;
+	}
+
 	/* Check if inside vblank area and apply corrective offsets:
 	 * vpos will then be >=0 in video scanout area, but negative
 	 * within vblank area, counting down the number of lines until
@@ -1930,31 +2008,5 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
1930 /* Correct for shifted end of vbl at vbl_end. */ 2008 /* Correct for shifted end of vbl at vbl_end. */
1931 *vpos = *vpos - vbl_end; 2009 *vpos = *vpos - vbl_end;
1932 2010
1933 /* In vblank? */
1934 if (in_vbl)
1935 ret |= DRM_SCANOUTPOS_IN_VBLANK;
1936
1937 /* Is vpos outside nominal vblank area, but less than
1938 * 1/100 of a frame height away from start of vblank?
1939 * If so, assume this isn't a massively delayed vblank
1940 * interrupt, but a vblank interrupt that fired a few
1941 * microseconds before true start of vblank. Compensate
1942 * by adding a full frame duration to the final timestamp.
1943 * Happens, e.g., on ATI R500, R600.
1944 *
1945 * We only do this if DRM_CALLED_FROM_VBLIRQ.
1946 */
1947 if ((flags & DRM_CALLED_FROM_VBLIRQ) && !in_vbl) {
1948 vbl_start = mode->crtc_vdisplay;
1949 vtotal = mode->crtc_vtotal;
1950
1951 if (vbl_start - *vpos < vtotal / 100) {
1952 *vpos -= vtotal;
1953
1954 /* Signal this correction as "applied". */
1955 ret |= 0x8;
1956 }
1957 }
1958
1959 return ret; 2011 return ret;
1960} 2012}
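To make the fudge concrete, here is a small standalone sketch with invented numbers: a nominal vbl_start of 1080 and lb_vblank_lead_lines of 5. With USE_REAL_VBLANKSTART, scanline 1077 is still scanout; by default the start is pulled back to 1075, so 1077 already counts as vblank, matching when the irq actually fires:

    #include <stdio.h>

    int main(void)
    {
            int vbl_end = 0, lead_lines = 5, vpos = 1077;
            int vbl_start = 1080;           /* real vblank start line */

            /* USE_REAL_VBLANKSTART: 1077 is still in active scanout */
            printf("%d\n", !(vpos < vbl_start && vpos >= vbl_end)); /* 0 */

            /* default: fudge the start back to 1075 */
            vbl_start -= lead_lines;
            printf("%d\n", !(vpos < vbl_start && vpos >= vbl_end)); /* 1 */
            return 0;
    }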
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 171d3e43c30c..979f3bf65f2c 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -74,7 +74,7 @@ irqreturn_t radeon_driver_irq_handler_kms(int irq, void *arg)
74static void radeon_hotplug_work_func(struct work_struct *work) 74static void radeon_hotplug_work_func(struct work_struct *work)
75{ 75{
76 struct radeon_device *rdev = container_of(work, struct radeon_device, 76 struct radeon_device *rdev = container_of(work, struct radeon_device,
77 hotplug_work); 77 hotplug_work.work);
78 struct drm_device *dev = rdev->ddev; 78 struct drm_device *dev = rdev->ddev;
79 struct drm_mode_config *mode_config = &dev->mode_config; 79 struct drm_mode_config *mode_config = &dev->mode_config;
80 struct drm_connector *connector; 80 struct drm_connector *connector;
@@ -302,7 +302,7 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
302 } 302 }
303 } 303 }
304 304
305 INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func); 305 INIT_DELAYED_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
306 INIT_WORK(&rdev->dp_work, radeon_dp_work_func); 306 INIT_WORK(&rdev->dp_work, radeon_dp_work_func);
307 INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi); 307 INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
308 308
@@ -310,7 +310,7 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
310 r = drm_irq_install(rdev->ddev, rdev->ddev->pdev->irq); 310 r = drm_irq_install(rdev->ddev, rdev->ddev->pdev->irq);
311 if (r) { 311 if (r) {
312 rdev->irq.installed = false; 312 rdev->irq.installed = false;
313 flush_work(&rdev->hotplug_work); 313 flush_delayed_work(&rdev->hotplug_work);
314 return r; 314 return r;
315 } 315 }
316 316
@@ -333,7 +333,7 @@ void radeon_irq_kms_fini(struct radeon_device *rdev)
333 rdev->irq.installed = false; 333 rdev->irq.installed = false;
334 if (rdev->msi_enabled) 334 if (rdev->msi_enabled)
335 pci_disable_msi(rdev->pdev); 335 pci_disable_msi(rdev->pdev);
336 flush_work(&rdev->hotplug_work); 336 flush_delayed_work(&rdev->hotplug_work);
337 } 337 }
338} 338}
339 339
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 0ec6fcca16d3..d290a8a09036 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -755,6 +755,8 @@ void radeon_driver_preclose_kms(struct drm_device *dev,
755 */ 755 */
756u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc) 756u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc)
757{ 757{
758 int vpos, hpos, stat;
759 u32 count;
758 struct radeon_device *rdev = dev->dev_private; 760 struct radeon_device *rdev = dev->dev_private;
759 761
760 if (crtc < 0 || crtc >= rdev->num_crtc) { 762 if (crtc < 0 || crtc >= rdev->num_crtc) {
@@ -762,7 +764,53 @@ u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc)
762 return -EINVAL; 764 return -EINVAL;
763 } 765 }
764 766
765 return radeon_get_vblank_counter(rdev, crtc); 767 /* The hw increments its frame counter at start of vsync, not at start
768 * of vblank, as is required by DRM core vblank counter handling.
769 * Cook the hw count here to make it appear to the caller as if it
770 * incremented at start of vblank. We measure distance to start of
771 * vblank in vpos. vpos therefore will be >= 0 between start of vblank
772 * and start of vsync, so vpos >= 0 means to bump the hw frame counter
773 * result by 1 to give the proper appearance to caller.
774 */
775 if (rdev->mode_info.crtcs[crtc]) {
776 /* Repeat readout if needed to provide stable result if
777 * we cross start of vsync during the queries.
778 */
779 do {
780 count = radeon_get_vblank_counter(rdev, crtc);
781 /* Ask radeon_get_crtc_scanoutpos to return vpos as
782 * distance to start of vblank, instead of regular
783 * vertical scanout pos.
784 */
785 stat = radeon_get_crtc_scanoutpos(
786 dev, crtc, GET_DISTANCE_TO_VBLANKSTART,
787 &vpos, &hpos, NULL, NULL,
788 &rdev->mode_info.crtcs[crtc]->base.hwmode);
789 } while (count != radeon_get_vblank_counter(rdev, crtc));
790
791 if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
792 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
793 DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
794 }
795 else {
796 DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n",
797 crtc, vpos);
798
799 /* Bump counter if we are at >= leading edge of vblank,
800 * but before vsync where vpos would turn negative and
801 * the hw counter really increments.
802 */
803 if (vpos >= 0)
804 count++;
805 }
806 }
807 else {
808 /* Fallback to use value as is. */
809 count = radeon_get_vblank_counter(rdev, crtc);
810 DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
811 }
812
813 return count;
766} 814}
767 815
768/** 816/**
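A standalone model of the cook applied above; the counts and distances are invented. vpos is the distance past the real vblank start, so it stays >= 0 from vblank start until vsync start, where the hardware counter really increments:

    #include <stdio.h>

    static unsigned int cooked_count(unsigned int hw_count, int vpos)
    {
            return vpos >= 0 ? hw_count + 1 : hw_count;
    }

    int main(void)
    {
            printf("%u\n", cooked_count(1000, -300)); /* mid-scanout: 1000 */
            printf("%u\n", cooked_count(1000, 3));    /* in vblank:   1001 */
            return 0;
    }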
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 830e171c3a9e..bba112628b47 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -367,6 +367,7 @@ struct radeon_crtc {
367 u32 line_time; 367 u32 line_time;
368 u32 wm_low; 368 u32 wm_low;
369 u32 wm_high; 369 u32 wm_high;
370 u32 lb_vblank_lead_lines;
370 struct drm_display_mode hw_mode; 371 struct drm_display_mode hw_mode;
371 enum radeon_output_csc output_csc; 372 enum radeon_output_csc output_csc;
372}; 373};
@@ -553,6 +554,7 @@ struct radeon_connector {
553 void *con_priv; 554 void *con_priv;
554 bool dac_load_detect; 555 bool dac_load_detect;
555 bool detected_by_load; /* if the connection status was determined by load */ 556 bool detected_by_load; /* if the connection status was determined by load */
557 bool detected_hpd_without_ddc; /* if an HPD signal was detected on DVI, but ddc probing failed */
556 uint16_t connector_object_id; 558 uint16_t connector_object_id;
557 struct radeon_hpd hpd; 559 struct radeon_hpd hpd;
558 struct radeon_router router; 560 struct radeon_router router;
@@ -686,6 +688,9 @@ struct atom_voltage_table
686 struct atom_voltage_table_entry entries[MAX_VOLTAGE_ENTRIES]; 688 struct atom_voltage_table_entry entries[MAX_VOLTAGE_ENTRIES];
687}; 689};
688 690
691/* Driver internal use only flags of radeon_get_crtc_scanoutpos() */
692#define USE_REAL_VBLANKSTART (1 << 30)
693#define GET_DISTANCE_TO_VBLANKSTART (1 << 31)
689 694
690extern void 695extern void
691radeon_add_atom_connector(struct drm_device *dev, 696radeon_add_atom_connector(struct drm_device *dev,
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index f4f03dcc1530..59abebd6b5dc 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -1756,7 +1756,9 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev)
1756 */ 1756 */
1757 for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) { 1757 for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
1758 if (rdev->pm.active_crtcs & (1 << crtc)) { 1758 if (rdev->pm.active_crtcs & (1 << crtc)) {
1759 vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, 0, 1759 vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev,
1760 crtc,
1761 USE_REAL_VBLANKSTART,
1760 &vpos, &hpos, NULL, NULL, 1762 &vpos, &hpos, NULL, NULL,
1761 &rdev->mode_info.crtcs[crtc]->base.hwmode); 1763 &rdev->mode_info.crtcs[crtc]->base.hwmode);
1762 if ((vbl_status & DRM_SCANOUTPOS_VALID) && 1764 if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index 574f62bbd215..7eb1ae758906 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -361,31 +361,31 @@ int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring,
361 361
362 /* stitch together a VCE create msg */ 362 /* stitch together a VCE create msg */
363 ib.length_dw = 0; 363 ib.length_dw = 0;
364 ib.ptr[ib.length_dw++] = 0x0000000c; /* len */ 364 ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000c); /* len */
365 ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */ 365 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001); /* session cmd */
366 ib.ptr[ib.length_dw++] = handle; 366 ib.ptr[ib.length_dw++] = cpu_to_le32(handle);
367 367
368 ib.ptr[ib.length_dw++] = 0x00000030; /* len */ 368 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000030); /* len */
369 ib.ptr[ib.length_dw++] = 0x01000001; /* create cmd */ 369 ib.ptr[ib.length_dw++] = cpu_to_le32(0x01000001); /* create cmd */
370 ib.ptr[ib.length_dw++] = 0x00000000; 370 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000000);
371 ib.ptr[ib.length_dw++] = 0x00000042; 371 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000042);
372 ib.ptr[ib.length_dw++] = 0x0000000a; 372 ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000a);
373 ib.ptr[ib.length_dw++] = 0x00000001; 373 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001);
374 ib.ptr[ib.length_dw++] = 0x00000080; 374 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000080);
375 ib.ptr[ib.length_dw++] = 0x00000060; 375 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000060);
376 ib.ptr[ib.length_dw++] = 0x00000100; 376 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000100);
377 ib.ptr[ib.length_dw++] = 0x00000100; 377 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000100);
378 ib.ptr[ib.length_dw++] = 0x0000000c; 378 ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000c);
379 ib.ptr[ib.length_dw++] = 0x00000000; 379 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000000);
380 380
381 ib.ptr[ib.length_dw++] = 0x00000014; /* len */ 381 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000014); /* len */
382 ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */ 382 ib.ptr[ib.length_dw++] = cpu_to_le32(0x05000005); /* feedback buffer */
383 ib.ptr[ib.length_dw++] = upper_32_bits(dummy); 383 ib.ptr[ib.length_dw++] = cpu_to_le32(upper_32_bits(dummy));
384 ib.ptr[ib.length_dw++] = dummy; 384 ib.ptr[ib.length_dw++] = cpu_to_le32(dummy);
385 ib.ptr[ib.length_dw++] = 0x00000001; 385 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001);
386 386
387 for (i = ib.length_dw; i < ib_size_dw; ++i) 387 for (i = ib.length_dw; i < ib_size_dw; ++i)
388 ib.ptr[i] = 0x0; 388 ib.ptr[i] = cpu_to_le32(0x0);
389 389
390 r = radeon_ib_schedule(rdev, &ib, NULL, false); 390 r = radeon_ib_schedule(rdev, &ib, NULL, false);
391 if (r) { 391 if (r) {
@@ -428,21 +428,21 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
428 428
429 /* stitch together a VCE destroy msg */ 429 /* stitch together a VCE destroy msg */
430 ib.length_dw = 0; 430 ib.length_dw = 0;
431 ib.ptr[ib.length_dw++] = 0x0000000c; /* len */ 431 ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000c); /* len */
432 ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */ 432 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001); /* session cmd */
433 ib.ptr[ib.length_dw++] = handle; 433 ib.ptr[ib.length_dw++] = cpu_to_le32(handle);
434 434
435 ib.ptr[ib.length_dw++] = 0x00000014; /* len */ 435 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000014); /* len */
436 ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */ 436 ib.ptr[ib.length_dw++] = cpu_to_le32(0x05000005); /* feedback buffer */
437 ib.ptr[ib.length_dw++] = upper_32_bits(dummy); 437 ib.ptr[ib.length_dw++] = cpu_to_le32(upper_32_bits(dummy));
438 ib.ptr[ib.length_dw++] = dummy; 438 ib.ptr[ib.length_dw++] = cpu_to_le32(dummy);
439 ib.ptr[ib.length_dw++] = 0x00000001; 439 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001);
440 440
441 ib.ptr[ib.length_dw++] = 0x00000008; /* len */ 441 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000008); /* len */
442 ib.ptr[ib.length_dw++] = 0x02000001; /* destroy cmd */ 442 ib.ptr[ib.length_dw++] = cpu_to_le32(0x02000001); /* destroy cmd */
443 443
444 for (i = ib.length_dw; i < ib_size_dw; ++i) 444 for (i = ib.length_dw; i < ib_size_dw; ++i)
445 ib.ptr[i] = 0x0; 445 ib.ptr[i] = cpu_to_le32(0x0);
446 446
447 r = radeon_ib_schedule(rdev, &ib, NULL, false); 447 r = radeon_ib_schedule(rdev, &ib, NULL, false);
448 if (r) { 448 if (r) {
@@ -699,12 +699,12 @@ bool radeon_vce_semaphore_emit(struct radeon_device *rdev,
699{ 699{
700 uint64_t addr = semaphore->gpu_addr; 700 uint64_t addr = semaphore->gpu_addr;
701 701
702 radeon_ring_write(ring, VCE_CMD_SEMAPHORE); 702 radeon_ring_write(ring, cpu_to_le32(VCE_CMD_SEMAPHORE));
703 radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF); 703 radeon_ring_write(ring, cpu_to_le32((addr >> 3) & 0x000FFFFF));
704 radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF); 704 radeon_ring_write(ring, cpu_to_le32((addr >> 23) & 0x000FFFFF));
705 radeon_ring_write(ring, 0x01003000 | (emit_wait ? 1 : 0)); 705 radeon_ring_write(ring, cpu_to_le32(0x01003000 | (emit_wait ? 1 : 0)));
706 if (!emit_wait) 706 if (!emit_wait)
707 radeon_ring_write(ring, VCE_CMD_END); 707 radeon_ring_write(ring, cpu_to_le32(VCE_CMD_END));
708 708
709 return true; 709 return true;
710} 710}
@@ -719,10 +719,10 @@ bool radeon_vce_semaphore_emit(struct radeon_device *rdev,
719void radeon_vce_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) 719void radeon_vce_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
720{ 720{
721 struct radeon_ring *ring = &rdev->ring[ib->ring]; 721 struct radeon_ring *ring = &rdev->ring[ib->ring];
722 radeon_ring_write(ring, VCE_CMD_IB); 722 radeon_ring_write(ring, cpu_to_le32(VCE_CMD_IB));
723 radeon_ring_write(ring, ib->gpu_addr); 723 radeon_ring_write(ring, cpu_to_le32(ib->gpu_addr));
724 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr)); 724 radeon_ring_write(ring, cpu_to_le32(upper_32_bits(ib->gpu_addr)));
725 radeon_ring_write(ring, ib->length_dw); 725 radeon_ring_write(ring, cpu_to_le32(ib->length_dw));
726} 726}
727 727
728/** 728/**
@@ -738,12 +738,12 @@ void radeon_vce_fence_emit(struct radeon_device *rdev,
738 struct radeon_ring *ring = &rdev->ring[fence->ring]; 738 struct radeon_ring *ring = &rdev->ring[fence->ring];
739 uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr; 739 uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr;
740 740
741 radeon_ring_write(ring, VCE_CMD_FENCE); 741 radeon_ring_write(ring, cpu_to_le32(VCE_CMD_FENCE));
742 radeon_ring_write(ring, addr); 742 radeon_ring_write(ring, cpu_to_le32(addr));
743 radeon_ring_write(ring, upper_32_bits(addr)); 743 radeon_ring_write(ring, cpu_to_le32(upper_32_bits(addr)));
744 radeon_ring_write(ring, fence->seq); 744 radeon_ring_write(ring, cpu_to_le32(fence->seq));
745 radeon_ring_write(ring, VCE_CMD_TRAP); 745 radeon_ring_write(ring, cpu_to_le32(VCE_CMD_TRAP));
746 radeon_ring_write(ring, VCE_CMD_END); 746 radeon_ring_write(ring, cpu_to_le32(VCE_CMD_END));
747} 747}
748 748
749/** 749/**
@@ -765,7 +765,7 @@ int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
765 ring->idx, r); 765 ring->idx, r);
766 return r; 766 return r;
767 } 767 }
768 radeon_ring_write(ring, VCE_CMD_END); 768 radeon_ring_write(ring, cpu_to_le32(VCE_CMD_END));
769 radeon_ring_unlock_commit(rdev, ring, false); 769 radeon_ring_unlock_commit(rdev, ring, false);
770 770
771 for (i = 0; i < rdev->usec_timeout; i++) { 771 for (i = 0; i < rdev->usec_timeout; i++) {
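The cpu_to_le32() wrappers make the VCE command stream byte-order independent: a no-op on little-endian hosts, a byte swap on big-endian ones, so the engine sees the same dwords either way. A rough userspace stand-in, with my_cpu_to_le32 as a hypothetical name for the kernel helper:

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t my_cpu_to_le32(uint32_t x)
    {
    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
            return __builtin_bswap32(x);    /* swap on big-endian */
    #else
            return x;                       /* identity on little-endian */
    #endif
    }

    int main(void)
    {
            /* the "create cmd" dword from the message above */
            printf("0x%08x\n", (unsigned)my_cpu_to_le32(0x01000001));
            return 0;
    }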
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 97a904835759..6244f4e44e9a 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -813,7 +813,7 @@ int rs600_irq_process(struct radeon_device *rdev)
813 status = rs600_irq_ack(rdev); 813 status = rs600_irq_ack(rdev);
814 } 814 }
815 if (queue_hotplug) 815 if (queue_hotplug)
816 schedule_work(&rdev->hotplug_work); 816 schedule_delayed_work(&rdev->hotplug_work, 0);
817 if (queue_hdmi) 817 if (queue_hdmi)
818 schedule_work(&rdev->audio_work); 818 schedule_work(&rdev->audio_work);
819 if (rdev->msi_enabled) { 819 if (rdev->msi_enabled) {
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 516ca27cfa12..6bc44c24e837 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -207,6 +207,9 @@ void rs690_line_buffer_adjust(struct radeon_device *rdev,
207{ 207{
208 u32 tmp; 208 u32 tmp;
209 209
210 /* Guess line buffer size to be 8192 pixels */
211 u32 lb_size = 8192;
212
210 /* 213 /*
211 * Line Buffer Setup 214 * Line Buffer Setup
212 * There is a single line buffer shared by both display controllers. 215 * There is a single line buffer shared by both display controllers.
@@ -243,6 +246,13 @@ void rs690_line_buffer_adjust(struct radeon_device *rdev,
243 tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; 246 tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
244 } 247 }
245 WREG32(R_006520_DC_LB_MEMORY_SPLIT, tmp); 248 WREG32(R_006520_DC_LB_MEMORY_SPLIT, tmp);
249
250 /* Save number of lines the linebuffer leads before the scanout */
251 if (mode1)
252 rdev->mode_info.crtcs[0]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode1->crtc_hdisplay);
253
254 if (mode2)
255 rdev->mode_info.crtcs[1]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode2->crtc_hdisplay);
246} 256}
247 257
248struct rs690_watermark { 258struct rs690_watermark {
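The lead-line count is just the guessed line buffer size divided by the mode width, rounded up. Worked example, assuming an illustrative 1920-pixel-wide mode:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            /* 8192-pixel line buffer, 1920-pixel-wide mode: the line
             * buffer read position leads scanout by ceil(8192/1920) = 5
             * lines. */
            printf("%u\n", DIV_ROUND_UP(8192u, 1920u));
            return 0;
    }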
diff --git a/drivers/gpu/drm/radeon/rv730_dpm.c b/drivers/gpu/drm/radeon/rv730_dpm.c
index 3f5e1cf138ba..d37ba2cb886e 100644
--- a/drivers/gpu/drm/radeon/rv730_dpm.c
+++ b/drivers/gpu/drm/radeon/rv730_dpm.c
@@ -464,7 +464,7 @@ void rv730_stop_dpm(struct radeon_device *rdev)
464 result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_TwoLevelsDisabled); 464 result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_TwoLevelsDisabled);
465 465
466 if (result != PPSMC_Result_OK) 466 if (result != PPSMC_Result_OK)
467 DRM_ERROR("Could not force DPM to low\n"); 467 DRM_DEBUG("Could not force DPM to low\n");
468 468
469 WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN); 469 WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
470 470
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index b9c770745a7a..e830c8935db0 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -193,7 +193,7 @@ void rv770_stop_dpm(struct radeon_device *rdev)
193 result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_TwoLevelsDisabled); 193 result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_TwoLevelsDisabled);
194 194
195 if (result != PPSMC_Result_OK) 195 if (result != PPSMC_Result_OK)
196 DRM_ERROR("Could not force DPM to low.\n"); 196 DRM_DEBUG("Could not force DPM to low.\n");
197 197
198 WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN); 198 WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
199 199
@@ -1418,7 +1418,7 @@ int rv770_resume_smc(struct radeon_device *rdev)
1418int rv770_set_sw_state(struct radeon_device *rdev) 1418int rv770_set_sw_state(struct radeon_device *rdev)
1419{ 1419{
1420 if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_SwitchToSwState) != PPSMC_Result_OK) 1420 if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_SwitchToSwState) != PPSMC_Result_OK)
1421 return -EINVAL; 1421 DRM_DEBUG("rv770_set_sw_state failed\n");
1422 return 0; 1422 return 0;
1423} 1423}
1424 1424
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 07037e32dea3..f878d6962da5 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2376,6 +2376,9 @@ static void dce6_program_watermarks(struct radeon_device *rdev,
2376 c.full = dfixed_div(c, a); 2376 c.full = dfixed_div(c, a);
2377 priority_b_mark = dfixed_trunc(c); 2377 priority_b_mark = dfixed_trunc(c);
2378 priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK; 2378 priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
2379
2380 /* Save number of lines the linebuffer leads before the scanout */
2381 radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
2379 } 2382 }
2380 2383
2381 /* select wm A */ 2384 /* select wm A */
@@ -6848,7 +6851,7 @@ restart_ih:
6848 if (queue_dp) 6851 if (queue_dp)
6849 schedule_work(&rdev->dp_work); 6852 schedule_work(&rdev->dp_work);
6850 if (queue_hotplug) 6853 if (queue_hotplug)
6851 schedule_work(&rdev->hotplug_work); 6854 schedule_delayed_work(&rdev->hotplug_work, 0);
6852 if (queue_thermal && rdev->pm.dpm_enabled) 6855 if (queue_thermal && rdev->pm.dpm_enabled)
6853 schedule_work(&rdev->pm.dpm.thermal.work); 6856 schedule_work(&rdev->pm.dpm.thermal.work);
6854 rdev->ih.rptr = rptr; 6857 rdev->ih.rptr = rptr;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index 8caea0a33dd8..d908321b94ce 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -67,6 +67,7 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
67 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap(). 67 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
68 */ 68 */
69 vma->vm_flags &= ~VM_PFNMAP; 69 vma->vm_flags &= ~VM_PFNMAP;
70 vma->vm_pgoff = 0;
70 71
71 ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr, 72 ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
72 obj->size, &rk_obj->dma_attrs); 73 obj->size, &rk_obj->dma_attrs);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 5d8ae5e49c44..03c47eeadc81 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -374,6 +374,7 @@ static const struct of_device_id vop_driver_dt_match[] = {
374 .data = &rk3288_vop }, 374 .data = &rk3288_vop },
375 {}, 375 {},
376}; 376};
377MODULE_DEVICE_TABLE(of, vop_driver_dt_match);
377 378
378static inline void vop_writel(struct vop *vop, uint32_t offset, uint32_t v) 379static inline void vop_writel(struct vop *vop, uint32_t offset, uint32_t v)
379{ 380{
@@ -959,8 +960,8 @@ static int vop_update_plane_event(struct drm_plane *plane,
959 val = (dest.y2 - dest.y1 - 1) << 16; 960 val = (dest.y2 - dest.y1 - 1) << 16;
960 val |= (dest.x2 - dest.x1 - 1) & 0xffff; 961 val |= (dest.x2 - dest.x1 - 1) & 0xffff;
961 VOP_WIN_SET(vop, win, dsp_info, val); 962 VOP_WIN_SET(vop, win, dsp_info, val);
962 val = (dsp_sty - 1) << 16; 963 val = dsp_sty << 16;
963 val |= (dsp_stx - 1) & 0xffff; 964 val |= dsp_stx & 0xffff;
964 VOP_WIN_SET(vop, win, dsp_st, val); 965 VOP_WIN_SET(vop, win, dsp_st, val);
965 VOP_WIN_SET(vop, win, rb_swap, rb_swap); 966 VOP_WIN_SET(vop, win, rb_swap, rb_swap);
966 967
@@ -1289,7 +1290,7 @@ static void vop_win_state_complete(struct vop_win *vop_win,
1289 1290
1290 if (state->event) { 1291 if (state->event) {
1291 spin_lock_irqsave(&drm->event_lock, flags); 1292 spin_lock_irqsave(&drm->event_lock, flags);
1292 drm_send_vblank_event(drm, -1, state->event); 1293 drm_crtc_send_vblank_event(crtc, state->event);
1293 spin_unlock_irqrestore(&drm->event_lock, flags); 1294 spin_unlock_irqrestore(&drm->event_lock, flags);
1294 } 1295 }
1295 1296
@@ -1575,32 +1576,25 @@ static int vop_initial(struct vop *vop)
1575 return PTR_ERR(vop->dclk); 1576 return PTR_ERR(vop->dclk);
1576 } 1577 }
1577 1578
1578 ret = clk_prepare(vop->hclk);
1579 if (ret < 0) {
1580 dev_err(vop->dev, "failed to prepare hclk\n");
1581 return ret;
1582 }
1583
1584 ret = clk_prepare(vop->dclk); 1579 ret = clk_prepare(vop->dclk);
1585 if (ret < 0) { 1580 if (ret < 0) {
1586 dev_err(vop->dev, "failed to prepare dclk\n"); 1581 dev_err(vop->dev, "failed to prepare dclk\n");
1587 goto err_unprepare_hclk; 1582 return ret;
1588 } 1583 }
1589 1584
1590 ret = clk_prepare(vop->aclk); 1585 /* Enable both the hclk and aclk to setup the vop */
1586 ret = clk_prepare_enable(vop->hclk);
1591 if (ret < 0) { 1587 if (ret < 0) {
1592 dev_err(vop->dev, "failed to prepare aclk\n"); 1588 dev_err(vop->dev, "failed to prepare/enable hclk\n");
1593 goto err_unprepare_dclk; 1589 goto err_unprepare_dclk;
1594 } 1590 }
1595 1591
1596 /* 1592 ret = clk_prepare_enable(vop->aclk);
1597 * enable hclk, so that we can config vop register.
1598 */
1599 ret = clk_enable(vop->hclk);
1600 if (ret < 0) { 1593 if (ret < 0) {
1601 dev_err(vop->dev, "failed to prepare aclk\n"); 1594 dev_err(vop->dev, "failed to prepare/enable aclk\n");
1602 goto err_unprepare_aclk; 1595 goto err_disable_hclk;
1603 } 1596 }
1597
1604 /* 1598 /*
1605 * do hclk_reset, reset all vop registers. 1599 * do hclk_reset, reset all vop registers.
1606 */ 1600 */
@@ -1608,7 +1602,7 @@ static int vop_initial(struct vop *vop)
1608 if (IS_ERR(ahb_rst)) { 1602 if (IS_ERR(ahb_rst)) {
1609 dev_err(vop->dev, "failed to get ahb reset\n"); 1603 dev_err(vop->dev, "failed to get ahb reset\n");
1610 ret = PTR_ERR(ahb_rst); 1604 ret = PTR_ERR(ahb_rst);
1611 goto err_disable_hclk; 1605 goto err_disable_aclk;
1612 } 1606 }
1613 reset_control_assert(ahb_rst); 1607 reset_control_assert(ahb_rst);
1614 usleep_range(10, 20); 1608 usleep_range(10, 20);
@@ -1634,26 +1628,25 @@ static int vop_initial(struct vop *vop)
1634 if (IS_ERR(vop->dclk_rst)) { 1628 if (IS_ERR(vop->dclk_rst)) {
1635 dev_err(vop->dev, "failed to get dclk reset\n"); 1629 dev_err(vop->dev, "failed to get dclk reset\n");
1636 ret = PTR_ERR(vop->dclk_rst); 1630 ret = PTR_ERR(vop->dclk_rst);
1637 goto err_unprepare_aclk; 1631 goto err_disable_aclk;
1638 } 1632 }
1639 reset_control_assert(vop->dclk_rst); 1633 reset_control_assert(vop->dclk_rst);
1640 usleep_range(10, 20); 1634 usleep_range(10, 20);
1641 reset_control_deassert(vop->dclk_rst); 1635 reset_control_deassert(vop->dclk_rst);
1642 1636
1643 clk_disable(vop->hclk); 1637 clk_disable(vop->hclk);
1638 clk_disable(vop->aclk);
1644 1639
1645 vop->is_enabled = false; 1640 vop->is_enabled = false;
1646 1641
1647 return 0; 1642 return 0;
1648 1643
1644err_disable_aclk:
1645 clk_disable_unprepare(vop->aclk);
1649err_disable_hclk: 1646err_disable_hclk:
1650 clk_disable(vop->hclk); 1647 clk_disable_unprepare(vop->hclk);
1651err_unprepare_aclk:
1652 clk_unprepare(vop->aclk);
1653err_unprepare_dclk: 1648err_unprepare_dclk:
1654 clk_unprepare(vop->dclk); 1649 clk_unprepare(vop->dclk);
1655err_unprepare_hclk:
1656 clk_unprepare(vop->hclk);
1657 return ret; 1650 return ret;
1658} 1651}
1659 1652
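The reworked error path releases clocks strictly in reverse acquisition order through a goto ladder. A simplified standalone model of that ordering (it ignores that dclk is only prepared, never enabled, and the clock names are just labels):

    #include <stdio.h>

    static int prepare_enable(const char *n) { printf("on  %s\n", n); return 0; }
    static void disable_unprepare(const char *n) { printf("off %s\n", n); }

    static int vop_initial_model(int fail_at_reset)
    {
            int ret;

            if ((ret = prepare_enable("dclk")))
                    return ret;
            if ((ret = prepare_enable("hclk")))
                    goto err_unprepare_dclk;
            if ((ret = prepare_enable("aclk")))
                    goto err_disable_hclk;

            if (fail_at_reset) {            /* e.g. reset lookup fails */
                    ret = -1;
                    goto err_disable_aclk;
            }
            return 0;

    err_disable_aclk:
            disable_unprepare("aclk");
    err_disable_hclk:
            disable_unprepare("hclk");
    err_unprepare_dclk:
            disable_unprepare("dclk");
            return ret;
    }

    int main(void)
    {
            vop_initial_model(1);   /* unwinds aclk, hclk, dclk in order */
            return 0;
    }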
diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c
index 6a954544727f..f154fb1929bd 100644
--- a/drivers/gpu/drm/ttm/ttm_lock.c
+++ b/drivers/gpu/drm/ttm/ttm_lock.c
@@ -180,7 +180,7 @@ int ttm_write_lock(struct ttm_lock *lock, bool interruptible)
180 spin_unlock(&lock->lock); 180 spin_unlock(&lock->lock);
181 } 181 }
182 } else 182 } else
183 wait_event(lock->queue, __ttm_read_lock(lock)); 183 wait_event(lock->queue, __ttm_write_lock(lock));
184 184
185 return ret; 185 return ret;
186} 186}
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index f545913a56c7..578fe0a9324c 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -412,7 +412,7 @@ static const struct drm_connector_funcs virtio_gpu_connector_funcs = {
412 .save = virtio_gpu_conn_save, 412 .save = virtio_gpu_conn_save,
413 .restore = virtio_gpu_conn_restore, 413 .restore = virtio_gpu_conn_restore,
414 .detect = virtio_gpu_conn_detect, 414 .detect = virtio_gpu_conn_detect,
415 .fill_modes = drm_helper_probe_single_connector_modes, 415 .fill_modes = drm_helper_probe_single_connector_modes_nomerge,
416 .destroy = virtio_gpu_conn_destroy, 416 .destroy = virtio_gpu_conn_destroy,
417 .reset = drm_atomic_helper_connector_reset, 417 .reset = drm_atomic_helper_connector_reset,
418 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, 418 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index a09cf8529b9f..c49812b80dd0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1233,6 +1233,7 @@ static void vmw_master_drop(struct drm_device *dev,
1233 1233
1234 vmw_fp->locked_master = drm_master_get(file_priv->master); 1234 vmw_fp->locked_master = drm_master_get(file_priv->master);
1235 ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile); 1235 ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
1236 vmw_kms_legacy_hotspot_clear(dev_priv);
1236 if (unlikely((ret != 0))) { 1237 if (unlikely((ret != 0))) {
1237 DRM_ERROR("Unable to lock TTM at VT switch.\n"); 1238 DRM_ERROR("Unable to lock TTM at VT switch.\n");
1238 drm_master_put(&vmw_fp->locked_master); 1239 drm_master_put(&vmw_fp->locked_master);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index a8ae9dfb83b7..469cdd520615 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -925,6 +925,7 @@ int vmw_kms_present(struct vmw_private *dev_priv,
925 uint32_t num_clips); 925 uint32_t num_clips);
926int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, 926int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
927 struct drm_file *file_priv); 927 struct drm_file *file_priv);
928void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv);
928 929
929int vmw_dumb_create(struct drm_file *file_priv, 930int vmw_dumb_create(struct drm_file *file_priv,
930 struct drm_device *dev, 931 struct drm_device *dev,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index a8baf5f5e765..b6a0806b06bf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -390,7 +390,7 @@ void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes,
390 else if (ctx_id == SVGA3D_INVALID_ID) 390 else if (ctx_id == SVGA3D_INVALID_ID)
391 ret = vmw_local_fifo_reserve(dev_priv, bytes); 391 ret = vmw_local_fifo_reserve(dev_priv, bytes);
392 else { 392 else {
393 WARN_ON("Command buffer has not been allocated.\n"); 393 WARN(1, "Command buffer has not been allocated.\n");
394 ret = NULL; 394 ret = NULL;
395 } 395 }
396 if (IS_ERR_OR_NULL(ret)) { 396 if (IS_ERR_OR_NULL(ret)) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 9fcd7f82995c..9b4bb9e74d73 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -133,13 +133,19 @@ void vmw_cursor_update_position(struct vmw_private *dev_priv,
133 vmw_mmio_write(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT); 133 vmw_mmio_write(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
134} 134}
135 135
136int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, 136
137 uint32_t handle, uint32_t width, uint32_t height) 137/*
138 * vmw_du_crtc_cursor_set2 - Driver cursor_set2 callback.
139 */
140int vmw_du_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
141 uint32_t handle, uint32_t width, uint32_t height,
142 int32_t hot_x, int32_t hot_y)
138{ 143{
139 struct vmw_private *dev_priv = vmw_priv(crtc->dev); 144 struct vmw_private *dev_priv = vmw_priv(crtc->dev);
140 struct vmw_display_unit *du = vmw_crtc_to_du(crtc); 145 struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
141 struct vmw_surface *surface = NULL; 146 struct vmw_surface *surface = NULL;
142 struct vmw_dma_buffer *dmabuf = NULL; 147 struct vmw_dma_buffer *dmabuf = NULL;
148 s32 hotspot_x, hotspot_y;
143 int ret; 149 int ret;
144 150
145 /* 151 /*
@@ -151,6 +157,8 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
151 */ 157 */
152 drm_modeset_unlock_crtc(crtc); 158 drm_modeset_unlock_crtc(crtc);
153 drm_modeset_lock_all(dev_priv->dev); 159 drm_modeset_lock_all(dev_priv->dev);
160 hotspot_x = hot_x + du->hotspot_x;
161 hotspot_y = hot_y + du->hotspot_y;
154 162
155 /* A lot of the code assumes this */ 163 /* A lot of the code assumes this */
156 if (handle && (width != 64 || height != 64)) { 164 if (handle && (width != 64 || height != 64)) {
@@ -187,31 +195,34 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
187 vmw_dmabuf_unreference(&du->cursor_dmabuf); 195 vmw_dmabuf_unreference(&du->cursor_dmabuf);
188 196
189 /* setup new image */ 197 /* setup new image */
198 ret = 0;
190 if (surface) { 199 if (surface) {
191 /* vmw_user_surface_lookup takes one reference */ 200 /* vmw_user_surface_lookup takes one reference */
192 du->cursor_surface = surface; 201 du->cursor_surface = surface;
193 202
194 du->cursor_surface->snooper.crtc = crtc; 203 du->cursor_surface->snooper.crtc = crtc;
195 du->cursor_age = du->cursor_surface->snooper.age; 204 du->cursor_age = du->cursor_surface->snooper.age;
196 vmw_cursor_update_image(dev_priv, surface->snooper.image, 205 ret = vmw_cursor_update_image(dev_priv, surface->snooper.image,
197 64, 64, du->hotspot_x, du->hotspot_y); 206 64, 64, hotspot_x, hotspot_y);
198 } else if (dmabuf) { 207 } else if (dmabuf) {
199 /* vmw_user_surface_lookup takes one reference */ 208 /* vmw_user_surface_lookup takes one reference */
200 du->cursor_dmabuf = dmabuf; 209 du->cursor_dmabuf = dmabuf;
201 210
202 ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, width, height, 211 ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, width, height,
203 du->hotspot_x, du->hotspot_y); 212 hotspot_x, hotspot_y);
204 } else { 213 } else {
205 vmw_cursor_update_position(dev_priv, false, 0, 0); 214 vmw_cursor_update_position(dev_priv, false, 0, 0);
206 ret = 0;
207 goto out; 215 goto out;
208 } 216 }
209 217
210 vmw_cursor_update_position(dev_priv, true, 218 if (!ret) {
211 du->cursor_x + du->hotspot_x, 219 vmw_cursor_update_position(dev_priv, true,
212 du->cursor_y + du->hotspot_y); 220 du->cursor_x + hotspot_x,
221 du->cursor_y + hotspot_y);
222 du->core_hotspot_x = hot_x;
223 du->core_hotspot_y = hot_y;
224 }
213 225
214 ret = 0;
215out: 226out:
216 drm_modeset_unlock_all(dev_priv->dev); 227 drm_modeset_unlock_all(dev_priv->dev);
217 drm_modeset_lock_crtc(crtc, crtc->cursor); 228 drm_modeset_lock_crtc(crtc, crtc->cursor);
@@ -239,8 +250,10 @@ int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
239 drm_modeset_lock_all(dev_priv->dev); 250 drm_modeset_lock_all(dev_priv->dev);
240 251
241 vmw_cursor_update_position(dev_priv, shown, 252 vmw_cursor_update_position(dev_priv, shown,
242 du->cursor_x + du->hotspot_x, 253 du->cursor_x + du->hotspot_x +
243 du->cursor_y + du->hotspot_y); 254 du->core_hotspot_x,
255 du->cursor_y + du->hotspot_y +
256 du->core_hotspot_y);
244 257
245 drm_modeset_unlock_all(dev_priv->dev); 258 drm_modeset_unlock_all(dev_priv->dev);
246 drm_modeset_lock_crtc(crtc, crtc->cursor); 259 drm_modeset_lock_crtc(crtc, crtc->cursor);
@@ -334,6 +347,29 @@ err_unreserve:
334 ttm_bo_unreserve(bo); 347 ttm_bo_unreserve(bo);
335} 348}
336 349
350/**
351 * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
352 *
353 * @dev_priv: Pointer to the device private struct.
354 *
355 * Clears all legacy hotspots.
356 */
357void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
358{
359 struct drm_device *dev = dev_priv->dev;
360 struct vmw_display_unit *du;
361 struct drm_crtc *crtc;
362
363 drm_modeset_lock_all(dev);
364 drm_for_each_crtc(crtc, dev) {
365 du = vmw_crtc_to_du(crtc);
366
367 du->hotspot_x = 0;
368 du->hotspot_y = 0;
369 }
370 drm_modeset_unlock_all(dev);
371}
372
337void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv) 373void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
338{ 374{
339 struct drm_device *dev = dev_priv->dev; 375 struct drm_device *dev = dev_priv->dev;
@@ -351,7 +387,9 @@ void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
351 du->cursor_age = du->cursor_surface->snooper.age; 387 du->cursor_age = du->cursor_surface->snooper.age;
352 vmw_cursor_update_image(dev_priv, 388 vmw_cursor_update_image(dev_priv,
353 du->cursor_surface->snooper.image, 389 du->cursor_surface->snooper.image,
354 64, 64, du->hotspot_x, du->hotspot_y); 390 64, 64,
391 du->hotspot_x + du->core_hotspot_x,
392 du->hotspot_y + du->core_hotspot_y);
355 } 393 }
356 394
357 mutex_unlock(&dev->mode_config.mutex); 395 mutex_unlock(&dev->mode_config.mutex);
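The position written to the device now adds the driver-side hotspot and the hotspot DRM core passes in through cursor_set2. A toy sketch with invented coordinates:

    #include <stdio.h>

    struct du_model {
            int cursor_x, cursor_y;
            int hotspot_x, hotspot_y;           /* driver-side */
            int core_hotspot_x, core_hotspot_y; /* from cursor_set2 */
    };

    int main(void)
    {
            struct du_model du = { 100, 80, 2, 2, 5, 7 };

            /* device position = cursor + both hotspots: 107 89 */
            printf("%d %d\n",
                   du.cursor_x + du.hotspot_x + du.core_hotspot_x,
                   du.cursor_y + du.hotspot_y + du.core_hotspot_y);
            return 0;
    }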
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 782df7ca9794..edd81503516d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -159,6 +159,8 @@ struct vmw_display_unit {
159 159
160 int hotspot_x; 160 int hotspot_x;
161 int hotspot_y; 161 int hotspot_y;
162 s32 core_hotspot_x;
163 s32 core_hotspot_y;
162 164
163 unsigned unit; 165 unsigned unit;
164 166
@@ -193,8 +195,9 @@ void vmw_du_crtc_restore(struct drm_crtc *crtc);
193void vmw_du_crtc_gamma_set(struct drm_crtc *crtc, 195void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
194 u16 *r, u16 *g, u16 *b, 196 u16 *r, u16 *g, u16 *b,
195 uint32_t start, uint32_t size); 197 uint32_t start, uint32_t size);
196int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, 198int vmw_du_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
197 uint32_t handle, uint32_t width, uint32_t height); 199 uint32_t handle, uint32_t width, uint32_t height,
200 int32_t hot_x, int32_t hot_y);
198int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y); 201int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
199int vmw_du_connector_dpms(struct drm_connector *connector, int mode); 202int vmw_du_connector_dpms(struct drm_connector *connector, int mode);
200void vmw_du_connector_save(struct drm_connector *connector); 203void vmw_du_connector_save(struct drm_connector *connector);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index bb63e4d795fa..52caecb4502e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -297,7 +297,7 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
297static struct drm_crtc_funcs vmw_legacy_crtc_funcs = { 297static struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
298 .save = vmw_du_crtc_save, 298 .save = vmw_du_crtc_save,
299 .restore = vmw_du_crtc_restore, 299 .restore = vmw_du_crtc_restore,
300 .cursor_set = vmw_du_crtc_cursor_set, 300 .cursor_set2 = vmw_du_crtc_cursor_set2,
301 .cursor_move = vmw_du_crtc_cursor_move, 301 .cursor_move = vmw_du_crtc_cursor_move,
302 .gamma_set = vmw_du_crtc_gamma_set, 302 .gamma_set = vmw_du_crtc_gamma_set,
303 .destroy = vmw_ldu_crtc_destroy, 303 .destroy = vmw_ldu_crtc_destroy,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index b96d1ab610c5..13926ff192e3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -533,7 +533,7 @@ out_no_fence:
533static struct drm_crtc_funcs vmw_screen_object_crtc_funcs = { 533static struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
534 .save = vmw_du_crtc_save, 534 .save = vmw_du_crtc_save,
535 .restore = vmw_du_crtc_restore, 535 .restore = vmw_du_crtc_restore,
536 .cursor_set = vmw_du_crtc_cursor_set, 536 .cursor_set2 = vmw_du_crtc_cursor_set2,
537 .cursor_move = vmw_du_crtc_cursor_move, 537 .cursor_move = vmw_du_crtc_cursor_move,
538 .gamma_set = vmw_du_crtc_gamma_set, 538 .gamma_set = vmw_du_crtc_gamma_set,
539 .destroy = vmw_sou_crtc_destroy, 539 .destroy = vmw_sou_crtc_destroy,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index b1fc1c02792d..f823fc3efed7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -1043,7 +1043,7 @@ out_finish:
1043static struct drm_crtc_funcs vmw_stdu_crtc_funcs = { 1043static struct drm_crtc_funcs vmw_stdu_crtc_funcs = {
1044 .save = vmw_du_crtc_save, 1044 .save = vmw_du_crtc_save,
1045 .restore = vmw_du_crtc_restore, 1045 .restore = vmw_du_crtc_restore,
1046 .cursor_set = vmw_du_crtc_cursor_set, 1046 .cursor_set2 = vmw_du_crtc_cursor_set2,
1047 .cursor_move = vmw_du_crtc_cursor_move, 1047 .cursor_move = vmw_du_crtc_cursor_move,
1048 .gamma_set = vmw_du_crtc_gamma_set, 1048 .gamma_set = vmw_du_crtc_gamma_set,
1049 .destroy = vmw_stdu_crtc_destroy, 1049 .destroy = vmw_stdu_crtc_destroy,
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index ba47b30d28fa..f2e13eb8339f 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -28,6 +28,7 @@
28#include <linux/irqchip/chained_irq.h> 28#include <linux/irqchip/chained_irq.h>
29#include <linux/irqdomain.h> 29#include <linux/irqdomain.h>
30#include <linux/of_device.h> 30#include <linux/of_device.h>
31#include <linux/of_graph.h>
31 32
32#include <drm/drm_fourcc.h> 33#include <drm/drm_fourcc.h>
33 34
@@ -993,12 +994,26 @@ static void platform_device_unregister_children(struct platform_device *pdev)
993struct ipu_platform_reg { 994struct ipu_platform_reg {
994 struct ipu_client_platformdata pdata; 995 struct ipu_client_platformdata pdata;
995 const char *name; 996 const char *name;
996 int reg_offset;
997}; 997};
998 998
999/* These must be in the order of the corresponding device tree port nodes */
999static const struct ipu_platform_reg client_reg[] = { 1000static const struct ipu_platform_reg client_reg[] = {
1000 { 1001 {
1001 .pdata = { 1002 .pdata = {
1003 .csi = 0,
1004 .dma[0] = IPUV3_CHANNEL_CSI0,
1005 .dma[1] = -EINVAL,
1006 },
1007 .name = "imx-ipuv3-camera",
1008 }, {
1009 .pdata = {
1010 .csi = 1,
1011 .dma[0] = IPUV3_CHANNEL_CSI1,
1012 .dma[1] = -EINVAL,
1013 },
1014 .name = "imx-ipuv3-camera",
1015 }, {
1016 .pdata = {
1002 .di = 0, 1017 .di = 0,
1003 .dc = 5, 1018 .dc = 5,
1004 .dp = IPU_DP_FLOW_SYNC_BG, 1019 .dp = IPU_DP_FLOW_SYNC_BG,
@@ -1015,22 +1030,6 @@ static const struct ipu_platform_reg client_reg[] = {
1015 .dma[1] = -EINVAL, 1030 .dma[1] = -EINVAL,
1016 }, 1031 },
1017 .name = "imx-ipuv3-crtc", 1032 .name = "imx-ipuv3-crtc",
1018 }, {
1019 .pdata = {
1020 .csi = 0,
1021 .dma[0] = IPUV3_CHANNEL_CSI0,
1022 .dma[1] = -EINVAL,
1023 },
1024 .reg_offset = IPU_CM_CSI0_REG_OFS,
1025 .name = "imx-ipuv3-camera",
1026 }, {
1027 .pdata = {
1028 .csi = 1,
1029 .dma[0] = IPUV3_CHANNEL_CSI1,
1030 .dma[1] = -EINVAL,
1031 },
1032 .reg_offset = IPU_CM_CSI1_REG_OFS,
1033 .name = "imx-ipuv3-camera",
1034 }, 1033 },
1035}; 1034};
1036 1035
@@ -1051,22 +1050,30 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
1051 for (i = 0; i < ARRAY_SIZE(client_reg); i++) { 1050 for (i = 0; i < ARRAY_SIZE(client_reg); i++) {
1052 const struct ipu_platform_reg *reg = &client_reg[i]; 1051 const struct ipu_platform_reg *reg = &client_reg[i];
1053 struct platform_device *pdev; 1052 struct platform_device *pdev;
1054 struct resource res; 1053
1055 1054 pdev = platform_device_alloc(reg->name, id++);
1056 if (reg->reg_offset) { 1055 if (!pdev) {
1057 memset(&res, 0, sizeof(res)); 1056 ret = -ENOMEM;
1058 res.flags = IORESOURCE_MEM; 1057 goto err_register;
1059 res.start = ipu_base + ipu->devtype->cm_ofs + reg->reg_offset; 1058 }
1060 res.end = res.start + PAGE_SIZE - 1; 1059
1061 pdev = platform_device_register_resndata(dev, reg->name, 1060 pdev->dev.parent = dev;
1062 id++, &res, 1, &reg->pdata, sizeof(reg->pdata)); 1061
1063 } else { 1062 /* Associate subdevice with the corresponding port node */
1064 pdev = platform_device_register_data(dev, reg->name, 1063 pdev->dev.of_node = of_graph_get_port_by_id(dev->of_node, i);
1065 id++, &reg->pdata, sizeof(reg->pdata)); 1064 if (!pdev->dev.of_node) {
1065 dev_err(dev, "missing port@%d node in %s\n", i,
1066 dev->of_node->full_name);
1067 ret = -ENODEV;
1068 goto err_register;
1066 } 1069 }
1067 1070
1068 if (IS_ERR(pdev)) { 1071 ret = platform_device_add_data(pdev, &reg->pdata,
1069 ret = PTR_ERR(pdev); 1072 sizeof(reg->pdata));
1073 if (!ret)
1074 ret = platform_device_add(pdev);
1075 if (ret) {
1076 platform_device_put(pdev);
1070 goto err_register; 1077 goto err_register;
1071 } 1078 }
1072 } 1079 }
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index 3166e4bc4eb6..9abcaa53bd25 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -395,8 +395,10 @@ int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible)
395 set_current_state(interruptible ? 395 set_current_state(interruptible ?
396 TASK_INTERRUPTIBLE : 396 TASK_INTERRUPTIBLE :
397 TASK_UNINTERRUPTIBLE); 397 TASK_UNINTERRUPTIBLE);
398 if (signal_pending(current)) { 398 if (interruptible && signal_pending(current)) {
399 rc = -EINTR; 399 __set_current_state(TASK_RUNNING);
400 remove_wait_queue(&vga_wait_queue, &wait);
401 rc = -ERESTARTSYS;
400 break; 402 break;
401 } 403 }
402 schedule(); 404 schedule();
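The fix only honors pending signals for interruptible waiters, and returns -ERESTARTSYS so the syscall is restarted transparently instead of leaking -EINTR to userspace. The decision logic, modeled standalone (ERESTARTSYS is kernel-internal; its value is hard-coded here):

    #include <stdio.h>

    #define ERESTARTSYS 512  /* kernel-internal errno, value assumed */

    static int wait_step(int interruptible, int signal_pending)
    {
            if (interruptible && signal_pending)
                    return -ERESTARTSYS;    /* abort, restartable */
            return 0;                       /* keep sleeping */
    }

    int main(void)
    {
            printf("%d\n", wait_step(0, 1)); /* uninterruptible: 0 */
            printf("%d\n", wait_step(1, 1)); /* interruptible: -512 */
            return 0;
    }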
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index ac1feea51be3..8b78a7f1f779 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -316,11 +316,6 @@
316#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001 0xa001 316#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001 0xa001
317 317
318#define USB_VENDOR_ID_ELAN 0x04f3 318#define USB_VENDOR_ID_ELAN 0x04f3
319#define USB_DEVICE_ID_ELAN_TOUCHSCREEN 0x0089
320#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B 0x009b
321#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_0103 0x0103
322#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_010c 0x010c
323#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F 0x016f
324 319
325#define USB_VENDOR_ID_ELECOM 0x056e 320#define USB_VENDOR_ID_ELECOM 0x056e
326#define USB_DEVICE_ID_ELECOM_BM084 0x0061 321#define USB_DEVICE_ID_ELECOM_BM084 0x0061
@@ -609,6 +604,7 @@
609#define USB_DEVICE_ID_LOGITECH_HARMONY_FIRST 0xc110 604#define USB_DEVICE_ID_LOGITECH_HARMONY_FIRST 0xc110
610#define USB_DEVICE_ID_LOGITECH_HARMONY_LAST 0xc14f 605#define USB_DEVICE_ID_LOGITECH_HARMONY_LAST 0xc14f
611#define USB_DEVICE_ID_LOGITECH_HARMONY_PS3 0x0306 606#define USB_DEVICE_ID_LOGITECH_HARMONY_PS3 0x0306
607#define USB_DEVICE_ID_LOGITECH_KEYBOARD_G710_PLUS 0xc24d
612#define USB_DEVICE_ID_LOGITECH_MOUSE_C01A 0xc01a 608#define USB_DEVICE_ID_LOGITECH_MOUSE_C01A 0xc01a
613#define USB_DEVICE_ID_LOGITECH_MOUSE_C05A 0xc05a 609#define USB_DEVICE_ID_LOGITECH_MOUSE_C05A 0xc05a
614#define USB_DEVICE_ID_LOGITECH_MOUSE_C06A 0xc06a 610#define USB_DEVICE_ID_LOGITECH_MOUSE_C06A 0xc06a
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index c20ac76c0a8c..c690fae02cf8 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -665,8 +665,9 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
665 struct lg_drv_data *drv_data; 665 struct lg_drv_data *drv_data;
666 int ret; 666 int ret;
667 667
668 /* Only work with the 1st interface (G29 presents multiple) */ 668 /* G29 only works with the 1st interface */
669 if (iface_num != 0) { 669 if ((hdev->product == USB_DEVICE_ID_LOGITECH_G29_WHEEL) &&
670 (iface_num != 0)) {
670 dbg_hid("%s: ignoring ifnum %d\n", __func__, iface_num); 671 dbg_hid("%s: ignoring ifnum %d\n", __func__, iface_num);
671 return -ENODEV; 672 return -ENODEV;
672 } 673 }
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 94bb137abe32..7dd0953cd70f 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -72,11 +72,7 @@ static const struct hid_blacklist {
72 { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL }, 72 { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
73 { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, 73 { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
74 { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT }, 74 { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
75 { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN, HID_QUIRK_ALWAYS_POLL }, 75 { USB_VENDOR_ID_ELAN, HID_ANY_ID, HID_QUIRK_ALWAYS_POLL },
76 { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B, HID_QUIRK_ALWAYS_POLL },
77 { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_0103, HID_QUIRK_ALWAYS_POLL },
78 { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_010c, HID_QUIRK_ALWAYS_POLL },
79 { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F, HID_QUIRK_ALWAYS_POLL },
80 { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET }, 76 { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
81 { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS }, 77 { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
82 { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET }, 78 { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
@@ -84,6 +80,7 @@ static const struct hid_blacklist {
84 { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A, HID_QUIRK_ALWAYS_POLL }, 80 { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A, HID_QUIRK_ALWAYS_POLL },
85 { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL }, 81 { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
86 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL }, 82 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL },
83 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_KEYBOARD_G710_PLUS, HID_QUIRK_NOGET },
87 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C01A, HID_QUIRK_ALWAYS_POLL }, 84 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C01A, HID_QUIRK_ALWAYS_POLL },
88 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A, HID_QUIRK_ALWAYS_POLL }, 85 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A, HID_QUIRK_ALWAYS_POLL },
89 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A, HID_QUIRK_ALWAYS_POLL }, 86 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A, HID_QUIRK_ALWAYS_POLL },
@@ -339,7 +336,8 @@ static const struct hid_blacklist *usbhid_exists_squirk(const u16 idVendor,
339 336
340 for (; hid_blacklist[n].idVendor; n++) 337 for (; hid_blacklist[n].idVendor; n++)
341 if (hid_blacklist[n].idVendor == idVendor && 338 if (hid_blacklist[n].idVendor == idVendor &&
342 hid_blacklist[n].idProduct == idProduct) 339 (hid_blacklist[n].idProduct == (__u16) HID_ANY_ID ||
340 hid_blacklist[n].idProduct == idProduct))
343 bl_entry = &hid_blacklist[n]; 341 bl_entry = &hid_blacklist[n];
344 342
345 if (bl_entry != NULL) 343 if (bl_entry != NULL)
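The wildcard comparison lets a single table entry cover every product of a vendor. A standalone sketch of the match, with HID_ANY_ID defined as in the kernel headers:

    #include <stdio.h>
    #include <stdint.h>

    #define HID_ANY_ID (~0)

    struct bl { uint16_t idVendor, idProduct; };

    static int squirk_match(const struct bl *e, uint16_t vendor,
                            uint16_t product)
    {
            return e->idVendor == vendor &&
                   (e->idProduct == (uint16_t)HID_ANY_ID ||
                    e->idProduct == product);
    }

    int main(void)
    {
            struct bl elan = { 0x04f3, (uint16_t)HID_ANY_ID };

            /* any ELAN product now matches the one wildcard entry */
            printf("%d\n", squirk_match(&elan, 0x04f3, 0x0089)); /* 1 */
            printf("%d\n", squirk_match(&elan, 0x04f3, 0x016f)); /* 1 */
            return 0;
    }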
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 8f59f057cdf4..80a73bfc1a65 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1217,6 +1217,7 @@ config SENSORS_PWM_FAN
1217config SENSORS_SHT15 1217config SENSORS_SHT15
1218 tristate "Sensirion humidity and temperature sensors. SHT15 and compat." 1218 tristate "Sensirion humidity and temperature sensors. SHT15 and compat."
1219 depends on GPIOLIB || COMPILE_TEST 1219 depends on GPIOLIB || COMPILE_TEST
1220 select BITREVERSE
1220 help 1221 help
1221 If you say yes here you get support for the Sensiron SHT10, SHT11, 1222 If you say yes here you get support for the Sensiron SHT10, SHT11,
1222 SHT15, SHT71, SHT75 humidity and temperature sensors. 1223 SHT15, SHT71, SHT75 humidity and temperature sensors.
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index 65482624ea2c..5289aa0980a8 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -58,6 +58,7 @@ struct tmp102 {
58 u16 config_orig; 58 u16 config_orig;
59 unsigned long last_update; 59 unsigned long last_update;
60 int temp[3]; 60 int temp[3];
61 bool first_time;
61}; 62};
62 63
63/* convert left adjusted 13-bit TMP102 register value to milliCelsius */ 64/* convert left adjusted 13-bit TMP102 register value to milliCelsius */
@@ -93,6 +94,7 @@ static struct tmp102 *tmp102_update_device(struct device *dev)
93 tmp102->temp[i] = tmp102_reg_to_mC(status); 94 tmp102->temp[i] = tmp102_reg_to_mC(status);
94 } 95 }
95 tmp102->last_update = jiffies; 96 tmp102->last_update = jiffies;
97 tmp102->first_time = false;
96 } 98 }
97 mutex_unlock(&tmp102->lock); 99 mutex_unlock(&tmp102->lock);
98 return tmp102; 100 return tmp102;
@@ -102,6 +104,12 @@ static int tmp102_read_temp(void *dev, int *temp)
102{ 104{
103 struct tmp102 *tmp102 = tmp102_update_device(dev); 105 struct tmp102 *tmp102 = tmp102_update_device(dev);
104 106
107 /* Is it too early even to return a conversion? */
108 if (tmp102->first_time) {
109 dev_dbg(dev, "%s: Conversion not ready yet..\n", __func__);
110 return -EAGAIN;
111 }
112
105 *temp = tmp102->temp[0]; 113 *temp = tmp102->temp[0];
106 114
107 return 0; 115 return 0;
@@ -114,6 +122,10 @@ static ssize_t tmp102_show_temp(struct device *dev,
114 struct sensor_device_attribute *sda = to_sensor_dev_attr(attr); 122 struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
115 struct tmp102 *tmp102 = tmp102_update_device(dev); 123 struct tmp102 *tmp102 = tmp102_update_device(dev);
116 124
125 /* Is it too early even to return a read? */
126 if (tmp102->first_time)
127 return -EAGAIN;
128
117 return sprintf(buf, "%d\n", tmp102->temp[sda->index]); 129 return sprintf(buf, "%d\n", tmp102->temp[sda->index]);
118} 130}
119 131
@@ -207,7 +219,9 @@ static int tmp102_probe(struct i2c_client *client,
207 status = -ENODEV; 219 status = -ENODEV;
208 goto fail_restore_config; 220 goto fail_restore_config;
209 } 221 }
210 tmp102->last_update = jiffies - HZ; 222 tmp102->last_update = jiffies;
223 /* Mark that we are not ready with data until conversion is complete */
224 tmp102->first_time = true;
211 mutex_init(&tmp102->lock); 225 mutex_init(&tmp102->lock);
212 226
213 hwmon_dev = hwmon_device_register_with_groups(dev, client->name, 227 hwmon_dev = hwmon_device_register_with_groups(dev, client->name,
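Until the first conversion after probe completes, reads now fail with -EAGAIN rather than returning a bogus initial value. A standalone model of the flag; the temperature value is invented:

    #include <errno.h>
    #include <stdio.h>

    struct tmp102_model { int first_time; int temp_mc; };

    static int read_temp(struct tmp102_model *t, int *temp)
    {
            if (t->first_time)
                    return -EAGAIN;         /* conversion not ready */
            *temp = t->temp_mc;
            return 0;
    }

    int main(void)
    {
            struct tmp102_model t = { .first_time = 1, .temp_mc = 23500 };
            int mc, ret = read_temp(&t, &mc);

            printf("%d\n", ret);            /* -EAGAIN: too early */
            t.first_time = 0;               /* update path clears it */
            ret = read_temp(&t, &mc);
            printf("%d %d\n", ret, mc);     /* 0 23500 */
            return 0;
    }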
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index c5628a42170a..a8bdcb5292f5 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -202,8 +202,15 @@ static void i2c_davinci_calc_clk_dividers(struct davinci_i2c_dev *dev)
202 * d is always 6 on Keystone I2C controller 202 * d is always 6 on Keystone I2C controller
203 */ 203 */
204 204
205 /* get minimum of 7 MHz clock, but max of 12 MHz */ 205 /*
206 psc = (input_clock / 7000000) - 1; 206 * Both Davinci and current Keystone User Guides recommend a value
207 * between 7MHz and 12MHz. In reality, a 7MHz module clock doesn't
208 * always produce enough margin between SDA and SCL transitions.
209 * Measurements show that the higher the module clock is, the
210 * bigger the margin, providing more reliable communication.
211 * So it is better to target 12MHz.
212 */
213 psc = (input_clock / 12000000) - 1;
207 if ((input_clock / (psc + 1)) > 12000000) 214 if ((input_clock / (psc + 1)) > 12000000)
208 psc++; /* better to run under spec than over */ 215 psc++; /* better to run under spec than over */
209 d = (psc >= 2) ? 5 : 7 - psc; 216 d = (psc >= 2) ? 5 : 7 - psc;
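The new divider computation above can be sanity-checked in isolation. A standalone sketch of the arithmetic; the 50 MHz functional clock is an assumed example value, not taken from the patch:

	#include <stdio.h>

	int main(void)
	{
		unsigned int input_clock = 50000000;		/* assumed 50 MHz */
		unsigned int psc = input_clock / 12000000 - 1;	/* 50/12 = 4 -> psc 3 */

		if (input_clock / (psc + 1) > 12000000)
			psc++;	/* 50 MHz / 4 = 12.5 MHz > 12 MHz, so psc = 4 */

		/* prints psc = 4, module clock = 10000000 Hz: under spec, not over */
		printf("psc = %u, module clock = %u Hz\n",
		       psc, input_clock / (psc + 1));
		return 0;
	}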
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index 8c48b27ba059..de7fbbb374cd 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -813,6 +813,12 @@ static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id)
813tx_aborted: 813tx_aborted:
814 if ((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err) 814 if ((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err)
815 complete(&dev->cmd_complete); 815 complete(&dev->cmd_complete);
816 else if (unlikely(dev->accessor_flags & ACCESS_INTR_MASK)) {
817 /* workaround to trigger pending interrupt */
818 stat = dw_readl(dev, DW_IC_INTR_MASK);
819 i2c_dw_disable_int(dev);
820 dw_writel(dev, stat, DW_IC_INTR_MASK);
821 }
816 822
817 return IRQ_HANDLED; 823 return IRQ_HANDLED;
818} 824}
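For readers unfamiliar with the DesignWare interrupt unit, the workaround above is a mask bounce: on parts flagged with ACCESS_INTR_MASK (the AMD0010 entry below), an interrupt source that became pending while the handler ran can otherwise be lost, so the handler saves the current mask, masks everything, and writes the saved mask back, presumably so any still-pending source re-asserts the interrupt line. Schematically, with the driver's own accessors:

	u32 saved;

	saved = dw_readl(dev, DW_IC_INTR_MASK);	/* remember enabled sources */
	i2c_dw_disable_int(dev);		/* mask all: interrupt line drops */
	dw_writel(dev, saved, DW_IC_INTR_MASK);	/* restore mask: a still-pending
						 * source raises a fresh interrupt */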
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 1d50898e7b24..9ffb63a60f95 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -111,6 +111,7 @@ struct dw_i2c_dev {
111 111
112#define ACCESS_SWAP 0x00000001 112#define ACCESS_SWAP 0x00000001
113#define ACCESS_16BIT 0x00000002 113#define ACCESS_16BIT 0x00000002
114#define ACCESS_INTR_MASK 0x00000004
114 115
115extern int i2c_dw_init(struct dw_i2c_dev *dev); 116extern int i2c_dw_init(struct dw_i2c_dev *dev);
116extern void i2c_dw_disable(struct dw_i2c_dev *dev); 117extern void i2c_dw_disable(struct dw_i2c_dev *dev);
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 809579ecb5a4..6b00061c3746 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -93,6 +93,7 @@ static void dw_i2c_acpi_params(struct platform_device *pdev, char method[],
93static int dw_i2c_acpi_configure(struct platform_device *pdev) 93static int dw_i2c_acpi_configure(struct platform_device *pdev)
94{ 94{
95 struct dw_i2c_dev *dev = platform_get_drvdata(pdev); 95 struct dw_i2c_dev *dev = platform_get_drvdata(pdev);
96 const struct acpi_device_id *id;
96 97
97 dev->adapter.nr = -1; 98 dev->adapter.nr = -1;
98 dev->tx_fifo_depth = 32; 99 dev->tx_fifo_depth = 32;
@@ -106,6 +107,10 @@ static int dw_i2c_acpi_configure(struct platform_device *pdev)
106 dw_i2c_acpi_params(pdev, "FMCN", &dev->fs_hcnt, &dev->fs_lcnt, 107 dw_i2c_acpi_params(pdev, "FMCN", &dev->fs_hcnt, &dev->fs_lcnt,
107 &dev->sda_hold_time); 108 &dev->sda_hold_time);
108 109
110 id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev);
111 if (id && id->driver_data)
112 dev->accessor_flags |= (u32)id->driver_data;
113
109 return 0; 114 return 0;
110} 115}
111 116
@@ -116,7 +121,7 @@ static const struct acpi_device_id dw_i2c_acpi_match[] = {
116 { "INT3433", 0 }, 121 { "INT3433", 0 },
117 { "80860F41", 0 }, 122 { "80860F41", 0 },
118 { "808622C1", 0 }, 123 { "808622C1", 0 },
119 { "AMD0010", 0 }, 124 { "AMD0010", ACCESS_INTR_MASK },
120 { } 125 { }
121}; 126};
122MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_match); 127MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_match);
@@ -240,12 +245,10 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
240 } 245 }
241 246
242 r = i2c_dw_probe(dev); 247 r = i2c_dw_probe(dev);
243 if (r) { 248 if (r && !dev->pm_runtime_disabled)
244 pm_runtime_disable(&pdev->dev); 249 pm_runtime_disable(&pdev->dev);
245 return r;
246 }
247 250
248 return 0; 251 return r;
249} 252}
250 253
251static int dw_i2c_plat_remove(struct platform_device *pdev) 254static int dw_i2c_plat_remove(struct platform_device *pdev)
@@ -260,7 +263,8 @@ static int dw_i2c_plat_remove(struct platform_device *pdev)
260 263
261 pm_runtime_dont_use_autosuspend(&pdev->dev); 264 pm_runtime_dont_use_autosuspend(&pdev->dev);
262 pm_runtime_put_sync(&pdev->dev); 265 pm_runtime_put_sync(&pdev->dev);
263 pm_runtime_disable(&pdev->dev); 266 if (!dev->pm_runtime_disabled)
267 pm_runtime_disable(&pdev->dev);
264 268
265 return 0; 269 return 0;
266} 270}
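The AMD0010 entry above uses the common pattern of carrying per-device quirk flags in the ACPI match table's driver_data and folding them into driver state at configure time. A hedged sketch of the shape; foo_acpi_match and foo_apply_quirks are hypothetical names:

	static const struct acpi_device_id foo_acpi_match[] = {
		{ "INT33C2", 0 },
		{ "AMD0010", ACCESS_INTR_MASK },  /* needs the IRQ re-trigger quirk */
		{ }
	};

	static void foo_apply_quirks(struct platform_device *pdev,
				     struct dw_i2c_dev *dev)
	{
		const struct acpi_device_id *id;

		/* returns the table entry that matched this device, if any */
		id = acpi_match_device(foo_acpi_match, &pdev->dev);
		if (id)
			dev->accessor_flags |= (u32)id->driver_data;
	}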
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 9bb0b056b25f..d4d853680ae4 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -1119,6 +1119,8 @@ static int i2c_imx_probe(struct platform_device *pdev)
1119 i2c_imx, IMX_I2C_I2CR); 1119 i2c_imx, IMX_I2C_I2CR);
1120 imx_i2c_write_reg(i2c_imx->hwdata->i2sr_clr_opcode, i2c_imx, IMX_I2C_I2SR); 1120 imx_i2c_write_reg(i2c_imx->hwdata->i2sr_clr_opcode, i2c_imx, IMX_I2C_I2SR);
1121 1121
1122 i2c_imx_init_recovery_info(i2c_imx, pdev);
1123
1122 /* Add I2C adapter */ 1124 /* Add I2C adapter */
1123 ret = i2c_add_numbered_adapter(&i2c_imx->adapter); 1125 ret = i2c_add_numbered_adapter(&i2c_imx->adapter);
1124 if (ret < 0) { 1126 if (ret < 0) {
@@ -1126,8 +1128,6 @@ static int i2c_imx_probe(struct platform_device *pdev)
1126 goto clk_disable; 1128 goto clk_disable;
1127 } 1129 }
1128 1130
1129 i2c_imx_init_recovery_info(i2c_imx, pdev);
1130
1131 /* Set up platform driver data */ 1131 /* Set up platform driver data */
1132 platform_set_drvdata(pdev, i2c_imx); 1132 platform_set_drvdata(pdev, i2c_imx);
1133 clk_disable_unprepare(i2c_imx->clk); 1133 clk_disable_unprepare(i2c_imx->clk);
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 5801227b97ab..43207f52e5a3 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -146,6 +146,8 @@ struct mv64xxx_i2c_data {
146 bool errata_delay; 146 bool errata_delay;
147 struct reset_control *rstc; 147 struct reset_control *rstc;
148 bool irq_clear_inverted; 148 bool irq_clear_inverted;
149 /* Clk div is 2 to the power n, not 2 to the power n + 1 */
150 bool clk_n_base_0;
149}; 151};
150 152
151static struct mv64xxx_i2c_regs mv64xxx_i2c_regs_mv64xxx = { 153static struct mv64xxx_i2c_regs mv64xxx_i2c_regs_mv64xxx = {
@@ -757,25 +759,29 @@ MODULE_DEVICE_TABLE(of, mv64xxx_i2c_of_match_table);
757#ifdef CONFIG_OF 759#ifdef CONFIG_OF
758#ifdef CONFIG_HAVE_CLK 760#ifdef CONFIG_HAVE_CLK
759static int 761static int
760mv64xxx_calc_freq(const int tclk, const int n, const int m) 762mv64xxx_calc_freq(struct mv64xxx_i2c_data *drv_data,
763 const int tclk, const int n, const int m)
761{ 764{
762 return tclk / (10 * (m + 1) * (2 << n)); 765 if (drv_data->clk_n_base_0)
766 return tclk / (10 * (m + 1) * (1 << n));
767 else
768 return tclk / (10 * (m + 1) * (2 << n));
763} 769}
764 770
765static bool 771static bool
766mv64xxx_find_baud_factors(const u32 req_freq, const u32 tclk, u32 *best_n, 772mv64xxx_find_baud_factors(struct mv64xxx_i2c_data *drv_data,
767 u32 *best_m) 773 const u32 req_freq, const u32 tclk)
768{ 774{
769 int freq, delta, best_delta = INT_MAX; 775 int freq, delta, best_delta = INT_MAX;
770 int m, n; 776 int m, n;
771 777
772 for (n = 0; n <= 7; n++) 778 for (n = 0; n <= 7; n++)
773 for (m = 0; m <= 15; m++) { 779 for (m = 0; m <= 15; m++) {
774 freq = mv64xxx_calc_freq(tclk, n, m); 780 freq = mv64xxx_calc_freq(drv_data, tclk, n, m);
775 delta = req_freq - freq; 781 delta = req_freq - freq;
776 if (delta >= 0 && delta < best_delta) { 782 if (delta >= 0 && delta < best_delta) {
777 *best_m = m; 783 drv_data->freq_m = m;
778 *best_n = n; 784 drv_data->freq_n = n;
779 best_delta = delta; 785 best_delta = delta;
780 } 786 }
781 if (best_delta == 0) 787 if (best_delta == 0)
@@ -813,8 +819,11 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
813 if (of_property_read_u32(np, "clock-frequency", &bus_freq)) 819 if (of_property_read_u32(np, "clock-frequency", &bus_freq))
814 bus_freq = 100000; /* 100kHz by default */ 820 bus_freq = 100000; /* 100kHz by default */
815 821
816 if (!mv64xxx_find_baud_factors(bus_freq, tclk, 822 if (of_device_is_compatible(np, "allwinner,sun4i-a10-i2c") ||
817 &drv_data->freq_n, &drv_data->freq_m)) { 823 of_device_is_compatible(np, "allwinner,sun6i-a31-i2c"))
824 drv_data->clk_n_base_0 = true;
825
826 if (!mv64xxx_find_baud_factors(drv_data, bus_freq, tclk)) {
818 rc = -EINVAL; 827 rc = -EINVAL;
819 goto out; 828 goto out;
820 } 829 }
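The divider search above is a brute force over the 3-bit n and 4-bit m fields that never overshoots the requested bus frequency; on the Allwinner variants the divider is 2^n rather than 2^(n+1), hence clk_n_base_0. A standalone, compile-and-run version of the same search (the 24 MHz tclk and 100 kHz target are illustrative assumptions):

	#include <stdio.h>
	#include <limits.h>
	#include <stdbool.h>

	static int calc_freq(bool n_base_0, int tclk, int n, int m)
	{
		return tclk / (10 * (m + 1) * ((n_base_0 ? 1 : 2) << n));
	}

	int main(void)
	{
		const int tclk = 24000000, req = 100000;
		int best_delta = INT_MAX, best_n = 0, best_m = 0;

		for (int n = 0; n <= 7; n++)
			for (int m = 0; m <= 15; m++) {
				int delta = req - calc_freq(true, tclk, n, m);

				/* never overshoot the requested bus frequency */
				if (delta >= 0 && delta < best_delta) {
					best_delta = delta;
					best_n = n;
					best_m = m;
				}
			}

		/* for these inputs: n=1 m=11 -> exactly 100000 Hz */
		printf("n=%d m=%d -> %d Hz\n", best_n, best_m,
		       calc_freq(true, tclk, best_n, best_m));
		return 0;
	}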
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index b0ae560b38c3..599c0d7bd906 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -576,7 +576,7 @@ static int rcar_reg_slave(struct i2c_client *slave)
576 if (slave->flags & I2C_CLIENT_TEN) 576 if (slave->flags & I2C_CLIENT_TEN)
577 return -EAFNOSUPPORT; 577 return -EAFNOSUPPORT;
578 578
579 pm_runtime_forbid(rcar_i2c_priv_to_dev(priv)); 579 pm_runtime_get_sync(rcar_i2c_priv_to_dev(priv));
580 580
581 priv->slave = slave; 581 priv->slave = slave;
582 rcar_i2c_write(priv, ICSAR, slave->addr); 582 rcar_i2c_write(priv, ICSAR, slave->addr);
@@ -598,7 +598,7 @@ static int rcar_unreg_slave(struct i2c_client *slave)
598 598
599 priv->slave = NULL; 599 priv->slave = NULL;
600 600
601 pm_runtime_allow(rcar_i2c_priv_to_dev(priv)); 601 pm_runtime_put(rcar_i2c_priv_to_dev(priv));
602 602
603 return 0; 603 return 0;
604} 604}
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index c1935ebd6a9c..9096d17beb5b 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -908,7 +908,7 @@ static int rk3x_i2c_probe(struct platform_device *pdev)
908 &i2c->scl_fall_ns)) 908 &i2c->scl_fall_ns))
909 i2c->scl_fall_ns = 300; 909 i2c->scl_fall_ns = 300;
910 if (of_property_read_u32(pdev->dev.of_node, "i2c-sda-falling-time-ns", 910 if (of_property_read_u32(pdev->dev.of_node, "i2c-sda-falling-time-ns",
911 &i2c->scl_fall_ns)) 911 &i2c->sda_fall_ns))
912 i2c->sda_fall_ns = i2c->scl_fall_ns; 912 i2c->sda_fall_ns = i2c->scl_fall_ns;
913 913
914 strlcpy(i2c->adap.name, "rk3x-i2c", sizeof(i2c->adap.name)); 914 strlcpy(i2c->adap.name, "rk3x-i2c", sizeof(i2c->adap.name));
diff --git a/drivers/i2c/busses/i2c-st.c b/drivers/i2c/busses/i2c-st.c
index ea72dca32fdf..25020ec777c9 100644
--- a/drivers/i2c/busses/i2c-st.c
+++ b/drivers/i2c/busses/i2c-st.c
@@ -822,7 +822,7 @@ static int st_i2c_probe(struct platform_device *pdev)
822 822
823 adap = &i2c_dev->adap; 823 adap = &i2c_dev->adap;
824 i2c_set_adapdata(adap, i2c_dev); 824 i2c_set_adapdata(adap, i2c_dev);
825 snprintf(adap->name, sizeof(adap->name), "ST I2C(0x%pa)", &res->start); 825 snprintf(adap->name, sizeof(adap->name), "ST I2C(%pa)", &res->start);
826 adap->owner = THIS_MODULE; 826 adap->owner = THIS_MODULE;
827 adap->timeout = 2 * HZ; 827 adap->timeout = 2 * HZ;
828 adap->retries = 0; 828 adap->retries = 0;
diff --git a/drivers/iio/adc/qcom-spmi-vadc.c b/drivers/iio/adc/qcom-spmi-vadc.c
index 0c4618b4d515..c2babe50a0d8 100644
--- a/drivers/iio/adc/qcom-spmi-vadc.c
+++ b/drivers/iio/adc/qcom-spmi-vadc.c
@@ -839,8 +839,10 @@ static int vadc_get_dt_data(struct vadc_priv *vadc, struct device_node *node)
839 839
840 for_each_available_child_of_node(node, child) { 840 for_each_available_child_of_node(node, child) {
841 ret = vadc_get_dt_channel_data(vadc->dev, &prop, child); 841 ret = vadc_get_dt_channel_data(vadc->dev, &prop, child);
842 if (ret) 842 if (ret) {
843 of_node_put(child);
843 return ret; 844 return ret;
845 }
844 846
845 vadc->chan_props[index] = prop; 847 vadc->chan_props[index] = prop;
846 848
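The of_node_put() added above matters because for_each_available_child_of_node() holds a reference on the child it hands to the loop body and only releases it on the next iteration; returning out of the loop early leaks that reference unless it is dropped by hand. The general shape, with a hypothetical parse_child() helper:

	struct device_node *child;
	int ret;

	for_each_available_child_of_node(node, child) {
		ret = parse_child(child);	/* hypothetical helper */
		if (ret) {
			/* the iterator drops the ref only on the next
			 * iteration; on early exit we must do it ourselves */
			of_node_put(child);
			return ret;
		}
	}
	return 0;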
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index d7e908acb480..0f6f63b20263 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -302,7 +302,7 @@ static int iio_scan_mask_set(struct iio_dev *indio_dev,
302 if (trialmask == NULL) 302 if (trialmask == NULL)
303 return -ENOMEM; 303 return -ENOMEM;
304 if (!indio_dev->masklength) { 304 if (!indio_dev->masklength) {
305 WARN_ON("Trying to set scanmask prior to registering buffer\n"); 305 WARN(1, "Trying to set scanmask prior to registering buffer\n");
306 goto err_invalid_mask; 306 goto err_invalid_mask;
307 } 307 }
308 bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength); 308 bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 208358f9e7e3..159ede61f793 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -655,7 +655,7 @@ int __iio_device_attr_init(struct device_attribute *dev_attr,
655 break; 655 break;
656 case IIO_SEPARATE: 656 case IIO_SEPARATE:
657 if (!chan->indexed) { 657 if (!chan->indexed) {
658 WARN_ON("Differential channels must be indexed\n"); 658 WARN(1, "Differential channels must be indexed\n");
659 ret = -EINVAL; 659 ret = -EINVAL;
660 goto error_free_full_postfix; 660 goto error_free_full_postfix;
661 } 661 }
diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c
index 7d269ef9e062..f6a07dc32ae4 100644
--- a/drivers/iio/light/apds9960.c
+++ b/drivers/iio/light/apds9960.c
@@ -453,6 +453,7 @@ static int apds9960_set_power_state(struct apds9960_data *data, bool on)
453 usleep_range(data->als_adc_int_us, 453 usleep_range(data->als_adc_int_us,
454 APDS9960_MAX_INT_TIME_IN_US); 454 APDS9960_MAX_INT_TIME_IN_US);
455 } else { 455 } else {
456 pm_runtime_mark_last_busy(dev);
456 ret = pm_runtime_put_autosuspend(dev); 457 ret = pm_runtime_put_autosuspend(dev);
457 } 458 }
458 459
diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
index 961f9f990faf..e544fcfd5ced 100644
--- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
+++ b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
@@ -130,10 +130,10 @@ static int lidar_get_measurement(struct lidar_data *data, u16 *reg)
130 if (ret < 0) 130 if (ret < 0)
131 break; 131 break;
132 132
133 /* return 0 since laser is likely pointed out of range */ 133 /* return -EINVAL since laser is likely pointed out of range */
134 if (ret & LIDAR_REG_STATUS_INVALID) { 134 if (ret & LIDAR_REG_STATUS_INVALID) {
135 *reg = 0; 135 *reg = 0;
136 ret = 0; 136 ret = -EINVAL;
137 break; 137 break;
138 } 138 }
139 139
@@ -197,7 +197,7 @@ static irqreturn_t lidar_trigger_handler(int irq, void *private)
197 if (!ret) { 197 if (!ret) {
198 iio_push_to_buffers_with_timestamp(indio_dev, data->buffer, 198 iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
199 iio_get_time_ns()); 199 iio_get_time_ns());
200 } else { 200 } else if (ret != -EINVAL) {
201 dev_err(&data->client->dev, "cannot read LIDAR measurement"); 201 dev_err(&data->client->dev, "cannot read LIDAR measurement");
202 } 202 }
203 203
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 944cd90417bc..2d762a2ecd81 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1126,10 +1126,7 @@ static bool validate_ipv4_net_dev(struct net_device *net_dev,
1126 1126
1127 rcu_read_lock(); 1127 rcu_read_lock();
1128 err = fib_lookup(dev_net(net_dev), &fl4, &res, 0); 1128 err = fib_lookup(dev_net(net_dev), &fl4, &res, 0);
1129 if (err) 1129 ret = err == 0 && FIB_RES_DEV(res) == net_dev;
1130 return false;
1131
1132 ret = FIB_RES_DEV(res) == net_dev;
1133 rcu_read_unlock(); 1130 rcu_read_unlock();
1134 1131
1135 return ret; 1132 return ret;
@@ -1268,15 +1265,17 @@ static bool cma_protocol_roce(const struct rdma_cm_id *id)
1268 return cma_protocol_roce_dev_port(device, port_num); 1265 return cma_protocol_roce_dev_port(device, port_num);
1269} 1266}
1270 1267
1271static bool cma_match_net_dev(const struct rdma_id_private *id_priv, 1268static bool cma_match_net_dev(const struct rdma_cm_id *id,
1272 const struct net_device *net_dev) 1269 const struct net_device *net_dev,
1270 u8 port_num)
1273{ 1271{
1274 const struct rdma_addr *addr = &id_priv->id.route.addr; 1272 const struct rdma_addr *addr = &id->route.addr;
1275 1273
1276 if (!net_dev) 1274 if (!net_dev)
1277 /* This request is an AF_IB request or a RoCE request */ 1275 /* This request is an AF_IB request or a RoCE request */
1278 return addr->src_addr.ss_family == AF_IB || 1276 return (!id->port_num || id->port_num == port_num) &&
1279 cma_protocol_roce(&id_priv->id); 1277 (addr->src_addr.ss_family == AF_IB ||
1278 cma_protocol_roce_dev_port(id->device, port_num));
1280 1279
1281 return !addr->dev_addr.bound_dev_if || 1280 return !addr->dev_addr.bound_dev_if ||
1282 (net_eq(dev_net(net_dev), addr->dev_addr.net) && 1281 (net_eq(dev_net(net_dev), addr->dev_addr.net) &&
@@ -1298,13 +1297,13 @@ static struct rdma_id_private *cma_find_listener(
1298 hlist_for_each_entry(id_priv, &bind_list->owners, node) { 1297 hlist_for_each_entry(id_priv, &bind_list->owners, node) {
1299 if (cma_match_private_data(id_priv, ib_event->private_data)) { 1298 if (cma_match_private_data(id_priv, ib_event->private_data)) {
1300 if (id_priv->id.device == cm_id->device && 1299 if (id_priv->id.device == cm_id->device &&
1301 cma_match_net_dev(id_priv, net_dev)) 1300 cma_match_net_dev(&id_priv->id, net_dev, req->port))
1302 return id_priv; 1301 return id_priv;
1303 list_for_each_entry(id_priv_dev, 1302 list_for_each_entry(id_priv_dev,
1304 &id_priv->listen_list, 1303 &id_priv->listen_list,
1305 listen_list) { 1304 listen_list) {
1306 if (id_priv_dev->id.device == cm_id->device && 1305 if (id_priv_dev->id.device == cm_id->device &&
1307 cma_match_net_dev(id_priv_dev, net_dev)) 1306 cma_match_net_dev(&id_priv_dev->id, net_dev, req->port))
1308 return id_priv_dev; 1307 return id_priv_dev;
1309 } 1308 }
1310 } 1309 }
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 8d8af7a41a30..2281de122038 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -1811,6 +1811,11 @@ static int validate_mad(const struct ib_mad_hdr *mad_hdr,
1811 if (qp_num == 0) 1811 if (qp_num == 0)
1812 valid = 1; 1812 valid = 1;
1813 } else { 1813 } else {
1814 /* CM attributes other than ClassPortInfo only use Send method */
1815 if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_CM) &&
1816 (mad_hdr->attr_id != IB_MGMT_CLASSPORTINFO_ATTR_ID) &&
1817 (mad_hdr->method != IB_MGMT_METHOD_SEND))
1818 goto out;
1814 /* Filter GSI packets sent to QP0 */ 1819 /* Filter GSI packets sent to QP0 */
1815 if (qp_num != 0) 1820 if (qp_num != 0)
1816 valid = 1; 1821 valid = 1;
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 2aba774f835b..a95a32ba596e 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -512,7 +512,7 @@ static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask)
512 return len; 512 return len;
513} 513}
514 514
515static int ib_nl_send_msg(struct ib_sa_query *query) 515static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
516{ 516{
517 struct sk_buff *skb = NULL; 517 struct sk_buff *skb = NULL;
518 struct nlmsghdr *nlh; 518 struct nlmsghdr *nlh;
@@ -526,7 +526,7 @@ static int ib_nl_send_msg(struct ib_sa_query *query)
526 if (len <= 0) 526 if (len <= 0)
527 return -EMSGSIZE; 527 return -EMSGSIZE;
528 528
529 skb = nlmsg_new(len, GFP_KERNEL); 529 skb = nlmsg_new(len, gfp_mask);
530 if (!skb) 530 if (!skb)
531 return -ENOMEM; 531 return -ENOMEM;
532 532
@@ -544,7 +544,7 @@ static int ib_nl_send_msg(struct ib_sa_query *query)
544 /* Repair the nlmsg header length */ 544 /* Repair the nlmsg header length */
545 nlmsg_end(skb, nlh); 545 nlmsg_end(skb, nlh);
546 546
547 ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_LS, GFP_KERNEL); 547 ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_LS, gfp_mask);
548 if (!ret) 548 if (!ret)
549 ret = len; 549 ret = len;
550 else 550 else
@@ -553,7 +553,7 @@ static int ib_nl_send_msg(struct ib_sa_query *query)
553 return ret; 553 return ret;
554} 554}
555 555
556static int ib_nl_make_request(struct ib_sa_query *query) 556static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
557{ 557{
558 unsigned long flags; 558 unsigned long flags;
559 unsigned long delay; 559 unsigned long delay;
@@ -562,25 +562,27 @@ static int ib_nl_make_request(struct ib_sa_query *query)
562 INIT_LIST_HEAD(&query->list); 562 INIT_LIST_HEAD(&query->list);
563 query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq); 563 query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);
564 564
565 /* Put the request on the list first. */
565 spin_lock_irqsave(&ib_nl_request_lock, flags); 566 spin_lock_irqsave(&ib_nl_request_lock, flags);
566 ret = ib_nl_send_msg(query);
567 if (ret <= 0) {
568 ret = -EIO;
569 goto request_out;
570 } else {
571 ret = 0;
572 }
573
574 delay = msecs_to_jiffies(sa_local_svc_timeout_ms); 567 delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
575 query->timeout = delay + jiffies; 568 query->timeout = delay + jiffies;
576 list_add_tail(&query->list, &ib_nl_request_list); 569 list_add_tail(&query->list, &ib_nl_request_list);
577 /* Start the timeout if this is the only request */ 570 /* Start the timeout if this is the only request */
578 if (ib_nl_request_list.next == &query->list) 571 if (ib_nl_request_list.next == &query->list)
579 queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay); 572 queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
580
581request_out:
582 spin_unlock_irqrestore(&ib_nl_request_lock, flags); 573 spin_unlock_irqrestore(&ib_nl_request_lock, flags);
583 574
575 ret = ib_nl_send_msg(query, gfp_mask);
576 if (ret <= 0) {
577 ret = -EIO;
578 /* Remove the request */
579 spin_lock_irqsave(&ib_nl_request_lock, flags);
580 list_del(&query->list);
581 spin_unlock_irqrestore(&ib_nl_request_lock, flags);
582 } else {
583 ret = 0;
584 }
585
584 return ret; 586 return ret;
585} 587}
586 588
@@ -1108,7 +1110,7 @@ static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
1108 1110
1109 if (query->flags & IB_SA_ENABLE_LOCAL_SERVICE) { 1111 if (query->flags & IB_SA_ENABLE_LOCAL_SERVICE) {
1110 if (!ibnl_chk_listeners(RDMA_NL_GROUP_LS)) { 1112 if (!ibnl_chk_listeners(RDMA_NL_GROUP_LS)) {
1111 if (!ib_nl_make_request(query)) 1113 if (!ib_nl_make_request(query, gfp_mask))
1112 return id; 1114 return id;
1113 } 1115 }
1114 ib_sa_disable_local_svc(query); 1116 ib_sa_disable_local_svc(query);
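A plausible reading of the reordering above: ib_nl_send_msg() allocates with the caller's gfp_mask and so may sleep, which must not happen under the request spinlock; the request is therefore published on the tracking list first, the send happens outside the lock, and the entry is unlinked again if the send fails. Restated schematically, with explanatory comments:

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	query->timeout = jiffies + delay;
	list_add_tail(&query->list, &ib_nl_request_list);
	if (ib_nl_request_list.next == &query->list)	/* only pending entry */
		queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);

	ret = ib_nl_send_msg(query, gfp_mask);	/* may sleep; not under the lock */
	if (ret <= 0) {
		/* undo the publish so nothing ever matches a dead request */
		spin_lock_irqsave(&ib_nl_request_lock, flags);
		list_del(&query->list);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		return -EIO;
	}
	return 0;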
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 94816aeb95a0..1c02deab068f 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -62,9 +62,11 @@ static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };
62 * The ib_uobject locking scheme is as follows: 62 * The ib_uobject locking scheme is as follows:
63 * 63 *
64 * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it 64 * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it
65 * needs to be held during all idr operations. When an object is 65 * needs to be held during all idr write operations. When an object is
66 * looked up, a reference must be taken on the object's kref before 66 * looked up, a reference must be taken on the object's kref before
67 * dropping this lock. 67 * dropping this lock. For read operations, rcu_read_lock()
68 * is used instead; similarly, the kref reference is grabbed
69 * before the rcu_read_unlock().
68 * 70 *
69 * - Each object also has an rwsem. This rwsem must be held for 71 * - Each object also has an rwsem. This rwsem must be held for
70 * reading while an operation that uses the object is performed. 72 * reading while an operation that uses the object is performed.
@@ -96,7 +98,7 @@ static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
96 98
97static void release_uobj(struct kref *kref) 99static void release_uobj(struct kref *kref)
98{ 100{
99 kfree(container_of(kref, struct ib_uobject, ref)); 101 kfree_rcu(container_of(kref, struct ib_uobject, ref), rcu);
100} 102}
101 103
102static void put_uobj(struct ib_uobject *uobj) 104static void put_uobj(struct ib_uobject *uobj)
@@ -145,7 +147,7 @@ static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
145{ 147{
146 struct ib_uobject *uobj; 148 struct ib_uobject *uobj;
147 149
148 spin_lock(&ib_uverbs_idr_lock); 150 rcu_read_lock();
149 uobj = idr_find(idr, id); 151 uobj = idr_find(idr, id);
150 if (uobj) { 152 if (uobj) {
151 if (uobj->context == context) 153 if (uobj->context == context)
@@ -153,7 +155,7 @@ static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
153 else 155 else
154 uobj = NULL; 156 uobj = NULL;
155 } 157 }
156 spin_unlock(&ib_uverbs_idr_lock); 158 rcu_read_unlock();
157 159
158 return uobj; 160 return uobj;
159} 161}
@@ -2446,6 +2448,7 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
2446 int i, sg_ind; 2448 int i, sg_ind;
2447 int is_ud; 2449 int is_ud;
2448 ssize_t ret = -EINVAL; 2450 ssize_t ret = -EINVAL;
2451 size_t next_size;
2449 2452
2450 if (copy_from_user(&cmd, buf, sizeof cmd)) 2453 if (copy_from_user(&cmd, buf, sizeof cmd))
2451 return -EFAULT; 2454 return -EFAULT;
@@ -2490,7 +2493,8 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
2490 goto out_put; 2493 goto out_put;
2491 } 2494 }
2492 2495
2493 ud = alloc_wr(sizeof(*ud), user_wr->num_sge); 2496 next_size = sizeof(*ud);
2497 ud = alloc_wr(next_size, user_wr->num_sge);
2494 if (!ud) { 2498 if (!ud) {
2495 ret = -ENOMEM; 2499 ret = -ENOMEM;
2496 goto out_put; 2500 goto out_put;
@@ -2511,7 +2515,8 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
2511 user_wr->opcode == IB_WR_RDMA_READ) { 2515 user_wr->opcode == IB_WR_RDMA_READ) {
2512 struct ib_rdma_wr *rdma; 2516 struct ib_rdma_wr *rdma;
2513 2517
2514 rdma = alloc_wr(sizeof(*rdma), user_wr->num_sge); 2518 next_size = sizeof(*rdma);
2519 rdma = alloc_wr(next_size, user_wr->num_sge);
2515 if (!rdma) { 2520 if (!rdma) {
2516 ret = -ENOMEM; 2521 ret = -ENOMEM;
2517 goto out_put; 2522 goto out_put;
@@ -2525,7 +2530,8 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
2525 user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) { 2530 user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
2526 struct ib_atomic_wr *atomic; 2531 struct ib_atomic_wr *atomic;
2527 2532
2528 atomic = alloc_wr(sizeof(*atomic), user_wr->num_sge); 2533 next_size = sizeof(*atomic);
2534 atomic = alloc_wr(next_size, user_wr->num_sge);
2529 if (!atomic) { 2535 if (!atomic) {
2530 ret = -ENOMEM; 2536 ret = -ENOMEM;
2531 goto out_put; 2537 goto out_put;
@@ -2540,7 +2546,8 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
2540 } else if (user_wr->opcode == IB_WR_SEND || 2546 } else if (user_wr->opcode == IB_WR_SEND ||
2541 user_wr->opcode == IB_WR_SEND_WITH_IMM || 2547 user_wr->opcode == IB_WR_SEND_WITH_IMM ||
2542 user_wr->opcode == IB_WR_SEND_WITH_INV) { 2548 user_wr->opcode == IB_WR_SEND_WITH_INV) {
2543 next = alloc_wr(sizeof(*next), user_wr->num_sge); 2549 next_size = sizeof(*next);
2550 next = alloc_wr(next_size, user_wr->num_sge);
2544 if (!next) { 2551 if (!next) {
2545 ret = -ENOMEM; 2552 ret = -ENOMEM;
2546 goto out_put; 2553 goto out_put;
@@ -2572,7 +2579,7 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
2572 2579
2573 if (next->num_sge) { 2580 if (next->num_sge) {
2574 next->sg_list = (void *) next + 2581 next->sg_list = (void *) next +
2575 ALIGN(sizeof *next, sizeof (struct ib_sge)); 2582 ALIGN(next_size, sizeof(struct ib_sge));
2576 if (copy_from_user(next->sg_list, 2583 if (copy_from_user(next->sg_list,
2577 buf + sizeof cmd + 2584 buf + sizeof cmd +
2578 cmd.wr_count * cmd.wqe_size + 2585 cmd.wr_count * cmd.wqe_size +
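The uverbs changes above swap a global spinlock on the lookup path for RCU: readers find the object under rcu_read_lock(), pin it with a kref before leaving the read-side section, and the final put frees through kfree_rcu(), so the memory outlives any concurrent reader. A minimal sketch of that lookup/release pairing (it assumes, as the kfree_rcu() call implies, that struct ib_uobject gained a struct rcu_head member named rcu):

	static struct ib_uobject *lookup_uobj(struct idr *idr, int id)
	{
		struct ib_uobject *uobj;

		rcu_read_lock();
		uobj = idr_find(idr, id);
		if (uobj)
			kref_get(&uobj->ref);	/* pin before leaving the RCU section */
		rcu_read_unlock();

		return uobj;
	}

	static void release_uobj(struct kref *kref)
	{
		/* defer the actual kfree() past any in-flight RCU readers */
		kfree_rcu(container_of(kref, struct ib_uobject, ref), rcu);
	}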
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 043a60ee6836..545906dec26d 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1516,7 +1516,7 @@ EXPORT_SYMBOL(ib_map_mr_sg);
1516 * @sg_nents: number of entries in sg 1516 * @sg_nents: number of entries in sg
1517 * @set_page: driver page assignment function pointer 1517 * @set_page: driver page assignment function pointer
1518 * 1518 *
1519 * Core service helper for drivers to covert the largest 1519 * Core service helper for drivers to convert the largest
1520 * prefix of given sg list to a page vector. The sg list 1520 * prefix of given sg list to a page vector. The sg list
1521 * prefix converted is the prefix that meet the requirements 1521 * prefix converted is the prefix that meet the requirements
1522 * of ib_map_mr_sg. 1522 * of ib_map_mr_sg.
@@ -1533,7 +1533,7 @@ int ib_sg_to_pages(struct ib_mr *mr,
1533 u64 last_end_dma_addr = 0, last_page_addr = 0; 1533 u64 last_end_dma_addr = 0, last_page_addr = 0;
1534 unsigned int last_page_off = 0; 1534 unsigned int last_page_off = 0;
1535 u64 page_mask = ~((u64)mr->page_size - 1); 1535 u64 page_mask = ~((u64)mr->page_size - 1);
1536 int i; 1536 int i, ret;
1537 1537
1538 mr->iova = sg_dma_address(&sgl[0]); 1538 mr->iova = sg_dma_address(&sgl[0]);
1539 mr->length = 0; 1539 mr->length = 0;
@@ -1544,27 +1544,29 @@ int ib_sg_to_pages(struct ib_mr *mr,
1544 u64 end_dma_addr = dma_addr + dma_len; 1544 u64 end_dma_addr = dma_addr + dma_len;
1545 u64 page_addr = dma_addr & page_mask; 1545 u64 page_addr = dma_addr & page_mask;
1546 1546
1547 if (i && page_addr != dma_addr) { 1547 /*
1548 if (last_end_dma_addr != dma_addr) { 1548 * For the second and later elements, check whether either the
1549 /* gap */ 1549 * end of element i-1 or the start of element i is not aligned
1550 goto done; 1550 * on a page boundary.
1551 1551 */
1552 } else if (last_page_off + dma_len <= mr->page_size) { 1552 if (i && (last_page_off != 0 || page_addr != dma_addr)) {
1553 /* chunk this fragment with the last */ 1553 /* Stop mapping if there is a gap. */
1554 mr->length += dma_len; 1554 if (last_end_dma_addr != dma_addr)
1555 last_end_dma_addr += dma_len; 1555 break;
1556 last_page_off += dma_len; 1556
1557 continue; 1557 /*
1558 } else { 1558 * Coalesce this element with the last. If it is small
1559 /* map starting from the next page */ 1559 * enough just update mr->length. Otherwise start
1560 page_addr = last_page_addr + mr->page_size; 1560 * mapping from the next page.
1561 dma_len -= mr->page_size - last_page_off; 1561 */
1562 } 1562 goto next_page;
1563 } 1563 }
1564 1564
1565 do { 1565 do {
1566 if (unlikely(set_page(mr, page_addr))) 1566 ret = set_page(mr, page_addr);
1567 goto done; 1567 if (unlikely(ret < 0))
1568 return i ? : ret;
1569next_page:
1568 page_addr += mr->page_size; 1570 page_addr += mr->page_size;
1569 } while (page_addr < end_dma_addr); 1571 } while (page_addr < end_dma_addr);
1570 1572
@@ -1574,7 +1576,6 @@ int ib_sg_to_pages(struct ib_mr *mr,
1574 last_page_off = end_dma_addr & ~page_mask; 1576 last_page_off = end_dma_addr & ~page_mask;
1575 } 1577 }
1576 1578
1577done:
1578 return i; 1579 return i;
1579} 1580}
1580EXPORT_SYMBOL(ib_sg_to_pages); 1581EXPORT_SYMBOL(ib_sg_to_pages);
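As its new comments explain, the rewritten ib_sg_to_pages() decides per element between stopping at a real gap, coalescing with the previous element, or starting a fresh page run; both tests reduce to the two mask expressions below. A standalone check of that arithmetic (the addresses are made up for illustration):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t page_size = 4096;
		uint64_t page_mask = ~(page_size - 1);
		uint64_t dma_addr = 0x10000a00, dma_len = 0x300;
		uint64_t end_dma_addr = dma_addr + dma_len;

		/* start of the page containing this element: 0x10000000 */
		printf("page_addr     = %#llx\n",
		       (unsigned long long)(dma_addr & page_mask));
		/* element ends at offset 0xd00 inside its page, i.e. not on
		 * a page boundary, so the next element cannot simply extend it */
		printf("last_page_off = %#llx\n",
		       (unsigned long long)(end_dma_addr & ~page_mask));
		return 0;
	}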
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index f567160a4a56..97d6878f9938 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -456,7 +456,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
456 props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE; 456 props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
457 props->max_sge = min(dev->dev->caps.max_sq_sg, 457 props->max_sge = min(dev->dev->caps.max_sq_sg,
458 dev->dev->caps.max_rq_sg); 458 dev->dev->caps.max_rq_sg);
459 props->max_sge_rd = props->max_sge; 459 props->max_sge_rd = MLX4_MAX_SGE_RD;
460 props->max_cq = dev->dev->quotas.cq; 460 props->max_cq = dev->dev->quotas.cq;
461 props->max_cqe = dev->dev->caps.max_cqes; 461 props->max_cqe = dev->dev->caps.max_cqes;
462 props->max_mr = dev->dev->quotas.mpt; 462 props->max_mr = dev->dev->quotas.mpt;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index a2e4ca56da44..13eaaf45288f 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -34,6 +34,7 @@
34#include <linux/log2.h> 34#include <linux/log2.h>
35#include <linux/slab.h> 35#include <linux/slab.h>
36#include <linux/netdevice.h> 36#include <linux/netdevice.h>
37#include <linux/vmalloc.h>
37 38
38#include <rdma/ib_cache.h> 39#include <rdma/ib_cache.h>
39#include <rdma/ib_pack.h> 40#include <rdma/ib_pack.h>
@@ -795,8 +796,14 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
795 if (err) 796 if (err)
796 goto err_mtt; 797 goto err_mtt;
797 798
798 qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof (u64), gfp); 799 qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof(u64), gfp);
799 qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof (u64), gfp); 800 if (!qp->sq.wrid)
801 qp->sq.wrid = __vmalloc(qp->sq.wqe_cnt * sizeof(u64),
802 gfp, PAGE_KERNEL);
803 qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof(u64), gfp);
804 if (!qp->rq.wrid)
805 qp->rq.wrid = __vmalloc(qp->rq.wqe_cnt * sizeof(u64),
806 gfp, PAGE_KERNEL);
800 if (!qp->sq.wrid || !qp->rq.wrid) { 807 if (!qp->sq.wrid || !qp->rq.wrid) {
801 err = -ENOMEM; 808 err = -ENOMEM;
802 goto err_wrid; 809 goto err_wrid;
@@ -886,8 +893,8 @@ err_wrid:
886 if (qp_has_rq(init_attr)) 893 if (qp_has_rq(init_attr))
887 mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &qp->db); 894 mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &qp->db);
888 } else { 895 } else {
889 kfree(qp->sq.wrid); 896 kvfree(qp->sq.wrid);
890 kfree(qp->rq.wrid); 897 kvfree(qp->rq.wrid);
891 } 898 }
892 899
893err_mtt: 900err_mtt:
@@ -1062,8 +1069,8 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
1062 &qp->db); 1069 &qp->db);
1063 ib_umem_release(qp->umem); 1070 ib_umem_release(qp->umem);
1064 } else { 1071 } else {
1065 kfree(qp->sq.wrid); 1072 kvfree(qp->sq.wrid);
1066 kfree(qp->rq.wrid); 1073 kvfree(qp->rq.wrid);
1067 if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | 1074 if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
1068 MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) 1075 MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI))
1069 free_proxy_bufs(&dev->ib_dev, qp); 1076 free_proxy_bufs(&dev->ib_dev, qp);
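The allocation change above (and the matching one in srq.c below) is the classic fallback pattern from before the kvmalloc() helper existed: try a physically contiguous kmalloc() first, fall back to vmalloc space for large work-request ID arrays, and free with kvfree(), which handles either case. As a generic sketch:

	static u64 *alloc_wrid(int wqe_cnt, gfp_t gfp)
	{
		u64 *wrid = kmalloc(wqe_cnt * sizeof(u64), gfp);

		if (!wrid)	/* contiguous pages unavailable: take vmalloc space */
			wrid = __vmalloc(wqe_cnt * sizeof(u64), gfp, PAGE_KERNEL);
		return wrid;	/* release with kvfree() */
	}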
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index dce5dfe3a70e..c394376ebe06 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -34,6 +34,7 @@
34#include <linux/mlx4/qp.h> 34#include <linux/mlx4/qp.h>
35#include <linux/mlx4/srq.h> 35#include <linux/mlx4/srq.h>
36#include <linux/slab.h> 36#include <linux/slab.h>
37#include <linux/vmalloc.h>
37 38
38#include "mlx4_ib.h" 39#include "mlx4_ib.h"
39#include "user.h" 40#include "user.h"
@@ -172,8 +173,12 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
172 173
173 srq->wrid = kmalloc(srq->msrq.max * sizeof (u64), GFP_KERNEL); 174 srq->wrid = kmalloc(srq->msrq.max * sizeof (u64), GFP_KERNEL);
174 if (!srq->wrid) { 175 if (!srq->wrid) {
175 err = -ENOMEM; 176 srq->wrid = __vmalloc(srq->msrq.max * sizeof(u64),
176 goto err_mtt; 177 GFP_KERNEL, PAGE_KERNEL);
178 if (!srq->wrid) {
179 err = -ENOMEM;
180 goto err_mtt;
181 }
177 } 182 }
178 } 183 }
179 184
@@ -204,7 +209,7 @@ err_wrid:
204 if (pd->uobject) 209 if (pd->uobject)
205 mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db); 210 mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db);
206 else 211 else
207 kfree(srq->wrid); 212 kvfree(srq->wrid);
208 213
209err_mtt: 214err_mtt:
210 mlx4_mtt_cleanup(dev->dev, &srq->mtt); 215 mlx4_mtt_cleanup(dev->dev, &srq->mtt);
@@ -281,7 +286,7 @@ int mlx4_ib_destroy_srq(struct ib_srq *srq)
281 mlx4_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db); 286 mlx4_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db);
282 ib_umem_release(msrq->umem); 287 ib_umem_release(msrq->umem);
283 } else { 288 } else {
284 kfree(msrq->wrid); 289 kvfree(msrq->wrid);
285 mlx4_buf_free(dev->dev, msrq->msrq.max << msrq->msrq.wqe_shift, 290 mlx4_buf_free(dev->dev, msrq->msrq.max << msrq->msrq.wqe_shift,
286 &msrq->buf); 291 &msrq->buf);
287 mlx4_db_free(dev->dev, &msrq->db); 292 mlx4_db_free(dev->dev, &msrq->db);
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index ec8993a7b3be..6000f7aeede9 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -381,7 +381,19 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
381 } 381 }
382 } 382 }
383 } else if (ent->cur > 2 * ent->limit) { 383 } else if (ent->cur > 2 * ent->limit) {
384 if (!someone_adding(cache) && 384 /*
385 * The remove_keys() logic is performed as a garbage collection
386 * task. Such a task is intended to be run when no other active
387 * processes are running.
388 *
389 * need_resched() will return TRUE if there are user tasks
390 * to be activated in the near future.
391 *
392 * In that case, we don't execute remove_keys() and postpone
393 * the garbage collection work to the next cycle,
394 * in order to free CPU resources for other tasks.
395 */
396 if (!need_resched() && !someone_adding(cache) &&
385 time_after(jiffies, cache->last_add + 300 * HZ)) { 397 time_after(jiffies, cache->last_add + 300 * HZ)) {
386 remove_keys(dev, i, 1); 398 remove_keys(dev, i, 1);
387 if (ent->cur > ent->limit) 399 if (ent->cur > ent->limit)
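Condensed, the new gate above is three independent conditions that must all hold before the MR cache shrinks:

	if (!need_resched() &&			/* no user task about to run */
	    !someone_adding(cache) &&		/* cache is not growing right now */
	    time_after(jiffies, cache->last_add + 300 * HZ))	/* quiet for 300 s */
		remove_keys(dev, i, 1);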
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
index ae80590aabdf..040bb8b5cb15 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -232,6 +232,10 @@ struct phy_info {
232 u16 interface_type; 232 u16 interface_type;
233}; 233};
234 234
235enum ocrdma_flags {
236 OCRDMA_FLAGS_LINK_STATUS_INIT = 0x01
237};
238
235struct ocrdma_dev { 239struct ocrdma_dev {
236 struct ib_device ibdev; 240 struct ib_device ibdev;
237 struct ocrdma_dev_attr attr; 241 struct ocrdma_dev_attr attr;
@@ -287,6 +291,7 @@ struct ocrdma_dev {
287 atomic_t update_sl; 291 atomic_t update_sl;
288 u16 pvid; 292 u16 pvid;
289 u32 asic_id; 293 u32 asic_id;
294 u32 flags;
290 295
291 ulong last_stats_time; 296 ulong last_stats_time;
292 struct mutex stats_lock; /* provide synch for debugfs operations */ 297 struct mutex stats_lock; /* provide synch for debugfs operations */
@@ -591,4 +596,9 @@ static inline u8 ocrdma_is_enabled_and_synced(u32 state)
591 (state & OCRDMA_STATE_FLAG_SYNC); 596 (state & OCRDMA_STATE_FLAG_SYNC);
592} 597}
593 598
599static inline u8 ocrdma_get_ae_link_state(u32 ae_state)
600{
601 return ((ae_state & OCRDMA_AE_LSC_LS_MASK) >> OCRDMA_AE_LSC_LS_SHIFT);
602}
603
594#endif 604#endif
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 30f67bebffa3..283ca842ff74 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -579,6 +579,8 @@ static int ocrdma_mbx_create_mq(struct ocrdma_dev *dev,
579 579
580 cmd->async_event_bitmap = BIT(OCRDMA_ASYNC_GRP5_EVE_CODE); 580 cmd->async_event_bitmap = BIT(OCRDMA_ASYNC_GRP5_EVE_CODE);
581 cmd->async_event_bitmap |= BIT(OCRDMA_ASYNC_RDMA_EVE_CODE); 581 cmd->async_event_bitmap |= BIT(OCRDMA_ASYNC_RDMA_EVE_CODE);
582 /* Request link events on this MQ. */
583 cmd->async_event_bitmap |= BIT(OCRDMA_ASYNC_LINK_EVE_CODE);
582 584
583 cmd->async_cqid_ringsize = cq->id; 585 cmd->async_cqid_ringsize = cq->id;
584 cmd->async_cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) << 586 cmd->async_cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) <<
@@ -819,20 +821,42 @@ static void ocrdma_process_grp5_aync(struct ocrdma_dev *dev,
819 } 821 }
820} 822}
821 823
824static void ocrdma_process_link_state(struct ocrdma_dev *dev,
825 struct ocrdma_ae_mcqe *cqe)
826{
827 struct ocrdma_ae_lnkst_mcqe *evt;
828 u8 lstate;
829
830 evt = (struct ocrdma_ae_lnkst_mcqe *)cqe;
831 lstate = ocrdma_get_ae_link_state(evt->speed_state_ptn);
832
833 if (!(lstate & OCRDMA_AE_LSC_LLINK_MASK))
834 return;
835
836 if (dev->flags & OCRDMA_FLAGS_LINK_STATUS_INIT)
837 ocrdma_update_link_state(dev, (lstate & OCRDMA_LINK_ST_MASK));
838}
839
822static void ocrdma_process_acqe(struct ocrdma_dev *dev, void *ae_cqe) 840static void ocrdma_process_acqe(struct ocrdma_dev *dev, void *ae_cqe)
823{ 841{
824 /* async CQE processing */ 842 /* async CQE processing */
825 struct ocrdma_ae_mcqe *cqe = ae_cqe; 843 struct ocrdma_ae_mcqe *cqe = ae_cqe;
826 u32 evt_code = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_CODE_MASK) >> 844 u32 evt_code = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_CODE_MASK) >>
827 OCRDMA_AE_MCQE_EVENT_CODE_SHIFT; 845 OCRDMA_AE_MCQE_EVENT_CODE_SHIFT;
828 846 switch (evt_code) {
829 if (evt_code == OCRDMA_ASYNC_RDMA_EVE_CODE) 847 case OCRDMA_ASYNC_LINK_EVE_CODE:
848 ocrdma_process_link_state(dev, cqe);
849 break;
850 case OCRDMA_ASYNC_RDMA_EVE_CODE:
830 ocrdma_dispatch_ibevent(dev, cqe); 851 ocrdma_dispatch_ibevent(dev, cqe);
831 else if (evt_code == OCRDMA_ASYNC_GRP5_EVE_CODE) 852 break;
853 case OCRDMA_ASYNC_GRP5_EVE_CODE:
832 ocrdma_process_grp5_aync(dev, cqe); 854 ocrdma_process_grp5_aync(dev, cqe);
833 else 855 break;
856 default:
834 pr_err("%s(%d) invalid evt code=0x%x\n", __func__, 857 pr_err("%s(%d) invalid evt code=0x%x\n", __func__,
835 dev->id, evt_code); 858 dev->id, evt_code);
859 }
836} 860}
837 861
838static void ocrdma_process_mcqe(struct ocrdma_dev *dev, struct ocrdma_mcqe *cqe) 862static void ocrdma_process_mcqe(struct ocrdma_dev *dev, struct ocrdma_mcqe *cqe)
@@ -1363,7 +1387,8 @@ mbx_err:
1363 return status; 1387 return status;
1364} 1388}
1365 1389
1366int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed) 1390int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed,
1391 u8 *lnk_state)
1367{ 1392{
1368 int status = -ENOMEM; 1393 int status = -ENOMEM;
1369 struct ocrdma_get_link_speed_rsp *rsp; 1394 struct ocrdma_get_link_speed_rsp *rsp;
@@ -1384,8 +1409,11 @@ int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed)
1384 goto mbx_err; 1409 goto mbx_err;
1385 1410
1386 rsp = (struct ocrdma_get_link_speed_rsp *)cmd; 1411 rsp = (struct ocrdma_get_link_speed_rsp *)cmd;
1387 *lnk_speed = (rsp->pflt_pps_ld_pnum & OCRDMA_PHY_PS_MASK) 1412 if (lnk_speed)
1388 >> OCRDMA_PHY_PS_SHIFT; 1413 *lnk_speed = (rsp->pflt_pps_ld_pnum & OCRDMA_PHY_PS_MASK)
1414 >> OCRDMA_PHY_PS_SHIFT;
1415 if (lnk_state)
1416 *lnk_state = (rsp->res_lnk_st & OCRDMA_LINK_ST_MASK);
1389 1417
1390mbx_err: 1418mbx_err:
1391 kfree(cmd); 1419 kfree(cmd);
@@ -2515,9 +2543,10 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
2515 ocrdma_cpu_to_le32(&cmd->params.sgid[0], sizeof(cmd->params.sgid)); 2543 ocrdma_cpu_to_le32(&cmd->params.sgid[0], sizeof(cmd->params.sgid));
2516 cmd->params.vlan_dmac_b4_to_b5 = mac_addr[4] | (mac_addr[5] << 8); 2544 cmd->params.vlan_dmac_b4_to_b5 = mac_addr[4] | (mac_addr[5] << 8);
2517 2545
2518 if (vlan_id < 0x1000) { 2546 if (vlan_id == 0xFFFF)
2519 if (dev->pfc_state) { 2547 vlan_id = 0;
2520 vlan_id = 0; 2548 if (vlan_id || dev->pfc_state) {
2549 if (!vlan_id) {
2521 pr_err("ocrdma%d:Using VLAN with PFC is recommended\n", 2550 pr_err("ocrdma%d:Using VLAN with PFC is recommended\n",
2522 dev->id); 2551 dev->id);
2523 pr_err("ocrdma%d:Using VLAN 0 for this connection\n", 2552 pr_err("ocrdma%d:Using VLAN 0 for this connection\n",
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
index 7ed885c1851e..ebc1f442aec3 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
@@ -106,7 +106,8 @@ void ocrdma_ring_cq_db(struct ocrdma_dev *, u16 cq_id, bool armed,
106 bool solicited, u16 cqe_popped); 106 bool solicited, u16 cqe_popped);
107 107
108/* verbs specific mailbox commands */ 108/* verbs specific mailbox commands */
109int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed); 109int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed,
110 u8 *lnk_st);
110int ocrdma_query_config(struct ocrdma_dev *, 111int ocrdma_query_config(struct ocrdma_dev *,
111 struct ocrdma_mbx_query_config *config); 112 struct ocrdma_mbx_query_config *config);
112 113
@@ -153,5 +154,6 @@ char *port_speed_string(struct ocrdma_dev *dev);
153void ocrdma_init_service_level(struct ocrdma_dev *); 154void ocrdma_init_service_level(struct ocrdma_dev *);
154void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev); 155void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev);
155void ocrdma_free_pd_range(struct ocrdma_dev *dev); 156void ocrdma_free_pd_range(struct ocrdma_dev *dev);
157void ocrdma_update_link_state(struct ocrdma_dev *dev, u8 lstate);
156 158
157#endif /* __OCRDMA_HW_H__ */ 159#endif /* __OCRDMA_HW_H__ */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 62b7009daa6c..3afb40b85159 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -290,6 +290,7 @@ static void ocrdma_remove_sysfiles(struct ocrdma_dev *dev)
290static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info) 290static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
291{ 291{
292 int status = 0, i; 292 int status = 0, i;
293 u8 lstate = 0;
293 struct ocrdma_dev *dev; 294 struct ocrdma_dev *dev;
294 295
295 dev = (struct ocrdma_dev *)ib_alloc_device(sizeof(struct ocrdma_dev)); 296 dev = (struct ocrdma_dev *)ib_alloc_device(sizeof(struct ocrdma_dev));
@@ -319,6 +320,11 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
319 if (status) 320 if (status)
320 goto alloc_err; 321 goto alloc_err;
321 322
323 /* Query Link state and update */
324 status = ocrdma_mbx_get_link_speed(dev, NULL, &lstate);
325 if (!status)
326 ocrdma_update_link_state(dev, lstate);
327
322 for (i = 0; i < ARRAY_SIZE(ocrdma_attributes); i++) 328 for (i = 0; i < ARRAY_SIZE(ocrdma_attributes); i++)
323 if (device_create_file(&dev->ibdev.dev, ocrdma_attributes[i])) 329 if (device_create_file(&dev->ibdev.dev, ocrdma_attributes[i]))
324 goto sysfs_err; 330 goto sysfs_err;
@@ -373,7 +379,7 @@ static void ocrdma_remove(struct ocrdma_dev *dev)
373 ocrdma_remove_free(dev); 379 ocrdma_remove_free(dev);
374} 380}
375 381
376static int ocrdma_open(struct ocrdma_dev *dev) 382static int ocrdma_dispatch_port_active(struct ocrdma_dev *dev)
377{ 383{
378 struct ib_event port_event; 384 struct ib_event port_event;
379 385
@@ -384,32 +390,9 @@ static int ocrdma_open(struct ocrdma_dev *dev)
384 return 0; 390 return 0;
385} 391}
386 392
387static int ocrdma_close(struct ocrdma_dev *dev) 393static int ocrdma_dispatch_port_error(struct ocrdma_dev *dev)
388{ 394{
389 int i;
390 struct ocrdma_qp *qp, **cur_qp;
391 struct ib_event err_event; 395 struct ib_event err_event;
392 struct ib_qp_attr attrs;
393 int attr_mask = IB_QP_STATE;
394
395 attrs.qp_state = IB_QPS_ERR;
396 mutex_lock(&dev->dev_lock);
397 if (dev->qp_tbl) {
398 cur_qp = dev->qp_tbl;
399 for (i = 0; i < OCRDMA_MAX_QP; i++) {
400 qp = cur_qp[i];
401 if (qp && qp->ibqp.qp_type != IB_QPT_GSI) {
402 /* change the QP state to ERROR */
403 _ocrdma_modify_qp(&qp->ibqp, &attrs, attr_mask);
404
405 err_event.event = IB_EVENT_QP_FATAL;
406 err_event.element.qp = &qp->ibqp;
407 err_event.device = &dev->ibdev;
408 ib_dispatch_event(&err_event);
409 }
410 }
411 }
412 mutex_unlock(&dev->dev_lock);
413 396
414 err_event.event = IB_EVENT_PORT_ERR; 397 err_event.event = IB_EVENT_PORT_ERR;
415 err_event.element.port_num = 1; 398 err_event.element.port_num = 1;
@@ -420,7 +403,7 @@ static int ocrdma_close(struct ocrdma_dev *dev)
420 403
421static void ocrdma_shutdown(struct ocrdma_dev *dev) 404static void ocrdma_shutdown(struct ocrdma_dev *dev)
422{ 405{
423 ocrdma_close(dev); 406 ocrdma_dispatch_port_error(dev);
424 ocrdma_remove(dev); 407 ocrdma_remove(dev);
425} 408}
426 409
@@ -431,18 +414,28 @@ static void ocrdma_shutdown(struct ocrdma_dev *dev)
431static void ocrdma_event_handler(struct ocrdma_dev *dev, u32 event) 414static void ocrdma_event_handler(struct ocrdma_dev *dev, u32 event)
432{ 415{
433 switch (event) { 416 switch (event) {
434 case BE_DEV_UP:
435 ocrdma_open(dev);
436 break;
437 case BE_DEV_DOWN:
438 ocrdma_close(dev);
439 break;
440 case BE_DEV_SHUTDOWN: 417 case BE_DEV_SHUTDOWN:
441 ocrdma_shutdown(dev); 418 ocrdma_shutdown(dev);
442 break; 419 break;
420 default:
421 break;
443 } 422 }
444} 423}
445 424
425void ocrdma_update_link_state(struct ocrdma_dev *dev, u8 lstate)
426{
427 if (!(dev->flags & OCRDMA_FLAGS_LINK_STATUS_INIT)) {
428 dev->flags |= OCRDMA_FLAGS_LINK_STATUS_INIT;
429 if (!lstate)
430 return;
431 }
432
433 if (!lstate)
434 ocrdma_dispatch_port_error(dev);
435 else
436 ocrdma_dispatch_port_active(dev);
437}
438
446static struct ocrdma_driver ocrdma_drv = { 439static struct ocrdma_driver ocrdma_drv = {
447 .name = "ocrdma_driver", 440 .name = "ocrdma_driver",
448 .add = ocrdma_add, 441 .add = ocrdma_add,
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
index 6a38268bbe9f..99dd6fdf06d7 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
@@ -465,8 +465,11 @@ struct ocrdma_ae_qp_mcqe {
465 u32 valid_ae_event; 465 u32 valid_ae_event;
466}; 466};
467 467
468#define OCRDMA_ASYNC_RDMA_EVE_CODE 0x14 468enum ocrdma_async_event_code {
469#define OCRDMA_ASYNC_GRP5_EVE_CODE 0x5 469 OCRDMA_ASYNC_LINK_EVE_CODE = 0x01,
470 OCRDMA_ASYNC_GRP5_EVE_CODE = 0x05,
471 OCRDMA_ASYNC_RDMA_EVE_CODE = 0x14
472};
470 473
471enum ocrdma_async_grp5_events { 474enum ocrdma_async_grp5_events {
472 OCRDMA_ASYNC_EVENT_QOS_VALUE = 0x01, 475 OCRDMA_ASYNC_EVENT_QOS_VALUE = 0x01,
@@ -489,6 +492,44 @@ enum OCRDMA_ASYNC_EVENT_TYPE {
489 OCRDMA_MAX_ASYNC_ERRORS 492 OCRDMA_MAX_ASYNC_ERRORS
490}; 493};
491 494
495struct ocrdma_ae_lnkst_mcqe {
496 u32 speed_state_ptn;
497 u32 qos_reason_falut;
498 u32 evt_tag;
499 u32 valid_ae_event;
500};
501
502enum {
503 OCRDMA_AE_LSC_PORT_NUM_MASK = 0x3F,
504 OCRDMA_AE_LSC_PT_SHIFT = 0x06,
505 OCRDMA_AE_LSC_PT_MASK = (0x03 <<
506 OCRDMA_AE_LSC_PT_SHIFT),
507 OCRDMA_AE_LSC_LS_SHIFT = 0x08,
508 OCRDMA_AE_LSC_LS_MASK = (0xFF <<
509 OCRDMA_AE_LSC_LS_SHIFT),
510 OCRDMA_AE_LSC_LD_SHIFT = 0x10,
511 OCRDMA_AE_LSC_LD_MASK = (0xFF <<
512 OCRDMA_AE_LSC_LD_SHIFT),
513 OCRDMA_AE_LSC_PPS_SHIFT = 0x18,
514 OCRDMA_AE_LSC_PPS_MASK = (0xFF <<
515 OCRDMA_AE_LSC_PPS_SHIFT),
516 OCRDMA_AE_LSC_PPF_MASK = 0xFF,
517 OCRDMA_AE_LSC_ER_SHIFT = 0x08,
518 OCRDMA_AE_LSC_ER_MASK = (0xFF <<
519 OCRDMA_AE_LSC_ER_SHIFT),
520 OCRDMA_AE_LSC_QOS_SHIFT = 0x10,
521 OCRDMA_AE_LSC_QOS_MASK = (0xFFFF <<
522 OCRDMA_AE_LSC_QOS_SHIFT)
523};
524
525enum {
526 OCRDMA_AE_LSC_PLINK_DOWN = 0x00,
527 OCRDMA_AE_LSC_PLINK_UP = 0x01,
528 OCRDMA_AE_LSC_LLINK_DOWN = 0x02,
529 OCRDMA_AE_LSC_LLINK_MASK = 0x02,
530 OCRDMA_AE_LSC_LLINK_UP = 0x03
531};
532
492/* mailbox command request and responses */ 533/* mailbox command request and responses */
493enum { 534enum {
494 OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_SHIFT = 2, 535 OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_SHIFT = 2,
@@ -676,7 +717,7 @@ enum {
676 OCRDMA_PHY_PFLT_SHIFT = 0x18, 717 OCRDMA_PHY_PFLT_SHIFT = 0x18,
677 OCRDMA_QOS_LNKSP_MASK = 0xFFFF0000, 718 OCRDMA_QOS_LNKSP_MASK = 0xFFFF0000,
678 OCRDMA_QOS_LNKSP_SHIFT = 0x10, 719 OCRDMA_QOS_LNKSP_SHIFT = 0x10,
679 OCRDMA_LLST_MASK = 0xFF, 720 OCRDMA_LINK_ST_MASK = 0x01,
680 OCRDMA_PLFC_MASK = 0x00000400, 721 OCRDMA_PLFC_MASK = 0x00000400,
681 OCRDMA_PLFC_SHIFT = 0x8, 722 OCRDMA_PLFC_SHIFT = 0x8,
682 OCRDMA_PLRFC_MASK = 0x00000200, 723 OCRDMA_PLRFC_MASK = 0x00000200,
@@ -691,7 +732,7 @@ struct ocrdma_get_link_speed_rsp {
691 732
692 u32 pflt_pps_ld_pnum; 733 u32 pflt_pps_ld_pnum;
693 u32 qos_lsp; 734 u32 qos_lsp;
694 u32 res_lls; 735 u32 res_lnk_st;
695}; 736};
696 737
697enum { 738enum {
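The masks above decode the first word of the link-state async CQE: the link state lives in bits 15:8, and OCRDMA_AE_LSC_LLINK_MASK (0x02) picks out the logical-link bit that ocrdma_process_link_state() acts on. A standalone decode of an example word (the 0x300 value is fabricated for illustration):

	#include <stdio.h>

	#define OCRDMA_AE_LSC_LS_SHIFT	 0x08
	#define OCRDMA_AE_LSC_LS_MASK	 (0xFF << OCRDMA_AE_LSC_LS_SHIFT)
	#define OCRDMA_AE_LSC_LLINK_MASK 0x02	/* logical-link bit of the state */

	int main(void)
	{
		unsigned int speed_state_ptn = 0x00000300;	/* example CQE word */
		unsigned char lstate =
			(speed_state_ptn & OCRDMA_AE_LSC_LS_MASK) >> OCRDMA_AE_LSC_LS_SHIFT;

		/* 0x03 = logical link up; events without the logical-link
		 * bit set are ignored by the handler */
		printf("lstate=%#x logical-link=%s\n", lstate,
		       (lstate & OCRDMA_AE_LSC_LLINK_MASK) ? "set" : "ignored");
		return 0;
	}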
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 583001bcfb8f..76e96f97b3f6 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -171,7 +171,7 @@ static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
171 int status; 171 int status;
172 u8 speed; 172 u8 speed;
173 173
174 status = ocrdma_mbx_get_link_speed(dev, &speed); 174 status = ocrdma_mbx_get_link_speed(dev, &speed, NULL);
175 if (status) 175 if (status)
176 speed = OCRDMA_PHYS_LINK_SPEED_ZERO; 176 speed = OCRDMA_PHYS_LINK_SPEED_ZERO;
177 177
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.c b/drivers/infiniband/hw/qib/qib_qsfp.c
index 5e27f76805e2..4c7c3c84a741 100644
--- a/drivers/infiniband/hw/qib/qib_qsfp.c
+++ b/drivers/infiniband/hw/qib/qib_qsfp.c
@@ -292,7 +292,7 @@ int qib_refresh_qsfp_cache(struct qib_pportdata *ppd, struct qib_qsfp_cache *cp)
292 qib_dev_porterr(ppd->dd, ppd->port, 292 qib_dev_porterr(ppd->dd, ppd->port,
293 "QSFP byte0 is 0x%02X, S/B 0x0C/D\n", peek[0]); 293 "QSFP byte0 is 0x%02X, S/B 0x0C/D\n", peek[0]);
294 294
295 if ((peek[2] & 2) == 0) { 295 if ((peek[2] & 4) == 0) {
296 /* 296 /*
297 * If cable is paged, rather than "flat memory", we need to 297 * If cable is paged, rather than "flat memory", we need to
298 * set the page to zero, Even if it already appears to be zero. 298 * set the page to zero, Even if it already appears to be zero.
@@ -538,7 +538,7 @@ int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len)
538 sofar += scnprintf(buf + sofar, len - sofar, "Date:%.*s\n", 538 sofar += scnprintf(buf + sofar, len - sofar, "Date:%.*s\n",
539 QSFP_DATE_LEN, cd.date); 539 QSFP_DATE_LEN, cd.date);
540 sofar += scnprintf(buf + sofar, len - sofar, "Lot:%.*s\n", 540 sofar += scnprintf(buf + sofar, len - sofar, "Lot:%.*s\n",
541 QSFP_LOT_LEN, cd.date); 541 QSFP_LOT_LEN, cd.lot);
542 542
543 while (bidx < QSFP_DEFAULT_HDR_CNT) { 543 while (bidx < QSFP_DEFAULT_HDR_CNT) {
544 int iidx; 544 int iidx;
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index 2baf5ad251ed..bc803f33d5f6 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -329,9 +329,9 @@ struct qib_sge {
329struct qib_mr { 329struct qib_mr {
330 struct ib_mr ibmr; 330 struct ib_mr ibmr;
331 struct ib_umem *umem; 331 struct ib_umem *umem;
332 struct qib_mregion mr; /* must be last */
333 u64 *pages; 332 u64 *pages;
334 u32 npages; 333 u32 npages;
334 struct qib_mregion mr; /* must be last */
335}; 335};
336 336
337/* 337/*
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index a93070210109..42f4da620f2e 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -1293,7 +1293,7 @@ u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
1293 if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) { 1293 if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
1294 sector_t sector_off = mr_status.sig_err.sig_err_offset; 1294 sector_t sector_off = mr_status.sig_err.sig_err_offset;
1295 1295
1296 do_div(sector_off, sector_size + 8); 1296 sector_div(sector_off, sector_size + 8);
1297 *sector = scsi_get_lba(iser_task->sc) + sector_off; 1297 *sector = scsi_get_lba(iser_task->sc) + sector_off;
1298 1298
1299 pr_err("PI error found type %d at sector %llx " 1299 pr_err("PI error found type %d at sector %llx "
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index dfbbbb28090b..8a51c3b5d657 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -157,16 +157,9 @@ isert_create_qp(struct isert_conn *isert_conn,
157 attr.recv_cq = comp->cq; 157 attr.recv_cq = comp->cq;
158 attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS; 158 attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
159 attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1; 159 attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
160 /* 160 attr.cap.max_send_sge = device->dev_attr.max_sge;
161 * FIXME: Use devattr.max_sge - 2 for max_send_sge as 161 isert_conn->max_sge = min(device->dev_attr.max_sge,
162 * work-around for RDMA_READs with ConnectX-2. 162 device->dev_attr.max_sge_rd);
163 *
164 * Also, still make sure to have at least two SGEs for
165 * outgoing control PDU responses.
166 */
167 attr.cap.max_send_sge = max(2, device->dev_attr.max_sge - 2);
168 isert_conn->max_sge = attr.cap.max_send_sge;
169
170 attr.cap.max_recv_sge = 1; 163 attr.cap.max_recv_sge = 1;
171 attr.sq_sig_type = IB_SIGNAL_REQ_WR; 164 attr.sq_sig_type = IB_SIGNAL_REQ_WR;
172 attr.qp_type = IB_QPT_RC; 165 attr.qp_type = IB_QPT_RC;
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 9909022dc6c3..3db9a659719b 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -488,7 +488,7 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
 	struct ib_qp *qp;
 	struct ib_fmr_pool *fmr_pool = NULL;
 	struct srp_fr_pool *fr_pool = NULL;
-	const int m = 1 + dev->use_fast_reg;
+	const int m = dev->use_fast_reg ? 3 : 1;
 	struct ib_cq_init_attr cq_attr = {};
 	int ret;
 
@@ -994,16 +994,16 @@ static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
 
 	ret = srp_lookup_path(ch);
 	if (ret)
-		return ret;
+		goto out;
 
 	while (1) {
 		init_completion(&ch->done);
 		ret = srp_send_req(ch, multich);
 		if (ret)
-			return ret;
+			goto out;
 		ret = wait_for_completion_interruptible(&ch->done);
 		if (ret < 0)
-			return ret;
+			goto out;
 
 		/*
 		 * The CM event handling code will set status to
@@ -1011,15 +1011,16 @@ static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
 		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
 		 * redirect REJ back.
 		 */
-		switch (ch->status) {
+		ret = ch->status;
+		switch (ret) {
 		case 0:
 			ch->connected = true;
-			return 0;
+			goto out;
 
 		case SRP_PORT_REDIRECT:
 			ret = srp_lookup_path(ch);
 			if (ret)
-				return ret;
+				goto out;
 			break;
 
 		case SRP_DLID_REDIRECT:
1024 1025
1025 case SRP_DLID_REDIRECT: 1026 case SRP_DLID_REDIRECT:
@@ -1028,13 +1029,16 @@ static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
 		case SRP_STALE_CONN:
 			shost_printk(KERN_ERR, target->scsi_host, PFX
 				     "giving up on stale connection\n");
-			ch->status = -ECONNRESET;
-			return ch->status;
+			ret = -ECONNRESET;
+			goto out;
 
 		default:
-			return ch->status;
+			goto out;
 		}
 	}
+
+out:
+	return ret <= 0 ? ret : -ENODEV;
 }
 
 static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey)
@@ -1309,7 +1313,7 @@ reset_state:
 }
 
 static int srp_map_finish_fr(struct srp_map_state *state,
-			     struct srp_rdma_ch *ch)
+			     struct srp_rdma_ch *ch, int sg_nents)
 {
 	struct srp_target_port *target = ch->target;
 	struct srp_device *dev = target->srp_host->srp_dev;
@@ -1324,10 +1328,10 @@ static int srp_map_finish_fr(struct srp_map_state *state,
 
 	WARN_ON_ONCE(!dev->use_fast_reg);
 
-	if (state->sg_nents == 0)
+	if (sg_nents == 0)
 		return 0;
 
-	if (state->sg_nents == 1 && target->global_mr) {
+	if (sg_nents == 1 && target->global_mr) {
 		srp_map_desc(state, sg_dma_address(state->sg),
 			     sg_dma_len(state->sg),
 			     target->global_mr->rkey);
@@ -1341,8 +1345,7 @@ static int srp_map_finish_fr(struct srp_map_state *state,
 	rkey = ib_inc_rkey(desc->mr->rkey);
 	ib_update_fast_reg_key(desc->mr, rkey);
 
-	n = ib_map_mr_sg(desc->mr, state->sg, state->sg_nents,
-			 dev->mr_page_size);
+	n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, dev->mr_page_size);
 	if (unlikely(n < 0))
 		return n;
 
@@ -1448,16 +1451,15 @@ static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
 	state->fr.next = req->fr_list;
 	state->fr.end = req->fr_list + ch->target->cmd_sg_cnt;
 	state->sg = scat;
-	state->sg_nents = scsi_sg_count(req->scmnd);
 
-	while (state->sg_nents) {
+	while (count) {
 		int i, n;
 
-		n = srp_map_finish_fr(state, ch);
+		n = srp_map_finish_fr(state, ch, count);
 		if (unlikely(n < 0))
 			return n;
 
-		state->sg_nents -= n;
+		count -= n;
 		for (i = 0; i < n; i++)
 			state->sg = sg_next(state->sg);
 	}
@@ -1517,10 +1519,12 @@ static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
 
 	if (dev->use_fast_reg) {
 		state.sg = idb_sg;
-		state.sg_nents = 1;
 		sg_set_buf(idb_sg, req->indirect_desc, idb_len);
 		idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+		idb_sg->dma_length = idb_sg->length;	      /* hack^2 */
+#endif
+		ret = srp_map_finish_fr(&state, ch, 1);
-		ret = srp_map_finish_fr(&state, ch);
 		if (ret < 0)
 			return ret;
 	} else if (dev->use_fmr) {
@@ -1655,7 +1659,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
 			return ret;
 		req->nmdesc++;
 	} else {
-		idb_rkey = target->global_mr->rkey;
+		idb_rkey = cpu_to_be32(target->global_mr->rkey);
 	}
 
 	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
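
The srp_connect_ch() hunks above funnel every exit through a single out: label, and the final return maps any leftover positive SRP status code onto a real errno so callers never see a positive value. A minimal sketch of that single-exit pattern in plain C, with a hypothetical PROTO_REDIRECT status standing in for the SRP reject codes:

	#include <errno.h>
	#include <stdio.h>

	#define PROTO_REDIRECT 2	/* hypothetical positive protocol status */

	static int connect_once(int status)
	{
		int ret = status;

		switch (ret) {
		case 0:
			goto out;		/* success */
		case PROTO_REDIRECT:
			ret = -EAGAIN;		/* translated into an errno */
			goto out;
		default:
			goto out;		/* untranslated positive status */
		}
	out:
		/* positive codes must not leak to callers expecting errnos */
		return ret <= 0 ? ret : -ENODEV;
	}

	int main(void)
	{
		printf("%d %d %d\n", connect_once(0),
		       connect_once(PROTO_REDIRECT), connect_once(7));
		return 0;	/* prints: 0 -11 -19 on Linux errno values */
	}
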
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index 87a2a919dc43..f6af531f9f32 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -300,10 +300,7 @@ struct srp_map_state {
 	dma_addr_t		base_dma_addr;
 	u32			dma_len;
 	u32			total_len;
-	union {
-		unsigned int	npages;
-		int		sg_nents;
-	};
+	unsigned int		npages;
 	unsigned int		nmdesc;
 	unsigned int		ndesc;
 };
diff --git a/drivers/input/joystick/db9.c b/drivers/input/joystick/db9.c
index 932d07307454..da326090c2b0 100644
--- a/drivers/input/joystick/db9.c
+++ b/drivers/input/joystick/db9.c
@@ -592,6 +592,7 @@ static void db9_attach(struct parport *pp)
 		return;
 	}
 
+	memset(&db9_parport_cb, 0, sizeof(db9_parport_cb));
 	db9_parport_cb.flags = PARPORT_FLAG_EXCL;
 
 	pd = parport_register_dev_model(pp, "db9", &db9_parport_cb, port_idx);
diff --git a/drivers/input/joystick/gamecon.c b/drivers/input/joystick/gamecon.c
index 5a672dcac0d8..eae14d512353 100644
--- a/drivers/input/joystick/gamecon.c
+++ b/drivers/input/joystick/gamecon.c
@@ -951,6 +951,7 @@ static void gc_attach(struct parport *pp)
 	pads = gc_cfg[port_idx].args + 1;
 	n_pads = gc_cfg[port_idx].nargs - 1;
 
+	memset(&gc_parport_cb, 0, sizeof(gc_parport_cb));
 	gc_parport_cb.flags = PARPORT_FLAG_EXCL;
 
 	pd = parport_register_dev_model(pp, "gamecon", &gc_parport_cb,
diff --git a/drivers/input/joystick/turbografx.c b/drivers/input/joystick/turbografx.c
index 9f5bca26bd2f..77f575dd0901 100644
--- a/drivers/input/joystick/turbografx.c
+++ b/drivers/input/joystick/turbografx.c
@@ -181,6 +181,7 @@ static void tgfx_attach(struct parport *pp)
 	n_buttons = tgfx_cfg[port_idx].args + 1;
 	n_devs = tgfx_cfg[port_idx].nargs - 1;
 
+	memset(&tgfx_parport_cb, 0, sizeof(tgfx_parport_cb));
 	tgfx_parport_cb.flags = PARPORT_FLAG_EXCL;
 
 	pd = parport_register_dev_model(pp, "turbografx", &tgfx_parport_cb,
diff --git a/drivers/input/joystick/walkera0701.c b/drivers/input/joystick/walkera0701.c
index 9c07fe911075..70a893a17467 100644
--- a/drivers/input/joystick/walkera0701.c
+++ b/drivers/input/joystick/walkera0701.c
@@ -218,6 +218,7 @@ static void walkera0701_attach(struct parport *pp)
 
 	w->parport = pp;
 
+	memset(&walkera0701_parport_cb, 0, sizeof(walkera0701_parport_cb));
 	walkera0701_parport_cb.flags = PARPORT_FLAG_EXCL;
 	walkera0701_parport_cb.irq_func = walkera0701_irq_handler;
 	walkera0701_parport_cb.private = w;
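
Each of the four joystick hunks above inserts a memset() before the driver fills in its struct pardev_cb. These callback structures are reused across attach calls and the parport core reads every member, so fields a driver does not set explicitly (irq_func, private, and so on) must be zeroed rather than left holding stale or indeterminate data. A minimal illustration of the pattern, with a made-up demo_cb struct standing in for struct pardev_cb:

	#include <string.h>

	/* stand-in for struct pardev_cb; the real one has more members */
	struct demo_cb {
		void (*irq_func)(void *);  /* optional: NULL means no IRQ handler */
		void *private;
		int flags;
	};

	static void attach(struct demo_cb *cb)
	{
		/*
		 * Zero everything first: a stack instance would otherwise
		 * start with indeterminate contents, and a static one could
		 * carry values left over from a previous attach.
		 */
		memset(cb, 0, sizeof(*cb));
		cb->flags = 0x1;	/* then set only the fields this driver uses */
	}
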
diff --git a/drivers/input/misc/arizona-haptics.c b/drivers/input/misc/arizona-haptics.c
index 4bf678541496..d5994a745ffa 100644
--- a/drivers/input/misc/arizona-haptics.c
+++ b/drivers/input/misc/arizona-haptics.c
@@ -97,8 +97,7 @@ static void arizona_haptics_work(struct work_struct *work)
 
 	ret = regmap_update_bits(arizona->regmap,
 				 ARIZONA_HAPTICS_CONTROL_1,
-				 ARIZONA_HAP_CTRL_MASK,
-				 1 << ARIZONA_HAP_CTRL_SHIFT);
+				 ARIZONA_HAP_CTRL_MASK, 0);
 	if (ret != 0) {
 		dev_err(arizona->dev, "Failed to stop haptics: %d\n",
 			ret);
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 5e1665bbaa0b..2f589857a039 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -41,6 +41,7 @@
 
 #define DRIVER_NAME		"elan_i2c"
 #define ELAN_DRIVER_VERSION	"1.6.1"
+#define ELAN_VENDOR_ID		0x04f3
 #define ETP_MAX_PRESSURE	255
 #define ETP_FWIDTH_REDUCE	90
 #define ETP_FINGER_WIDTH	15
@@ -914,6 +915,8 @@ static int elan_setup_input_device(struct elan_tp_data *data)
 
 	input->name = "Elan Touchpad";
 	input->id.bustype = BUS_I2C;
+	input->id.vendor = ELAN_VENDOR_ID;
+	input->id.product = data->product_id;
 	input_set_drvdata(input, data);
 
 	error = input_mt_init_slots(input, ETP_MAX_FINGERS,
diff --git a/drivers/input/serio/parkbd.c b/drivers/input/serio/parkbd.c
index 92c31b8f8fb4..1edfac78d4ac 100644
--- a/drivers/input/serio/parkbd.c
+++ b/drivers/input/serio/parkbd.c
@@ -145,6 +145,7 @@ static int parkbd_getport(struct parport *pp)
 {
 	struct pardev_cb parkbd_parport_cb;
 
+	memset(&parkbd_parport_cb, 0, sizeof(parkbd_parport_cb));
 	parkbd_parport_cb.irq_func = parkbd_interrupt;
 	parkbd_parport_cb.flags = PARPORT_FLAG_EXCL;
 
diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c
index e7f966da6efa..78ca44840d60 100644
--- a/drivers/input/tablet/aiptek.c
+++ b/drivers/input/tablet/aiptek.c
@@ -1819,6 +1819,14 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
 	input_set_abs_params(inputdev, ABS_TILT_Y, AIPTEK_TILT_MIN, AIPTEK_TILT_MAX, 0, 0);
 	input_set_abs_params(inputdev, ABS_WHEEL, AIPTEK_WHEEL_MIN, AIPTEK_WHEEL_MAX - 1, 0, 0);
 
+	/* Verify that a device really has an endpoint */
+	if (intf->altsetting[0].desc.bNumEndpoints < 1) {
+		dev_err(&intf->dev,
+			"interface has %d endpoints, but must have minimum 1\n",
+			intf->altsetting[0].desc.bNumEndpoints);
+		err = -EINVAL;
+		goto fail3;
+	}
 	endpoint = &intf->altsetting[0].endpoint[0].desc;
 
 	/* Go set up our URB, which is called when the tablet receives
@@ -1861,6 +1869,7 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
 	if (i == ARRAY_SIZE(speeds)) {
 		dev_info(&intf->dev,
 			 "Aiptek tried all speeds, no sane response\n");
+		err = -EINVAL;
 		goto fail3;
 	}
 
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index c5622058c22b..2d5794ec338b 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -2487,6 +2487,31 @@ static struct mxt_acpi_platform_data samus_platform_data[] = {
 	{ }
 };
 
+static unsigned int chromebook_tp_buttons[] = {
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	BTN_LEFT
+};
+
+static struct mxt_acpi_platform_data chromebook_platform_data[] = {
+	{
+		/* Touchpad */
+		.hid	= "ATML0000",
+		.pdata	= {
+			.t19_num_keys	= ARRAY_SIZE(chromebook_tp_buttons),
+			.t19_keymap	= chromebook_tp_buttons,
+		},
+	},
+	{
+		/* Touchscreen */
+		.hid	= "ATML0001",
+	},
+	{ }
+};
+
 static const struct dmi_system_id mxt_dmi_table[] = {
 	{
 		/* 2015 Google Pixel */
@@ -2497,6 +2522,14 @@ static const struct dmi_system_id mxt_dmi_table[] = {
 		},
 		.driver_data = samus_platform_data,
 	},
+	{
+		/* Other Google Chromebooks */
+		.ident = "Chromebook",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+		},
+		.driver_data = chromebook_platform_data,
+	},
 	{ }
 };
 
2502 2535
@@ -2701,6 +2734,7 @@ static const struct i2c_device_id mxt_id[] = {
 	{ "qt602240_ts", 0 },
 	{ "atmel_mxt_ts", 0 },
 	{ "atmel_mxt_tp", 0 },
+	{ "maxtouch", 0 },
 	{ "mXT224", 0 },
 	{ }
 };
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index 17cc20ef4923..ac09855fa435 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -1316,7 +1316,13 @@ static int __maybe_unused elants_i2c_suspend(struct device *dev)
 
 	disable_irq(client->irq);
 
-	if (device_may_wakeup(dev) || ts->keep_power_in_suspend) {
+	if (device_may_wakeup(dev)) {
+		/*
+		 * The device will automatically enter idle mode
+		 * that has reduced power consumption.
+		 */
+		ts->wake_irq_enabled = (enable_irq_wake(client->irq) == 0);
+	} else if (ts->keep_power_in_suspend) {
 		for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) {
 			error = elants_i2c_send(client, set_sleep_cmd,
 						sizeof(set_sleep_cmd));
@@ -1326,10 +1332,6 @@ static int __maybe_unused elants_i2c_suspend(struct device *dev)
 			dev_err(&client->dev,
 				"suspend command failed: %d\n", error);
 		}
-
-		if (device_may_wakeup(dev))
-			ts->wake_irq_enabled =
-					(enable_irq_wake(client->irq) == 0);
 	} else {
 		elants_i2c_power_off(ts);
 	}
@@ -1345,10 +1347,11 @@ static int __maybe_unused elants_i2c_resume(struct device *dev)
 	int retry_cnt;
 	int error;
 
-	if (device_may_wakeup(dev) && ts->wake_irq_enabled)
-		disable_irq_wake(client->irq);
-
-	if (ts->keep_power_in_suspend) {
+	if (device_may_wakeup(dev)) {
+		if (ts->wake_irq_enabled)
+			disable_irq_wake(client->irq);
+		elants_i2c_sw_reset(client);
+	} else if (ts->keep_power_in_suspend) {
 		for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) {
 			error = elants_i2c_send(client, set_active_cmd,
 						sizeof(set_active_cmd));
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index d21d4edf7236..7caf2fa237f2 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -494,6 +494,22 @@ static void handle_fault_error(struct fault *fault)
 	}
 }
 
+static bool access_error(struct vm_area_struct *vma, struct fault *fault)
+{
+	unsigned long requested = 0;
+
+	if (fault->flags & PPR_FAULT_EXEC)
+		requested |= VM_EXEC;
+
+	if (fault->flags & PPR_FAULT_READ)
+		requested |= VM_READ;
+
+	if (fault->flags & PPR_FAULT_WRITE)
+		requested |= VM_WRITE;
+
+	return (requested & ~vma->vm_flags) != 0;
+}
+
 static void do_fault(struct work_struct *work)
 {
 	struct fault *fault = container_of(work, struct fault, work);
@@ -516,8 +532,8 @@ static void do_fault(struct work_struct *work)
 		goto out;
 	}
 
-	if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) {
-		/* handle_mm_fault would BUG_ON() */
+	/* Check if we have the right permissions on the vma */
+	if (access_error(vma, fault)) {
 		up_read(&mm->mmap_sem);
 		handle_fault_error(fault);
 		goto out;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index f1042daef9ad..ac7387686ddc 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2159,7 +2159,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 			sg_res = aligned_nrpages(sg->offset, sg->length);
 			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
 			sg->dma_length = sg->length;
-			pteval = (sg_phys(sg) & PAGE_MASK) | prot;
+			pteval = page_to_phys(sg_page(sg)) | prot;
 			phys_pfn = pteval >> VTD_PAGE_SHIFT;
 		}
 
@@ -3704,7 +3704,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
 
 	for_each_sg(sglist, sg, nelems, i) {
 		BUG_ON(!sg_page(sg));
-		sg->dma_address = sg_phys(sg);
+		sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
 		sg->dma_length = sg->length;
 	}
 	return nelems;
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index c69e3f9ec958..50464833d0b8 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -484,6 +484,23 @@ struct page_req_dsc {
 };
 
 #define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x10)
+
+static bool access_error(struct vm_area_struct *vma, struct page_req_dsc *req)
+{
+	unsigned long requested = 0;
+
+	if (req->exe_req)
+		requested |= VM_EXEC;
+
+	if (req->rd_req)
+		requested |= VM_READ;
+
+	if (req->wr_req)
+		requested |= VM_WRITE;
+
+	return (requested & ~vma->vm_flags) != 0;
+}
+
 static irqreturn_t prq_event_thread(int irq, void *d)
 {
 	struct intel_iommu *iommu = d;
@@ -539,6 +556,9 @@ static irqreturn_t prq_event_thread(int irq, void *d)
 		if (!vma || address < vma->vm_start)
 			goto invalid;
 
+		if (access_error(vma, req))
+			goto invalid;
+
 		ret = handle_mm_fault(svm->mm, vma, address,
 				      req->wr_req ? FAULT_FLAG_WRITE : 0);
 		if (ret & VM_FAULT_ERROR)
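
Both IOMMU fault paths above gain the same access_error() helper: build a bitmask of what the faulting request asks for, then test it against what the VMA allows, rejecting the fault before handle_mm_fault() if any requested bit is missing. The core idiom is requested & ~allowed; a self-contained sketch with hypothetical permission bits in place of the kernel's VM_* flags:

	#include <stdbool.h>
	#include <stdio.h>

	#define P_READ  0x1
	#define P_WRITE 0x2
	#define P_EXEC  0x4

	/* true if the request asks for any permission the region lacks */
	static bool access_error(unsigned allowed, unsigned requested)
	{
		return (requested & ~allowed) != 0;
	}

	int main(void)
	{
		printf("%d\n", access_error(P_READ | P_EXEC, P_WRITE)); /* 1 */
		printf("%d\n", access_error(P_READ | P_WRITE, P_READ)); /* 0 */
		return 0;
	}
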
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index abae363c7b9b..0e3b0092ec92 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1430,7 +1430,7 @@ size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
 	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
 
 	for_each_sg(sg, s, nents, i) {
-		phys_addr_t phys = sg_phys(s);
+		phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
 
 		/*
 		 * We are mapping on IOMMU page boundaries, so offset within
diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c
index 598ab3f0e0ac..cadf104e3074 100644
--- a/drivers/irqchip/irq-versatile-fpga.c
+++ b/drivers/irqchip/irq-versatile-fpga.c
@@ -210,7 +210,12 @@ int __init fpga_irq_of_init(struct device_node *node,
 		parent_irq = -1;
 	}
 
+#ifdef CONFIG_ARCH_VERSATILE
+	fpga_irq_init(base, node->name, IRQ_SIC_START, parent_irq, valid_mask,
+				  node);
+#else
 	fpga_irq_init(base, node->name, 0, parent_irq, valid_mask, node);
+#endif
 
 	writel(clear_mask, base + IRQ_ENABLE_CLEAR);
 	writel(clear_mask, base + FIQ_ENABLE_CLEAR);
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
index 375be509e95f..2a506fe0c8a4 100644
--- a/drivers/isdn/gigaset/ser-gigaset.c
+++ b/drivers/isdn/gigaset/ser-gigaset.c
@@ -67,8 +67,7 @@ static int write_modem(struct cardstate *cs)
 	struct sk_buff *skb = bcs->tx_skb;
 	int sent = -EOPNOTSUPP;
 
-	if (!tty || !tty->driver || !skb)
-		return -EINVAL;
+	WARN_ON(!tty || !tty->ops || !skb);
 
 	if (!skb->len) {
 		dev_kfree_skb_any(skb);
@@ -109,8 +108,7 @@ static int send_cb(struct cardstate *cs)
 	unsigned long flags;
 	int sent = 0;
 
-	if (!tty || !tty->driver)
-		return -EFAULT;
+	WARN_ON(!tty || !tty->ops);
 
 	cb = cs->cmdbuf;
 	if (!cb)
@@ -370,19 +368,18 @@ static void gigaset_freecshw(struct cardstate *cs)
 	tasklet_kill(&cs->write_tasklet);
 	if (!cs->hw.ser)
 		return;
-	dev_set_drvdata(&cs->hw.ser->dev.dev, NULL);
 	platform_device_unregister(&cs->hw.ser->dev);
-	kfree(cs->hw.ser);
-	cs->hw.ser = NULL;
 }
 
 static void gigaset_device_release(struct device *dev)
 {
-	struct platform_device *pdev = to_platform_device(dev);
+	struct cardstate *cs = dev_get_drvdata(dev);
 
-	/* adapted from platform_device_release() in drivers/base/platform.c */
-	kfree(dev->platform_data);
-	kfree(pdev->resource);
+	if (!cs)
+		return;
+	dev_set_drvdata(dev, NULL);
+	kfree(cs->hw.ser);
+	cs->hw.ser = NULL;
 }
 
 /*
@@ -432,7 +429,9 @@ static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state,
 	struct tty_struct *tty = cs->hw.ser->tty;
 	unsigned int set, clear;
 
-	if (!tty || !tty->driver || !tty->ops->tiocmset)
+	WARN_ON(!tty || !tty->ops);
+	/* tiocmset is an optional tty driver method */
+	if (!tty->ops->tiocmset)
 		return -EINVAL;
 	set = new_state & ~old_state;
 	clear = old_state & ~new_state;
diff --git a/drivers/isdn/hardware/mISDN/mISDNipac.c b/drivers/isdn/hardware/mISDN/mISDNipac.c
index a77eea594b69..cb428b9ee441 100644
--- a/drivers/isdn/hardware/mISDN/mISDNipac.c
+++ b/drivers/isdn/hardware/mISDN/mISDNipac.c
@@ -1170,7 +1170,7 @@ mISDNipac_irq(struct ipac_hw *ipac, int maxloop)
 
 	if (ipac->type & IPAC_TYPE_IPACX) {
 		ista = ReadIPAC(ipac, ISACX_ISTA);
-		while (ista && cnt--) {
+		while (ista && --cnt) {
 			pr_debug("%s: ISTA %02x\n", ipac->name, ista);
 			if (ista & IPACX__ICA)
 				ipac_irq(&ipac->hscx[0], ista);
@@ -1182,7 +1182,7 @@ mISDNipac_irq(struct ipac_hw *ipac, int maxloop)
 		}
 	} else if (ipac->type & IPAC_TYPE_IPAC) {
 		ista = ReadIPAC(ipac, IPAC_ISTA);
-		while (ista && cnt--) {
+		while (ista && --cnt) {
 			pr_debug("%s: ISTA %02x\n", ipac->name, ista);
 			if (ista & (IPAC__ICD | IPAC__EXD)) {
 				istad = ReadISAC(isac, ISAC_ISTA);
@@ -1200,7 +1200,7 @@ mISDNipac_irq(struct ipac_hw *ipac, int maxloop)
 			ista = ReadIPAC(ipac, IPAC_ISTA);
 		}
 	} else if (ipac->type & IPAC_TYPE_HSCX) {
-		while (cnt) {
+		while (--cnt) {
 			ista = ReadIPAC(ipac, IPAC_ISTAB + ipac->hscx[1].off);
 			pr_debug("%s: B2 ISTA %02x\n", ipac->name, ista);
 			if (ista)
@@ -1211,7 +1211,6 @@ mISDNipac_irq(struct ipac_hw *ipac, int maxloop)
 				mISDNisac_irq(isac, istad);
 			if (0 == (ista | istad))
 				break;
-			cnt--;
 		}
 	}
 	if (cnt > maxloop) /* only for ISAC/HSCX without PCI IRQ test */
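
The mISDNipac_irq() hunks swap post-decrement for pre-decrement in the loop conditions (and drop the separate cnt-- in the HSCX branch). With while (x && cnt--) the decrement still happens on the final, failing test, so an exhausted budget leaves the counter at -1 and the body runs one extra time; with --cnt the loop stops with the counter at a well-defined 0, which matters because the function afterwards compares cnt against maxloop. A two-line demonstration of the difference, pure C semantics:

	#include <stdio.h>

	int main(void)
	{
		int a = 3, b = 3, runs_a = 0, runs_b = 0;

		while (a--)	/* post: tests 3,2,1 -> 3 runs, ends at -1 */
			runs_a++;
		while (--b)	/* pre: tests 2,1 -> 2 runs, ends at 0 */
			runs_b++;

		printf("post: %d runs, a=%d; pre: %d runs, b=%d\n",
		       runs_a, a, runs_b, b);
		return 0;	/* prints: post: 3 runs, a=-1; pre: 2 runs, b=0 */
	}
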
diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c
index b33f53b3ca93..bf04d2a3cf4a 100644
--- a/drivers/isdn/hisax/config.c
+++ b/drivers/isdn/hisax/config.c
@@ -1896,7 +1896,7 @@ static void EChannel_proc_rcv(struct hisax_d_if *d_if)
 		ptr--;
 		*ptr++ = '\n';
 		*ptr = 0;
-		HiSax_putstatus(cs, NULL, "%s", cs->dlog);
+		HiSax_putstatus(cs, NULL, cs->dlog);
 	} else
 		HiSax_putstatus(cs, "LogEcho: ",
 				"warning Frame too big (%d)",
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index 4a4825528188..90449e1e91e5 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -901,7 +901,7 @@ Begin:
 			ptr--;
 			*ptr++ = '\n';
 			*ptr = 0;
-			HiSax_putstatus(cs, NULL, "%s", cs->dlog);
+			HiSax_putstatus(cs, NULL, cs->dlog);
 		} else
 			HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", total - 3);
 	}
diff --git a/drivers/isdn/hisax/hfc_sx.c b/drivers/isdn/hisax/hfc_sx.c
index b1fad81f0722..13b2151c10f5 100644
--- a/drivers/isdn/hisax/hfc_sx.c
+++ b/drivers/isdn/hisax/hfc_sx.c
@@ -674,7 +674,7 @@ receive_emsg(struct IsdnCardState *cs)
 			ptr--;
 			*ptr++ = '\n';
 			*ptr = 0;
-			HiSax_putstatus(cs, NULL, "%s", cs->dlog);
+			HiSax_putstatus(cs, NULL, cs->dlog);
 		} else
 			HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", skb->len);
 	}
diff --git a/drivers/isdn/hisax/q931.c b/drivers/isdn/hisax/q931.c
index b420f8bd862e..ba4beb25d872 100644
--- a/drivers/isdn/hisax/q931.c
+++ b/drivers/isdn/hisax/q931.c
@@ -1179,7 +1179,7 @@ LogFrame(struct IsdnCardState *cs, u_char *buf, int size)
 		dp--;
 		*dp++ = '\n';
 		*dp = 0;
-		HiSax_putstatus(cs, NULL, "%s", cs->dlog);
+		HiSax_putstatus(cs, NULL, cs->dlog);
 	} else
 		HiSax_putstatus(cs, "LogFrame: ", "warning Frame too big (%d)", size);
 }
@@ -1246,7 +1246,7 @@ dlogframe(struct IsdnCardState *cs, struct sk_buff *skb, int dir)
 	}
 	if (finish) {
 		*dp = 0;
-		HiSax_putstatus(cs, NULL, "%s", cs->dlog);
+		HiSax_putstatus(cs, NULL, cs->dlog);
 		return;
 	}
 	if ((0xfe & buf[0]) == PROTO_DIS_N0) {	/* 1TR6 */
@@ -1509,5 +1509,5 @@ dlogframe(struct IsdnCardState *cs, struct sk_buff *skb, int dir)
 		dp += sprintf(dp, "Unknown protocol %x!", buf[0]);
 	}
 	*dp = 0;
-	HiSax_putstatus(cs, NULL, "%s", cs->dlog);
+	HiSax_putstatus(cs, NULL, cs->dlog);
 }
diff --git a/drivers/lightnvm/Kconfig b/drivers/lightnvm/Kconfig
index a16bf56d3f28..85a339030e4b 100644
--- a/drivers/lightnvm/Kconfig
+++ b/drivers/lightnvm/Kconfig
@@ -18,6 +18,7 @@ if NVM
 
 config NVM_DEBUG
 	bool "Open-Channel SSD debugging support"
+	default n
 	---help---
 	Exposes a debug management interface to create/remove targets at:
 
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index f659e605a406..8f41b245cd55 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -74,7 +74,7 @@ EXPORT_SYMBOL(nvm_unregister_target);
 void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
 							dma_addr_t *dma_handler)
 {
-	return dev->ops->dev_dma_alloc(dev->q, dev->ppalist_pool, mem_flags,
+	return dev->ops->dev_dma_alloc(dev, dev->ppalist_pool, mem_flags,
 								dma_handler);
 }
 EXPORT_SYMBOL(nvm_dev_dma_alloc);
@@ -97,15 +97,47 @@ static struct nvmm_type *nvm_find_mgr_type(const char *name)
 	return NULL;
 }
 
+struct nvmm_type *nvm_init_mgr(struct nvm_dev *dev)
+{
+	struct nvmm_type *mt;
+	int ret;
+
+	lockdep_assert_held(&nvm_lock);
+
+	list_for_each_entry(mt, &nvm_mgrs, list) {
+		ret = mt->register_mgr(dev);
+		if (ret < 0) {
+			pr_err("nvm: media mgr failed to init (%d) on dev %s\n",
+								ret, dev->name);
+			return NULL; /* initialization failed */
+		} else if (ret > 0)
+			return mt;
+	}
+
+	return NULL;
+}
+
 int nvm_register_mgr(struct nvmm_type *mt)
 {
+	struct nvm_dev *dev;
 	int ret = 0;
 
 	down_write(&nvm_lock);
-	if (nvm_find_mgr_type(mt->name))
+	if (nvm_find_mgr_type(mt->name)) {
 		ret = -EEXIST;
-	else
+		goto finish;
+	} else {
 		list_add(&mt->list, &nvm_mgrs);
+	}
+
+	/* try to register media mgr if any device have none configured */
+	list_for_each_entry(dev, &nvm_devices, devices) {
+		if (dev->mt)
+			continue;
+
+		dev->mt = nvm_init_mgr(dev);
+	}
+finish:
 	up_write(&nvm_lock);
 
 	return ret;
@@ -160,11 +192,6 @@ int nvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk)
 }
 EXPORT_SYMBOL(nvm_erase_blk);
 
-static void nvm_core_free(struct nvm_dev *dev)
-{
-	kfree(dev);
-}
-
 static int nvm_core_init(struct nvm_dev *dev)
 {
 	struct nvm_id *id = &dev->identity;
@@ -179,12 +206,21 @@ static int nvm_core_init(struct nvm_dev *dev)
 	dev->sec_size = grp->csecs;
 	dev->oob_size = grp->sos;
 	dev->sec_per_pg = grp->fpg_sz / grp->csecs;
-	dev->addr_mode = id->ppat;
-	dev->addr_format = id->ppaf;
+	memcpy(&dev->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));
 
 	dev->plane_mode = NVM_PLANE_SINGLE;
 	dev->max_rq_size = dev->ops->max_phys_sect * dev->sec_size;
 
+	if (grp->mtype != 0) {
+		pr_err("nvm: memory type not supported\n");
+		return -EINVAL;
+	}
+
+	if (grp->fmtype != 0 && grp->fmtype != 1) {
+		pr_err("nvm: flash type not supported\n");
+		return -EINVAL;
+	}
+
 	if (grp->mpos & 0x020202)
 		dev->plane_mode = NVM_PLANE_DOUBLE;
 	if (grp->mpos & 0x040404)
@@ -213,21 +249,17 @@ static void nvm_free(struct nvm_dev *dev)
 
 	if (dev->mt)
 		dev->mt->unregister_mgr(dev);
-
-	nvm_core_free(dev);
 }
 
 static int nvm_init(struct nvm_dev *dev)
 {
-	struct nvmm_type *mt;
-	int ret = 0;
+	int ret = -EINVAL;
 
 	if (!dev->q || !dev->ops)
-		return -EINVAL;
+		return ret;
 
-	if (dev->ops->identity(dev->q, &dev->identity)) {
+	if (dev->ops->identity(dev, &dev->identity)) {
 		pr_err("nvm: device could not be identified\n");
-		ret = -EINVAL;
 		goto err;
 	}
 
@@ -251,29 +283,12 @@ static int nvm_init(struct nvm_dev *dev)
 		goto err;
 	}
 
-	/* register with device with a supported manager */
-	list_for_each_entry(mt, &nvm_mgrs, list) {
-		ret = mt->register_mgr(dev);
-		if (ret < 0)
-			goto err; /* initialization failed */
-		if (ret > 0) {
-			dev->mt = mt;
-			break; /* successfully initialized */
-		}
-	}
-
-	if (!ret) {
-		pr_info("nvm: no compatible manager found.\n");
-		return 0;
-	}
-
 	pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
 			dev->name, dev->sec_per_pg, dev->nr_planes,
 			dev->pgs_per_blk, dev->blks_per_lun, dev->nr_luns,
 			dev->nr_chnls);
 	return 0;
 err:
-	nvm_free(dev);
 	pr_err("nvm: failed to initialize nvm\n");
 	return ret;
 }
@@ -308,22 +323,27 @@ int nvm_register(struct request_queue *q, char *disk_name,
 	if (ret)
 		goto err_init;
 
-	down_write(&nvm_lock);
-	list_add(&dev->devices, &nvm_devices);
-	up_write(&nvm_lock);
+	if (dev->ops->max_phys_sect > 256) {
+		pr_info("nvm: max sectors supported is 256.\n");
+		ret = -EINVAL;
+		goto err_init;
+	}
 
 	if (dev->ops->max_phys_sect > 1) {
-		dev->ppalist_pool = dev->ops->create_dma_pool(dev->q,
-								"ppalist");
+		dev->ppalist_pool = dev->ops->create_dma_pool(dev, "ppalist");
 		if (!dev->ppalist_pool) {
 			pr_err("nvm: could not create ppa pool\n");
-			return -ENOMEM;
+			ret = -ENOMEM;
+			goto err_init;
 		}
-	} else if (dev->ops->max_phys_sect > 256) {
-		pr_info("nvm: max sectors supported is 256.\n");
-		return -EINVAL;
 	}
 
+	/* register device with a supported media manager */
+	down_write(&nvm_lock);
+	dev->mt = nvm_init_mgr(dev);
+	list_add(&dev->devices, &nvm_devices);
+	up_write(&nvm_lock);
+
 	return 0;
 err_init:
 	kfree(dev);
@@ -333,19 +353,22 @@ EXPORT_SYMBOL(nvm_register);
 
 void nvm_unregister(char *disk_name)
 {
-	struct nvm_dev *dev = nvm_find_nvm_dev(disk_name);
+	struct nvm_dev *dev;
 
+	down_write(&nvm_lock);
+	dev = nvm_find_nvm_dev(disk_name);
 	if (!dev) {
 		pr_err("nvm: could not find device %s to unregister\n",
 								disk_name);
+		up_write(&nvm_lock);
 		return;
 	}
 
-	nvm_exit(dev);
-
-	down_write(&nvm_lock);
 	list_del(&dev->devices);
 	up_write(&nvm_lock);
+
+	nvm_exit(dev);
+	kfree(dev);
 }
 EXPORT_SYMBOL(nvm_unregister);
 
@@ -358,38 +381,24 @@ static int nvm_create_target(struct nvm_dev *dev,
 {
 	struct nvm_ioctl_create_simple *s = &create->conf.s;
 	struct request_queue *tqueue;
-	struct nvmm_type *mt;
 	struct gendisk *tdisk;
 	struct nvm_tgt_type *tt;
 	struct nvm_target *t;
 	void *targetdata;
-	int ret = 0;
 
 	if (!dev->mt) {
-		/* register with device with a supported NVM manager */
-		list_for_each_entry(mt, &nvm_mgrs, list) {
-			ret = mt->register_mgr(dev);
-			if (ret < 0)
-				return ret; /* initialization failed */
-			if (ret > 0) {
-				dev->mt = mt;
-				break; /* successfully initialized */
-			}
-		}
-
-		if (!ret) {
-			pr_info("nvm: no compatible nvm manager found.\n");
-			return -ENODEV;
-		}
+		pr_info("nvm: device has no media manager registered.\n");
+		return -ENODEV;
 	}
 
+	down_write(&nvm_lock);
 	tt = nvm_find_target_type(create->tgttype);
 	if (!tt) {
 		pr_err("nvm: target type %s not found\n", create->tgttype);
+		up_write(&nvm_lock);
 		return -EINVAL;
 	}
 
-	down_write(&nvm_lock);
 	list_for_each_entry(t, &dev->online_targets, list) {
 		if (!strcmp(create->tgtname, t->disk->disk_name)) {
 			pr_err("nvm: target name already exists.\n");
@@ -457,11 +466,11 @@ static void nvm_remove_target(struct nvm_target *t)
 	lockdep_assert_held(&nvm_lock);
 
 	del_gendisk(tdisk);
+	blk_cleanup_queue(q);
+
 	if (tt->exit)
 		tt->exit(tdisk->private_data);
 
-	blk_cleanup_queue(q);
-
 	put_disk(tdisk);
 
 	list_del(&t->list);
@@ -473,7 +482,9 @@ static int __nvm_configure_create(struct nvm_ioctl_create *create)
 	struct nvm_dev *dev;
 	struct nvm_ioctl_create_simple *s;
 
+	down_write(&nvm_lock);
 	dev = nvm_find_nvm_dev(create->dev);
+	up_write(&nvm_lock);
 	if (!dev) {
 		pr_err("nvm: device not found\n");
 		return -EINVAL;
@@ -532,7 +543,9 @@ static int nvm_configure_show(const char *val)
 		return -EINVAL;
 	}
 
+	down_write(&nvm_lock);
 	dev = nvm_find_nvm_dev(devname);
+	up_write(&nvm_lock);
 	if (!dev) {
 		pr_err("nvm: device not found\n");
 		return -EINVAL;
@@ -541,7 +554,7 @@ static int nvm_configure_show(const char *val)
 	if (!dev->mt)
 		return 0;
 
-	dev->mt->free_blocks_print(dev);
+	dev->mt->lun_info_print(dev);
 
 	return 0;
 }
@@ -677,8 +690,10 @@ static long nvm_ioctl_info(struct file *file, void __user *arg)
 	info->tgtsize = tgt_iter;
 	up_write(&nvm_lock);
 
-	if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info)))
+	if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
+		kfree(info);
 		return -EFAULT;
+	}
 
 	kfree(info);
 	return 0;
@@ -721,8 +736,11 @@ static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
 
 	devices->nr_devices = i;
 
-	if (copy_to_user(arg, devices, sizeof(struct nvm_ioctl_get_devices)))
+	if (copy_to_user(arg, devices,
+			 sizeof(struct nvm_ioctl_get_devices))) {
+		kfree(devices);
 		return -EFAULT;
+	}
 
 	kfree(devices);
 	return 0;
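
The two ioctl hunks above fix the same leak: a failed copy_to_user() returned -EFAULT straight out of the function, skipping the kfree() of the kernel buffer, so the fix frees on both the error and success paths. A userspace-flavoured sketch of the pattern in plain C, with a trivially-succeeding copy_out() standing in for copy_to_user():

	#include <stdlib.h>
	#include <string.h>

	/* stand-in for copy_to_user(): 0 on success, nonzero on failure */
	static int copy_out(void *dst, const void *src, unsigned long n)
	{
		memcpy(dst, src, n);	/* always succeeds in this sketch */
		return 0;
	}

	int get_info(void *user_arg)
	{
		char *info = malloc(4096);

		if (!info)
			return -12;		/* -ENOMEM */
		/* ... fill info ... */
		if (copy_out(user_arg, info, 4096)) {
			free(info);		/* the fix: no early return without freeing */
			return -14;		/* -EFAULT */
		}
		free(info);
		return 0;
	}
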
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index ae1fb2bdc5f4..a54b339951a3 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -60,23 +60,27 @@ static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn)
 		lun->vlun.lun_id = i % dev->luns_per_chnl;
 		lun->vlun.chnl_id = i / dev->luns_per_chnl;
 		lun->vlun.nr_free_blocks = dev->blks_per_lun;
+		lun->vlun.nr_inuse_blocks = 0;
+		lun->vlun.nr_bad_blocks = 0;
 	}
 	return 0;
 }
 
-static int gennvm_block_bb(u32 lun_id, void *bb_bitmap, unsigned int nr_blocks,
+static int gennvm_block_bb(struct ppa_addr ppa, int nr_blocks, u8 *blks,
 								void *private)
 {
 	struct gen_nvm *gn = private;
-	struct gen_lun *lun = &gn->luns[lun_id];
+	struct nvm_dev *dev = gn->dev;
+	struct gen_lun *lun;
 	struct nvm_block *blk;
 	int i;
 
-	if (unlikely(bitmap_empty(bb_bitmap, nr_blocks)))
-		return 0;
+	lun = &gn->luns[(dev->luns_per_chnl * ppa.g.ch) + ppa.g.lun];
+
+	for (i = 0; i < nr_blocks; i++) {
+		if (blks[i] == 0)
+			continue;
 
-	i = -1;
-	while ((i = find_next_bit(bb_bitmap, nr_blocks, i + 1)) < nr_blocks) {
 		blk = &lun->vlun.blocks[i];
 		if (!blk) {
 			pr_err("gennvm: BB data is out of bounds.\n");
@@ -84,6 +88,7 @@ static int gennvm_block_bb(u32 lun_id, void *bb_bitmap, unsigned int nr_blocks,
 		}
 
 		list_move_tail(&blk->list, &lun->bb_list);
+		lun->vlun.nr_bad_blocks++;
 	}
 
 	return 0;
@@ -136,6 +141,7 @@ static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
 			list_move_tail(&blk->list, &lun->used_list);
 			blk->type = 1;
 			lun->vlun.nr_free_blocks--;
+			lun->vlun.nr_inuse_blocks++;
 		}
 	}
 
141 147
@@ -164,22 +170,32 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
 			block->id = cur_block_id++;
 
 			/* First block is reserved for device */
-			if (unlikely(lun_iter == 0 && blk_iter == 0))
+			if (unlikely(lun_iter == 0 && blk_iter == 0)) {
+				lun->vlun.nr_free_blocks--;
 				continue;
+			}
 
 			list_add_tail(&block->list, &lun->free_list);
 		}
 
 		if (dev->ops->get_bb_tbl) {
-			ret = dev->ops->get_bb_tbl(dev->q, lun->vlun.id,
-					dev->blks_per_lun, gennvm_block_bb, gn);
+			struct ppa_addr ppa;
+
+			ppa.ppa = 0;
+			ppa.g.ch = lun->vlun.chnl_id;
+			ppa.g.lun = lun->vlun.id;
+			ppa = generic_to_dev_addr(dev, ppa);
+
+			ret = dev->ops->get_bb_tbl(dev, ppa,
+						dev->blks_per_lun,
+						gennvm_block_bb, gn);
 			if (ret)
 				pr_err("gennvm: could not read BB table\n");
 		}
 	}
 
 	if (dev->ops->get_l2p_tbl) {
-		ret = dev->ops->get_l2p_tbl(dev->q, 0, dev->total_pages,
+		ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_pages,
 					gennvm_block_map, dev);
 		if (ret) {
 			pr_err("gennvm: could not read L2P table.\n");
@@ -190,15 +206,27 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
 	return 0;
 }
 
+static void gennvm_free(struct nvm_dev *dev)
+{
+	gennvm_blocks_free(dev);
+	gennvm_luns_free(dev);
+	kfree(dev->mp);
+	dev->mp = NULL;
+}
+
 static int gennvm_register(struct nvm_dev *dev)
 {
 	struct gen_nvm *gn;
 	int ret;
 
+	if (!try_module_get(THIS_MODULE))
+		return -ENODEV;
+
 	gn = kzalloc(sizeof(struct gen_nvm), GFP_KERNEL);
 	if (!gn)
 		return -ENOMEM;
 
+	gn->dev = dev;
 	gn->nr_luns = dev->nr_luns;
 	dev->mp = gn;
 
@@ -216,16 +244,15 @@ static int gennvm_register(struct nvm_dev *dev)
 
 	return 1;
 err:
-	kfree(gn);
+	gennvm_free(dev);
+	module_put(THIS_MODULE);
 	return ret;
 }
 
 static void gennvm_unregister(struct nvm_dev *dev)
 {
-	gennvm_blocks_free(dev);
-	gennvm_luns_free(dev);
-	kfree(dev->mp);
-	dev->mp = NULL;
+	gennvm_free(dev);
+	module_put(THIS_MODULE);
 }
 
 static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,
@@ -240,23 +267,21 @@ static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,
 	if (list_empty(&lun->free_list)) {
 		pr_err_ratelimited("gennvm: lun %u have no free pages available",
 								lun->vlun.id);
-		spin_unlock(&vlun->lock);
 		goto out;
 	}
 
-	while (!is_gc && lun->vlun.nr_free_blocks < lun->reserved_blocks) {
-		spin_unlock(&vlun->lock);
+	if (!is_gc && lun->vlun.nr_free_blocks < lun->reserved_blocks)
 		goto out;
-	}
 
 	blk = list_first_entry(&lun->free_list, struct nvm_block, list);
 	list_move_tail(&blk->list, &lun->used_list);
 	blk->type = 1;
 
 	lun->vlun.nr_free_blocks--;
+	lun->vlun.nr_inuse_blocks++;
 
-	spin_unlock(&vlun->lock);
 out:
+	spin_unlock(&vlun->lock);
 	return blk;
 }
 
@@ -271,16 +296,21 @@ static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
 	case 1:
 		list_move_tail(&blk->list, &lun->free_list);
 		lun->vlun.nr_free_blocks++;
+		lun->vlun.nr_inuse_blocks--;
 		blk->type = 0;
 		break;
 	case 2:
 		list_move_tail(&blk->list, &lun->bb_list);
+		lun->vlun.nr_bad_blocks++;
+		lun->vlun.nr_inuse_blocks--;
 		break;
 	default:
 		WARN_ON_ONCE(1);
 		pr_err("gennvm: erroneous block type (%lu -> %u)\n",
 							blk->id, blk->type);
 		list_move_tail(&blk->list, &lun->bb_list);
+		lun->vlun.nr_bad_blocks++;
+		lun->vlun.nr_inuse_blocks--;
 	}
 
 	spin_unlock(&vlun->lock);
@@ -292,10 +322,10 @@ static void gennvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
 
 	if (rqd->nr_pages > 1) {
 		for (i = 0; i < rqd->nr_pages; i++)
-			rqd->ppa_list[i] = addr_to_generic_mode(dev,
+			rqd->ppa_list[i] = dev_to_generic_addr(dev,
 							rqd->ppa_list[i]);
 	} else {
-		rqd->ppa_addr = addr_to_generic_mode(dev, rqd->ppa_addr);
+		rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr);
 	}
 }
 
301 331
@@ -305,10 +335,10 @@ static void gennvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
 
 	if (rqd->nr_pages > 1) {
 		for (i = 0; i < rqd->nr_pages; i++)
-			rqd->ppa_list[i] = generic_to_addr_mode(dev,
+			rqd->ppa_list[i] = generic_to_dev_addr(dev,
 							rqd->ppa_list[i]);
 	} else {
-		rqd->ppa_addr = generic_to_addr_mode(dev, rqd->ppa_addr);
+		rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
 	}
 }
 
314 344
@@ -321,7 +351,7 @@ static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
 	gennvm_generic_to_addr_mode(dev, rqd);
 
 	rqd->dev = dev;
-	return dev->ops->submit_io(dev->q, rqd);
+	return dev->ops->submit_io(dev, rqd);
 }
 
 static void gennvm_blk_set_type(struct nvm_dev *dev, struct ppa_addr *ppa,
@@ -354,10 +384,10 @@ static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
 {
 	int i;
 
-	if (!dev->ops->set_bb)
+	if (!dev->ops->set_bb_tbl)
 		return;
 
-	if (dev->ops->set_bb(dev->q, rqd, 1))
+	if (dev->ops->set_bb_tbl(dev, rqd, 1))
 		return;
 
 	gennvm_addr_to_generic_mode(dev, rqd);
@@ -425,7 +455,7 @@ static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
 
 	gennvm_generic_to_addr_mode(dev, &rqd);
 
-	ret = dev->ops->erase_block(dev->q, &rqd);
+	ret = dev->ops->erase_block(dev, &rqd);
 
 	if (plane_cnt)
 		nvm_dev_dma_free(dev, rqd.ppa_list, rqd.dma_ppa_list);
@@ -440,15 +470,24 @@ static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid)
 	return &gn->luns[lunid].vlun;
 }
 
-static void gennvm_free_blocks_print(struct nvm_dev *dev)
+static void gennvm_lun_info_print(struct nvm_dev *dev)
 {
 	struct gen_nvm *gn = dev->mp;
 	struct gen_lun *lun;
 	unsigned int i;
 
-	gennvm_for_each_lun(gn, lun, i)
-		pr_info("%s: lun%8u\t%u\n",
-				dev->name, i, lun->vlun.nr_free_blocks);
+
+	gennvm_for_each_lun(gn, lun, i) {
+		spin_lock(&lun->vlun.lock);
+
+		pr_info("%s: lun%8u\t%u\t%u\t%u\n",
+				dev->name, i,
+				lun->vlun.nr_free_blocks,
+				lun->vlun.nr_inuse_blocks,
+				lun->vlun.nr_bad_blocks);
+
+		spin_unlock(&lun->vlun.lock);
+	}
 }
 
 static struct nvmm_type gennvm = {
@@ -466,7 +505,7 @@ static struct nvmm_type gennvm = {
466 .erase_blk = gennvm_erase_blk, 505 .erase_blk = gennvm_erase_blk,
467 506
468 .get_lun = gennvm_get_lun, 507 .get_lun = gennvm_get_lun,
469 .free_blocks_print = gennvm_free_blocks_print, 508 .lun_info_print = gennvm_lun_info_print,
470}; 509};
471 510
472static int __init gennvm_module_init(void) 511static int __init gennvm_module_init(void)
diff --git a/drivers/lightnvm/gennvm.h b/drivers/lightnvm/gennvm.h
index d23bd3501ddc..9c24b5b32dac 100644
--- a/drivers/lightnvm/gennvm.h
+++ b/drivers/lightnvm/gennvm.h
@@ -35,6 +35,8 @@ struct gen_lun {
 };
 
 struct gen_nvm {
+	struct nvm_dev *dev;
+
 	int nr_luns;
 	struct gen_lun *luns;
 };
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index 7ba64c87ba1c..134e4faba482 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -123,12 +123,42 @@ static u64 block_to_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
 	return blk->id * rrpc->dev->pgs_per_blk;
 }
 
+static struct ppa_addr linear_to_generic_addr(struct nvm_dev *dev,
+							struct ppa_addr r)
+{
+	struct ppa_addr l;
+	int secs, pgs, blks, luns;
+	sector_t ppa = r.ppa;
+
+	l.ppa = 0;
+
+	div_u64_rem(ppa, dev->sec_per_pg, &secs);
+	l.g.sec = secs;
+
+	sector_div(ppa, dev->sec_per_pg);
+	div_u64_rem(ppa, dev->sec_per_blk, &pgs);
+	l.g.pg = pgs;
+
+	sector_div(ppa, dev->pgs_per_blk);
+	div_u64_rem(ppa, dev->blks_per_lun, &blks);
+	l.g.blk = blks;
+
+	sector_div(ppa, dev->blks_per_lun);
+	div_u64_rem(ppa, dev->luns_per_chnl, &luns);
+	l.g.lun = luns;
+
+	sector_div(ppa, dev->luns_per_chnl);
+	l.g.ch = ppa;
+
+	return l;
+}
+
 static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_dev *dev, u64 addr)
 {
 	struct ppa_addr paddr;
 
 	paddr.ppa = addr;
-	return __linear_to_generic_addr(dev, paddr);
+	return linear_to_generic_addr(dev, paddr);
 }
 
 /* requires lun->lock taken */
@@ -152,7 +182,7 @@ static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
152 struct nvm_block *blk; 182 struct nvm_block *blk;
153 struct rrpc_block *rblk; 183 struct rrpc_block *rblk;
154 184
155 blk = nvm_get_blk(rrpc->dev, rlun->parent, 0); 185 blk = nvm_get_blk(rrpc->dev, rlun->parent, flags);
156 if (!blk) 186 if (!blk)
157 return NULL; 187 return NULL;
158 188
@@ -172,6 +202,20 @@ static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
172 nvm_put_blk(rrpc->dev, rblk->parent); 202 nvm_put_blk(rrpc->dev, rblk->parent);
173} 203}
174 204
205static void rrpc_put_blks(struct rrpc *rrpc)
206{
207 struct rrpc_lun *rlun;
208 int i;
209
210 for (i = 0; i < rrpc->nr_luns; i++) {
211 rlun = &rrpc->luns[i];
212 if (rlun->cur)
213 rrpc_put_blk(rrpc, rlun->cur);
214 if (rlun->gc_cur)
215 rrpc_put_blk(rrpc, rlun->gc_cur);
216 }
217}
218
175static struct rrpc_lun *get_next_lun(struct rrpc *rrpc) 219static struct rrpc_lun *get_next_lun(struct rrpc *rrpc)
176{ 220{
177 int next = atomic_inc_return(&rrpc->next_lun); 221 int next = atomic_inc_return(&rrpc->next_lun);
@@ -972,7 +1016,7 @@ static int rrpc_map_init(struct rrpc *rrpc)
972 return 0; 1016 return 0;
973 1017
974 /* Bring up the mapping table from device */ 1018 /* Bring up the mapping table from device */
975 ret = dev->ops->get_l2p_tbl(dev->q, 0, dev->total_pages, 1019 ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_pages,
976 rrpc_l2p_update, rrpc); 1020 rrpc_l2p_update, rrpc);
977 if (ret) { 1021 if (ret) {
978 pr_err("nvm: rrpc: could not read L2P table.\n"); 1022 pr_err("nvm: rrpc: could not read L2P table.\n");
@@ -1194,18 +1238,21 @@ static int rrpc_luns_configure(struct rrpc *rrpc)
1194 1238
1195 rblk = rrpc_get_blk(rrpc, rlun, 0); 1239 rblk = rrpc_get_blk(rrpc, rlun, 0);
1196 if (!rblk) 1240 if (!rblk)
1197 return -EINVAL; 1241 goto err;
1198 1242
1199 rrpc_set_lun_cur(rlun, rblk); 1243 rrpc_set_lun_cur(rlun, rblk);
1200 1244
1201 /* Emergency gc block */ 1245 /* Emergency gc block */
1202 rblk = rrpc_get_blk(rrpc, rlun, 1); 1246 rblk = rrpc_get_blk(rrpc, rlun, 1);
1203 if (!rblk) 1247 if (!rblk)
1204 return -EINVAL; 1248 goto err;
1205 rlun->gc_cur = rblk; 1249 rlun->gc_cur = rblk;
1206 } 1250 }
1207 1251
1208 return 0; 1252 return 0;
1253err:
1254 rrpc_put_blks(rrpc);
1255 return -EINVAL;
1209} 1256}
1210 1257
1211static struct nvm_tgt_type tt_rrpc; 1258static struct nvm_tgt_type tt_rrpc;
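
rrpc_luns_configure() now jumps to an unwind label instead of returning on the first failed rrpc_get_blk(), so blocks already reserved for earlier LUNs are handed back via the new rrpc_put_blks(). A small sketch of that acquire-all-or-release-all shape, with illustrative stand-ins for the get/put helpers:

#include <stdio.h>
#include <stdlib.h>

#define NR_LUNS 4

/* Illustrative stand-ins for rrpc_get_blk()/rrpc_put_blk(). */
static void *get_blk(int i)
{
	if (i == 2)            /* simulate allocation failure on LUN 2 */
		return NULL;
	return malloc(16);
}

static void put_blk(void *b) { free(b); }

static int configure(void *cur[NR_LUNS])
{
	int i;

	for (i = 0; i < NR_LUNS; i++) {
		cur[i] = get_blk(i);
		if (!cur[i])
			goto err;
	}
	return 0;
err:
	/* Unwind: release everything acquired before the failure. */
	while (--i >= 0)
		put_blk(cur[i]);
	return -1;
}

int main(void)
{
	void *cur[NR_LUNS] = { 0 };

	printf("configure: %d\n", configure(cur));
	return 0;
}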
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 917d47e290ae..3147c8d09ea8 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -112,7 +112,8 @@ struct iv_tcw_private {
112 * and encrypts / decrypts at the same time. 112 * and encrypts / decrypts at the same time.
113 */ 113 */
114enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID, 114enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
115 DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD }; 115 DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD,
116 DM_CRYPT_EXIT_THREAD};
116 117
117/* 118/*
118 * The fields in here must be read only after initialization. 119 * The fields in here must be read only after initialization.
@@ -1203,20 +1204,18 @@ continue_locked:
1203 if (!RB_EMPTY_ROOT(&cc->write_tree)) 1204 if (!RB_EMPTY_ROOT(&cc->write_tree))
1204 goto pop_from_list; 1205 goto pop_from_list;
1205 1206
1207 if (unlikely(test_bit(DM_CRYPT_EXIT_THREAD, &cc->flags))) {
1208 spin_unlock_irq(&cc->write_thread_wait.lock);
1209 break;
1210 }
1211
1206 __set_current_state(TASK_INTERRUPTIBLE); 1212 __set_current_state(TASK_INTERRUPTIBLE);
1207 __add_wait_queue(&cc->write_thread_wait, &wait); 1213 __add_wait_queue(&cc->write_thread_wait, &wait);
1208 1214
1209 spin_unlock_irq(&cc->write_thread_wait.lock); 1215 spin_unlock_irq(&cc->write_thread_wait.lock);
1210 1216
1211 if (unlikely(kthread_should_stop())) {
1212 set_task_state(current, TASK_RUNNING);
1213 remove_wait_queue(&cc->write_thread_wait, &wait);
1214 break;
1215 }
1216
1217 schedule(); 1217 schedule();
1218 1218
1219 set_task_state(current, TASK_RUNNING);
1220 spin_lock_irq(&cc->write_thread_wait.lock); 1219 spin_lock_irq(&cc->write_thread_wait.lock);
1221 __remove_wait_queue(&cc->write_thread_wait, &wait); 1220 __remove_wait_queue(&cc->write_thread_wait, &wait);
1222 goto continue_locked; 1221 goto continue_locked;
@@ -1531,8 +1530,13 @@ static void crypt_dtr(struct dm_target *ti)
1531 if (!cc) 1530 if (!cc)
1532 return; 1531 return;
1533 1532
1534 if (cc->write_thread) 1533 if (cc->write_thread) {
1534 spin_lock_irq(&cc->write_thread_wait.lock);
1535 set_bit(DM_CRYPT_EXIT_THREAD, &cc->flags);
1536 wake_up_locked(&cc->write_thread_wait);
1537 spin_unlock_irq(&cc->write_thread_wait.lock);
1535 kthread_stop(cc->write_thread); 1538 kthread_stop(cc->write_thread);
1539 }
1536 1540
1537 if (cc->io_queue) 1541 if (cc->io_queue)
1538 destroy_workqueue(cc->io_queue); 1542 destroy_workqueue(cc->io_queue);
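
The dm-crypt write thread used to be stopped with kthread_stop() alone, which could race with the thread parking itself on the waitqueue; the fix sets DM_CRYPT_EXIT_THREAD and issues the wakeup under the waitqueue lock, so the thread either sees the flag before sleeping or is woken afterwards. A minimal pthread sketch of the same set-flag-and-signal-under-one-lock discipline (all names are illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wake = PTHREAD_COND_INITIALIZER;
static bool exit_thread;

static void *writer(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	for (;;) {
		/* Checked under the same lock the stopper takes, so a
		 * wakeup issued before we sleep cannot be missed. */
		if (exit_thread)
			break;
		pthread_cond_wait(&wake, &lock);
	}
	pthread_mutex_unlock(&lock);
	printf("writer: exiting\n");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, writer, NULL);

	pthread_mutex_lock(&lock);
	exit_thread = true;          /* set the flag ...             */
	pthread_cond_signal(&wake);  /* ... and wake, under one lock */
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	return 0;
}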
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index aaa6caa46a9f..cfa29f574c2a 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1537,32 +1537,34 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
1537 struct block_device **bdev, fmode_t *mode) 1537 struct block_device **bdev, fmode_t *mode)
1538{ 1538{
1539 struct multipath *m = ti->private; 1539 struct multipath *m = ti->private;
1540 struct pgpath *pgpath;
1541 unsigned long flags; 1540 unsigned long flags;
1542 int r; 1541 int r;
1543 1542
1544 r = 0;
1545
1546 spin_lock_irqsave(&m->lock, flags); 1543 spin_lock_irqsave(&m->lock, flags);
1547 1544
1548 if (!m->current_pgpath) 1545 if (!m->current_pgpath)
1549 __choose_pgpath(m, 0); 1546 __choose_pgpath(m, 0);
1550 1547
1551 pgpath = m->current_pgpath; 1548 if (m->current_pgpath) {
1552 1549 if (!m->queue_io) {
1553 if (pgpath) { 1550 *bdev = m->current_pgpath->path.dev->bdev;
1554 *bdev = pgpath->path.dev->bdev; 1551 *mode = m->current_pgpath->path.dev->mode;
1555 *mode = pgpath->path.dev->mode; 1552 r = 0;
1553 } else {
1554 /* pg_init has not started or completed */
1555 r = -ENOTCONN;
1556 }
1557 } else {
1558 /* No path is available */
1559 if (m->queue_if_no_path)
1560 r = -ENOTCONN;
1561 else
1562 r = -EIO;
1556 } 1563 }
1557 1564
1558 if ((pgpath && m->queue_io) || (!pgpath && m->queue_if_no_path))
1559 r = -ENOTCONN;
1560 else if (!*bdev)
1561 r = -EIO;
1562
1563 spin_unlock_irqrestore(&m->lock, flags); 1565 spin_unlock_irqrestore(&m->lock, flags);
1564 1566
1565 if (r == -ENOTCONN && !fatal_signal_pending(current)) { 1567 if (r == -ENOTCONN) {
1566 spin_lock_irqsave(&m->lock, flags); 1568 spin_lock_irqsave(&m->lock, flags);
1567 if (!m->current_pg) { 1569 if (!m->current_pg) {
1568 /* Path status changed, redo selection */ 1570 /* Path status changed, redo selection */
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 1fa45695b68a..c219a053c7f6 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1207,6 +1207,12 @@ static int __reserve_metadata_snap(struct dm_pool_metadata *pmd)
1207 dm_block_t held_root; 1207 dm_block_t held_root;
1208 1208
1209 /* 1209 /*
 1210 * We commit to ensure that the btree roots we are about to
 1211 * increment are up to date.
1212 */
1213 __commit_transaction(pmd);
1214
1215 /*
1210 * Copy the superblock. 1216 * Copy the superblock.
1211 */ 1217 */
1212 dm_sm_inc_block(pmd->metadata_sm, THIN_SUPERBLOCK_LOCATION); 1218 dm_sm_inc_block(pmd->metadata_sm, THIN_SUPERBLOCK_LOCATION);
@@ -1538,7 +1544,7 @@ static int __remove(struct dm_thin_device *td, dm_block_t block)
1538static int __remove_range(struct dm_thin_device *td, dm_block_t begin, dm_block_t end) 1544static int __remove_range(struct dm_thin_device *td, dm_block_t begin, dm_block_t end)
1539{ 1545{
1540 int r; 1546 int r;
1541 unsigned count; 1547 unsigned count, total_count = 0;
1542 struct dm_pool_metadata *pmd = td->pmd; 1548 struct dm_pool_metadata *pmd = td->pmd;
1543 dm_block_t keys[1] = { td->id }; 1549 dm_block_t keys[1] = { td->id };
1544 __le64 value; 1550 __le64 value;
@@ -1561,11 +1567,29 @@ static int __remove_range(struct dm_thin_device *td, dm_block_t begin, dm_block_
1561 if (r) 1567 if (r)
1562 return r; 1568 return r;
1563 1569
1564 r = dm_btree_remove_leaves(&pmd->bl_info, mapping_root, &begin, end, &mapping_root, &count); 1570 /*
 1565 if (r) 1571 * dm_btree_remove_leaves() stops at the first unmapped entry, so
 1566 return r; 1572 * we have to loop round finding mapped ranges.
1573 */
1574 while (begin < end) {
1575 r = dm_btree_lookup_next(&pmd->bl_info, mapping_root, &begin, &begin, &value);
1576 if (r == -ENODATA)
1577 break;
1578
1579 if (r)
1580 return r;
1581
1582 if (begin >= end)
1583 break;
1584
1585 r = dm_btree_remove_leaves(&pmd->bl_info, mapping_root, &begin, end, &mapping_root, &count);
1586 if (r)
1587 return r;
1588
1589 total_count += count;
1590 }
1567 1591
1568 td->mapped_blocks -= count; 1592 td->mapped_blocks -= total_count;
1569 td->changed = 1; 1593 td->changed = 1;
1570 1594
1571 /* 1595 /*
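
Since dm_btree_remove_leaves() gives up at the first unmapped entry, __remove_range() now alternates dm_btree_lookup_next() with dm_btree_remove_leaves(), accumulating per-run counts into total_count. A toy sketch over a sparse array shows the shape of that loop; lookup_next() and remove_run() are illustrative stand-ins:

#include <stdbool.h>
#include <stdio.h>

#define NR 16
static bool mapped[NR] = {
	[2] = true, [3] = true,                 /* run 1 */
	[7] = true, [8] = true, [9] = true,     /* run 2 */
};

/* Stand-in for dm_btree_lookup_next(): first mapped key >= *begin. */
static int lookup_next(unsigned *begin)
{
	while (*begin < NR && !mapped[*begin])
		(*begin)++;
	return *begin < NR ? 0 : -1;            /* -1 ~ -ENODATA */
}

/* Stand-in for dm_btree_remove_leaves(): removes the contiguous
 * mapped run starting at *begin but stops at the first hole. */
static unsigned remove_run(unsigned *begin, unsigned end)
{
	unsigned count = 0;

	while (*begin < end && mapped[*begin]) {
		mapped[*begin] = false;
		(*begin)++;
		count++;
	}
	return count;
}

int main(void)
{
	unsigned begin = 0, end = NR, total_count = 0;

	while (begin < end) {
		if (lookup_next(&begin) < 0)
			break;                  /* no more mapped entries */
		if (begin >= end)
			break;
		total_count += remove_run(&begin, end);
	}
	printf("removed %u blocks\n", total_count);   /* prints 5 */
	return 0;
}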
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 3897b90bd462..63903a5a5d9e 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -2432,6 +2432,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
2432 case PM_WRITE: 2432 case PM_WRITE:
2433 if (old_mode != new_mode) 2433 if (old_mode != new_mode)
2434 notify_of_pool_mode_change(pool, "write"); 2434 notify_of_pool_mode_change(pool, "write");
2435 pool->pf.error_if_no_space = pt->requested_pf.error_if_no_space;
2435 dm_pool_metadata_read_write(pool->pmd); 2436 dm_pool_metadata_read_write(pool->pmd);
2436 pool->process_bio = process_bio; 2437 pool->process_bio = process_bio;
2437 pool->process_discard = process_discard_bio; 2438 pool->process_discard = process_discard_bio;
@@ -4249,10 +4250,9 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
4249{ 4250{
4250 struct thin_c *tc = ti->private; 4251 struct thin_c *tc = ti->private;
4251 struct pool *pool = tc->pool; 4252 struct pool *pool = tc->pool;
4252 struct queue_limits *pool_limits = dm_get_queue_limits(pool->pool_md);
4253 4253
4254 if (!pool_limits->discard_granularity) 4254 if (!pool->pf.discard_enabled)
4255 return; /* pool's discard support is disabled */ 4255 return;
4256 4256
4257 limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT; 4257 limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
4258 limits->max_discard_sectors = 2048 * 1024 * 16; /* 16G */ 4258 limits->max_discard_sectors = 2048 * 1024 * 16; /* 16G */
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 6e15f3565892..5df40480228b 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -591,7 +591,7 @@ retry:
591 591
592out: 592out:
593 dm_put_live_table(md, *srcu_idx); 593 dm_put_live_table(md, *srcu_idx);
594 if (r == -ENOTCONN) { 594 if (r == -ENOTCONN && !fatal_signal_pending(current)) {
595 msleep(10); 595 msleep(10);
596 goto retry; 596 goto retry;
597 } 597 }
@@ -603,9 +603,10 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
603{ 603{
604 struct mapped_device *md = bdev->bd_disk->private_data; 604 struct mapped_device *md = bdev->bd_disk->private_data;
605 struct dm_target *tgt; 605 struct dm_target *tgt;
606 struct block_device *tgt_bdev = NULL;
606 int srcu_idx, r; 607 int srcu_idx, r;
607 608
608 r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx); 609 r = dm_get_live_table_for_ioctl(md, &tgt, &tgt_bdev, &mode, &srcu_idx);
609 if (r < 0) 610 if (r < 0)
610 return r; 611 return r;
611 612
@@ -620,7 +621,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
620 goto out; 621 goto out;
621 } 622 }
622 623
623 r = __blkdev_driver_ioctl(bdev, mode, cmd, arg); 624 r = __blkdev_driver_ioctl(tgt_bdev, mode, cmd, arg);
624out: 625out:
625 dm_put_live_table(md, srcu_idx); 626 dm_put_live_table(md, srcu_idx);
626 return r; 627 return r;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 807095f4c793..61aacab424cf 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -314,8 +314,8 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
314 */ 314 */
315void mddev_suspend(struct mddev *mddev) 315void mddev_suspend(struct mddev *mddev)
316{ 316{
317 BUG_ON(mddev->suspended); 317 if (mddev->suspended++)
318 mddev->suspended = 1; 318 return;
319 synchronize_rcu(); 319 synchronize_rcu();
320 wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0); 320 wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
321 mddev->pers->quiesce(mddev, 1); 321 mddev->pers->quiesce(mddev, 1);
@@ -326,7 +326,8 @@ EXPORT_SYMBOL_GPL(mddev_suspend);
326 326
327void mddev_resume(struct mddev *mddev) 327void mddev_resume(struct mddev *mddev)
328{ 328{
329 mddev->suspended = 0; 329 if (--mddev->suspended)
330 return;
330 wake_up(&mddev->sb_wait); 331 wake_up(&mddev->sb_wait);
331 mddev->pers->quiesce(mddev, 0); 332 mddev->pers->quiesce(mddev, 0);
332 333
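
mddev_suspend()/mddev_resume() now nest: 'suspended' becomes a counter, only the 0-to-1 transition quiesces the array and only the 1-to-0 transition wakes it back up. A minimal sketch of the refcounted-suspend pattern:

#include <stdio.h>

static int suspended;

static void quiesce(int on) { printf("quiesce(%d)\n", on); }

static void suspend(void)
{
	if (suspended++)        /* already suspended: just nest */
		return;
	quiesce(1);             /* first suspender does the work */
}

static void resume(void)
{
	if (--suspended)        /* still nested: nothing to do */
		return;
	quiesce(0);             /* last resumer undoes it */
}

int main(void)
{
	suspend();   /* quiesce(1) */
	suspend();   /* nested, no-op */
	resume();    /* nested, no-op */
	resume();    /* quiesce(0) */
	return 0;
}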
@@ -1652,7 +1653,7 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
1652 rdev->journal_tail = le64_to_cpu(sb->journal_tail); 1653 rdev->journal_tail = le64_to_cpu(sb->journal_tail);
1653 if (mddev->recovery_cp == MaxSector) 1654 if (mddev->recovery_cp == MaxSector)
1654 set_bit(MD_JOURNAL_CLEAN, &mddev->flags); 1655 set_bit(MD_JOURNAL_CLEAN, &mddev->flags);
1655 rdev->raid_disk = mddev->raid_disks; 1656 rdev->raid_disk = 0;
1656 break; 1657 break;
1657 default: 1658 default:
1658 rdev->saved_raid_disk = role; 1659 rdev->saved_raid_disk = role;
@@ -2773,6 +2774,7 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len)
2773 /* Activating a spare .. or possibly reactivating 2774 /* Activating a spare .. or possibly reactivating
2774 * if we ever get bitmaps working here. 2775 * if we ever get bitmaps working here.
2775 */ 2776 */
2777 int err;
2776 2778
2777 if (rdev->raid_disk != -1) 2779 if (rdev->raid_disk != -1)
2778 return -EBUSY; 2780 return -EBUSY;
@@ -2794,9 +2796,15 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len)
2794 rdev->saved_raid_disk = -1; 2796 rdev->saved_raid_disk = -1;
2795 clear_bit(In_sync, &rdev->flags); 2797 clear_bit(In_sync, &rdev->flags);
2796 clear_bit(Bitmap_sync, &rdev->flags); 2798 clear_bit(Bitmap_sync, &rdev->flags);
2797 remove_and_add_spares(rdev->mddev, rdev); 2799 err = rdev->mddev->pers->
2798 if (rdev->raid_disk == -1) 2800 hot_add_disk(rdev->mddev, rdev);
2799 return -EBUSY; 2801 if (err) {
2802 rdev->raid_disk = -1;
2803 return err;
2804 } else
2805 sysfs_notify_dirent_safe(rdev->sysfs_state);
2806 if (sysfs_link_rdev(rdev->mddev, rdev))
2807 /* failure here is OK */;
2800 /* don't wakeup anyone, leave that to userspace. */ 2808 /* don't wakeup anyone, leave that to userspace. */
2801 } else { 2809 } else {
2802 if (slot >= rdev->mddev->raid_disks && 2810 if (slot >= rdev->mddev->raid_disks &&
@@ -4318,8 +4326,7 @@ action_store(struct mddev *mddev, const char *page, size_t len)
4318 } 4326 }
4319 mddev_unlock(mddev); 4327 mddev_unlock(mddev);
4320 } 4328 }
4321 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 4329 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4322 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
4323 return -EBUSY; 4330 return -EBUSY;
4324 else if (cmd_match(page, "resync")) 4331 else if (cmd_match(page, "resync"))
4325 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4332 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
@@ -4332,8 +4339,12 @@ action_store(struct mddev *mddev, const char *page, size_t len)
4332 return -EINVAL; 4339 return -EINVAL;
4333 err = mddev_lock(mddev); 4340 err = mddev_lock(mddev);
4334 if (!err) { 4341 if (!err) {
4335 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4342 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4336 err = mddev->pers->start_reshape(mddev); 4343 err = -EBUSY;
4344 else {
4345 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4346 err = mddev->pers->start_reshape(mddev);
4347 }
4337 mddev_unlock(mddev); 4348 mddev_unlock(mddev);
4338 } 4349 }
4339 if (err) 4350 if (err)
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 2bea51edfab7..ca0b643fe3c1 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -566,7 +566,9 @@ static inline char * mdname (struct mddev * mddev)
566static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev) 566static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev)
567{ 567{
568 char nm[20]; 568 char nm[20];
569 if (!test_bit(Replacement, &rdev->flags) && mddev->kobj.sd) { 569 if (!test_bit(Replacement, &rdev->flags) &&
570 !test_bit(Journal, &rdev->flags) &&
571 mddev->kobj.sd) {
570 sprintf(nm, "rd%d", rdev->raid_disk); 572 sprintf(nm, "rd%d", rdev->raid_disk);
571 return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm); 573 return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
572 } else 574 } else
@@ -576,7 +578,9 @@ static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev)
576static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev) 578static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev)
577{ 579{
578 char nm[20]; 580 char nm[20];
579 if (!test_bit(Replacement, &rdev->flags) && mddev->kobj.sd) { 581 if (!test_bit(Replacement, &rdev->flags) &&
582 !test_bit(Journal, &rdev->flags) &&
583 mddev->kobj.sd) {
580 sprintf(nm, "rd%d", rdev->raid_disk); 584 sprintf(nm, "rd%d", rdev->raid_disk);
581 sysfs_remove_link(&mddev->kobj, nm); 585 sysfs_remove_link(&mddev->kobj, nm);
582 } 586 }
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index c573402033b2..b1ced58eb5e1 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -63,6 +63,11 @@ int lower_bound(struct btree_node *n, uint64_t key)
63 return bsearch(n, key, 0); 63 return bsearch(n, key, 0);
64} 64}
65 65
66static int upper_bound(struct btree_node *n, uint64_t key)
67{
68 return bsearch(n, key, 1);
69}
70
66void inc_children(struct dm_transaction_manager *tm, struct btree_node *n, 71void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
67 struct dm_btree_value_type *vt) 72 struct dm_btree_value_type *vt)
68{ 73{
@@ -252,6 +257,16 @@ static void pop_frame(struct del_stack *s)
252 dm_tm_unlock(s->tm, f->b); 257 dm_tm_unlock(s->tm, f->b);
253} 258}
254 259
260static void unlock_all_frames(struct del_stack *s)
261{
262 struct frame *f;
263
264 while (unprocessed_frames(s)) {
265 f = s->spine + s->top--;
266 dm_tm_unlock(s->tm, f->b);
267 }
268}
269
255int dm_btree_del(struct dm_btree_info *info, dm_block_t root) 270int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
256{ 271{
257 int r; 272 int r;
@@ -308,9 +323,13 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
308 pop_frame(s); 323 pop_frame(s);
309 } 324 }
310 } 325 }
311
312out: 326out:
327 if (r) {
 328 /* clean up all remaining frames of the del_stack */
329 unlock_all_frames(s);
330 }
313 kfree(s); 331 kfree(s);
332
314 return r; 333 return r;
315} 334}
316EXPORT_SYMBOL_GPL(dm_btree_del); 335EXPORT_SYMBOL_GPL(dm_btree_del);
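
On failure, dm_btree_del() now drains whatever is left on its explicit traversal stack via unlock_all_frames() rather than leaking locked blocks. A toy sketch of that error-path drain; the frame type and unlock_block() are illustrative:

#include <stdio.h>

#define MAX_SPINE 64

struct frame { int b; };           /* stand-in for a locked block */

struct del_stack {
	int top;                   /* -1 when empty */
	struct frame spine[MAX_SPINE];
};

static void unlock_block(int b) { printf("unlock %d\n", b); }

static int unprocessed_frames(struct del_stack *s) { return s->top >= 0; }

/* Mirror of unlock_all_frames(): pop and unlock everything left. */
static void unlock_all_frames(struct del_stack *s)
{
	while (unprocessed_frames(s)) {
		struct frame *f = s->spine + s->top--;

		unlock_block(f->b);
	}
}

int main(void)
{
	struct del_stack s = { .top = 2,
			       .spine = { { .b = 10 }, { .b = 11 }, { .b = 12 } } };
	int r = -5;                /* simulate an error partway through */

	if (r)
		unlock_all_frames(&s);   /* nothing stays locked */
	return 0;
}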
@@ -392,6 +411,82 @@ int dm_btree_lookup(struct dm_btree_info *info, dm_block_t root,
392} 411}
393EXPORT_SYMBOL_GPL(dm_btree_lookup); 412EXPORT_SYMBOL_GPL(dm_btree_lookup);
394 413
414static int dm_btree_lookup_next_single(struct dm_btree_info *info, dm_block_t root,
415 uint64_t key, uint64_t *rkey, void *value_le)
416{
417 int r, i;
418 uint32_t flags, nr_entries;
419 struct dm_block *node;
420 struct btree_node *n;
421
422 r = bn_read_lock(info, root, &node);
423 if (r)
424 return r;
425
426 n = dm_block_data(node);
427 flags = le32_to_cpu(n->header.flags);
428 nr_entries = le32_to_cpu(n->header.nr_entries);
429
430 if (flags & INTERNAL_NODE) {
431 i = lower_bound(n, key);
432 if (i < 0 || i >= nr_entries) {
433 r = -ENODATA;
434 goto out;
435 }
436
437 r = dm_btree_lookup_next_single(info, value64(n, i), key, rkey, value_le);
438 if (r == -ENODATA && i < (nr_entries - 1)) {
439 i++;
440 r = dm_btree_lookup_next_single(info, value64(n, i), key, rkey, value_le);
441 }
442
443 } else {
444 i = upper_bound(n, key);
445 if (i < 0 || i >= nr_entries) {
446 r = -ENODATA;
447 goto out;
448 }
449
450 *rkey = le64_to_cpu(n->keys[i]);
451 memcpy(value_le, value_ptr(n, i), info->value_type.size);
452 }
453out:
454 dm_tm_unlock(info->tm, node);
455 return r;
456}
457
458int dm_btree_lookup_next(struct dm_btree_info *info, dm_block_t root,
459 uint64_t *keys, uint64_t *rkey, void *value_le)
460{
461 unsigned level;
462 int r = -ENODATA;
463 __le64 internal_value_le;
464 struct ro_spine spine;
465
466 init_ro_spine(&spine, info);
467 for (level = 0; level < info->levels - 1u; level++) {
468 r = btree_lookup_raw(&spine, root, keys[level],
469 lower_bound, rkey,
470 &internal_value_le, sizeof(uint64_t));
471 if (r)
472 goto out;
473
474 if (*rkey != keys[level]) {
475 r = -ENODATA;
476 goto out;
477 }
478
479 root = le64_to_cpu(internal_value_le);
480 }
481
482 r = dm_btree_lookup_next_single(info, root, keys[level], rkey, value_le);
483out:
484 exit_ro_spine(&spine);
485 return r;
486}
487
488EXPORT_SYMBOL_GPL(dm_btree_lookup_next);
489
395/* 490/*
396 * Splits a node by creating a sibling node and shifting half the nodes 491 * Splits a node by creating a sibling node and shifting half the nodes
397 * contents across. Assumes there is a parent node, and it has room for 492 * contents across. Assumes there is a parent node, and it has room for
@@ -473,8 +568,10 @@ static int btree_split_sibling(struct shadow_spine *s, unsigned parent_index,
473 568
474 r = insert_at(sizeof(__le64), pn, parent_index + 1, 569 r = insert_at(sizeof(__le64), pn, parent_index + 1,
475 le64_to_cpu(rn->keys[0]), &location); 570 le64_to_cpu(rn->keys[0]), &location);
476 if (r) 571 if (r) {
572 unlock_block(s->info, right);
477 return r; 573 return r;
574 }
478 575
479 if (key < le64_to_cpu(rn->keys[0])) { 576 if (key < le64_to_cpu(rn->keys[0])) {
480 unlock_block(s->info, right); 577 unlock_block(s->info, right);
diff --git a/drivers/md/persistent-data/dm-btree.h b/drivers/md/persistent-data/dm-btree.h
index 11d8cf78621d..c74301fa5a37 100644
--- a/drivers/md/persistent-data/dm-btree.h
+++ b/drivers/md/persistent-data/dm-btree.h
@@ -110,6 +110,13 @@ int dm_btree_lookup(struct dm_btree_info *info, dm_block_t root,
110 uint64_t *keys, void *value_le); 110 uint64_t *keys, void *value_le);
111 111
112/* 112/*
113 * Tries to find the first key where the bottom level key is greater than
114 * or equal to the given key. Useful for skipping empty sections of the btree.
115 */
116int dm_btree_lookup_next(struct dm_btree_info *info, dm_block_t root,
117 uint64_t *keys, uint64_t *rkey, void *value_le);
118
119/*
113 * Insertion (or overwrite an existing value). O(ln(n)) 120 * Insertion (or overwrite an existing value). O(ln(n))
114 */ 121 */
115int dm_btree_insert(struct dm_btree_info *info, dm_block_t root, 122int dm_btree_insert(struct dm_btree_info *info, dm_block_t root,
@@ -135,9 +142,10 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
135 uint64_t *keys, dm_block_t *new_root); 142 uint64_t *keys, dm_block_t *new_root);
136 143
137/* 144/*
138 * Removes values between 'keys' and keys2, where keys2 is keys with the 145 * Removes a _contiguous_ run of values starting from 'keys' and not
139 * final key replaced with 'end_key'. 'end_key' is the one-past-the-end 146 * reaching keys2 (where keys2 is keys with the final key replaced with
140 * value. 'keys' may be altered. 147 * 'end_key'). 'end_key' is the one-past-the-end value. 'keys' may be
148 * altered.
141 */ 149 */
142int dm_btree_remove_leaves(struct dm_btree_info *info, dm_block_t root, 150int dm_btree_remove_leaves(struct dm_btree_info *info, dm_block_t root,
143 uint64_t *keys, uint64_t end_key, 151 uint64_t *keys, uint64_t end_key,
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index 53091295fce9..fca6dbcf9a47 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -136,7 +136,7 @@ static int brb_push(struct bop_ring_buffer *brb,
136 return 0; 136 return 0;
137} 137}
138 138
139static int brb_pop(struct bop_ring_buffer *brb, struct block_op *result) 139static int brb_peek(struct bop_ring_buffer *brb, struct block_op *result)
140{ 140{
141 struct block_op *bop; 141 struct block_op *bop;
142 142
@@ -147,6 +147,17 @@ static int brb_pop(struct bop_ring_buffer *brb, struct block_op *result)
147 result->type = bop->type; 147 result->type = bop->type;
148 result->block = bop->block; 148 result->block = bop->block;
149 149
150 return 0;
151}
152
153static int brb_pop(struct bop_ring_buffer *brb)
154{
155 struct block_op *bop;
156
157 if (brb_empty(brb))
158 return -ENODATA;
159
160 bop = brb->bops + brb->begin;
150 brb->begin = brb_next(brb, brb->begin); 161 brb->begin = brb_next(brb, brb->begin);
151 162
152 return 0; 163 return 0;
@@ -211,7 +222,7 @@ static int apply_bops(struct sm_metadata *smm)
211 while (!brb_empty(&smm->uncommitted)) { 222 while (!brb_empty(&smm->uncommitted)) {
212 struct block_op bop; 223 struct block_op bop;
213 224
214 r = brb_pop(&smm->uncommitted, &bop); 225 r = brb_peek(&smm->uncommitted, &bop);
215 if (r) { 226 if (r) {
216 DMERR("bug in bop ring buffer"); 227 DMERR("bug in bop ring buffer");
217 break; 228 break;
@@ -220,6 +231,8 @@ static int apply_bops(struct sm_metadata *smm)
220 r = commit_bop(smm, &bop); 231 r = commit_bop(smm, &bop);
221 if (r) 232 if (r)
222 break; 233 break;
234
235 brb_pop(&smm->uncommitted);
223 } 236 }
224 237
225 return r; 238 return r;
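
apply_bops() is restructured to peek at the head operation, try to commit it, and pop only on success, so a failed commit_bop() leaves the op queued for a later retry instead of silently dropping it. A compact sketch of the peek/commit/pop discipline on a ring buffer:

#include <stdio.h>

#define RING 8
static int ring[RING];
static unsigned begin, end;     /* end - begin = fill level */

static int brb_empty(void) { return begin == end; }

static int brb_peek(int *out)
{
	if (brb_empty())
		return -1;      /* ~ -ENODATA */
	*out = ring[begin % RING];
	return 0;
}

static void brb_pop(void) { begin++; }

/* Pretend the commit fails for odd values. */
static int commit_bop(int v) { return (v & 1) ? -1 : 0; }

int main(void)
{
	int v, r = 0;

	ring[end++ % RING] = 2;
	ring[end++ % RING] = 3;

	while (!brb_empty()) {
		if (brb_peek(&v))
			break;
		r = commit_bop(v);
		if (r)
			break;  /* op 3 stays queued for a retry */
		brb_pop();      /* only consume after a successful commit */
	}
	printf("stopped with r=%d, %u op(s) left\n", r, end - begin);
	return 0;
}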
@@ -683,7 +696,6 @@ static struct dm_space_map bootstrap_ops = {
683static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks) 696static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
684{ 697{
685 int r, i; 698 int r, i;
686 enum allocation_event ev;
687 struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); 699 struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
688 dm_block_t old_len = smm->ll.nr_blocks; 700 dm_block_t old_len = smm->ll.nr_blocks;
689 701
@@ -705,11 +717,12 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
705 * allocate any new blocks. 717 * allocate any new blocks.
706 */ 718 */
707 do { 719 do {
708 for (i = old_len; !r && i < smm->begin; i++) { 720 for (i = old_len; !r && i < smm->begin; i++)
709 r = sm_ll_inc(&smm->ll, i, &ev); 721 r = add_bop(smm, BOP_INC, i);
710 if (r) 722
711 goto out; 723 if (r)
712 } 724 goto out;
725
713 old_len = smm->begin; 726 old_len = smm->begin;
714 727
715 r = apply_bops(smm); 728 r = apply_bops(smm);
@@ -754,7 +767,6 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
754{ 767{
755 int r; 768 int r;
756 dm_block_t i; 769 dm_block_t i;
757 enum allocation_event ev;
758 struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); 770 struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
759 771
760 smm->begin = superblock + 1; 772 smm->begin = superblock + 1;
@@ -782,7 +794,7 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
782 * allocated blocks that they were built from. 794 * allocated blocks that they were built from.
783 */ 795 */
784 for (i = superblock; !r && i < smm->begin; i++) 796 for (i = superblock; !r && i < smm->begin; i++)
785 r = sm_ll_inc(&smm->ll, i, &ev); 797 r = add_bop(smm, BOP_INC, i);
786 798
787 if (r) 799 if (r)
788 return r; 800 return r;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 41d70bc9ba2f..84e597e1c489 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1946,6 +1946,8 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
1946 1946
1947 first = i; 1947 first = i;
1948 fbio = r10_bio->devs[i].bio; 1948 fbio = r10_bio->devs[i].bio;
1949 fbio->bi_iter.bi_size = r10_bio->sectors << 9;
1950 fbio->bi_iter.bi_idx = 0;
1949 1951
1950 vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9); 1952 vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9);
1951 /* now find blocks with errors */ 1953 /* now find blocks with errors */
@@ -1989,7 +1991,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
1989 bio_reset(tbio); 1991 bio_reset(tbio);
1990 1992
1991 tbio->bi_vcnt = vcnt; 1993 tbio->bi_vcnt = vcnt;
1992 tbio->bi_iter.bi_size = r10_bio->sectors << 9; 1994 tbio->bi_iter.bi_size = fbio->bi_iter.bi_size;
1993 tbio->bi_rw = WRITE; 1995 tbio->bi_rw = WRITE;
1994 tbio->bi_private = r10_bio; 1996 tbio->bi_private = r10_bio;
1995 tbio->bi_iter.bi_sector = r10_bio->devs[i].addr; 1997 tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
index 8616fa8193bc..c2e60b4f292d 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.c
+++ b/drivers/media/pci/ivtv/ivtv-driver.c
@@ -805,11 +805,11 @@ static void ivtv_init_struct2(struct ivtv *itv)
805{ 805{
806 int i; 806 int i;
807 807
808 for (i = 0; i < IVTV_CARD_MAX_VIDEO_INPUTS - 1; i++) 808 for (i = 0; i < IVTV_CARD_MAX_VIDEO_INPUTS; i++)
809 if (itv->card->video_inputs[i].video_type == 0) 809 if (itv->card->video_inputs[i].video_type == 0)
810 break; 810 break;
811 itv->nof_inputs = i; 811 itv->nof_inputs = i;
812 for (i = 0; i < IVTV_CARD_MAX_AUDIO_INPUTS - 1; i++) 812 for (i = 0; i < IVTV_CARD_MAX_AUDIO_INPUTS; i++)
813 if (itv->card->audio_inputs[i].audio_type == 0) 813 if (itv->card->audio_inputs[i].audio_type == 0)
814 break; 814 break;
815 itv->nof_audio_inputs = i; 815 itv->nof_audio_inputs = i;
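
The old ivtv loops scanned only IVTV_CARD_MAX_*_INPUTS - 1 slots, so a card with every input slot populated was counted one short; the sentinel check belongs inside a loop that covers all slots. A two-loop demonstration of the off-by-one:

#include <stdio.h>

#define MAX_INPUTS 6

int main(void)
{
	/* All six slots populated (video_type != 0). */
	int video_type[MAX_INPUTS] = { 1, 1, 1, 1, 1, 1 };
	int i;

	for (i = 0; i < MAX_INPUTS - 1; i++)   /* old bound: never looks at slot 5 */
		if (video_type[i] == 0)
			break;
	printf("old count: %d\n", i);          /* 5 */

	for (i = 0; i < MAX_INPUTS; i++)       /* fixed bound */
		if (video_type[i] == 0)
			break;
	printf("new count: %d\n", i);          /* 6 */
	return 0;
}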
diff --git a/drivers/media/usb/airspy/airspy.c b/drivers/media/usb/airspy/airspy.c
index fcbb49757614..565a59310747 100644
--- a/drivers/media/usb/airspy/airspy.c
+++ b/drivers/media/usb/airspy/airspy.c
@@ -134,7 +134,7 @@ struct airspy {
134 int urbs_submitted; 134 int urbs_submitted;
135 135
136 /* USB control message buffer */ 136 /* USB control message buffer */
137 #define BUF_SIZE 24 137 #define BUF_SIZE 128
138 u8 buf[BUF_SIZE]; 138 u8 buf[BUF_SIZE];
139 139
140 /* Current configuration */ 140 /* Current configuration */
diff --git a/drivers/media/usb/hackrf/hackrf.c b/drivers/media/usb/hackrf/hackrf.c
index e05bfec90f46..0fe5cb2c260c 100644
--- a/drivers/media/usb/hackrf/hackrf.c
+++ b/drivers/media/usb/hackrf/hackrf.c
@@ -24,6 +24,15 @@
24#include <media/videobuf2-v4l2.h> 24#include <media/videobuf2-v4l2.h>
25#include <media/videobuf2-vmalloc.h> 25#include <media/videobuf2-vmalloc.h>
26 26
27/*
 28 * The Avago MGA-81563 RF amplifier used here can be destroyed quite easily
 29 * by too strong a signal or by transmitting into a bad antenna.
 30 * To be safe, keep the RF gain control in the 'grabbed' state by default.
31 */
32static bool hackrf_enable_rf_gain_ctrl;
33module_param_named(enable_rf_gain_ctrl, hackrf_enable_rf_gain_ctrl, bool, 0644);
34MODULE_PARM_DESC(enable_rf_gain_ctrl, "enable RX/TX RF amplifier control (warn: could damage amplifier)");
35
27/* HackRF USB API commands (from HackRF Library) */ 36/* HackRF USB API commands (from HackRF Library) */
28enum { 37enum {
29 CMD_SET_TRANSCEIVER_MODE = 0x01, 38 CMD_SET_TRANSCEIVER_MODE = 0x01,
@@ -1451,6 +1460,7 @@ static int hackrf_probe(struct usb_interface *intf,
1451 dev_err(dev->dev, "Could not initialize controls\n"); 1460 dev_err(dev->dev, "Could not initialize controls\n");
1452 goto err_v4l2_ctrl_handler_free_rx; 1461 goto err_v4l2_ctrl_handler_free_rx;
1453 } 1462 }
1463 v4l2_ctrl_grab(dev->rx_rf_gain, !hackrf_enable_rf_gain_ctrl);
1454 v4l2_ctrl_handler_setup(&dev->rx_ctrl_handler); 1464 v4l2_ctrl_handler_setup(&dev->rx_ctrl_handler);
1455 1465
1456 /* Register controls for transmitter */ 1466 /* Register controls for transmitter */
@@ -1471,6 +1481,7 @@ static int hackrf_probe(struct usb_interface *intf,
1471 dev_err(dev->dev, "Could not initialize controls\n"); 1481 dev_err(dev->dev, "Could not initialize controls\n");
1472 goto err_v4l2_ctrl_handler_free_tx; 1482 goto err_v4l2_ctrl_handler_free_tx;
1473 } 1483 }
1484 v4l2_ctrl_grab(dev->tx_rf_gain, !hackrf_enable_rf_gain_ctrl);
1474 v4l2_ctrl_handler_setup(&dev->tx_ctrl_handler); 1485 v4l2_ctrl_handler_setup(&dev->tx_ctrl_handler);
1475 1486
1476 /* Register the v4l2_device structure */ 1487 /* Register the v4l2_device structure */
@@ -1530,7 +1541,7 @@ err_v4l2_ctrl_handler_free_rx:
1530err_kfree: 1541err_kfree:
1531 kfree(dev); 1542 kfree(dev);
1532err: 1543err:
1533 dev_dbg(dev->dev, "failed=%d\n", ret); 1544 dev_dbg(&intf->dev, "failed=%d\n", ret);
1534 return ret; 1545 return ret;
1535} 1546}
1536 1547
diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c
index e87459f6d686..acd1460cf787 100644
--- a/drivers/memory/fsl_ifc.c
+++ b/drivers/memory/fsl_ifc.c
@@ -22,6 +22,7 @@
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/kernel.h> 23#include <linux/kernel.h>
24#include <linux/compiler.h> 24#include <linux/compiler.h>
25#include <linux/sched.h>
25#include <linux/spinlock.h> 26#include <linux/spinlock.h>
26#include <linux/types.h> 27#include <linux/types.h>
27#include <linux/slab.h> 28#include <linux/slab.h>
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index d2e75c88f4d2..f40909793490 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -497,6 +497,7 @@ static u64 calculate_sr(struct cxl_context *ctx)
497{ 497{
498 u64 sr = 0; 498 u64 sr = 0;
499 499
500 set_endian(sr);
500 if (ctx->master) 501 if (ctx->master)
501 sr |= CXL_PSL_SR_An_MP; 502 sr |= CXL_PSL_SR_An_MP;
502 if (mfspr(SPRN_LPCR) & LPCR_TC) 503 if (mfspr(SPRN_LPCR) & LPCR_TC)
@@ -506,7 +507,6 @@ static u64 calculate_sr(struct cxl_context *ctx)
506 sr |= CXL_PSL_SR_An_HV; 507 sr |= CXL_PSL_SR_An_HV;
507 } else { 508 } else {
508 sr |= CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R; 509 sr |= CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R;
509 set_endian(sr);
510 sr &= ~(CXL_PSL_SR_An_HV); 510 sr &= ~(CXL_PSL_SR_An_HV);
511 if (!test_tsk_thread_flag(current, TIF_32BIT)) 511 if (!test_tsk_thread_flag(current, TIF_32BIT))
512 sr |= CXL_PSL_SR_An_SF; 512 sr |= CXL_PSL_SR_An_SF;
diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c
index 669c3452f278..9ed6038e47d2 100644
--- a/drivers/mtd/ofpart.c
+++ b/drivers/mtd/ofpart.c
@@ -46,10 +46,18 @@ static int parse_ofpart_partitions(struct mtd_info *master,
46 46
47 ofpart_node = of_get_child_by_name(mtd_node, "partitions"); 47 ofpart_node = of_get_child_by_name(mtd_node, "partitions");
48 if (!ofpart_node) { 48 if (!ofpart_node) {
49 pr_warn("%s: 'partitions' subnode not found on %s. Trying to parse direct subnodes as partitions.\n", 49 /*
50 master->name, mtd_node->full_name); 50 * We might get here even when ofpart isn't used at all (e.g.,
51 * when using another parser), so don't be louder than
52 * KERN_DEBUG
53 */
54 pr_debug("%s: 'partitions' subnode not found on %s. Trying to parse direct subnodes as partitions.\n",
55 master->name, mtd_node->full_name);
51 ofpart_node = mtd_node; 56 ofpart_node = mtd_node;
52 dedicated = false; 57 dedicated = false;
58 } else if (!of_device_is_compatible(ofpart_node, "fixed-partitions")) {
59 /* The 'partitions' subnode might be used by another parser */
60 return 0;
53 } 61 }
54 62
55 /* First count the subnodes */ 63 /* First count the subnodes */
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index b077e43b5ba9..c4cb15a3098c 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -236,7 +236,7 @@ int ubi_debugfs_init(void)
236 236
237 dfs_rootdir = debugfs_create_dir("ubi", NULL); 237 dfs_rootdir = debugfs_create_dir("ubi", NULL);
238 if (IS_ERR_OR_NULL(dfs_rootdir)) { 238 if (IS_ERR_OR_NULL(dfs_rootdir)) {
239 int err = dfs_rootdir ? -ENODEV : PTR_ERR(dfs_rootdir); 239 int err = dfs_rootdir ? PTR_ERR(dfs_rootdir) : -ENODEV;
240 240
241 pr_err("UBI error: cannot create \"ubi\" debugfs directory, error %d\n", 241 pr_err("UBI error: cannot create \"ubi\" debugfs directory, error %d\n",
242 err); 242 err);
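
The UBI debugfs fix untangles a swapped ternary: a NULL pointer carries no encoded errno, so the -ENODEV fallback belongs to the NULL case and PTR_ERR() to the error-pointer case. A tiny sketch with a userspace stand-in for PTR_ERR():

#include <stdio.h>

#define ENODEV 19

/* Userspace stand-in for the kernel's PTR_ERR(). */
static long ptr_err(const void *p) { return (long)p; }

static int dirs_err(void *dfs_rootdir)
{
	/* NULL carries no errno, so fall back to -ENODEV; only a
	 * non-NULL error pointer can be decoded with PTR_ERR(). */
	return dfs_rootdir ? (int)ptr_err(dfs_rootdir) : -ENODEV;
}

int main(void)
{
	printf("%d\n", dirs_err(NULL));         /* -19; the old code yielded 0 here */
	printf("%d\n", dirs_err((void *)-12L)); /* -12 */
	return 0;
}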
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 1fc23e48fe8e..10cf3b549959 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -1299,7 +1299,7 @@ static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
1299 if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err)) 1299 if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
1300 goto exit; 1300 goto exit;
1301 1301
1302 crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_EC_HDR_SIZE_CRC); 1302 crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
1303 hdr_crc = be32_to_cpu(vid_hdr->hdr_crc); 1303 hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);
1304 if (hdr_crc != crc) { 1304 if (hdr_crc != crc) {
1305 ubi_err(ubi, "bad VID header CRC at PEB %d, calculated %#08x, read %#08x", 1305 ubi_err(ubi, "bad VID header CRC at PEB %d, calculated %#08x, read %#08x",
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index eb4489f9082f..56065632a5b8 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -603,6 +603,7 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
603 return 0; 603 return 0;
604} 604}
605 605
606static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk);
606/** 607/**
607 * do_sync_erase - run the erase worker synchronously. 608 * do_sync_erase - run the erase worker synchronously.
608 * @ubi: UBI device description object 609 * @ubi: UBI device description object
@@ -615,20 +616,16 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
615static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, 616static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
616 int vol_id, int lnum, int torture) 617 int vol_id, int lnum, int torture)
617{ 618{
618 struct ubi_work *wl_wrk; 619 struct ubi_work wl_wrk;
619 620
620 dbg_wl("sync erase of PEB %i", e->pnum); 621 dbg_wl("sync erase of PEB %i", e->pnum);
621 622
622 wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS); 623 wl_wrk.e = e;
623 if (!wl_wrk) 624 wl_wrk.vol_id = vol_id;
624 return -ENOMEM; 625 wl_wrk.lnum = lnum;
625 626 wl_wrk.torture = torture;
626 wl_wrk->e = e;
627 wl_wrk->vol_id = vol_id;
628 wl_wrk->lnum = lnum;
629 wl_wrk->torture = torture;
630 627
631 return erase_worker(ubi, wl_wrk, 0); 628 return __erase_worker(ubi, &wl_wrk);
632} 629}
633 630
634/** 631/**
@@ -1014,7 +1011,7 @@ out_unlock:
1014} 1011}
1015 1012
1016/** 1013/**
1017 * erase_worker - physical eraseblock erase worker function. 1014 * __erase_worker - physical eraseblock erase worker function.
1018 * @ubi: UBI device description object 1015 * @ubi: UBI device description object
1019 * @wl_wrk: the work object 1016 * @wl_wrk: the work object
1020 * @shutdown: non-zero if the worker has to free memory and exit 1017 * @shutdown: non-zero if the worker has to free memory and exit
@@ -1025,8 +1022,7 @@ out_unlock:
1025 * needed. Returns zero in case of success and a negative error code in case of 1022 * needed. Returns zero in case of success and a negative error code in case of
1026 * failure. 1023 * failure.
1027 */ 1024 */
1028static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, 1025static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
1029 int shutdown)
1030{ 1026{
1031 struct ubi_wl_entry *e = wl_wrk->e; 1027 struct ubi_wl_entry *e = wl_wrk->e;
1032 int pnum = e->pnum; 1028 int pnum = e->pnum;
@@ -1034,21 +1030,11 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1034 int lnum = wl_wrk->lnum; 1030 int lnum = wl_wrk->lnum;
1035 int err, available_consumed = 0; 1031 int err, available_consumed = 0;
1036 1032
1037 if (shutdown) {
1038 dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
1039 kfree(wl_wrk);
1040 wl_entry_destroy(ubi, e);
1041 return 0;
1042 }
1043
1044 dbg_wl("erase PEB %d EC %d LEB %d:%d", 1033 dbg_wl("erase PEB %d EC %d LEB %d:%d",
1045 pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum); 1034 pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);
1046 1035
1047 err = sync_erase(ubi, e, wl_wrk->torture); 1036 err = sync_erase(ubi, e, wl_wrk->torture);
1048 if (!err) { 1037 if (!err) {
1049 /* Fine, we've erased it successfully */
1050 kfree(wl_wrk);
1051
1052 spin_lock(&ubi->wl_lock); 1038 spin_lock(&ubi->wl_lock);
1053 wl_tree_add(e, &ubi->free); 1039 wl_tree_add(e, &ubi->free);
1054 ubi->free_count++; 1040 ubi->free_count++;
@@ -1066,7 +1052,6 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1066 } 1052 }
1067 1053
1068 ubi_err(ubi, "failed to erase PEB %d, error %d", pnum, err); 1054 ubi_err(ubi, "failed to erase PEB %d, error %d", pnum, err);
1069 kfree(wl_wrk);
1070 1055
1071 if (err == -EINTR || err == -ENOMEM || err == -EAGAIN || 1056 if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
1072 err == -EBUSY) { 1057 err == -EBUSY) {
@@ -1075,6 +1060,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1075 /* Re-schedule the LEB for erasure */ 1060 /* Re-schedule the LEB for erasure */
1076 err1 = schedule_erase(ubi, e, vol_id, lnum, 0); 1061 err1 = schedule_erase(ubi, e, vol_id, lnum, 0);
1077 if (err1) { 1062 if (err1) {
1063 wl_entry_destroy(ubi, e);
1078 err = err1; 1064 err = err1;
1079 goto out_ro; 1065 goto out_ro;
1080 } 1066 }
@@ -1150,6 +1136,25 @@ out_ro:
1150 return err; 1136 return err;
1151} 1137}
1152 1138
1139static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1140 int shutdown)
1141{
1142 int ret;
1143
1144 if (shutdown) {
1145 struct ubi_wl_entry *e = wl_wrk->e;
1146
1147 dbg_wl("cancel erasure of PEB %d EC %d", e->pnum, e->ec);
1148 kfree(wl_wrk);
1149 wl_entry_destroy(ubi, e);
1150 return 0;
1151 }
1152
1153 ret = __erase_worker(ubi, wl_wrk);
1154 kfree(wl_wrk);
1155 return ret;
1156}
1157
1153/** 1158/**
1154 * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system. 1159 * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
1155 * @ubi: UBI device description object 1160 * @ubi: UBI device description object
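
do_sync_erase() previously allocated a ubi_work just to run the worker inline and have it freed there; now the erase body lives in __erase_worker(), the queued erase_worker() wrapper owns and frees the heap work item, and the sync path passes a stack copy. A small sketch of that wrapper split (work struct and body are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct work { int pnum; };

/* Core body: knows nothing about how the work was allocated. */
static int __erase_worker(struct work *w)
{
	printf("erase PEB %d\n", w->pnum);
	return 0;
}

/* Async path: owns and frees the heap-allocated work item. */
static int erase_worker(struct work *w)
{
	int ret = __erase_worker(w);

	free(w);
	return ret;
}

/* Sync path: a stack copy, nothing to allocate or free. */
static int do_sync_erase(int pnum)
{
	struct work w = { .pnum = pnum };

	return __erase_worker(&w);
}

int main(void)
{
	struct work *w = malloc(sizeof(*w));

	w->pnum = 7;
	erase_worker(w);        /* queued path */
	do_sync_erase(8);       /* synchronous path, no allocation */
	return 0;
}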
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index 57dadd52b428..1deb8ff90a89 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -501,8 +501,6 @@ static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status)
501 cf->data[2] |= CAN_ERR_PROT_FORM; 501 cf->data[2] |= CAN_ERR_PROT_FORM;
502 else if (status & SER) 502 else if (status & SER)
503 cf->data[2] |= CAN_ERR_PROT_STUFF; 503 cf->data[2] |= CAN_ERR_PROT_STUFF;
504 else
505 cf->data[2] |= CAN_ERR_PROT_UNSPEC;
506 } 504 }
507 505
508 priv->can.state = state; 506 priv->can.state = state;
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 5d214d135332..f91b094288da 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -962,7 +962,6 @@ static int c_can_handle_bus_err(struct net_device *dev,
962 * type of the last error to occur on the CAN bus 962 * type of the last error to occur on the CAN bus
963 */ 963 */
964 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; 964 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
965 cf->data[2] |= CAN_ERR_PROT_UNSPEC;
966 965
967 switch (lec_type) { 966 switch (lec_type) {
968 case LEC_STUFF_ERROR: 967 case LEC_STUFF_ERROR:
@@ -975,8 +974,7 @@ static int c_can_handle_bus_err(struct net_device *dev,
975 break; 974 break;
976 case LEC_ACK_ERROR: 975 case LEC_ACK_ERROR:
977 netdev_dbg(dev, "ack error\n"); 976 netdev_dbg(dev, "ack error\n");
978 cf->data[3] |= (CAN_ERR_PROT_LOC_ACK | 977 cf->data[3] = CAN_ERR_PROT_LOC_ACK;
979 CAN_ERR_PROT_LOC_ACK_DEL);
980 break; 978 break;
981 case LEC_BIT1_ERROR: 979 case LEC_BIT1_ERROR:
982 netdev_dbg(dev, "bit1 error\n"); 980 netdev_dbg(dev, "bit1 error\n");
@@ -988,8 +986,7 @@ static int c_can_handle_bus_err(struct net_device *dev,
988 break; 986 break;
989 case LEC_CRC_ERROR: 987 case LEC_CRC_ERROR:
990 netdev_dbg(dev, "CRC error\n"); 988 netdev_dbg(dev, "CRC error\n");
991 cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ | 989 cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
992 CAN_ERR_PROT_LOC_CRC_DEL);
993 break; 990 break;
994 default: 991 default:
995 break; 992 break;
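
The recurring change across the CAN drivers in this series is cf->data[3] = ... instead of |=: data[3] carries a single error-location code, not a bitmask, and the dropped CAN_ERR_PROT_UNSPEC / CAN_ERR_PROT_LOC_UNSPEC are defined as 0x00, so OR-ing them in was always a no-op. A short sketch of filling the protocol-violation bytes (the non-zero location value below is a placeholder; the real codes live in <linux/can/error.h>):

#include <stdint.h>
#include <stdio.h>

/* UNSPEC really is 0, which is why OR-ing it into
 * data[2]/data[3] never changed anything. */
#define CAN_ERR_PROT_UNSPEC      0x00
#define CAN_ERR_PROT_FORM        0x02
#define CAN_ERR_PROT_LOC_UNSPEC  0x00
#define CAN_ERR_PROT_LOC_ACK     0x19  /* placeholder value for the sketch */

struct can_err_frame { uint8_t data[8]; };

int main(void)
{
	struct can_err_frame cf = { { 0 } };

	cf.data[2] |= CAN_ERR_PROT_UNSPEC;   /* no-op: 0x00 */
	cf.data[2] |= CAN_ERR_PROT_FORM;     /* error type is a bitmask: OR is right */
	cf.data[3] = CAN_ERR_PROT_LOC_ACK;   /* location is a single code: assign */

	printf("data[2]=%#x data[3]=%#x\n", cf.data[2], cf.data[3]);
	return 0;
}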
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index 70a8cbb29e75..1e37313054f3 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -578,7 +578,7 @@ static int cc770_err(struct net_device *dev, u8 status)
578 cf->data[2] |= CAN_ERR_PROT_BIT0; 578 cf->data[2] |= CAN_ERR_PROT_BIT0;
579 break; 579 break;
580 case STAT_LEC_CRC: 580 case STAT_LEC_CRC:
581 cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ; 581 cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
582 break; 582 break;
583 } 583 }
584 } 584 }
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 868fe945e35a..41c0fc9f3b14 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -535,13 +535,13 @@ static void do_bus_err(struct net_device *dev,
535 if (reg_esr & FLEXCAN_ESR_ACK_ERR) { 535 if (reg_esr & FLEXCAN_ESR_ACK_ERR) {
536 netdev_dbg(dev, "ACK_ERR irq\n"); 536 netdev_dbg(dev, "ACK_ERR irq\n");
537 cf->can_id |= CAN_ERR_ACK; 537 cf->can_id |= CAN_ERR_ACK;
538 cf->data[3] |= CAN_ERR_PROT_LOC_ACK; 538 cf->data[3] = CAN_ERR_PROT_LOC_ACK;
539 tx_errors = 1; 539 tx_errors = 1;
540 } 540 }
541 if (reg_esr & FLEXCAN_ESR_CRC_ERR) { 541 if (reg_esr & FLEXCAN_ESR_CRC_ERR) {
542 netdev_dbg(dev, "CRC_ERR irq\n"); 542 netdev_dbg(dev, "CRC_ERR irq\n");
543 cf->data[2] |= CAN_ERR_PROT_BIT; 543 cf->data[2] |= CAN_ERR_PROT_BIT;
544 cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ; 544 cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
545 rx_errors = 1; 545 rx_errors = 1;
546 } 546 }
547 if (reg_esr & FLEXCAN_ESR_FRM_ERR) { 547 if (reg_esr & FLEXCAN_ESR_FRM_ERR) {
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index c1e85368a198..5d04f5464faf 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1096,7 +1096,6 @@ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg)
1096 cf->data[2] |= CAN_ERR_PROT_STUFF; 1096 cf->data[2] |= CAN_ERR_PROT_STUFF;
1097 break; 1097 break;
1098 default: 1098 default:
1099 cf->data[2] |= CAN_ERR_PROT_UNSPEC;
1100 cf->data[3] = ecc & ECC_SEG; 1099 cf->data[3] = ecc & ECC_SEG;
1101 break; 1100 break;
1102 } 1101 }
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index ef655177bb5e..39cf911f7a1e 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -487,7 +487,6 @@ static int m_can_handle_lec_err(struct net_device *dev,
487 * type of the last error to occur on the CAN bus 487 * type of the last error to occur on the CAN bus
488 */ 488 */
489 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; 489 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
490 cf->data[2] |= CAN_ERR_PROT_UNSPEC;
491 490
492 switch (lec_type) { 491 switch (lec_type) {
493 case LEC_STUFF_ERROR: 492 case LEC_STUFF_ERROR:
@@ -500,8 +499,7 @@ static int m_can_handle_lec_err(struct net_device *dev,
500 break; 499 break;
501 case LEC_ACK_ERROR: 500 case LEC_ACK_ERROR:
502 netdev_dbg(dev, "ack error\n"); 501 netdev_dbg(dev, "ack error\n");
503 cf->data[3] |= (CAN_ERR_PROT_LOC_ACK | 502 cf->data[3] = CAN_ERR_PROT_LOC_ACK;
504 CAN_ERR_PROT_LOC_ACK_DEL);
505 break; 503 break;
506 case LEC_BIT1_ERROR: 504 case LEC_BIT1_ERROR:
507 netdev_dbg(dev, "bit1 error\n"); 505 netdev_dbg(dev, "bit1 error\n");
@@ -513,8 +511,7 @@ static int m_can_handle_lec_err(struct net_device *dev,
513 break; 511 break;
514 case LEC_CRC_ERROR: 512 case LEC_CRC_ERROR:
515 netdev_dbg(dev, "CRC error\n"); 513 netdev_dbg(dev, "CRC error\n");
516 cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ | 514 cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
517 CAN_ERR_PROT_LOC_CRC_DEL);
518 break; 515 break;
519 default: 516 default:
520 break; 517 break;
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index e187ca783da0..c1317889d3d8 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -559,8 +559,7 @@ static void pch_can_error(struct net_device *ndev, u32 status)
559 stats->rx_errors++; 559 stats->rx_errors++;
560 break; 560 break;
561 case PCH_CRC_ERR: 561 case PCH_CRC_ERR:
562 cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ | 562 cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
563 CAN_ERR_PROT_LOC_CRC_DEL;
564 priv->can.can_stats.bus_error++; 563 priv->can.can_stats.bus_error++;
565 stats->rx_errors++; 564 stats->rx_errors++;
566 break; 565 break;
diff --git a/drivers/net/can/rcar_can.c b/drivers/net/can/rcar_can.c
index 7bd54191f962..bc46be39549d 100644
--- a/drivers/net/can/rcar_can.c
+++ b/drivers/net/can/rcar_can.c
@@ -241,17 +241,16 @@ static void rcar_can_error(struct net_device *ndev)
241 u8 ecsr; 241 u8 ecsr;
242 242
243 netdev_dbg(priv->ndev, "Bus error interrupt:\n"); 243 netdev_dbg(priv->ndev, "Bus error interrupt:\n");
244 if (skb) { 244 if (skb)
245 cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT; 245 cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
246 cf->data[2] = CAN_ERR_PROT_UNSPEC; 246
247 }
248 ecsr = readb(&priv->regs->ecsr); 247 ecsr = readb(&priv->regs->ecsr);
249 if (ecsr & RCAR_CAN_ECSR_ADEF) { 248 if (ecsr & RCAR_CAN_ECSR_ADEF) {
250 netdev_dbg(priv->ndev, "ACK Delimiter Error\n"); 249 netdev_dbg(priv->ndev, "ACK Delimiter Error\n");
251 tx_errors++; 250 tx_errors++;
252 writeb(~RCAR_CAN_ECSR_ADEF, &priv->regs->ecsr); 251 writeb(~RCAR_CAN_ECSR_ADEF, &priv->regs->ecsr);
253 if (skb) 252 if (skb)
254 cf->data[3] |= CAN_ERR_PROT_LOC_ACK_DEL; 253 cf->data[3] = CAN_ERR_PROT_LOC_ACK_DEL;
255 } 254 }
256 if (ecsr & RCAR_CAN_ECSR_BE0F) { 255 if (ecsr & RCAR_CAN_ECSR_BE0F) {
257 netdev_dbg(priv->ndev, "Bit Error (dominant)\n"); 256 netdev_dbg(priv->ndev, "Bit Error (dominant)\n");
@@ -272,7 +271,7 @@ static void rcar_can_error(struct net_device *ndev)
272 rx_errors++; 271 rx_errors++;
273 writeb(~RCAR_CAN_ECSR_CEF, &priv->regs->ecsr); 272 writeb(~RCAR_CAN_ECSR_CEF, &priv->regs->ecsr);
274 if (skb) 273 if (skb)
275 cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ; 274 cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
276 } 275 }
277 if (ecsr & RCAR_CAN_ECSR_AEF) { 276 if (ecsr & RCAR_CAN_ECSR_AEF) {
278 netdev_dbg(priv->ndev, "ACK Error\n"); 277 netdev_dbg(priv->ndev, "ACK Error\n");
@@ -280,7 +279,7 @@ static void rcar_can_error(struct net_device *ndev)
280 writeb(~RCAR_CAN_ECSR_AEF, &priv->regs->ecsr); 279 writeb(~RCAR_CAN_ECSR_AEF, &priv->regs->ecsr);
281 if (skb) { 280 if (skb) {
282 cf->can_id |= CAN_ERR_ACK; 281 cf->can_id |= CAN_ERR_ACK;
283 cf->data[3] |= CAN_ERR_PROT_LOC_ACK; 282 cf->data[3] = CAN_ERR_PROT_LOC_ACK;
284 } 283 }
285 } 284 }
286 if (ecsr & RCAR_CAN_ECSR_FEF) { 285 if (ecsr & RCAR_CAN_ECSR_FEF) {
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 7b92e911a616..8dda3b703d39 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -218,6 +218,9 @@ static void sja1000_start(struct net_device *dev)
218 priv->write_reg(priv, SJA1000_RXERR, 0x0); 218 priv->write_reg(priv, SJA1000_RXERR, 0x0);
219 priv->read_reg(priv, SJA1000_ECC); 219 priv->read_reg(priv, SJA1000_ECC);
220 220
221 /* clear interrupt flags */
222 priv->read_reg(priv, SJA1000_IR);
223
221 /* leave reset mode */ 224 /* leave reset mode */
222 set_normal_mode(dev); 225 set_normal_mode(dev);
223} 226}
@@ -446,7 +449,6 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
446 cf->data[2] |= CAN_ERR_PROT_STUFF; 449 cf->data[2] |= CAN_ERR_PROT_STUFF;
447 break; 450 break;
448 default: 451 default:
449 cf->data[2] |= CAN_ERR_PROT_UNSPEC;
450 cf->data[3] = ecc & ECC_SEG; 452 cf->data[3] = ecc & ECC_SEG;
451 break; 453 break;
452 } 454 }
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index d9a42c646783..68ef0a4cd821 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -575,7 +575,6 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
575 cf->data[2] |= CAN_ERR_PROT_STUFF; 575 cf->data[2] |= CAN_ERR_PROT_STUFF;
576 break; 576 break;
577 default: 577 default:
578 cf->data[2] |= CAN_ERR_PROT_UNSPEC;
579 cf->data[3] = (ecc & SUN4I_STA_ERR_SEG_CODE) 578 cf->data[3] = (ecc & SUN4I_STA_ERR_SEG_CODE)
580 >> 16; 579 >> 16;
581 break; 580 break;
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index cf345cbfe819..680d1ff07a55 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -722,7 +722,6 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
722 if (err_status & HECC_BUS_ERROR) { 722 if (err_status & HECC_BUS_ERROR) {
723 ++priv->can.can_stats.bus_error; 723 ++priv->can.can_stats.bus_error;
724 cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT; 724 cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
725 cf->data[2] |= CAN_ERR_PROT_UNSPEC;
726 if (err_status & HECC_CANES_FE) { 725 if (err_status & HECC_CANES_FE) {
727 hecc_set_bit(priv, HECC_CANES, HECC_CANES_FE); 726 hecc_set_bit(priv, HECC_CANES, HECC_CANES_FE);
728 cf->data[2] |= CAN_ERR_PROT_FORM; 727 cf->data[2] |= CAN_ERR_PROT_FORM;
@@ -737,13 +736,11 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
737 } 736 }
738 if (err_status & HECC_CANES_CRCE) { 737 if (err_status & HECC_CANES_CRCE) {
739 hecc_set_bit(priv, HECC_CANES, HECC_CANES_CRCE); 738 hecc_set_bit(priv, HECC_CANES, HECC_CANES_CRCE);
740 cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ | 739 cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
741 CAN_ERR_PROT_LOC_CRC_DEL;
742 } 740 }
743 if (err_status & HECC_CANES_ACKE) { 741 if (err_status & HECC_CANES_ACKE) {
744 hecc_set_bit(priv, HECC_CANES, HECC_CANES_ACKE); 742 hecc_set_bit(priv, HECC_CANES, HECC_CANES_ACKE);
745 cf->data[3] |= CAN_ERR_PROT_LOC_ACK | 743 cf->data[3] = CAN_ERR_PROT_LOC_ACK;
746 CAN_ERR_PROT_LOC_ACK_DEL;
747 } 744 }
748 } 745 }
749 746
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 2d390384ef3b..fc5b75675cd8 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -377,7 +377,6 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
377 cf->data[2] |= CAN_ERR_PROT_STUFF; 377 cf->data[2] |= CAN_ERR_PROT_STUFF;
378 break; 378 break;
379 default: 379 default:
380 cf->data[2] |= CAN_ERR_PROT_UNSPEC;
381 cf->data[3] = ecc & SJA1000_ECC_SEG; 380 cf->data[3] = ecc & SJA1000_ECC_SEG;
382 break; 381 break;
383 } 382 }
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index 0e5a4493ba4f..113e64fcd73b 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -282,7 +282,6 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
282 cf->data[2] |= CAN_ERR_PROT_STUFF; 282 cf->data[2] |= CAN_ERR_PROT_STUFF;
283 break; 283 break;
284 default: 284 default:
285 cf->data[2] |= CAN_ERR_PROT_UNSPEC;
286 cf->data[3] = ecc & SJA1000_ECC_SEG; 285 cf->data[3] = ecc & SJA1000_ECC_SEG;
287 break; 286 break;
288 } 287 }
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 8b17a9065b0b..022bfa13ebfa 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -944,10 +944,9 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
944 cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT; 944 cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
945 945
946 if (es->leaf.error_factor & M16C_EF_ACKE) 946 if (es->leaf.error_factor & M16C_EF_ACKE)
947 cf->data[3] |= (CAN_ERR_PROT_LOC_ACK); 947 cf->data[3] = CAN_ERR_PROT_LOC_ACK;
948 if (es->leaf.error_factor & M16C_EF_CRCE) 948 if (es->leaf.error_factor & M16C_EF_CRCE)
949 cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ | 949 cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
950 CAN_ERR_PROT_LOC_CRC_DEL);
951 if (es->leaf.error_factor & M16C_EF_FORME) 950 if (es->leaf.error_factor & M16C_EF_FORME)
952 cf->data[2] |= CAN_ERR_PROT_FORM; 951 cf->data[2] |= CAN_ERR_PROT_FORM;
953 if (es->leaf.error_factor & M16C_EF_STFE) 952 if (es->leaf.error_factor & M16C_EF_STFE)
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index de95b1ccba3e..a731720f1d13 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -401,9 +401,7 @@ static void usb_8dev_rx_err_msg(struct usb_8dev_priv *priv,
401 tx_errors = 1; 401 tx_errors = 1;
402 break; 402 break;
403 case USB_8DEV_STATUSMSG_CRC: 403 case USB_8DEV_STATUSMSG_CRC:
404 cf->data[2] |= CAN_ERR_PROT_UNSPEC; 404 cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
405 cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ |
406 CAN_ERR_PROT_LOC_CRC_DEL;
407 rx_errors = 1; 405 rx_errors = 1;
408 break; 406 break;
409 case USB_8DEV_STATUSMSG_BIT0: 407 case USB_8DEV_STATUSMSG_BIT0:
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index fc55e8e0351d..51670b322409 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -608,17 +608,15 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)

 	/* Check for error interrupt */
 	if (isr & XCAN_IXR_ERROR_MASK) {
-		if (skb) {
+		if (skb)
 			cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
-			cf->data[2] |= CAN_ERR_PROT_UNSPEC;
-		}

 		/* Check for Ack error interrupt */
 		if (err_status & XCAN_ESR_ACKER_MASK) {
 			stats->tx_errors++;
 			if (skb) {
 				cf->can_id |= CAN_ERR_ACK;
-				cf->data[3] |= CAN_ERR_PROT_LOC_ACK;
+				cf->data[3] = CAN_ERR_PROT_LOC_ACK;
 			}
 		}

@@ -654,8 +652,7 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
 			stats->rx_errors++;
 			if (skb) {
 				cf->can_id |= CAN_ERR_PROT;
-				cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ |
-					      CAN_ERR_PROT_LOC_CRC_DEL;
+				cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
 			}
 		}
 		priv->can.can_stats.bus_error++;
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 955d06b9cdba..31c5e476fd64 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -29,6 +29,7 @@ source "drivers/net/ethernet/apm/Kconfig"
 source "drivers/net/ethernet/apple/Kconfig"
 source "drivers/net/ethernet/arc/Kconfig"
 source "drivers/net/ethernet/atheros/Kconfig"
+source "drivers/net/ethernet/aurora/Kconfig"
 source "drivers/net/ethernet/cadence/Kconfig"
 source "drivers/net/ethernet/adi/Kconfig"
 source "drivers/net/ethernet/broadcom/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 4a2ee98738f0..071f84eb6f3f 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_NET_XGENE) += apm/
 obj-$(CONFIG_NET_VENDOR_APPLE) += apple/
 obj-$(CONFIG_NET_VENDOR_ARC) += arc/
 obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/
+obj-$(CONFIG_NET_VENDOR_AURORA) += aurora/
 obj-$(CONFIG_NET_CADENCE) += cadence/
 obj-$(CONFIG_NET_BFIN) += adi/
 obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 970781a9e677..f6a7161e3b85 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1849,7 +1849,7 @@ static int xgbe_exit(struct xgbe_prv_data *pdata)
 	usleep_range(10, 15);

 	/* Poll Until Poll Condition */
-	while (count-- && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
+	while (--count && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
 		usleep_range(500, 600);

 	if (!count)
@@ -1873,7 +1873,7 @@ static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
 	/* Poll Until Poll Condition */
 	for (i = 0; i < pdata->tx_q_count; i++) {
 		count = 2000;
-		while (count-- && XGMAC_MTL_IOREAD_BITS(pdata, i,
+		while (--count && XGMAC_MTL_IOREAD_BITS(pdata, i,
 							MTL_Q_TQOMR, FTQ))
 			usleep_range(500, 600);

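The xgbe change fixes a classic post- versus pre-decrement bug in a bounded poll: with "count--" the counter ends up at -1 rather than 0 when the retries run out, so the "if (!count)" timeout test above can never fire; pre-decrementing leaves it at exactly 0 on timeout. A hedged stand-alone sketch of the corrected idiom (names are illustrative, not driver code):

	/* Poll 'busy()' at most 'count' times; returns -EBUSY on timeout.
	 * With --count the loop exits with count == 0 exactly when the
	 * retries are exhausted, so the timeout test is reliable.
	 */
	static int poll_until_idle(unsigned int count, bool (*busy)(void))
	{
		while (--count && busy())
			usleep_range(500, 600);

		return count ? 0 : -EBUSY;
	}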
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 991412ce6f48..d0ae1a6cc212 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -289,6 +289,7 @@ static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
 				    struct sk_buff *skb)
 {
 	struct device *dev = ndev_to_dev(tx_ring->ndev);
+	struct xgene_enet_pdata *pdata = netdev_priv(tx_ring->ndev);
 	struct xgene_enet_raw_desc *raw_desc;
 	__le64 *exp_desc = NULL, *exp_bufs = NULL;
 	dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr;
@@ -419,6 +420,7 @@ out:
 	raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) |
 				   SET_VAL(USERINFO, tx_ring->tail));
 	tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb;
+	pdata->tx_level += count;
 	tx_ring->tail = tail;

 	return count;
@@ -429,14 +431,13 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
 {
 	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
 	struct xgene_enet_desc_ring *tx_ring = pdata->tx_ring;
-	struct xgene_enet_desc_ring *cp_ring = tx_ring->cp_ring;
-	u32 tx_level, cq_level;
+	u32 tx_level = pdata->tx_level;
 	int count;

-	tx_level = pdata->ring_ops->len(tx_ring);
-	cq_level = pdata->ring_ops->len(cp_ring);
-	if (unlikely(tx_level > pdata->tx_qcnt_hi ||
-		     cq_level > pdata->cp_qcnt_hi)) {
+	if (tx_level < pdata->txc_level)
+		tx_level += ((typeof(pdata->tx_level))~0U);
+
+	if ((tx_level - pdata->txc_level) > pdata->tx_qcnt_hi) {
 		netif_stop_queue(ndev);
 		return NETDEV_TX_BUSY;
 	}
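The new X-Gene flow control replaces ring-length reads with two free-running 16-bit counters: tx_level counts descriptors ever queued (bumped in xgene_enet_setup_tx_desc above) and txc_level counts descriptors ever completed (bumped in the completion path below); their difference is the in-flight depth compared against tx_qcnt_hi, and the typeof() addition unwraps the case where tx_level has wrapped past txc_level. A hedged sketch of the underlying modular arithmetic (the helper is illustrative; the driver open-codes the unwrap):

	/* In-flight depth from two free-running 16-bit counters.  The
	 * subtraction in u16 arithmetic stays correct modulo 2^16 even
	 * after either counter wraps.
	 */
	static inline u16 tx_in_flight(u16 tx_level, u16 txc_level)
	{
		return (u16)(tx_level - txc_level);
	}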
@@ -450,12 +451,12 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
 		return NETDEV_TX_OK;
 	}

-	pdata->ring_ops->wr_cmd(tx_ring, count);
 	skb_tx_timestamp(skb);

 	pdata->stats.tx_packets++;
 	pdata->stats.tx_bytes += skb->len;

+	pdata->ring_ops->wr_cmd(tx_ring, count);
 	return NETDEV_TX_OK;
 }

461 462
@@ -539,10 +540,13 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
 	struct xgene_enet_raw_desc *raw_desc, *exp_desc;
 	u16 head = ring->head;
 	u16 slots = ring->slots - 1;
-	int ret, count = 0, processed = 0;
+	int ret, desc_count, count = 0, processed = 0;
+	bool is_completion;

 	do {
 		raw_desc = &ring->raw_desc[head];
+		desc_count = 0;
+		is_completion = false;
 		exp_desc = NULL;
 		if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
 			break;
@@ -559,18 +563,24 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
 			}
 			dma_rmb();
 			count++;
+			desc_count++;
 		}
-		if (is_rx_desc(raw_desc))
+		if (is_rx_desc(raw_desc)) {
 			ret = xgene_enet_rx_frame(ring, raw_desc);
-		else
+		} else {
 			ret = xgene_enet_tx_completion(ring, raw_desc);
+			is_completion = true;
+		}
 		xgene_enet_mark_desc_slot_empty(raw_desc);
 		if (exp_desc)
 			xgene_enet_mark_desc_slot_empty(exp_desc);

 		head = (head + 1) & slots;
 		count++;
+		desc_count++;
 		processed++;
+		if (is_completion)
+			pdata->txc_level += desc_count;

 		if (ret)
 			break;
@@ -580,10 +590,8 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
 		pdata->ring_ops->wr_cmd(ring, -count);
 		ring->head = head;

-		if (netif_queue_stopped(ring->ndev)) {
-			if (pdata->ring_ops->len(ring) < pdata->cp_qcnt_low)
-				netif_wake_queue(ring->ndev);
-		}
+		if (netif_queue_stopped(ring->ndev))
+			netif_start_queue(ring->ndev);
 	}

 	return processed;
@@ -688,10 +696,10 @@ static int xgene_enet_open(struct net_device *ndev)
 	mac_ops->tx_enable(pdata);
 	mac_ops->rx_enable(pdata);

+	xgene_enet_napi_enable(pdata);
 	ret = xgene_enet_register_irq(ndev);
 	if (ret)
 		return ret;
-	xgene_enet_napi_enable(pdata);

 	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
 		phy_start(pdata->phy_dev);
@@ -715,13 +723,13 @@ static int xgene_enet_close(struct net_device *ndev)
 	else
 		cancel_delayed_work_sync(&pdata->link_work);

-	xgene_enet_napi_disable(pdata);
-	xgene_enet_free_irq(ndev);
-	xgene_enet_process_ring(pdata->rx_ring, -1);
-
 	mac_ops->tx_disable(pdata);
 	mac_ops->rx_disable(pdata);

+	xgene_enet_free_irq(ndev);
+	xgene_enet_napi_disable(pdata);
+	xgene_enet_process_ring(pdata->rx_ring, -1);
+
 	return 0;
 }

@@ -1033,9 +1041,7 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
 	pdata->tx_ring->cp_ring = cp_ring;
 	pdata->tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);

-	pdata->tx_qcnt_hi = pdata->tx_ring->slots / 2;
-	pdata->cp_qcnt_hi = pdata->rx_ring->slots / 2;
-	pdata->cp_qcnt_low = pdata->cp_qcnt_hi / 2;
+	pdata->tx_qcnt_hi = pdata->tx_ring->slots - 128;

 	return 0;

@@ -1474,15 +1480,15 @@ static int xgene_enet_probe(struct platform_device *pdev)
 	}
 	ndev->hw_features = ndev->features;

-	ret = register_netdev(ndev);
+	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
 	if (ret) {
-		netdev_err(ndev, "Failed to register netdev\n");
+		netdev_err(ndev, "No usable DMA configuration\n");
 		goto err;
 	}

-	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
+	ret = register_netdev(ndev);
 	if (ret) {
-		netdev_err(ndev, "No usable DMA configuration\n");
+		netdev_err(ndev, "Failed to register netdev\n");
 		goto err;
 	}

@@ -1490,14 +1496,17 @@ static int xgene_enet_probe(struct platform_device *pdev)
 	if (ret)
 		goto err;

-	xgene_enet_napi_add(pdata);
 	mac_ops = pdata->mac_ops;
-	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
+	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) {
 		ret = xgene_enet_mdio_config(pdata);
-	else
+		if (ret)
+			goto err;
+	} else {
 		INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state);
+	}

-	return ret;
+	xgene_enet_napi_add(pdata);
+	return 0;
 err:
 	unregister_netdev(ndev);
 	free_netdev(ndev);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index a6e56b88c0a0..1aa72c787f8d 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -155,11 +155,11 @@ struct xgene_enet_pdata {
 	enum xgene_enet_id enet_id;
 	struct xgene_enet_desc_ring *tx_ring;
 	struct xgene_enet_desc_ring *rx_ring;
+	u16 tx_level;
+	u16 txc_level;
 	char *dev_name;
 	u32 rx_buff_cnt;
 	u32 tx_qcnt_hi;
-	u32 cp_qcnt_hi;
-	u32 cp_qcnt_low;
 	u32 rx_irq;
 	u32 txc_irq;
 	u8 cq_cnt;
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index c8af3ce3ea38..bd377a6b067d 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1534,6 +1534,8 @@ static const struct pci_device_id alx_pci_tbl[] = {
 	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
 	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2200),
 	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
+	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2400),
+	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
 	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162),
 	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
 	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) },
diff --git a/drivers/net/ethernet/atheros/alx/reg.h b/drivers/net/ethernet/atheros/alx/reg.h
index af006b44b2a6..0959e6824cb6 100644
--- a/drivers/net/ethernet/atheros/alx/reg.h
+++ b/drivers/net/ethernet/atheros/alx/reg.h
@@ -37,6 +37,7 @@

 #define ALX_DEV_ID_AR8161		0x1091
 #define ALX_DEV_ID_E2200		0xe091
+#define ALX_DEV_ID_E2400		0xe0a1
 #define ALX_DEV_ID_AR8162		0x1090
 #define ALX_DEV_ID_AR8171		0x10A1
 #define ALX_DEV_ID_AR8172		0x10A0
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 2795d6db10e1..8b5988e210d5 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -1016,13 +1016,12 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
 		sizeof(struct atl1c_recv_ret_status) * rx_desc_count +
 		8 * 4;

-	ring_header->desc = pci_alloc_consistent(pdev, ring_header->size,
-						 &ring_header->dma);
+	ring_header->desc = dma_zalloc_coherent(&pdev->dev, ring_header->size,
+						&ring_header->dma, GFP_KERNEL);
 	if (unlikely(!ring_header->desc)) {
-		dev_err(&pdev->dev, "pci_alloc_consistend failed\n");
+		dev_err(&pdev->dev, "could not get memory for DMA buffer\n");
 		goto err_nomem;
 	}
-	memset(ring_header->desc, 0, ring_header->size);
 	/* init TPD ring */

 	tpd_ring[0].dma = roundup(ring_header->dma, 8);
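The atl1c hunk swaps the legacy PCI DMA API for the generic one: dma_zalloc_coherent() allocates, maps and zeroes the descriptor memory in a single call, which makes the separate memset() redundant and states the GFP context explicitly at the call site. A minimal sketch of the replacement pattern, assuming a generic PCI driver context (the helper name is illustrative):

	/* One call replaces pci_alloc_consistent() + memset(). */
	static void *ring_header_alloc(struct pci_dev *pdev, size_t size,
				       dma_addr_t *dma)
	{
		return dma_zalloc_coherent(&pdev->dev, size, dma, GFP_KERNEL);
	}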
diff --git a/drivers/net/ethernet/aurora/Kconfig b/drivers/net/ethernet/aurora/Kconfig
new file mode 100644
index 000000000000..8ba7f8ff3434
--- /dev/null
+++ b/drivers/net/ethernet/aurora/Kconfig
@@ -0,0 +1,21 @@
1config NET_VENDOR_AURORA
2 bool "Aurora VLSI devices"
3 help
4 If you have a network (Ethernet) device belonging to this class,
5 say Y.
6
7 Note that the answer to this question doesn't directly affect the
8 kernel: saying N will just cause the configurator to skip all
9 questions about Aurora devices. If you say Y, you will be asked
10 for your specific device in the following questions.
11
12if NET_VENDOR_AURORA
13
14config AURORA_NB8800
15 tristate "Aurora AU-NB8800 support"
16 depends on HAS_DMA
17 select PHYLIB
18 help
19 Support for the AU-NB8800 gigabit Ethernet controller.
20
21endif
diff --git a/drivers/net/ethernet/aurora/Makefile b/drivers/net/ethernet/aurora/Makefile
new file mode 100644
index 000000000000..6cb528a2fc26
--- /dev/null
+++ b/drivers/net/ethernet/aurora/Makefile
@@ -0,0 +1 @@
obj-$(CONFIG_AURORA_NB8800) += nb8800.o
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
new file mode 100644
index 000000000000..ecc4a334c507
--- /dev/null
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -0,0 +1,1552 @@
1/*
2 * Copyright (C) 2015 Mans Rullgard <mans@mansr.com>
3 *
4 * Mostly rewritten, based on driver from Sigma Designs. Original
5 * copyright notice below.
6 *
7 *
8 * Driver for tangox SMP864x/SMP865x/SMP867x/SMP868x builtin Ethernet Mac.
9 *
10 * Copyright (C) 2005 Maxime Bizon <mbizon@freebox.fr>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 */
22
23#include <linux/module.h>
24#include <linux/etherdevice.h>
25#include <linux/delay.h>
26#include <linux/ethtool.h>
27#include <linux/interrupt.h>
28#include <linux/platform_device.h>
29#include <linux/of_device.h>
30#include <linux/of_mdio.h>
31#include <linux/of_net.h>
32#include <linux/dma-mapping.h>
33#include <linux/phy.h>
34#include <linux/cache.h>
35#include <linux/jiffies.h>
36#include <linux/io.h>
37#include <linux/iopoll.h>
38#include <asm/barrier.h>
39
40#include "nb8800.h"
41
42static void nb8800_tx_done(struct net_device *dev);
43static int nb8800_dma_stop(struct net_device *dev);
44
45static inline u8 nb8800_readb(struct nb8800_priv *priv, int reg)
46{
47 return readb_relaxed(priv->base + reg);
48}
49
50static inline u32 nb8800_readl(struct nb8800_priv *priv, int reg)
51{
52 return readl_relaxed(priv->base + reg);
53}
54
55static inline void nb8800_writeb(struct nb8800_priv *priv, int reg, u8 val)
56{
57 writeb_relaxed(val, priv->base + reg);
58}
59
60static inline void nb8800_writew(struct nb8800_priv *priv, int reg, u16 val)
61{
62 writew_relaxed(val, priv->base + reg);
63}
64
65static inline void nb8800_writel(struct nb8800_priv *priv, int reg, u32 val)
66{
67 writel_relaxed(val, priv->base + reg);
68}
69
70static inline void nb8800_maskb(struct nb8800_priv *priv, int reg,
71 u32 mask, u32 val)
72{
73 u32 old = nb8800_readb(priv, reg);
74 u32 new = (old & ~mask) | (val & mask);
75
76 if (new != old)
77 nb8800_writeb(priv, reg, new);
78}
79
80static inline void nb8800_maskl(struct nb8800_priv *priv, int reg,
81 u32 mask, u32 val)
82{
83 u32 old = nb8800_readl(priv, reg);
84 u32 new = (old & ~mask) | (val & mask);
85
86 if (new != old)
87 nb8800_writel(priv, reg, new);
88}
89
90static inline void nb8800_modb(struct nb8800_priv *priv, int reg, u8 bits,
91 bool set)
92{
93 nb8800_maskb(priv, reg, bits, set ? bits : 0);
94}
95
96static inline void nb8800_setb(struct nb8800_priv *priv, int reg, u8 bits)
97{
98 nb8800_maskb(priv, reg, bits, bits);
99}
100
101static inline void nb8800_clearb(struct nb8800_priv *priv, int reg, u8 bits)
102{
103 nb8800_maskb(priv, reg, bits, 0);
104}
105
106static inline void nb8800_modl(struct nb8800_priv *priv, int reg, u32 bits,
107 bool set)
108{
109 nb8800_maskl(priv, reg, bits, set ? bits : 0);
110}
111
112static inline void nb8800_setl(struct nb8800_priv *priv, int reg, u32 bits)
113{
114 nb8800_maskl(priv, reg, bits, bits);
115}
116
117static inline void nb8800_clearl(struct nb8800_priv *priv, int reg, u32 bits)
118{
119 nb8800_maskl(priv, reg, bits, 0);
120}
121
122static int nb8800_mdio_wait(struct mii_bus *bus)
123{
124 struct nb8800_priv *priv = bus->priv;
125 u32 val;
126
127 return readl_poll_timeout_atomic(priv->base + NB8800_MDIO_CMD,
128 val, !(val & MDIO_CMD_GO), 1, 1000);
129}
130
131static int nb8800_mdio_cmd(struct mii_bus *bus, u32 cmd)
132{
133 struct nb8800_priv *priv = bus->priv;
134 int err;
135
136 err = nb8800_mdio_wait(bus);
137 if (err)
138 return err;
139
140 nb8800_writel(priv, NB8800_MDIO_CMD, cmd);
141 udelay(10);
142 nb8800_writel(priv, NB8800_MDIO_CMD, cmd | MDIO_CMD_GO);
143
144 return nb8800_mdio_wait(bus);
145}
146
147static int nb8800_mdio_read(struct mii_bus *bus, int phy_id, int reg)
148{
149 struct nb8800_priv *priv = bus->priv;
150 u32 val;
151 int err;
152
153 err = nb8800_mdio_cmd(bus, MDIO_CMD_ADDR(phy_id) | MDIO_CMD_REG(reg));
154 if (err)
155 return err;
156
157 val = nb8800_readl(priv, NB8800_MDIO_STS);
158 if (val & MDIO_STS_ERR)
159 return 0xffff;
160
161 return val & 0xffff;
162}
163
164static int nb8800_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
165{
166 u32 cmd = MDIO_CMD_ADDR(phy_id) | MDIO_CMD_REG(reg) |
167 MDIO_CMD_DATA(val) | MDIO_CMD_WR;
168
169 return nb8800_mdio_cmd(bus, cmd);
170}
171
172static void nb8800_mac_tx(struct net_device *dev, bool enable)
173{
174 struct nb8800_priv *priv = netdev_priv(dev);
175
176 while (nb8800_readl(priv, NB8800_TXC_CR) & TCR_EN)
177 cpu_relax();
178
179 nb8800_modb(priv, NB8800_TX_CTL1, TX_EN, enable);
180}
181
182static void nb8800_mac_rx(struct net_device *dev, bool enable)
183{
184 nb8800_modb(netdev_priv(dev), NB8800_RX_CTL, RX_EN, enable);
185}
186
187static void nb8800_mac_af(struct net_device *dev, bool enable)
188{
189 nb8800_modb(netdev_priv(dev), NB8800_RX_CTL, RX_AF_EN, enable);
190}
191
192static void nb8800_start_rx(struct net_device *dev)
193{
194 nb8800_setl(netdev_priv(dev), NB8800_RXC_CR, RCR_EN);
195}
196
197static int nb8800_alloc_rx(struct net_device *dev, unsigned int i, bool napi)
198{
199 struct nb8800_priv *priv = netdev_priv(dev);
200 struct nb8800_rx_desc *rxd = &priv->rx_descs[i];
201 struct nb8800_rx_buf *rxb = &priv->rx_bufs[i];
202 int size = L1_CACHE_ALIGN(RX_BUF_SIZE);
203 dma_addr_t dma_addr;
204 struct page *page;
205 unsigned long offset;
206 void *data;
207
208 data = napi ? napi_alloc_frag(size) : netdev_alloc_frag(size);
209 if (!data)
210 return -ENOMEM;
211
212 page = virt_to_head_page(data);
213 offset = data - page_address(page);
214
215 dma_addr = dma_map_page(&dev->dev, page, offset, RX_BUF_SIZE,
216 DMA_FROM_DEVICE);
217
218 if (dma_mapping_error(&dev->dev, dma_addr)) {
219 skb_free_frag(data);
220 return -ENOMEM;
221 }
222
223 rxb->page = page;
224 rxb->offset = offset;
225 rxd->desc.s_addr = dma_addr;
226
227 return 0;
228}
229
230static void nb8800_receive(struct net_device *dev, unsigned int i,
231 unsigned int len)
232{
233 struct nb8800_priv *priv = netdev_priv(dev);
234 struct nb8800_rx_desc *rxd = &priv->rx_descs[i];
235 struct page *page = priv->rx_bufs[i].page;
236 int offset = priv->rx_bufs[i].offset;
237 void *data = page_address(page) + offset;
238 dma_addr_t dma = rxd->desc.s_addr;
239 struct sk_buff *skb;
240 unsigned int size;
241 int err;
242
243 size = len <= RX_COPYBREAK ? len : RX_COPYHDR;
244
245 skb = napi_alloc_skb(&priv->napi, size);
246 if (!skb) {
247 netdev_err(dev, "rx skb allocation failed\n");
248 dev->stats.rx_dropped++;
249 return;
250 }
251
252 if (len <= RX_COPYBREAK) {
253 dma_sync_single_for_cpu(&dev->dev, dma, len, DMA_FROM_DEVICE);
254 memcpy(skb_put(skb, len), data, len);
255 dma_sync_single_for_device(&dev->dev, dma, len,
256 DMA_FROM_DEVICE);
257 } else {
258 err = nb8800_alloc_rx(dev, i, true);
259 if (err) {
260 netdev_err(dev, "rx buffer allocation failed\n");
261 dev->stats.rx_dropped++;
262 return;
263 }
264
265 dma_unmap_page(&dev->dev, dma, RX_BUF_SIZE, DMA_FROM_DEVICE);
266 memcpy(skb_put(skb, RX_COPYHDR), data, RX_COPYHDR);
267 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
268 offset + RX_COPYHDR, len - RX_COPYHDR,
269 RX_BUF_SIZE);
270 }
271
272 skb->protocol = eth_type_trans(skb, dev);
273 napi_gro_receive(&priv->napi, skb);
274}
275
276static void nb8800_rx_error(struct net_device *dev, u32 report)
277{
278 if (report & RX_LENGTH_ERR)
279 dev->stats.rx_length_errors++;
280
281 if (report & RX_FCS_ERR)
282 dev->stats.rx_crc_errors++;
283
284 if (report & RX_FIFO_OVERRUN)
285 dev->stats.rx_fifo_errors++;
286
287 if (report & RX_ALIGNMENT_ERROR)
288 dev->stats.rx_frame_errors++;
289
290 dev->stats.rx_errors++;
291}
292
293static int nb8800_poll(struct napi_struct *napi, int budget)
294{
295 struct net_device *dev = napi->dev;
296 struct nb8800_priv *priv = netdev_priv(dev);
297 struct nb8800_rx_desc *rxd;
298 unsigned int last = priv->rx_eoc;
299 unsigned int next;
300 int work = 0;
301
302 nb8800_tx_done(dev);
303
304again:
305 while (work < budget) {
306 struct nb8800_rx_buf *rxb;
307 unsigned int len;
308
309 next = (last + 1) % RX_DESC_COUNT;
310
311 rxb = &priv->rx_bufs[next];
312 rxd = &priv->rx_descs[next];
313
314 if (!rxd->report)
315 break;
316
317 len = RX_BYTES_TRANSFERRED(rxd->report);
318
319 if (IS_RX_ERROR(rxd->report))
320 nb8800_rx_error(dev, rxd->report);
321 else
322 nb8800_receive(dev, next, len);
323
324 dev->stats.rx_packets++;
325 dev->stats.rx_bytes += len;
326
327 if (rxd->report & RX_MULTICAST_PKT)
328 dev->stats.multicast++;
329
330 rxd->report = 0;
331 last = next;
332 work++;
333 }
334
335 if (work) {
336 priv->rx_descs[last].desc.config |= DESC_EOC;
337 wmb(); /* ensure new EOC is written before clearing old */
338 priv->rx_descs[priv->rx_eoc].desc.config &= ~DESC_EOC;
339 priv->rx_eoc = last;
340 nb8800_start_rx(dev);
341 }
342
343 if (work < budget) {
344 nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_irq);
345
346 /* If a packet arrived after we last checked but
347 * before writing RX_ITR, the interrupt will be
348 * delayed, so we retrieve it now.
349 */
350 if (priv->rx_descs[next].report)
351 goto again;
352
353 napi_complete_done(napi, work);
354 }
355
356 return work;
357}
358
359static void __nb8800_tx_dma_start(struct net_device *dev)
360{
361 struct nb8800_priv *priv = netdev_priv(dev);
362 struct nb8800_tx_buf *txb;
363 u32 txc_cr;
364
365 txb = &priv->tx_bufs[priv->tx_queue];
366 if (!txb->ready)
367 return;
368
369 txc_cr = nb8800_readl(priv, NB8800_TXC_CR);
370 if (txc_cr & TCR_EN)
371 return;
372
373 nb8800_writel(priv, NB8800_TX_DESC_ADDR, txb->dma_desc);
374 wmb(); /* ensure desc addr is written before starting DMA */
375 nb8800_writel(priv, NB8800_TXC_CR, txc_cr | TCR_EN);
376
377 priv->tx_queue = (priv->tx_queue + txb->chain_len) % TX_DESC_COUNT;
378}
379
380static void nb8800_tx_dma_start(struct net_device *dev)
381{
382 struct nb8800_priv *priv = netdev_priv(dev);
383
384 spin_lock_irq(&priv->tx_lock);
385 __nb8800_tx_dma_start(dev);
386 spin_unlock_irq(&priv->tx_lock);
387}
388
389static void nb8800_tx_dma_start_irq(struct net_device *dev)
390{
391 struct nb8800_priv *priv = netdev_priv(dev);
392
393 spin_lock(&priv->tx_lock);
394 __nb8800_tx_dma_start(dev);
395 spin_unlock(&priv->tx_lock);
396}
397
398static int nb8800_xmit(struct sk_buff *skb, struct net_device *dev)
399{
400 struct nb8800_priv *priv = netdev_priv(dev);
401 struct nb8800_tx_desc *txd;
402 struct nb8800_tx_buf *txb;
403 struct nb8800_dma_desc *desc;
404 dma_addr_t dma_addr;
405 unsigned int dma_len;
406 unsigned int align;
407 unsigned int next;
408
409 if (atomic_read(&priv->tx_free) <= NB8800_DESC_LOW) {
410 netif_stop_queue(dev);
411 return NETDEV_TX_BUSY;
412 }
413
414 align = (8 - (uintptr_t)skb->data) & 7;
415
416 dma_len = skb->len - align;
417 dma_addr = dma_map_single(&dev->dev, skb->data + align,
418 dma_len, DMA_TO_DEVICE);
419
420 if (dma_mapping_error(&dev->dev, dma_addr)) {
421 netdev_err(dev, "tx dma mapping error\n");
422 kfree_skb(skb);
423 dev->stats.tx_dropped++;
424 return NETDEV_TX_OK;
425 }
426
427 if (atomic_dec_return(&priv->tx_free) <= NB8800_DESC_LOW) {
428 netif_stop_queue(dev);
429 skb->xmit_more = 0;
430 }
431
432 next = priv->tx_next;
433 txb = &priv->tx_bufs[next];
434 txd = &priv->tx_descs[next];
435 desc = &txd->desc[0];
436
437 next = (next + 1) % TX_DESC_COUNT;
438
439 if (align) {
440 memcpy(txd->buf, skb->data, align);
441
442 desc->s_addr =
443 txb->dma_desc + offsetof(struct nb8800_tx_desc, buf);
444 desc->n_addr = txb->dma_desc + sizeof(txd->desc[0]);
445 desc->config = DESC_BTS(2) | DESC_DS | align;
446
447 desc++;
448 }
449
450 desc->s_addr = dma_addr;
451 desc->n_addr = priv->tx_bufs[next].dma_desc;
452 desc->config = DESC_BTS(2) | DESC_DS | DESC_EOF | dma_len;
453
454 if (!skb->xmit_more)
455 desc->config |= DESC_EOC;
456
457 txb->skb = skb;
458 txb->dma_addr = dma_addr;
459 txb->dma_len = dma_len;
460
461 if (!priv->tx_chain) {
462 txb->chain_len = 1;
463 priv->tx_chain = txb;
464 } else {
465 priv->tx_chain->chain_len++;
466 }
467
468 netdev_sent_queue(dev, skb->len);
469
470 priv->tx_next = next;
471
472 if (!skb->xmit_more) {
473 smp_wmb();
474 priv->tx_chain->ready = true;
475 priv->tx_chain = NULL;
476 nb8800_tx_dma_start(dev);
477 }
478
479 return NETDEV_TX_OK;
480}
481
482static void nb8800_tx_error(struct net_device *dev, u32 report)
483{
484 if (report & TX_LATE_COLLISION)
485 dev->stats.collisions++;
486
487 if (report & TX_PACKET_DROPPED)
488 dev->stats.tx_dropped++;
489
490 if (report & TX_FIFO_UNDERRUN)
491 dev->stats.tx_fifo_errors++;
492
493 dev->stats.tx_errors++;
494}
495
496static void nb8800_tx_done(struct net_device *dev)
497{
498 struct nb8800_priv *priv = netdev_priv(dev);
499 unsigned int limit = priv->tx_next;
500 unsigned int done = priv->tx_done;
501 unsigned int packets = 0;
502 unsigned int len = 0;
503
504 while (done != limit) {
505 struct nb8800_tx_desc *txd = &priv->tx_descs[done];
506 struct nb8800_tx_buf *txb = &priv->tx_bufs[done];
507 struct sk_buff *skb;
508
509 if (!txd->report)
510 break;
511
512 skb = txb->skb;
513 len += skb->len;
514
515 dma_unmap_single(&dev->dev, txb->dma_addr, txb->dma_len,
516 DMA_TO_DEVICE);
517
518 if (IS_TX_ERROR(txd->report)) {
519 nb8800_tx_error(dev, txd->report);
520 kfree_skb(skb);
521 } else {
522 consume_skb(skb);
523 }
524
525 dev->stats.tx_packets++;
526 dev->stats.tx_bytes += TX_BYTES_TRANSFERRED(txd->report);
527 dev->stats.collisions += TX_EARLY_COLLISIONS(txd->report);
528
529 txb->skb = NULL;
530 txb->ready = false;
531 txd->report = 0;
532
533 done = (done + 1) % TX_DESC_COUNT;
534 packets++;
535 }
536
537 if (packets) {
538 smp_mb__before_atomic();
539 atomic_add(packets, &priv->tx_free);
540 netdev_completed_queue(dev, packets, len);
541 netif_wake_queue(dev);
542 priv->tx_done = done;
543 }
544}
545
546static irqreturn_t nb8800_irq(int irq, void *dev_id)
547{
548 struct net_device *dev = dev_id;
549 struct nb8800_priv *priv = netdev_priv(dev);
550 irqreturn_t ret = IRQ_NONE;
551 u32 val;
552
553 /* tx interrupt */
554 val = nb8800_readl(priv, NB8800_TXC_SR);
555 if (val) {
556 nb8800_writel(priv, NB8800_TXC_SR, val);
557
558 if (val & TSR_DI)
559 nb8800_tx_dma_start_irq(dev);
560
561 if (val & TSR_TI)
562 napi_schedule_irqoff(&priv->napi);
563
564 if (unlikely(val & TSR_DE))
565 netdev_err(dev, "TX DMA error\n");
566
567 /* should never happen with automatic status retrieval */
568 if (unlikely(val & TSR_TO))
569 netdev_err(dev, "TX Status FIFO overflow\n");
570
571 ret = IRQ_HANDLED;
572 }
573
574 /* rx interrupt */
575 val = nb8800_readl(priv, NB8800_RXC_SR);
576 if (val) {
577 nb8800_writel(priv, NB8800_RXC_SR, val);
578
579 if (likely(val & (RSR_RI | RSR_DI))) {
580 nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_poll);
581 napi_schedule_irqoff(&priv->napi);
582 }
583
584 if (unlikely(val & RSR_DE))
585 netdev_err(dev, "RX DMA error\n");
586
587 /* should never happen with automatic status retrieval */
588 if (unlikely(val & RSR_RO))
589 netdev_err(dev, "RX Status FIFO overflow\n");
590
591 ret = IRQ_HANDLED;
592 }
593
594 return ret;
595}
596
597static void nb8800_mac_config(struct net_device *dev)
598{
599 struct nb8800_priv *priv = netdev_priv(dev);
600 bool gigabit = priv->speed == SPEED_1000;
601 u32 mac_mode_mask = RGMII_MODE | HALF_DUPLEX | GMAC_MODE;
602 u32 mac_mode = 0;
603 u32 slot_time;
604 u32 phy_clk;
605 u32 ict;
606
607 if (!priv->duplex)
608 mac_mode |= HALF_DUPLEX;
609
610 if (gigabit) {
611 if (priv->phy_mode == PHY_INTERFACE_MODE_RGMII)
612 mac_mode |= RGMII_MODE;
613
614 mac_mode |= GMAC_MODE;
615 phy_clk = 125000000;
616
617 /* Should be 512 but register is only 8 bits */
618 slot_time = 255;
619 } else {
620 phy_clk = 25000000;
621 slot_time = 128;
622 }
623
624 ict = DIV_ROUND_UP(phy_clk, clk_get_rate(priv->clk));
625
626 nb8800_writeb(priv, NB8800_IC_THRESHOLD, ict);
627 nb8800_writeb(priv, NB8800_SLOT_TIME, slot_time);
628 nb8800_maskb(priv, NB8800_MAC_MODE, mac_mode_mask, mac_mode);
629}
630
631static void nb8800_pause_config(struct net_device *dev)
632{
633 struct nb8800_priv *priv = netdev_priv(dev);
634 struct phy_device *phydev = priv->phydev;
635 u32 rxcr;
636
637 if (priv->pause_aneg) {
638 if (!phydev || !phydev->link)
639 return;
640
641 priv->pause_rx = phydev->pause;
642 priv->pause_tx = phydev->pause ^ phydev->asym_pause;
643 }
644
645 nb8800_modb(priv, NB8800_RX_CTL, RX_PAUSE_EN, priv->pause_rx);
646
647 rxcr = nb8800_readl(priv, NB8800_RXC_CR);
648 if (!!(rxcr & RCR_FL) == priv->pause_tx)
649 return;
650
651 if (netif_running(dev)) {
652 napi_disable(&priv->napi);
653 netif_tx_lock_bh(dev);
654 nb8800_dma_stop(dev);
655 nb8800_modl(priv, NB8800_RXC_CR, RCR_FL, priv->pause_tx);
656 nb8800_start_rx(dev);
657 netif_tx_unlock_bh(dev);
658 napi_enable(&priv->napi);
659 } else {
660 nb8800_modl(priv, NB8800_RXC_CR, RCR_FL, priv->pause_tx);
661 }
662}
663
664static void nb8800_link_reconfigure(struct net_device *dev)
665{
666 struct nb8800_priv *priv = netdev_priv(dev);
667 struct phy_device *phydev = priv->phydev;
668 int change = 0;
669
670 if (phydev->link) {
671 if (phydev->speed != priv->speed) {
672 priv->speed = phydev->speed;
673 change = 1;
674 }
675
676 if (phydev->duplex != priv->duplex) {
677 priv->duplex = phydev->duplex;
678 change = 1;
679 }
680
681 if (change)
682 nb8800_mac_config(dev);
683
684 nb8800_pause_config(dev);
685 }
686
687 if (phydev->link != priv->link) {
688 priv->link = phydev->link;
689 change = 1;
690 }
691
692 if (change)
693 phy_print_status(priv->phydev);
694}
695
696static void nb8800_update_mac_addr(struct net_device *dev)
697{
698 struct nb8800_priv *priv = netdev_priv(dev);
699 int i;
700
701 for (i = 0; i < ETH_ALEN; i++)
702 nb8800_writeb(priv, NB8800_SRC_ADDR(i), dev->dev_addr[i]);
703
704 for (i = 0; i < ETH_ALEN; i++)
705 nb8800_writeb(priv, NB8800_UC_ADDR(i), dev->dev_addr[i]);
706}
707
708static int nb8800_set_mac_address(struct net_device *dev, void *addr)
709{
710 struct sockaddr *sock = addr;
711
712 if (netif_running(dev))
713 return -EBUSY;
714
715 ether_addr_copy(dev->dev_addr, sock->sa_data);
716 nb8800_update_mac_addr(dev);
717
718 return 0;
719}
720
721static void nb8800_mc_init(struct net_device *dev, int val)
722{
723 struct nb8800_priv *priv = netdev_priv(dev);
724
725 nb8800_writeb(priv, NB8800_MC_INIT, val);
726 readb_poll_timeout_atomic(priv->base + NB8800_MC_INIT, val, !val,
727 1, 1000);
728}
729
730static void nb8800_set_rx_mode(struct net_device *dev)
731{
732 struct nb8800_priv *priv = netdev_priv(dev);
733 struct netdev_hw_addr *ha;
734 int i;
735
736 if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
737 nb8800_mac_af(dev, false);
738 return;
739 }
740
741 nb8800_mac_af(dev, true);
742 nb8800_mc_init(dev, 0);
743
744 netdev_for_each_mc_addr(ha, dev) {
745 for (i = 0; i < ETH_ALEN; i++)
746 nb8800_writeb(priv, NB8800_MC_ADDR(i), ha->addr[i]);
747
748 nb8800_mc_init(dev, 0xff);
749 }
750}
751
752#define RX_DESC_SIZE (RX_DESC_COUNT * sizeof(struct nb8800_rx_desc))
753#define TX_DESC_SIZE (TX_DESC_COUNT * sizeof(struct nb8800_tx_desc))
754
755static void nb8800_dma_free(struct net_device *dev)
756{
757 struct nb8800_priv *priv = netdev_priv(dev);
758 unsigned int i;
759
760 if (priv->rx_bufs) {
761 for (i = 0; i < RX_DESC_COUNT; i++)
762 if (priv->rx_bufs[i].page)
763 put_page(priv->rx_bufs[i].page);
764
765 kfree(priv->rx_bufs);
766 priv->rx_bufs = NULL;
767 }
768
769 if (priv->tx_bufs) {
770 for (i = 0; i < TX_DESC_COUNT; i++)
771 kfree_skb(priv->tx_bufs[i].skb);
772
773 kfree(priv->tx_bufs);
774 priv->tx_bufs = NULL;
775 }
776
777 if (priv->rx_descs) {
778 dma_free_coherent(dev->dev.parent, RX_DESC_SIZE, priv->rx_descs,
779 priv->rx_desc_dma);
780 priv->rx_descs = NULL;
781 }
782
783 if (priv->tx_descs) {
784 dma_free_coherent(dev->dev.parent, TX_DESC_SIZE, priv->tx_descs,
785 priv->tx_desc_dma);
786 priv->tx_descs = NULL;
787 }
788}
789
790static void nb8800_dma_reset(struct net_device *dev)
791{
792 struct nb8800_priv *priv = netdev_priv(dev);
793 struct nb8800_rx_desc *rxd;
794 struct nb8800_tx_desc *txd;
795 unsigned int i;
796
797 for (i = 0; i < RX_DESC_COUNT; i++) {
798 dma_addr_t rx_dma = priv->rx_desc_dma + i * sizeof(*rxd);
799
800 rxd = &priv->rx_descs[i];
801 rxd->desc.n_addr = rx_dma + sizeof(*rxd);
802 rxd->desc.r_addr =
803 rx_dma + offsetof(struct nb8800_rx_desc, report);
804 rxd->desc.config = priv->rx_dma_config;
805 rxd->report = 0;
806 }
807
808 rxd->desc.n_addr = priv->rx_desc_dma;
809 rxd->desc.config |= DESC_EOC;
810
811 priv->rx_eoc = RX_DESC_COUNT - 1;
812
813 for (i = 0; i < TX_DESC_COUNT; i++) {
814 struct nb8800_tx_buf *txb = &priv->tx_bufs[i];
815 dma_addr_t r_dma = txb->dma_desc +
816 offsetof(struct nb8800_tx_desc, report);
817
818 txd = &priv->tx_descs[i];
819 txd->desc[0].r_addr = r_dma;
820 txd->desc[1].r_addr = r_dma;
821 txd->report = 0;
822 }
823
824 priv->tx_next = 0;
825 priv->tx_queue = 0;
826 priv->tx_done = 0;
827 atomic_set(&priv->tx_free, TX_DESC_COUNT);
828
829 nb8800_writel(priv, NB8800_RX_DESC_ADDR, priv->rx_desc_dma);
830
831 wmb(); /* ensure all setup is written before starting */
832}
833
834static int nb8800_dma_init(struct net_device *dev)
835{
836 struct nb8800_priv *priv = netdev_priv(dev);
837 unsigned int n_rx = RX_DESC_COUNT;
838 unsigned int n_tx = TX_DESC_COUNT;
839 unsigned int i;
840 int err;
841
842 priv->rx_descs = dma_alloc_coherent(dev->dev.parent, RX_DESC_SIZE,
843 &priv->rx_desc_dma, GFP_KERNEL);
844 if (!priv->rx_descs)
845 goto err_out;
846
847 priv->rx_bufs = kcalloc(n_rx, sizeof(*priv->rx_bufs), GFP_KERNEL);
848 if (!priv->rx_bufs)
849 goto err_out;
850
851 for (i = 0; i < n_rx; i++) {
852 err = nb8800_alloc_rx(dev, i, false);
853 if (err)
854 goto err_out;
855 }
856
857 priv->tx_descs = dma_alloc_coherent(dev->dev.parent, TX_DESC_SIZE,
858 &priv->tx_desc_dma, GFP_KERNEL);
859 if (!priv->tx_descs)
860 goto err_out;
861
862 priv->tx_bufs = kcalloc(n_tx, sizeof(*priv->tx_bufs), GFP_KERNEL);
863 if (!priv->tx_bufs)
864 goto err_out;
865
866 for (i = 0; i < n_tx; i++)
867 priv->tx_bufs[i].dma_desc =
868 priv->tx_desc_dma + i * sizeof(struct nb8800_tx_desc);
869
870 nb8800_dma_reset(dev);
871
872 return 0;
873
874err_out:
875 nb8800_dma_free(dev);
876
877 return -ENOMEM;
878}
879
880static int nb8800_dma_stop(struct net_device *dev)
881{
882 struct nb8800_priv *priv = netdev_priv(dev);
883 struct nb8800_tx_buf *txb = &priv->tx_bufs[0];
884 struct nb8800_tx_desc *txd = &priv->tx_descs[0];
885 int retry = 5;
886 u32 txcr;
887 u32 rxcr;
888 int err;
889 unsigned int i;
890
891 /* wait for tx to finish */
892 err = readl_poll_timeout_atomic(priv->base + NB8800_TXC_CR, txcr,
893 !(txcr & TCR_EN) &&
894 priv->tx_done == priv->tx_next,
895 1000, 1000000);
896 if (err)
897 return err;
898
899 /* The rx DMA only stops if it reaches the end of chain.
900 * To make this happen, we set the EOC flag on all rx
901 * descriptors, put the device in loopback mode, and send
902 * a few dummy frames. The interrupt handler will ignore
903 * these since NAPI is disabled and no real frames are in
904 * the tx queue.
905 */
906
907 for (i = 0; i < RX_DESC_COUNT; i++)
908 priv->rx_descs[i].desc.config |= DESC_EOC;
909
910 txd->desc[0].s_addr =
911 txb->dma_desc + offsetof(struct nb8800_tx_desc, buf);
912 txd->desc[0].config = DESC_BTS(2) | DESC_DS | DESC_EOF | DESC_EOC | 8;
913 memset(txd->buf, 0, sizeof(txd->buf));
914
915 nb8800_mac_af(dev, false);
916 nb8800_setb(priv, NB8800_MAC_MODE, LOOPBACK_EN);
917
918 do {
919 nb8800_writel(priv, NB8800_TX_DESC_ADDR, txb->dma_desc);
920 wmb();
921 nb8800_writel(priv, NB8800_TXC_CR, txcr | TCR_EN);
922
923 err = readl_poll_timeout_atomic(priv->base + NB8800_RXC_CR,
924 rxcr, !(rxcr & RCR_EN),
925 1000, 100000);
926 } while (err && --retry);
927
928 nb8800_mac_af(dev, true);
929 nb8800_clearb(priv, NB8800_MAC_MODE, LOOPBACK_EN);
930 nb8800_dma_reset(dev);
931
932 return retry ? 0 : -ETIMEDOUT;
933}
934
935static void nb8800_pause_adv(struct net_device *dev)
936{
937 struct nb8800_priv *priv = netdev_priv(dev);
938 u32 adv = 0;
939
940 if (!priv->phydev)
941 return;
942
943 if (priv->pause_rx)
944 adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
945 if (priv->pause_tx)
946 adv ^= ADVERTISED_Asym_Pause;
947
948 priv->phydev->supported |= adv;
949 priv->phydev->advertising |= adv;
950}
951
952static int nb8800_open(struct net_device *dev)
953{
954 struct nb8800_priv *priv = netdev_priv(dev);
955 int err;
956
957 /* clear any pending interrupts */
958 nb8800_writel(priv, NB8800_RXC_SR, 0xf);
959 nb8800_writel(priv, NB8800_TXC_SR, 0xf);
960
961 err = nb8800_dma_init(dev);
962 if (err)
963 return err;
964
965 err = request_irq(dev->irq, nb8800_irq, 0, dev_name(&dev->dev), dev);
966 if (err)
967 goto err_free_dma;
968
969 nb8800_mac_rx(dev, true);
970 nb8800_mac_tx(dev, true);
971
972 priv->phydev = of_phy_connect(dev, priv->phy_node,
973 nb8800_link_reconfigure, 0,
974 priv->phy_mode);
975 if (!priv->phydev)
976 goto err_free_irq;
977
978 nb8800_pause_adv(dev);
979
980 netdev_reset_queue(dev);
981 napi_enable(&priv->napi);
982 netif_start_queue(dev);
983
984 nb8800_start_rx(dev);
985 phy_start(priv->phydev);
986
987 return 0;
988
989err_free_irq:
990 free_irq(dev->irq, dev);
991err_free_dma:
992 nb8800_dma_free(dev);
993
994 return err;
995}
996
997static int nb8800_stop(struct net_device *dev)
998{
999 struct nb8800_priv *priv = netdev_priv(dev);
1000
1001 phy_stop(priv->phydev);
1002
1003 netif_stop_queue(dev);
1004 napi_disable(&priv->napi);
1005
1006 nb8800_dma_stop(dev);
1007 nb8800_mac_rx(dev, false);
1008 nb8800_mac_tx(dev, false);
1009
1010 phy_disconnect(priv->phydev);
1011 priv->phydev = NULL;
1012
1013 free_irq(dev->irq, dev);
1014
1015 nb8800_dma_free(dev);
1016
1017 return 0;
1018}
1019
1020static int nb8800_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1021{
1022 struct nb8800_priv *priv = netdev_priv(dev);
1023
1024 return phy_mii_ioctl(priv->phydev, rq, cmd);
1025}
1026
1027static const struct net_device_ops nb8800_netdev_ops = {
1028 .ndo_open = nb8800_open,
1029 .ndo_stop = nb8800_stop,
1030 .ndo_start_xmit = nb8800_xmit,
1031 .ndo_set_mac_address = nb8800_set_mac_address,
1032 .ndo_set_rx_mode = nb8800_set_rx_mode,
1033 .ndo_do_ioctl = nb8800_ioctl,
1034 .ndo_change_mtu = eth_change_mtu,
1035 .ndo_validate_addr = eth_validate_addr,
1036};
1037
1038static int nb8800_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1039{
1040 struct nb8800_priv *priv = netdev_priv(dev);
1041
1042 if (!priv->phydev)
1043 return -ENODEV;
1044
1045 return phy_ethtool_gset(priv->phydev, cmd);
1046}
1047
1048static int nb8800_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1049{
1050 struct nb8800_priv *priv = netdev_priv(dev);
1051
1052 if (!priv->phydev)
1053 return -ENODEV;
1054
1055 return phy_ethtool_sset(priv->phydev, cmd);
1056}
1057
1058static int nb8800_nway_reset(struct net_device *dev)
1059{
1060 struct nb8800_priv *priv = netdev_priv(dev);
1061
1062 if (!priv->phydev)
1063 return -ENODEV;
1064
1065 return genphy_restart_aneg(priv->phydev);
1066}
1067
1068static void nb8800_get_pauseparam(struct net_device *dev,
1069 struct ethtool_pauseparam *pp)
1070{
1071 struct nb8800_priv *priv = netdev_priv(dev);
1072
1073 pp->autoneg = priv->pause_aneg;
1074 pp->rx_pause = priv->pause_rx;
1075 pp->tx_pause = priv->pause_tx;
1076}
1077
1078static int nb8800_set_pauseparam(struct net_device *dev,
1079 struct ethtool_pauseparam *pp)
1080{
1081 struct nb8800_priv *priv = netdev_priv(dev);
1082
1083 priv->pause_aneg = pp->autoneg;
1084 priv->pause_rx = pp->rx_pause;
1085 priv->pause_tx = pp->tx_pause;
1086
1087 nb8800_pause_adv(dev);
1088
1089 if (!priv->pause_aneg)
1090 nb8800_pause_config(dev);
1091 else if (priv->phydev)
1092 phy_start_aneg(priv->phydev);
1093
1094 return 0;
1095}
1096
1097static const char nb8800_stats_names[][ETH_GSTRING_LEN] = {
1098 "rx_bytes_ok",
1099 "rx_frames_ok",
1100 "rx_undersize_frames",
1101 "rx_fragment_frames",
1102 "rx_64_byte_frames",
1103 "rx_127_byte_frames",
1104 "rx_255_byte_frames",
1105 "rx_511_byte_frames",
1106 "rx_1023_byte_frames",
1107 "rx_max_size_frames",
1108 "rx_oversize_frames",
1109 "rx_bad_fcs_frames",
1110 "rx_broadcast_frames",
1111 "rx_multicast_frames",
1112 "rx_control_frames",
1113 "rx_pause_frames",
1114 "rx_unsup_control_frames",
1115 "rx_align_error_frames",
1116 "rx_overrun_frames",
1117 "rx_jabber_frames",
1118 "rx_bytes",
1119 "rx_frames",
1120
1121 "tx_bytes_ok",
1122 "tx_frames_ok",
1123 "tx_64_byte_frames",
1124 "tx_127_byte_frames",
1125 "tx_255_byte_frames",
1126 "tx_511_byte_frames",
1127 "tx_1023_byte_frames",
1128 "tx_max_size_frames",
1129 "tx_oversize_frames",
1130 "tx_broadcast_frames",
1131 "tx_multicast_frames",
1132 "tx_control_frames",
1133 "tx_pause_frames",
1134 "tx_underrun_frames",
1135 "tx_single_collision_frames",
1136 "tx_multi_collision_frames",
1137 "tx_deferred_collision_frames",
1138 "tx_late_collision_frames",
1139 "tx_excessive_collision_frames",
1140 "tx_bytes",
1141 "tx_frames",
1142 "tx_collisions",
1143};
1144
1145#define NB8800_NUM_STATS ARRAY_SIZE(nb8800_stats_names)
1146
1147static int nb8800_get_sset_count(struct net_device *dev, int sset)
1148{
1149 if (sset == ETH_SS_STATS)
1150 return NB8800_NUM_STATS;
1151
1152 return -EOPNOTSUPP;
1153}
1154
1155static void nb8800_get_strings(struct net_device *dev, u32 sset, u8 *buf)
1156{
1157 if (sset == ETH_SS_STATS)
1158 memcpy(buf, &nb8800_stats_names, sizeof(nb8800_stats_names));
1159}
1160
1161static u32 nb8800_read_stat(struct net_device *dev, int index)
1162{
1163 struct nb8800_priv *priv = netdev_priv(dev);
1164
1165 nb8800_writeb(priv, NB8800_STAT_INDEX, index);
1166
1167 return nb8800_readl(priv, NB8800_STAT_DATA);
1168}
1169
1170static void nb8800_get_ethtool_stats(struct net_device *dev,
1171 struct ethtool_stats *estats, u64 *st)
1172{
1173 unsigned int i;
1174 u32 rx, tx;
1175
1176 for (i = 0; i < NB8800_NUM_STATS / 2; i++) {
1177 rx = nb8800_read_stat(dev, i);
1178 tx = nb8800_read_stat(dev, i | 0x80);
1179 st[i] = rx;
1180 st[i + NB8800_NUM_STATS / 2] = tx;
1181 }
1182}
1183
1184static const struct ethtool_ops nb8800_ethtool_ops = {
1185 .get_settings = nb8800_get_settings,
1186 .set_settings = nb8800_set_settings,
1187 .nway_reset = nb8800_nway_reset,
1188 .get_link = ethtool_op_get_link,
1189 .get_pauseparam = nb8800_get_pauseparam,
1190 .set_pauseparam = nb8800_set_pauseparam,
1191 .get_sset_count = nb8800_get_sset_count,
1192 .get_strings = nb8800_get_strings,
1193 .get_ethtool_stats = nb8800_get_ethtool_stats,
1194};
1195
1196static int nb8800_hw_init(struct net_device *dev)
1197{
1198 struct nb8800_priv *priv = netdev_priv(dev);
1199 u32 val;
1200
1201 val = TX_RETRY_EN | TX_PAD_EN | TX_APPEND_FCS;
1202 nb8800_writeb(priv, NB8800_TX_CTL1, val);
1203
1204 /* Collision retry count */
1205 nb8800_writeb(priv, NB8800_TX_CTL2, 5);
1206
1207 val = RX_PAD_STRIP | RX_AF_EN;
1208 nb8800_writeb(priv, NB8800_RX_CTL, val);
1209
1210 /* Chosen by fair dice roll */
1211 nb8800_writeb(priv, NB8800_RANDOM_SEED, 4);
1212
1213 /* TX cycles per deferral period */
1214 nb8800_writeb(priv, NB8800_TX_SDP, 12);
1215
1216 /* The following three threshold values have been
1217 * experimentally determined for good results.
1218 */
1219
1220 /* RX/TX FIFO threshold for partial empty (64-bit entries) */
1221 nb8800_writeb(priv, NB8800_PE_THRESHOLD, 0);
1222
1223 /* RX/TX FIFO threshold for partial full (64-bit entries) */
1224 nb8800_writeb(priv, NB8800_PF_THRESHOLD, 255);
1225
1226 /* Buffer size for transmit (64-bit entries) */
1227 nb8800_writeb(priv, NB8800_TX_BUFSIZE, 64);
1228
1229 /* Configure tx DMA */
1230
1231 val = nb8800_readl(priv, NB8800_TXC_CR);
1232 val &= TCR_LE; /* keep endian setting */
1233 val |= TCR_DM; /* DMA descriptor mode */
1234 val |= TCR_RS; /* automatically store tx status */
1235 val |= TCR_DIE; /* interrupt on DMA chain completion */
1236 val |= TCR_TFI(7); /* interrupt after 7 frames transmitted */
1237 val |= TCR_BTS(2); /* 32-byte bus transaction size */
1238 nb8800_writel(priv, NB8800_TXC_CR, val);
1239
1240 /* TX complete interrupt after 10 ms or 7 frames (see above) */
1241 val = clk_get_rate(priv->clk) / 100;
1242 nb8800_writel(priv, NB8800_TX_ITR, val);
1243
1244 /* Configure rx DMA */
1245
1246 val = nb8800_readl(priv, NB8800_RXC_CR);
1247 val &= RCR_LE; /* keep endian setting */
1248 val |= RCR_DM; /* DMA descriptor mode */
1249 val |= RCR_RS; /* automatically store rx status */
1250 val |= RCR_DIE; /* interrupt at end of DMA chain */
1251 val |= RCR_RFI(7); /* interrupt after 7 frames received */
1252 val |= RCR_BTS(2); /* 32-byte bus transaction size */
1253 nb8800_writel(priv, NB8800_RXC_CR, val);
1254
1255 /* The rx interrupt can fire before the DMA has completed
1256 * unless a small delay is added. 50 us is hopefully enough.
1257 */
1258 priv->rx_itr_irq = clk_get_rate(priv->clk) / 20000;
1259
1260 /* In NAPI poll mode we want to disable interrupts, but the
1261 * hardware does not permit this. Delay 10 ms instead.
1262 */
1263 priv->rx_itr_poll = clk_get_rate(priv->clk) / 100;
1264
1265 nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_irq);
1266
1267 priv->rx_dma_config = RX_BUF_SIZE | DESC_BTS(2) | DESC_DS | DESC_EOF;
1268
1269 /* Flow control settings */
1270
1271 /* Pause time of 0.1 ms */
1272 val = 100000 / 512;
1273 nb8800_writeb(priv, NB8800_PQ1, val >> 8);
1274 nb8800_writeb(priv, NB8800_PQ2, val & 0xff);
1275
1276 /* Auto-negotiate by default */
1277 priv->pause_aneg = true;
1278 priv->pause_rx = true;
1279 priv->pause_tx = true;
1280
1281 nb8800_mc_init(dev, 0);
1282
1283 return 0;
1284}
1285
1286static int nb8800_tangox_init(struct net_device *dev)
1287{
1288 struct nb8800_priv *priv = netdev_priv(dev);
1289 u32 pad_mode = PAD_MODE_MII;
1290
1291 switch (priv->phy_mode) {
1292 case PHY_INTERFACE_MODE_MII:
1293 case PHY_INTERFACE_MODE_GMII:
1294 pad_mode = PAD_MODE_MII;
1295 break;
1296
1297 case PHY_INTERFACE_MODE_RGMII:
1298 pad_mode = PAD_MODE_RGMII;
1299 break;
1300
1301 case PHY_INTERFACE_MODE_RGMII_TXID:
1302 pad_mode = PAD_MODE_RGMII | PAD_MODE_GTX_CLK_DELAY;
1303 break;
1304
1305 default:
1306 dev_err(dev->dev.parent, "unsupported phy mode %s\n",
1307 phy_modes(priv->phy_mode));
1308 return -EINVAL;
1309 }
1310
1311 nb8800_writeb(priv, NB8800_TANGOX_PAD_MODE, pad_mode);
1312
1313 return 0;
1314}
1315
1316static int nb8800_tangox_reset(struct net_device *dev)
1317{
1318 struct nb8800_priv *priv = netdev_priv(dev);
1319 int clk_div;
1320
1321 nb8800_writeb(priv, NB8800_TANGOX_RESET, 0);
1322 usleep_range(1000, 10000);
1323 nb8800_writeb(priv, NB8800_TANGOX_RESET, 1);
1324
1325 wmb(); /* ensure reset is cleared before proceeding */
1326
1327 clk_div = DIV_ROUND_UP(clk_get_rate(priv->clk), 2 * MAX_MDC_CLOCK);
1328 nb8800_writew(priv, NB8800_TANGOX_MDIO_CLKDIV, clk_div);
1329
1330 return 0;
1331}
1332
1333static const struct nb8800_ops nb8800_tangox_ops = {
1334 .init = nb8800_tangox_init,
1335 .reset = nb8800_tangox_reset,
1336};
1337
1338static int nb8800_tango4_init(struct net_device *dev)
1339{
1340 struct nb8800_priv *priv = netdev_priv(dev);
1341 int err;
1342
1343 err = nb8800_tangox_init(dev);
1344 if (err)
1345 return err;
1346
1347 /* On tango4 interrupt on DMA completion per frame works and gives
1348 * better performance despite generating more rx interrupts.
1349 */
1350
1351 /* Disable unnecessary interrupt on rx completion */
1352 nb8800_clearl(priv, NB8800_RXC_CR, RCR_RFI(7));
1353
1354 /* Request interrupt on descriptor DMA completion */
1355 priv->rx_dma_config |= DESC_ID;
1356
1357 return 0;
1358}
1359
1360static const struct nb8800_ops nb8800_tango4_ops = {
1361 .init = nb8800_tango4_init,
	.reset = nb8800_tangox_reset,
};

static const struct of_device_id nb8800_dt_ids[] = {
	{
		.compatible = "aurora,nb8800",
	},
	{
		.compatible = "sigma,smp8642-ethernet",
		.data = &nb8800_tangox_ops,
	},
	{
		.compatible = "sigma,smp8734-ethernet",
		.data = &nb8800_tango4_ops,
	},
	{ }
};

static int nb8800_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	const struct nb8800_ops *ops = NULL;
	struct nb8800_priv *priv;
	struct resource *res;
	struct net_device *dev;
	struct mii_bus *bus;
	const unsigned char *mac;
	void __iomem *base;
	int irq;
	int ret;

	match = of_match_device(nb8800_dt_ids, &pdev->dev);
	if (match)
		ops = match->data;

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(&pdev->dev, "No IRQ\n");
		return -EINVAL;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	dev_dbg(&pdev->dev, "AU-NB8800 Ethernet at %pa\n", &res->start);

	dev = alloc_etherdev(sizeof(*priv));
	if (!dev)
		return -ENOMEM;

	platform_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	priv = netdev_priv(dev);
	priv->base = base;

	priv->phy_mode = of_get_phy_mode(pdev->dev.of_node);
	if (priv->phy_mode < 0)
		priv->phy_mode = PHY_INTERFACE_MODE_RGMII;

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk)) {
		dev_err(&pdev->dev, "failed to get clock\n");
		ret = PTR_ERR(priv->clk);
		goto err_free_dev;
	}

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		goto err_free_dev;

	spin_lock_init(&priv->tx_lock);

	if (ops && ops->reset) {
		ret = ops->reset(dev);
		if (ret)
			goto err_free_dev;
	}

	bus = devm_mdiobus_alloc(&pdev->dev);
	if (!bus) {
		ret = -ENOMEM;
		goto err_disable_clk;
	}

	bus->name = "nb8800-mii";
	bus->read = nb8800_mdio_read;
	bus->write = nb8800_mdio_write;
	bus->parent = &pdev->dev;
	snprintf(bus->id, MII_BUS_ID_SIZE, "%lx.nb8800-mii",
		 (unsigned long)res->start);
	bus->priv = priv;

	ret = of_mdiobus_register(bus, pdev->dev.of_node);
	if (ret) {
		dev_err(&pdev->dev, "failed to register MII bus\n");
		goto err_disable_clk;
	}

	priv->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
	if (!priv->phy_node) {
		dev_err(&pdev->dev, "no PHY specified\n");
		ret = -ENODEV;
		goto err_free_bus;
	}

	priv->mii_bus = bus;

	ret = nb8800_hw_init(dev);
	if (ret)
		goto err_free_bus;

	if (ops && ops->init) {
		ret = ops->init(dev);
		if (ret)
			goto err_free_bus;
	}

	dev->netdev_ops = &nb8800_netdev_ops;
	dev->ethtool_ops = &nb8800_ethtool_ops;
	dev->flags |= IFF_MULTICAST;
	dev->irq = irq;

	mac = of_get_mac_address(pdev->dev.of_node);
	if (mac)
		ether_addr_copy(dev->dev_addr, mac);

	if (!is_valid_ether_addr(dev->dev_addr))
		eth_hw_addr_random(dev);

	nb8800_update_mac_addr(dev);

	netif_carrier_off(dev);

	ret = register_netdev(dev);
	if (ret) {
		netdev_err(dev, "failed to register netdev\n");
		goto err_free_dma;
	}

	netif_napi_add(dev, &priv->napi, nb8800_poll, NAPI_POLL_WEIGHT);

	netdev_info(dev, "MAC address %pM\n", dev->dev_addr);

	return 0;

err_free_dma:
	nb8800_dma_free(dev);
err_free_bus:
	mdiobus_unregister(bus);
err_disable_clk:
	clk_disable_unprepare(priv->clk);
err_free_dev:
	free_netdev(dev);

	return ret;
}

static int nb8800_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct nb8800_priv *priv = netdev_priv(ndev);

	unregister_netdev(ndev);

	mdiobus_unregister(priv->mii_bus);

	clk_disable_unprepare(priv->clk);

	nb8800_dma_free(ndev);
	free_netdev(ndev);

	return 0;
}

static struct platform_driver nb8800_driver = {
	.driver = {
		.name		= "nb8800",
		.of_match_table	= nb8800_dt_ids,
	},
	.probe	= nb8800_probe,
	.remove	= nb8800_remove,
};

module_platform_driver(nb8800_driver);

MODULE_DESCRIPTION("Aurora AU-NB8800 Ethernet driver");
MODULE_AUTHOR("Mans Rullgard <mans@mansr.com>");
MODULE_LICENSE("GPL");
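
As a side note on the probe flow above: each compatible string in nb8800_dt_ids can carry a SoC-specific ops structure in its .data field, which of_match_device() hands back at probe time. Below is a minimal, self-contained sketch of that pattern; the "vendor,demo" strings and demo_* names are hypothetical and not part of this driver.

#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct demo_ops {
	int (*init)(struct platform_device *pdev);
};

static int demo_a_init(struct platform_device *pdev)
{
	return 0;	/* variant-specific setup would go here */
}

static const struct demo_ops demo_a_ops = {
	.init = demo_a_init,
};

static const struct of_device_id demo_dt_ids[] = {
	{ .compatible = "vendor,plain-demo" },			/* no extra ops */
	{ .compatible = "vendor,demo-a", .data = &demo_a_ops },
	{ }
};
MODULE_DEVICE_TABLE(of, demo_dt_ids);

static int demo_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	const struct demo_ops *ops = NULL;

	/* A NULL match or NULL .data simply means "no variant hooks" */
	match = of_match_device(demo_dt_ids, &pdev->dev);
	if (match)
		ops = match->data;

	if (ops && ops->init)
		return ops->init(pdev);

	return 0;
}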
diff --git a/drivers/net/ethernet/aurora/nb8800.h b/drivers/net/ethernet/aurora/nb8800.h
new file mode 100644
index 000000000000..e5adbc2aac9f
--- /dev/null
+++ b/drivers/net/ethernet/aurora/nb8800.h
@@ -0,0 +1,316 @@
#ifndef _NB8800_H_
#define _NB8800_H_

#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/phy.h>
#include <linux/clk.h>
#include <linux/bitops.h>

#define RX_DESC_COUNT			256
#define TX_DESC_COUNT			256

#define NB8800_DESC_LOW			4

#define RX_BUF_SIZE			1552

#define RX_COPYBREAK			256
#define RX_COPYHDR			128

#define MAX_MDC_CLOCK			2500000

/* Stargate Solutions SSN8800 core registers */
#define NB8800_TX_CTL1			0x000
#define TX_TPD				BIT(5)
#define TX_APPEND_FCS			BIT(4)
#define TX_PAD_EN			BIT(3)
#define TX_RETRY_EN			BIT(2)
#define TX_EN				BIT(0)

#define NB8800_TX_CTL2			0x001

#define NB8800_RX_CTL			0x004
#define RX_BC_DISABLE			BIT(7)
#define RX_RUNT				BIT(6)
#define RX_AF_EN			BIT(5)
#define RX_PAUSE_EN			BIT(3)
#define RX_SEND_CRC			BIT(2)
#define RX_PAD_STRIP			BIT(1)
#define RX_EN				BIT(0)

#define NB8800_RANDOM_SEED		0x008
#define NB8800_TX_SDP			0x14
#define NB8800_TX_TPDP1			0x18
#define NB8800_TX_TPDP2			0x19
#define NB8800_SLOT_TIME		0x1c

#define NB8800_MDIO_CMD			0x020
#define MDIO_CMD_GO			BIT(31)
#define MDIO_CMD_WR			BIT(26)
#define MDIO_CMD_ADDR(x)		((x) << 21)
#define MDIO_CMD_REG(x)			((x) << 16)
#define MDIO_CMD_DATA(x)		((x) << 0)

#define NB8800_MDIO_STS			0x024
#define MDIO_STS_ERR			BIT(31)

#define NB8800_MC_ADDR(i)		(0x028 + (i))
#define NB8800_MC_INIT			0x02e
#define NB8800_UC_ADDR(i)		(0x03c + (i))

#define NB8800_MAC_MODE			0x044
#define RGMII_MODE			BIT(7)
#define HALF_DUPLEX			BIT(4)
#define BURST_EN			BIT(3)
#define LOOPBACK_EN			BIT(2)
#define GMAC_MODE			BIT(0)

#define NB8800_IC_THRESHOLD		0x050
#define NB8800_PE_THRESHOLD		0x051
#define NB8800_PF_THRESHOLD		0x052
#define NB8800_TX_BUFSIZE		0x054
#define NB8800_FIFO_CTL			0x056
#define NB8800_PQ1			0x060
#define NB8800_PQ2			0x061
#define NB8800_SRC_ADDR(i)		(0x06a + (i))
#define NB8800_STAT_DATA		0x078
#define NB8800_STAT_INDEX		0x07c
#define NB8800_STAT_CLEAR		0x07d

#define NB8800_SLEEP_MODE		0x07e
#define SLEEP_MODE			BIT(0)

#define NB8800_WAKEUP			0x07f
#define WAKEUP				BIT(0)

/* Aurora NB8800 host interface registers */
#define NB8800_TXC_CR			0x100
#define TCR_LK				BIT(12)
#define TCR_DS				BIT(11)
#define TCR_BTS(x)			(((x) & 0x7) << 8)
#define TCR_DIE				BIT(7)
#define TCR_TFI(x)			(((x) & 0x7) << 4)
#define TCR_LE				BIT(3)
#define TCR_RS				BIT(2)
#define TCR_DM				BIT(1)
#define TCR_EN				BIT(0)

#define NB8800_TXC_SR			0x104
#define TSR_DE				BIT(3)
#define TSR_DI				BIT(2)
#define TSR_TO				BIT(1)
#define TSR_TI				BIT(0)

#define NB8800_TX_SAR			0x108
#define NB8800_TX_DESC_ADDR		0x10c

#define NB8800_TX_REPORT_ADDR		0x110
#define TX_BYTES_TRANSFERRED(x)		(((x) >> 16) & 0xffff)
#define TX_FIRST_DEFERRAL		BIT(7)
#define TX_EARLY_COLLISIONS(x)		(((x) >> 3) & 0xf)
#define TX_LATE_COLLISION		BIT(2)
#define TX_PACKET_DROPPED		BIT(1)
#define TX_FIFO_UNDERRUN		BIT(0)
#define IS_TX_ERROR(r)			((r) & 0x07)

#define NB8800_TX_FIFO_SR		0x114
#define NB8800_TX_ITR			0x118

#define NB8800_RXC_CR			0x200
#define RCR_FL				BIT(13)
#define RCR_LK				BIT(12)
#define RCR_DS				BIT(11)
#define RCR_BTS(x)			(((x) & 7) << 8)
#define RCR_DIE				BIT(7)
#define RCR_RFI(x)			(((x) & 7) << 4)
#define RCR_LE				BIT(3)
#define RCR_RS				BIT(2)
#define RCR_DM				BIT(1)
#define RCR_EN				BIT(0)

#define NB8800_RXC_SR			0x204
#define RSR_DE				BIT(3)
#define RSR_DI				BIT(2)
#define RSR_RO				BIT(1)
#define RSR_RI				BIT(0)

#define NB8800_RX_SAR			0x208
#define NB8800_RX_DESC_ADDR		0x20c

#define NB8800_RX_REPORT_ADDR		0x210
#define RX_BYTES_TRANSFERRED(x)		(((x) >> 16) & 0xFFFF)
#define RX_MULTICAST_PKT		BIT(9)
#define RX_BROADCAST_PKT		BIT(8)
#define RX_LENGTH_ERR			BIT(7)
#define RX_FCS_ERR			BIT(6)
#define RX_RUNT_PKT			BIT(5)
#define RX_FIFO_OVERRUN			BIT(4)
#define RX_LATE_COLLISION		BIT(3)
#define RX_ALIGNMENT_ERROR		BIT(2)
#define RX_ERROR_MASK			0xfc
#define IS_RX_ERROR(r)			((r) & RX_ERROR_MASK)

#define NB8800_RX_FIFO_SR		0x214
#define NB8800_RX_ITR			0x218

/* Sigma Designs SMP86xx additional registers */
#define NB8800_TANGOX_PAD_MODE		0x400
#define PAD_MODE_MASK			0x7
#define PAD_MODE_MII			0x0
#define PAD_MODE_RGMII			0x1
#define PAD_MODE_GTX_CLK_INV		BIT(3)
#define PAD_MODE_GTX_CLK_DELAY		BIT(4)

#define NB8800_TANGOX_MDIO_CLKDIV	0x420
#define NB8800_TANGOX_RESET		0x424

/* Hardware DMA descriptor */
struct nb8800_dma_desc {
	u32				s_addr;	/* start address */
	u32				n_addr;	/* next descriptor address */
	u32				r_addr;	/* report address */
	u32				config;
} __aligned(8);

#define DESC_ID				BIT(23)
#define DESC_EOC			BIT(22)
#define DESC_EOF			BIT(21)
#define DESC_LK				BIT(20)
#define DESC_DS				BIT(19)
#define DESC_BTS(x)			(((x) & 0x7) << 16)

/* DMA descriptor and associated data for rx.
 * Allocated from coherent memory.
 */
struct nb8800_rx_desc {
	/* DMA descriptor */
	struct nb8800_dma_desc		desc;

	/* Status report filled in by hardware */
	u32				report;
};

/* Address of buffer on rx ring */
struct nb8800_rx_buf {
	struct page			*page;
	unsigned long			offset;
};

/* DMA descriptors and associated data for tx.
 * Allocated from coherent memory.
 */
struct nb8800_tx_desc {
	/* DMA descriptor.  The second descriptor is used if packet
	 * data is unaligned.
	 */
	struct nb8800_dma_desc		desc[2];

	/* Status report filled in by hardware */
	u32				report;

	/* Bounce buffer for initial unaligned part of packet */
	u8				buf[8] __aligned(8);
};

/* Packet in tx queue */
struct nb8800_tx_buf {
	/* Currently queued skb */
	struct sk_buff			*skb;

	/* DMA address of the first descriptor */
	dma_addr_t			dma_desc;

	/* DMA address of packet data */
	dma_addr_t			dma_addr;

	/* Length of DMA mapping, less than skb->len if alignment
	 * buffer is used.
	 */
	unsigned int			dma_len;

	/* Number of packets in chain starting here */
	unsigned int			chain_len;

	/* Packet chain ready to be submitted to hardware */
	bool				ready;
};

struct nb8800_priv {
	struct napi_struct		napi;

	void __iomem			*base;

	/* RX DMA descriptors */
	struct nb8800_rx_desc		*rx_descs;

	/* RX buffers referenced by DMA descriptors */
	struct nb8800_rx_buf		*rx_bufs;

	/* Current end of chain */
	u32				rx_eoc;

	/* Value for rx interrupt time register in NAPI interrupt mode */
	u32				rx_itr_irq;

	/* Value for rx interrupt time register in NAPI poll mode */
	u32				rx_itr_poll;

	/* Value for config field of rx DMA descriptors */
	u32				rx_dma_config;

	/* TX DMA descriptors */
	struct nb8800_tx_desc		*tx_descs;

	/* TX packet queue */
	struct nb8800_tx_buf		*tx_bufs;

	/* Number of free tx queue entries */
	atomic_t			tx_free;

	/* First free tx queue entry */
	u32				tx_next;

	/* Next buffer to transmit */
	u32				tx_queue;

	/* Start of current packet chain */
	struct nb8800_tx_buf		*tx_chain;

	/* Next buffer to reclaim */
	u32				tx_done;

	/* Lock for DMA activation */
	spinlock_t			tx_lock;

	struct mii_bus			*mii_bus;
	struct device_node		*phy_node;
	struct phy_device		*phydev;

	/* PHY connection type from DT */
	int				phy_mode;

	/* Current link status */
	int				speed;
	int				duplex;
	int				link;

	/* Pause settings */
	bool				pause_aneg;
	bool				pause_rx;
	bool				pause_tx;

	/* DMA base address of rx descriptors, see rx_descs above */
	dma_addr_t			rx_desc_dma;

	/* DMA base address of tx descriptors, see tx_descs above */
	dma_addr_t			tx_desc_dma;

	struct clk			*clk;
};

struct nb8800_ops {
	int				(*init)(struct net_device *dev);
	int				(*reset)(struct net_device *dev);
};

#endif /* _NB8800_H_ */
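
For orientation, the MDIO_CMD_* macros above OR together into a single command word written to NB8800_MDIO_CMD. A hedged sketch of how a register read might be encoded with them follows; the helper name is illustrative only, and the real MDIO routines live in nb8800.c, not here.

/* Illustrative only: encode a read of 'reg' on PHY 'addr' using the
 * field macros above. MDIO_CMD_WR selects a write, so a read simply
 * leaves it clear; MDIO_CMD_GO starts the transfer, and completion is
 * then polled via the GO bit with errors reported in NB8800_MDIO_STS.
 */
static u32 nb8800_encode_mdio_read(int addr, int reg)
{
	return MDIO_CMD_GO | MDIO_CMD_ADDR(addr) | MDIO_CMD_REG(reg);
}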
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index f8d7a2f06950..c82ab87fcbe8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3430,25 +3430,29 @@ static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
 	return rc;
 }
 
-#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
+/* VXLAN: 4 = 1 (for linear data BD) + 3 (2 for PBD and last BD) */
+#define BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS	4
+
+/* Regular: 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
+#define BNX2X_NUM_TSO_WIN_SUB_BDS	3
+
+#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
 /* check if packet requires linearization (packet is too fragmented)
    no need to check fragmentation if page size > 8K (there will be no
    violation to FW restrictions) */
 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
 			     u32 xmit_type)
 {
-	int to_copy = 0;
-	int hlen = 0;
-	int first_bd_sz = 0;
+	int first_bd_sz = 0, num_tso_win_sub = BNX2X_NUM_TSO_WIN_SUB_BDS;
+	int to_copy = 0, hlen = 0;
 
-	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
-	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
+	if (xmit_type & XMIT_GSO_ENC)
+		num_tso_win_sub = BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS;
 
+	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - num_tso_win_sub)) {
 		if (xmit_type & XMIT_GSO) {
 			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
-			/* Check if LSO packet needs to be copied:
-			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
-			int wnd_size = MAX_FETCH_BD - 3;
+			int wnd_size = MAX_FETCH_BD - num_tso_win_sub;
 			/* Number of windows to check */
 			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
 			int wnd_idx = 0;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index c9b036789184..2e611dc5f162 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -10139,8 +10139,8 @@ static void __bnx2x_del_vxlan_port(struct bnx2x *bp, u16 port)
 		DP(BNX2X_MSG_SP, "Invalid vxlan port\n");
 		return;
 	}
-	bp->vxlan_dst_port--;
-	if (bp->vxlan_dst_port)
+	bp->vxlan_dst_port_count--;
+	if (bp->vxlan_dst_port_count)
 		return;
 
 	if (netif_running(bp->dev)) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index db15c5ee09c5..07f5f239cb65 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2693,17 +2693,16 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
 	req.ver_upd = DRV_VER_UPD;
 
 	if (BNXT_PF(bp)) {
-		unsigned long vf_req_snif_bmap[4];
+		DECLARE_BITMAP(vf_req_snif_bmap, 256);
 		u32 *data = (u32 *)vf_req_snif_bmap;
 
-		memset(vf_req_snif_bmap, 0, 32);
+		memset(vf_req_snif_bmap, 0, sizeof(vf_req_snif_bmap));
 		for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++)
 			__set_bit(bnxt_vf_req_snif[i], vf_req_snif_bmap);
 
-		for (i = 0; i < 8; i++) {
-			req.vf_req_fwd[i] = cpu_to_le32(*data);
-			data++;
-		}
+		for (i = 0; i < 8; i++)
+			req.vf_req_fwd[i] = cpu_to_le32(data[i]);
+
 		req.enables |=
 			cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
 	}
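
The hunk above swaps a hand-sized unsigned long array for DECLARE_BITMAP, so that sizeof() covers the full 256-bit map instead of a hard-coded 32 bytes that silently depended on the word size. A small standalone sketch of the idiom, with hypothetical names:

#include <linux/bitmap.h>
#include <linux/bitops.h>

/* DECLARE_BITMAP(name, bits) expands to
 *	unsigned long name[BITS_TO_LONGS(bits)];
 * so sizeof(name) is always the whole map, on 32- and 64-bit alike.
 */
static bool demo_bitmap(void)
{
	DECLARE_BITMAP(map, 256);

	bitmap_zero(map, 256);		/* same effect as the memset above */
	__set_bit(0x15, map);		/* mark one forwarded command */

	return test_bit(0x15, map);	/* true: bit 0x15 is set */
}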
@@ -3625,6 +3624,7 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
 	pf->fw_fid = le16_to_cpu(resp->fid);
 	pf->port_id = le16_to_cpu(resp->port_id);
 	memcpy(pf->mac_addr, resp->perm_mac_address, ETH_ALEN);
+	memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN);
 	pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
 	pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
 	pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
@@ -3648,8 +3648,11 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
 
 	vf->fw_fid = le16_to_cpu(resp->fid);
 	memcpy(vf->mac_addr, resp->perm_mac_address, ETH_ALEN);
-	if (!is_valid_ether_addr(vf->mac_addr))
-		random_ether_addr(vf->mac_addr);
+	if (is_valid_ether_addr(vf->mac_addr))
+		/* overwrite netdev dev_adr with admin VF MAC */
+		memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
+	else
+		random_ether_addr(bp->dev->dev_addr);
 
 	vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
 	vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
@@ -3880,6 +3883,8 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
 #endif
 }
 
+static int bnxt_cfg_rx_mode(struct bnxt *);
+
 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
 {
 	int rc = 0;
@@ -3946,11 +3951,9 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
 	bp->vnic_info[0].rx_mask |=
 			CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
 
-	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
-	if (rc) {
-		netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n", rc);
+	rc = bnxt_cfg_rx_mode(bp);
+	if (rc)
 		goto err_out;
-	}
 
 	rc = bnxt_hwrm_set_coal(bp);
 	if (rc)
@@ -4599,7 +4602,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
 		bp->nge_port_cnt = 1;
 	}
 
-	bp->state = BNXT_STATE_OPEN;
+	set_bit(BNXT_STATE_OPEN, &bp->state);
 	bnxt_enable_int(bp);
 	/* Enable TX queues */
 	bnxt_tx_enable(bp);
@@ -4675,8 +4678,10 @@ int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
 	/* Change device state to avoid TX queue wake up's */
 	bnxt_tx_disable(bp);
 
-	bp->state = BNXT_STATE_CLOSED;
-	cancel_work_sync(&bp->sp_task);
+	clear_bit(BNXT_STATE_OPEN, &bp->state);
+	smp_mb__after_atomic();
+	while (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state))
+		msleep(20);
 
 	/* Flush rings before disabling interrupts */
 	bnxt_shutdown_nic(bp, irq_re_init);
@@ -4865,7 +4870,7 @@ static void bnxt_set_rx_mode(struct net_device *dev)
 	}
 }
 
-static void bnxt_cfg_rx_mode(struct bnxt *bp)
+static int bnxt_cfg_rx_mode(struct bnxt *bp)
 {
 	struct net_device *dev = bp->dev;
 	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
@@ -4914,6 +4919,7 @@ static void bnxt_cfg_rx_mode(struct bnxt *bp)
 			netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
 				   rc);
 			vnic->uc_filter_count = i;
+			return rc;
 		}
 	}
 
@@ -4922,6 +4928,8 @@ skip_uc:
 	if (rc)
 		netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
 			   rc);
+
+	return rc;
 }
 
 static netdev_features_t bnxt_fix_features(struct net_device *dev,
@@ -5023,8 +5031,10 @@ static void bnxt_dbg_dump_states(struct bnxt *bp)
 static void bnxt_reset_task(struct bnxt *bp)
 {
 	bnxt_dbg_dump_states(bp);
-	if (netif_running(bp->dev))
-		bnxt_tx_disable(bp); /* prevent tx timout again */
+	if (netif_running(bp->dev)) {
+		bnxt_close_nic(bp, false, false);
+		bnxt_open_nic(bp, false, false);
+	}
 }
 
 static void bnxt_tx_timeout(struct net_device *dev)
@@ -5074,8 +5084,12 @@ static void bnxt_sp_task(struct work_struct *work)
 	struct bnxt *bp = container_of(work, struct bnxt, sp_task);
 	int rc;
 
-	if (bp->state != BNXT_STATE_OPEN)
+	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
+	smp_mb__after_atomic();
+	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
+		clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
 		return;
+	}
 
 	if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
 		bnxt_cfg_rx_mode(bp);
@@ -5099,8 +5113,19 @@ static void bnxt_sp_task(struct work_struct *work)
 		bnxt_hwrm_tunnel_dst_port_free(
 			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
 	}
-	if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
+	if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) {
+		/* bnxt_reset_task() calls bnxt_close_nic() which waits
+		 * for BNXT_STATE_IN_SP_TASK to clear.
+		 */
+		clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
+		rtnl_lock();
 		bnxt_reset_task(bp);
+		set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
+		rtnl_unlock();
+	}
+
+	smp_mb__before_atomic();
+	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
 }
 
 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
@@ -5179,7 +5204,7 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
 	bp->timer.function = bnxt_timer;
 	bp->current_interval = BNXT_TIMER_INTERVAL;
 
-	bp->state = BNXT_STATE_CLOSED;
+	clear_bit(BNXT_STATE_OPEN, &bp->state);
 
 	return 0;
 
@@ -5212,13 +5237,27 @@ init_err:
 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
 {
 	struct sockaddr *addr = p;
+	struct bnxt *bp = netdev_priv(dev);
+	int rc = 0;
 
 	if (!is_valid_ether_addr(addr->sa_data))
 		return -EADDRNOTAVAIL;
 
+#ifdef CONFIG_BNXT_SRIOV
+	if (BNXT_VF(bp) && is_valid_ether_addr(bp->vf.mac_addr))
+		return -EADDRNOTAVAIL;
+#endif
+
+	if (ether_addr_equal(addr->sa_data, dev->dev_addr))
+		return 0;
+
 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+	if (netif_running(dev)) {
+		bnxt_close_nic(bp, false, false);
+		rc = bnxt_open_nic(bp, false, false);
+	}
 
-	return 0;
+	return rc;
 }
 
 /* rtnl_lock held */
@@ -5686,15 +5725,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	bnxt_set_tpa_flags(bp);
 	bnxt_set_ring_params(bp);
 	dflt_rings = netif_get_num_default_rss_queues();
-	if (BNXT_PF(bp)) {
-		memcpy(dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
+	if (BNXT_PF(bp))
 		bp->pf.max_irqs = max_irqs;
-	} else {
 #if defined(CONFIG_BNXT_SRIOV)
-		memcpy(dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
+	else
 		bp->vf.max_irqs = max_irqs;
 #endif
-	}
 	bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings);
 	bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
 	bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 674bc5159b91..f199f4cc8ffe 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -925,9 +925,9 @@ struct bnxt {
 
 	struct timer_list	timer;
 
-	int			state;
-#define BNXT_STATE_CLOSED	0
-#define BNXT_STATE_OPEN		1
+	unsigned long		state;
+#define BNXT_STATE_OPEN		0
+#define BNXT_STATE_IN_SP_TASK	1
 
 	struct bnxt_irq	*irq_tbl;
 	u8			mac_addr[ETH_ALEN];
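
The int state / enum pair becomes an unsigned long bit array here because set_bit(), clear_bit() and test_bit() operate atomically on unsigned long words, which is what lets bnxt_close_nic() and bnxt_sp_task() synchronize without a lock. A minimal sketch of that handshake, reduced to the two bits involved; the bit names mirror the patch, the surrounding struct is hypothetical:

#include <linux/bitops.h>
#include <linux/delay.h>

#define DEMO_STATE_OPEN		0
#define DEMO_STATE_IN_SP_TASK	1

struct demo { unsigned long state; };

static void demo_close(struct demo *d)
{
	clear_bit(DEMO_STATE_OPEN, &d->state);
	smp_mb__after_atomic();		/* order the clear before the poll */
	while (test_bit(DEMO_STATE_IN_SP_TASK, &d->state))
		msleep(20);		/* wait for the worker to drain */
}

static void demo_worker(struct demo *d)
{
	set_bit(DEMO_STATE_IN_SP_TASK, &d->state);
	smp_mb__after_atomic();
	if (!test_bit(DEMO_STATE_OPEN, &d->state)) {
		clear_bit(DEMO_STATE_IN_SP_TASK, &d->state);
		return;			/* device closed under us */
	}
	/* ... slow-path work runs here while OPEN is guaranteed ... */
	smp_mb__before_atomic();
	clear_bit(DEMO_STATE_IN_SP_TASK, &d->state);
}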
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index f4cf68861069..ea044bbcd384 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -21,7 +21,7 @@
 #ifdef CONFIG_BNXT_SRIOV
 static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
 {
-	if (bp->state != BNXT_STATE_OPEN) {
+	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
 		netdev_err(bp->dev, "vf ndo called though PF is down\n");
 		return -EINVAL;
 	}
@@ -804,10 +804,9 @@ void bnxt_update_vf_mac(struct bnxt *bp)
 	if (!is_valid_ether_addr(resp->perm_mac_address))
 		goto update_vf_mac_exit;
 
-	if (ether_addr_equal(resp->perm_mac_address, bp->vf.mac_addr))
-		goto update_vf_mac_exit;
-
-	memcpy(bp->vf.mac_addr, resp->perm_mac_address, ETH_ALEN);
+	if (!ether_addr_equal(resp->perm_mac_address, bp->vf.mac_addr))
+		memcpy(bp->vf.mac_addr, resp->perm_mac_address, ETH_ALEN);
+	/* overwrite netdev dev_adr with admin VF MAC */
 	memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
 update_vf_mac_exit:
 	mutex_unlock(&bp->hwrm_cmd_lock);
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 88c1e1a834f8..169059c92f80 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -1682,6 +1682,8 @@ static void macb_init_hw(struct macb *bp)
1682 macb_set_hwaddr(bp); 1682 macb_set_hwaddr(bp);
1683 1683
1684 config = macb_mdc_clk_div(bp); 1684 config = macb_mdc_clk_div(bp);
1685 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
1686 config |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
1685 config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */ 1687 config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */
1686 config |= MACB_BIT(PAE); /* PAuse Enable */ 1688 config |= MACB_BIT(PAE); /* PAuse Enable */
1687 config |= MACB_BIT(DRFCS); /* Discard Rx FCS */ 1689 config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
@@ -2416,6 +2418,8 @@ static int macb_init(struct platform_device *pdev)
 	/* Set MII management clock divider */
 	val = macb_mdc_clk_div(bp);
 	val |= macb_dbw(bp);
+	if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
+		val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
 	macb_writel(bp, NCFGR, val);
 
 	return 0;
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 6e1faea00ca8..d83b0db77821 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -215,12 +215,17 @@
 /* GEM specific NCFGR bitfields. */
 #define GEM_GBE_OFFSET		10 /* Gigabit mode enable */
 #define GEM_GBE_SIZE		1
+#define GEM_PCSSEL_OFFSET	11
+#define GEM_PCSSEL_SIZE		1
 #define GEM_CLK_OFFSET		18 /* MDC clock division */
 #define GEM_CLK_SIZE		3
 #define GEM_DBW_OFFSET		21 /* Data bus width */
 #define GEM_DBW_SIZE		2
 #define GEM_RXCOEN_OFFSET	24
 #define GEM_RXCOEN_SIZE		1
+#define GEM_SGMIIEN_OFFSET	27
+#define GEM_SGMIIEN_SIZE	1
+
 
 /* Constants for data bus width. */
 #define GEM_DBW32		0 /* 32 bit AMBA AHB data bus width */
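
The new PCSSEL/SGMIIEN offsets feed the token-pasting bit helpers used by the macb.c hunks above (GEM_BIT(SGMIIEN) and friends). A sketch of how those helpers resolve, assuming the usual macb.h definition of GEM_BIT; that definition is not shown in this diff:

/* macb.h builds single-bit fields from the _OFFSET constants, roughly: */
#define GEM_BIT(name)	(1 << GEM_##name##_OFFSET)

/* With the offsets added above, this makes
 *	GEM_BIT(PCSSEL)  == (1 << 11)
 *	GEM_BIT(SGMIIEN) == (1 << 27)
 * so the SGMII setup in macb_init_hw() expands to
 *	config |= (1 << 27) | (1 << 11);
 */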
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index d3950b20feb9..39ca6744a4e6 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -120,10 +120,9 @@
  * Calculated for SCLK of 700Mhz
  * value written should be a 1/16th of what is expected
  *
- * 1 tick per 0.05usec = value of 2.2
- * This 10% would be covered in CQ timer thresh value
+ * 1 tick per 0.025usec
  */
-#define NICPF_CLK_PER_INT_TICK		2
+#define NICPF_CLK_PER_INT_TICK		1
 
 /* Time to wait before we decide that a SQ is stuck.
  *
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index c561fdcb79a7..5f24d11cb16a 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -615,6 +615,21 @@ static int nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk)
 	return 0;
 }
 
+static void nic_enable_vf(struct nicpf *nic, int vf, bool enable)
+{
+	int bgx, lmac;
+
+	nic->vf_enabled[vf] = enable;
+
+	if (vf >= nic->num_vf_en)
+		return;
+
+	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+
+	bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, enable);
+}
+
 /* Interrupt handler to handle mailbox messages from VFs */
 static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
 {
@@ -714,14 +729,14 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
 		break;
 	case NIC_MBOX_MSG_CFG_DONE:
 		/* Last message of VF config msg sequence */
-		nic->vf_enabled[vf] = true;
+		nic_enable_vf(nic, vf, true);
 		goto unlock;
 	case NIC_MBOX_MSG_SHUTDOWN:
 		/* First msg in VF teardown sequence */
-		nic->vf_enabled[vf] = false;
 		if (vf >= nic->num_vf_en)
 			nic->sqs_used[vf - nic->num_vf_en] = false;
 		nic->pqs_vf[vf] = 0;
+		nic_enable_vf(nic, vf, false);
 		break;
 	case NIC_MBOX_MSG_ALLOC_SQS:
 		nic_alloc_sqs(nic, &mbx.sqs_alloc);
@@ -1074,8 +1089,7 @@ static void nic_remove(struct pci_dev *pdev)
 
 	if (nic->check_link) {
 		/* Destroy work Queue */
-		cancel_delayed_work(&nic->dwork);
-		flush_workqueue(nic->check_link);
+		cancel_delayed_work_sync(&nic->dwork);
 		destroy_workqueue(nic->check_link);
 	}
 
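
Both this hunk and the thunder_bgx.c hunk further down replace a cancel_delayed_work() + flush_workqueue() pair with cancel_delayed_work_sync(): the plain cancel does not wait for a handler that is already running, and flushing the queue afterwards still races with a handler that re-queues itself. A minimal teardown sketch with hypothetical names:

#include <linux/workqueue.h>

struct demo {
	struct workqueue_struct *wq;
	struct delayed_work dwork;
};

static void demo_teardown(struct demo *d)
{
	/* Waits for a running handler to finish and blocks re-arming,
	 * so the workqueue can then be destroyed safely.
	 */
	cancel_delayed_work_sync(&d->dwork);
	destroy_workqueue(d->wq);
}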
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index af54c10945c2..a12b2e38cf61 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -112,6 +112,13 @@ static int nicvf_get_settings(struct net_device *netdev,
 
 	cmd->supported = 0;
 	cmd->transceiver = XCVR_EXTERNAL;
+
+	if (!nic->link_up) {
+		cmd->duplex = DUPLEX_UNKNOWN;
+		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+		return 0;
+	}
+
 	if (nic->speed <= 1000) {
 		cmd->port = PORT_MII;
 		cmd->autoneg = AUTONEG_ENABLE;
@@ -125,6 +132,13 @@ static int nicvf_get_settings(struct net_device *netdev,
 	return 0;
 }
 
+static u32 nicvf_get_link(struct net_device *netdev)
+{
+	struct nicvf *nic = netdev_priv(netdev);
+
+	return nic->link_up;
+}
+
 static void nicvf_get_drvinfo(struct net_device *netdev,
 			      struct ethtool_drvinfo *info)
 {
@@ -660,7 +674,7 @@ static int nicvf_set_channels(struct net_device *dev,
 
 static const struct ethtool_ops nicvf_ethtool_ops = {
 	.get_settings		= nicvf_get_settings,
-	.get_link		= ethtool_op_get_link,
+	.get_link		= nicvf_get_link,
 	.get_drvinfo		= nicvf_get_drvinfo,
 	.get_msglevel		= nicvf_get_msglevel,
 	.set_msglevel		= nicvf_set_msglevel,
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 7f709cbdcd87..dde8dc720cd3 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -1057,6 +1057,7 @@ int nicvf_stop(struct net_device *netdev)
1057 1057
1058 netif_carrier_off(netdev); 1058 netif_carrier_off(netdev);
1059 netif_tx_stop_all_queues(nic->netdev); 1059 netif_tx_stop_all_queues(nic->netdev);
1060 nic->link_up = false;
1060 1061
1061 /* Teardown secondary qsets first */ 1062 /* Teardown secondary qsets first */
1062 if (!nic->sqs_mode) { 1063 if (!nic->sqs_mode) {
@@ -1211,9 +1212,6 @@ int nicvf_open(struct net_device *netdev)
 	nic->drv_stats.txq_stop = 0;
 	nic->drv_stats.txq_wake = 0;
 
-	netif_carrier_on(netdev);
-	netif_tx_start_all_queues(netdev);
-
 	return 0;
 cleanup:
 	nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index e404ea837727..206b6a71a545 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -592,7 +592,7 @@ void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
 	/* Set threshold value for interrupt generation */
 	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
 	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2,
-			      qidx, nic->cq_coalesce_usecs);
+			      qidx, CMP_QUEUE_TIMER_THRESH);
 }
 
 /* Configures transmit queue */
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index fb4957d09914..033e8306e91c 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -76,7 +76,7 @@
 #define CMP_QSIZE		CMP_QUEUE_SIZE2
 #define CMP_QUEUE_LEN		(1ULL << (CMP_QSIZE + 10))
 #define CMP_QUEUE_CQE_THRESH	0
-#define CMP_QUEUE_TIMER_THRESH	220 /* 10usec */
+#define CMP_QUEUE_TIMER_THRESH	80 /* ~2usec */
 
 #define RBDR_SIZE		RBDR_SIZE0
 #define RCV_BUF_COUNT		(1ULL << (RBDR_SIZE + 13))
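
The two thunder hunks above are linked: with NICPF_CLK_PER_INT_TICK dropping from 2 to 1, one timer tick now corresponds to roughly 0.025 us, so the 80 written here works out to about 2 us, while the old value of 220 at the previous tick rate encoded roughly 10 us. A short check of that arithmetic, taking the tick period from the nic.h comment as an assumption:

/* Per the nic.h comment: 700 MHz SCLK, value written is 1/16th,
 * so one tick is 16 / 700 MHz ~= 23 ns, rounded there to 0.025 us.
 */
#define DEMO_TICK_NS		25	/* ~0.025 us, assumed from nic.h */
#define DEMO_TIMER_THRESH	80

/* threshold in ns: 80 * 25 = 2000 ns, i.e. ~2 us as the define says */
static unsigned int demo_thresh_ns(void)
{
	return DEMO_TIMER_THRESH * DEMO_TICK_NS;
}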
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 180aa9fabf48..9df26c2263bc 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -186,6 +186,23 @@ void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac)
186} 186}
187EXPORT_SYMBOL(bgx_set_lmac_mac); 187EXPORT_SYMBOL(bgx_set_lmac_mac);
188 188
189void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
190{
191 struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
192 u64 cfg;
193
194 if (!bgx)
195 return;
196
197 cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
198 if (enable)
199 cfg |= CMR_PKT_RX_EN | CMR_PKT_TX_EN;
200 else
201 cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN);
202 bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
203}
204EXPORT_SYMBOL(bgx_lmac_rx_tx_enable);
205
189static void bgx_sgmii_change_link_state(struct lmac *lmac) 206static void bgx_sgmii_change_link_state(struct lmac *lmac)
190{ 207{
191 struct bgx *bgx = lmac->bgx; 208 struct bgx *bgx = lmac->bgx;
@@ -612,6 +629,8 @@ static void bgx_poll_for_link(struct work_struct *work)
 		lmac->last_duplex = 1;
 	} else {
 		lmac->link_up = 0;
+		lmac->last_speed = SPEED_UNKNOWN;
+		lmac->last_duplex = DUPLEX_UNKNOWN;
 	}
 
 	if (lmac->last_link != lmac->link_up) {
@@ -654,8 +673,7 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
 	}
 
 	/* Enable lmac */
-	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG,
-		       CMR_EN | CMR_PKT_RX_EN | CMR_PKT_TX_EN);
+	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);
 
 	/* Restore default cfg, incase low level firmware changed it */
 	bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03);
@@ -695,8 +713,7 @@ static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
 	lmac = &bgx->lmac[lmacid];
 	if (lmac->check_link) {
 		/* Destroy work queue */
-		cancel_delayed_work(&lmac->dwork);
-		flush_workqueue(lmac->check_link);
+		cancel_delayed_work_sync(&lmac->dwork);
 		destroy_workqueue(lmac->check_link);
 	}
 
@@ -1009,6 +1026,9 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct bgx *bgx = NULL;
 	u8 lmac;
 
+	/* Load octeon mdio driver */
+	octeon_mdiobus_force_mod_depencency();
+
 	bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL);
 	if (!bgx)
 		return -ENOMEM;
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index 07b7ec66c60d..149e179363a1 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -182,6 +182,8 @@ enum MCAST_MODE {
182#define BCAST_ACCEPT 1 182#define BCAST_ACCEPT 1
183#define CAM_ACCEPT 1 183#define CAM_ACCEPT 1
184 184
185void octeon_mdiobus_force_mod_depencency(void);
186void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable);
185void bgx_add_dmac_addr(u64 dmac, int node, int bgx_idx, int lmac); 187void bgx_add_dmac_addr(u64 dmac, int node, int bgx_idx, int lmac);
186unsigned bgx_get_map(int node); 188unsigned bgx_get_map(int node);
187int bgx_get_lmac_count(int node, int bgx); 189int bgx_get_lmac_count(int node, int bgx);
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index ed41559bae77..b553409e04ad 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -98,8 +98,7 @@ static int csr0 = 0x01A00000 | 0x4800;
98#elif defined(__mips__) 98#elif defined(__mips__)
99static int csr0 = 0x00200000 | 0x4000; 99static int csr0 = 0x00200000 | 0x4000;
100#else 100#else
101#warning Processor architecture undefined! 101static int csr0;
102static int csr0 = 0x00A00000 | 0x4800;
103#endif 102#endif
104 103
105/* Operational parameters that usually are not changed. */ 104/* Operational parameters that usually are not changed. */
@@ -1982,6 +1981,12 @@ static int __init tulip_init (void)
 	pr_info("%s", version);
 #endif
 
+	if (!csr0) {
+		pr_warn("tulip: unknown CPU architecture, using default csr0\n");
+		/* default to 8 longword cache line alignment */
+		csr0 = 0x00A00000 | 0x4800;
+	}
+
 	/* copy module parms into globals */
 	tulip_rx_copybreak = rx_copybreak;
 	tulip_max_interrupt_work = max_interrupt_work;
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 9beb3d34d4ba..3c0e4d5c5fef 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -907,7 +907,7 @@ static void init_registers(struct net_device *dev)
907#elif defined(CONFIG_SPARC) || defined (CONFIG_PARISC) || defined(CONFIG_ARM) 907#elif defined(CONFIG_SPARC) || defined (CONFIG_PARISC) || defined(CONFIG_ARM)
908 i |= 0x4800; 908 i |= 0x4800;
909#else 909#else
910#warning Processor architecture undefined 910 dev_warn(&dev->dev, "unknown CPU architecture, using default csr0 setting\n");
911 i |= 0x4800; 911 i |= 0x4800;
912#endif 912#endif
913 iowrite32(i, ioaddr + PCIBusCfg); 913 iowrite32(i, ioaddr + PCIBusCfg);
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index d463563e1f70..6ee78c203eca 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -848,8 +848,6 @@ void be_roce_dev_remove(struct be_adapter *);
848/* 848/*
849 * internal function to open-close roce device during ifup-ifdown. 849 * internal function to open-close roce device during ifup-ifdown.
850 */ 850 */
851void be_roce_dev_open(struct be_adapter *);
852void be_roce_dev_close(struct be_adapter *);
853void be_roce_dev_shutdown(struct be_adapter *); 851void be_roce_dev_shutdown(struct be_adapter *);
854 852
855#endif /* BE_H */ 853#endif /* BE_H */
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index b6ad02909d6b..8a1d9fffd7d6 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -3299,8 +3299,10 @@ static int be_msix_register(struct be_adapter *adapter)
3299 3299
3300 return 0; 3300 return 0;
3301err_msix: 3301err_msix:
3302 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--) 3302 for (i--; i >= 0; i--) {
3303 eqo = &adapter->eq_obj[i];
3303 free_irq(be_msix_vec_get(adapter, eqo), eqo); 3304 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3305 }
3304 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n", 3306 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
3305 status); 3307 status);
3306 be_msix_disable(adapter); 3308 be_msix_disable(adapter);
@@ -3432,8 +3434,6 @@ static int be_close(struct net_device *netdev)
 
 	be_disable_if_filters(adapter);
 
-	be_roce_dev_close(adapter);
-
 	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
 		for_all_evt_queues(adapter, eqo, i) {
 			napi_disable(&eqo->napi);
@@ -3601,8 +3601,6 @@ static int be_open(struct net_device *netdev)
 	be_link_status_update(adapter, link_status);
 
 	netif_tx_start_all_queues(netdev);
-	be_roce_dev_open(adapter);
-
 #ifdef CONFIG_BE2NET_VXLAN
 	if (skyhawk_chip(adapter))
 		vxlan_get_rx_port(netdev);
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c
index 60368207bf58..4089156a7f5e 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.c
+++ b/drivers/net/ethernet/emulex/benet/be_roce.c
@@ -116,40 +116,6 @@ void be_roce_dev_remove(struct be_adapter *adapter)
116 } 116 }
117} 117}
118 118
119static void _be_roce_dev_open(struct be_adapter *adapter)
120{
121 if (ocrdma_drv && adapter->ocrdma_dev &&
122 ocrdma_drv->state_change_handler)
123 ocrdma_drv->state_change_handler(adapter->ocrdma_dev,
124 BE_DEV_UP);
125}
126
127void be_roce_dev_open(struct be_adapter *adapter)
128{
129 if (be_roce_supported(adapter)) {
130 mutex_lock(&be_adapter_list_lock);
131 _be_roce_dev_open(adapter);
132 mutex_unlock(&be_adapter_list_lock);
133 }
134}
135
136static void _be_roce_dev_close(struct be_adapter *adapter)
137{
138 if (ocrdma_drv && adapter->ocrdma_dev &&
139 ocrdma_drv->state_change_handler)
140 ocrdma_drv->state_change_handler(adapter->ocrdma_dev,
141 BE_DEV_DOWN);
142}
143
144void be_roce_dev_close(struct be_adapter *adapter)
145{
146 if (be_roce_supported(adapter)) {
147 mutex_lock(&be_adapter_list_lock);
148 _be_roce_dev_close(adapter);
149 mutex_unlock(&be_adapter_list_lock);
150 }
151}
152
153void be_roce_dev_shutdown(struct be_adapter *adapter) 119void be_roce_dev_shutdown(struct be_adapter *adapter)
154{ 120{
155 if (be_roce_supported(adapter)) { 121 if (be_roce_supported(adapter)) {
@@ -177,8 +143,6 @@ int be_roce_register_driver(struct ocrdma_driver *drv)
 
 		_be_roce_dev_add(dev);
 		netdev = dev->netdev;
-		if (netif_running(netdev) && netif_oper_up(netdev))
-			_be_roce_dev_open(dev);
 	}
 	mutex_unlock(&be_adapter_list_lock);
 	return 0;
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.h b/drivers/net/ethernet/emulex/benet/be_roce.h
index cde6ef905ec4..fde609789483 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.h
+++ b/drivers/net/ethernet/emulex/benet/be_roce.h
@@ -60,9 +60,7 @@ struct ocrdma_driver {
60 void (*state_change_handler) (struct ocrdma_dev *, u32 new_state); 60 void (*state_change_handler) (struct ocrdma_dev *, u32 new_state);
61}; 61};
62 62
63enum { 63enum be_roce_event {
64 BE_DEV_UP = 0,
65 BE_DEV_DOWN = 1,
66 BE_DEV_SHUTDOWN = 2 64 BE_DEV_SHUTDOWN = 2
67}; 65};
68 66
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index 63c2bcf8031a..b1026689b78f 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -48,21 +48,15 @@ static void nps_enet_read_rx_fifo(struct net_device *ndev,
48 *reg = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF); 48 *reg = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF);
49 else { /* !dst_is_aligned */ 49 else { /* !dst_is_aligned */
50 for (i = 0; i < len; i++, reg++) { 50 for (i = 0; i < len; i++, reg++) {
51 u32 buf = 51 u32 buf = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF);
52 nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF); 52 put_unaligned(buf, reg);
53
54 /* to accommodate word-unaligned address of "reg"
55 * we have to do memcpy_toio() instead of simple "=".
56 */
57 memcpy_toio((void __iomem *)reg, &buf, sizeof(buf));
58 } 53 }
59 } 54 }
60 55
61 /* copy last bytes (if any) */ 56 /* copy last bytes (if any) */
62 if (last) { 57 if (last) {
63 u32 buf = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF); 58 u32 buf = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF);
64 59 memcpy((u8*)reg, &buf, last);
65 memcpy_toio((void __iomem *)reg, &buf, last);
66 } 60 }
67} 61}
68 62
@@ -367,7 +361,7 @@ static void nps_enet_send_frame(struct net_device *ndev,
 	struct nps_enet_tx_ctl tx_ctrl;
 	short length = skb->len;
 	u32 i, len = DIV_ROUND_UP(length, sizeof(u32));
-	u32 *src = (u32 *)virt_to_phys(skb->data);
+	u32 *src = (void *)skb->data;
 	bool src_is_aligned = IS_ALIGNED((unsigned long)src, sizeof(u32));
 
 	tx_ctrl.value = 0;
@@ -375,17 +369,11 @@ static void nps_enet_send_frame(struct net_device *ndev,
 	if (src_is_aligned)
 		for (i = 0; i < len; i++, src++)
 			nps_enet_reg_set(priv, NPS_ENET_REG_TX_BUF, *src);
-	else { /* !src_is_aligned */
-		for (i = 0; i < len; i++, src++) {
-			u32 buf;
-
-			/* to accommodate word-unaligned address of "src"
-			 * we have to do memcpy_fromio() instead of simple "="
-			 */
-			memcpy_fromio(&buf, (void __iomem *)src, sizeof(buf));
-			nps_enet_reg_set(priv, NPS_ENET_REG_TX_BUF, buf);
-		}
-	}
+	else /* !src_is_aligned */
+		for (i = 0; i < len; i++, src++)
+			nps_enet_reg_set(priv, NPS_ENET_REG_TX_BUF,
+					 get_unaligned(src));
+
 	/* Write the length of the Frame */
 	tx_ctrl.nt = length;
 
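
The nps_enet hunks above fix two distinct problems: the buffers live in ordinary kernel memory, so the __iomem copies (and the stray virt_to_phys()) were wrong, and word access at an unaligned address is exactly what the asm/unaligned.h accessors are for. A standalone sketch of those accessors, operating on hypothetical data:

#include <asm/unaligned.h>
#include <linux/types.h>

/* Copy a 32-bit word to/from a possibly word-unaligned kernel pointer.
 * get_unaligned()/put_unaligned() compile to plain loads and stores on
 * architectures that tolerate unaligned access, and to byte-wise
 * operations elsewhere.
 */
static void demo_unaligned(u8 *frame)
{
	u32 *p = (u32 *)(frame + 1);	/* deliberately misaligned */
	u32 v;

	v = get_unaligned(p);		/* safe unaligned read */
	put_unaligned(v + 1, p);	/* safe unaligned write */
}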
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index ff76d4e9dc1b..bee32a9d9876 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -7,7 +7,8 @@ config NET_VENDOR_FREESCALE
7 default y 7 default y
8 depends on FSL_SOC || QUICC_ENGINE || CPM1 || CPM2 || PPC_MPC512x || \ 8 depends on FSL_SOC || QUICC_ENGINE || CPM1 || CPM2 || PPC_MPC512x || \
9 M523x || M527x || M5272 || M528x || M520x || M532x || \ 9 M523x || M527x || M5272 || M528x || M520x || M532x || \
10 ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM) 10 ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM) || \
11 ARCH_LAYERSCAPE
11 ---help--- 12 ---help---
12 If you have a network (Ethernet) card belonging to this class, say Y. 13 If you have a network (Ethernet) card belonging to this class, say Y.
13 14
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
index 08f5b911d96b..52e0091b4fb2 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
@@ -552,7 +552,7 @@ static void tx_restart(struct net_device *dev)
552 cbd_t __iomem *prev_bd; 552 cbd_t __iomem *prev_bd;
553 cbd_t __iomem *last_tx_bd; 553 cbd_t __iomem *last_tx_bd;
554 554
555 last_tx_bd = fep->tx_bd_base + (fpi->tx_ring * sizeof(cbd_t)); 555 last_tx_bd = fep->tx_bd_base + ((fpi->tx_ring - 1) * sizeof(cbd_t));
556 556
557 /* get the current bd held in TBPTR and scan back from this point */ 557 /* get the current bd held in TBPTR and scan back from this point */
558 recheck_bd = curr_tbptr = (cbd_t __iomem *) 558 recheck_bd = curr_tbptr = (cbd_t __iomem *)
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 55c36230e176..40071dad1c57 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -464,7 +464,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
464 * address). Print error message but continue anyway. 464 * address). Print error message but continue anyway.
465 */ 465 */
466 if ((void *)tbipa > priv->map + resource_size(&res) - 4) 466 if ((void *)tbipa > priv->map + resource_size(&res) - 4)
467 dev_err(&pdev->dev, "invalid register map (should be at least 0x%04x to contain TBI address)\n", 467 dev_err(&pdev->dev, "invalid register map (should be at least 0x%04zx to contain TBI address)\n",
468 ((void *)tbipa - priv->map) + 4); 468 ((void *)tbipa - priv->map) + 4);
469 469
470 iowrite32be(be32_to_cpup(prop), tbipa); 470 iowrite32be(be32_to_cpup(prop), tbipa);
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 3e6b9b437497..3e233d924cce 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -647,9 +647,9 @@ static int gfar_parse_group(struct device_node *np,
647 if (model && strcasecmp(model, "FEC")) { 647 if (model && strcasecmp(model, "FEC")) {
648 gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1); 648 gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
649 gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2); 649 gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
650 if (gfar_irq(grp, TX)->irq == NO_IRQ || 650 if (!gfar_irq(grp, TX)->irq ||
651 gfar_irq(grp, RX)->irq == NO_IRQ || 651 !gfar_irq(grp, RX)->irq ||
652 gfar_irq(grp, ER)->irq == NO_IRQ) 652 !gfar_irq(grp, ER)->irq)
653 return -EINVAL; 653 return -EINVAL;
654 } 654 }
655 655
@@ -894,7 +894,8 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
 			FSL_GIANFAR_DEV_HAS_VLAN |
 			FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
 			FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
-			FSL_GIANFAR_DEV_HAS_TIMER;
+			FSL_GIANFAR_DEV_HAS_TIMER |
+			FSL_GIANFAR_DEV_HAS_RX_FILER;
 
 	err = of_property_read_string(np, "phy-connection-type", &ctype);
 
@@ -1396,8 +1397,9 @@ static int gfar_probe(struct platform_device *ofdev)
 		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
 	}
 
-	/* always enable rx filer */
-	priv->rx_filer_enable = 1;
+	/* Always enable rx filer if available */
+	priv->rx_filer_enable =
+	    (priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 1 : 0;
 	/* Enable most messages by default */
 	priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
 	/* use pritority h/w tx queue scheduling for single queue devices */
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index f266b20f9ef5..cb77667971a7 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -923,6 +923,7 @@ struct gfar {
923#define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400 923#define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400
924#define FSL_GIANFAR_DEV_HAS_TIMER 0x00000800 924#define FSL_GIANFAR_DEV_HAS_TIMER 0x00000800
925#define FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER 0x00001000 925#define FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER 0x00001000
926#define FSL_GIANFAR_DEV_HAS_RX_FILER 0x00002000
926 927
927#if (MAXGROUPS == 2) 928#if (MAXGROUPS == 2)
928#define DEFAULT_MAPPING 0xAA 929#define DEFAULT_MAPPING 0xAA
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index 664d0c261269..b40fba929d65 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -467,7 +467,7 @@ static int gianfar_ptp_probe(struct platform_device *dev)
467 467
468 etsects->irq = platform_get_irq(dev, 0); 468 etsects->irq = platform_get_irq(dev, 0);
469 469
470 if (etsects->irq == NO_IRQ) { 470 if (etsects->irq < 0) {
471 pr_err("irq not in device tree\n"); 471 pr_err("irq not in device tree\n");
472 goto no_node; 472 goto no_node;
473 } 473 }
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 2a98eba660c0..b674414a4d72 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -1259,12 +1259,8 @@ int hns_dsaf_set_mac_uc_entry(
1259 if (MAC_IS_ALL_ZEROS(mac_entry->addr) || 1259 if (MAC_IS_ALL_ZEROS(mac_entry->addr) ||
1260 MAC_IS_BROADCAST(mac_entry->addr) || 1260 MAC_IS_BROADCAST(mac_entry->addr) ||
1261 MAC_IS_MULTICAST(mac_entry->addr)) { 1261 MAC_IS_MULTICAST(mac_entry->addr)) {
1262 dev_err(dsaf_dev->dev, 1262 dev_err(dsaf_dev->dev, "set_uc %s Mac %pM err!\n",
1263 "set_uc %s Mac %02x:%02x:%02x:%02x:%02x:%02x err!\n", 1263 dsaf_dev->ae_dev.name, mac_entry->addr);
1264 dsaf_dev->ae_dev.name, mac_entry->addr[0],
1265 mac_entry->addr[1], mac_entry->addr[2],
1266 mac_entry->addr[3], mac_entry->addr[4],
1267 mac_entry->addr[5]);
1268 return -EINVAL; 1264 return -EINVAL;
1269 } 1265 }
1270 1266
@@ -1331,12 +1327,8 @@ int hns_dsaf_set_mac_mc_entry(
 
 	/* mac addr check */
 	if (MAC_IS_ALL_ZEROS(mac_entry->addr)) {
-		dev_err(dsaf_dev->dev,
-			"set uc %s Mac %02x:%02x:%02x:%02x:%02x:%02x err!\n",
-			dsaf_dev->ae_dev.name, mac_entry->addr[0],
-			mac_entry->addr[1], mac_entry->addr[2],
-			mac_entry->addr[3],
-			mac_entry->addr[4], mac_entry->addr[5]);
+		dev_err(dsaf_dev->dev, "set uc %s Mac %pM err!\n",
+			dsaf_dev->ae_dev.name, mac_entry->addr);
 		return -EINVAL;
 	}
 
@@ -1410,11 +1402,8 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
 
 	/*chechk mac addr */
 	if (MAC_IS_ALL_ZEROS(mac_entry->addr)) {
-		dev_err(dsaf_dev->dev,
-			"set_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x!\n",
-			mac_entry->addr[0], mac_entry->addr[1],
-			mac_entry->addr[2], mac_entry->addr[3],
-			mac_entry->addr[4], mac_entry->addr[5]);
+		dev_err(dsaf_dev->dev, "set_entry failed,addr %pM!\n",
+			mac_entry->addr);
 		return -EINVAL;
 	}
 
@@ -1497,9 +1486,8 @@ int hns_dsaf_del_mac_entry(struct dsaf_device *dsaf_dev, u16 vlan_id,
 
 	/*check mac addr */
 	if (MAC_IS_ALL_ZEROS(addr) || MAC_IS_BROADCAST(addr)) {
-		dev_err(dsaf_dev->dev,
-			"del_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x!\n",
-			addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
+		dev_err(dsaf_dev->dev, "del_entry failed,addr %pM!\n",
+			addr);
 		return -EINVAL;
 	}
 
@@ -1563,11 +1551,8 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
 
 	/*check mac addr */
 	if (MAC_IS_ALL_ZEROS(mac_entry->addr)) {
-		dev_err(dsaf_dev->dev,
-			"del_port failed, addr %02x:%02x:%02x:%02x:%02x:%02x!\n",
-			mac_entry->addr[0], mac_entry->addr[1],
-			mac_entry->addr[2], mac_entry->addr[3],
-			mac_entry->addr[4], mac_entry->addr[5]);
+		dev_err(dsaf_dev->dev, "del_port failed, addr %pM!\n",
+			mac_entry->addr);
 		return -EINVAL;
 	}
 
@@ -1644,11 +1629,8 @@ int hns_dsaf_get_mac_uc_entry(struct dsaf_device *dsaf_dev,
 	/* check macaddr */
 	if (MAC_IS_ALL_ZEROS(mac_entry->addr) ||
 	    MAC_IS_BROADCAST(mac_entry->addr)) {
-		dev_err(dsaf_dev->dev,
-			"get_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x\n",
-			mac_entry->addr[0], mac_entry->addr[1],
-			mac_entry->addr[2], mac_entry->addr[3],
-			mac_entry->addr[4], mac_entry->addr[5]);
+		dev_err(dsaf_dev->dev, "get_entry failed,addr %pM\n",
+			mac_entry->addr);
 		return -EINVAL;
 	}
 
@@ -1695,11 +1677,8 @@ int hns_dsaf_get_mac_mc_entry(struct dsaf_device *dsaf_dev,
 	/*check mac addr */
 	if (MAC_IS_ALL_ZEROS(mac_entry->addr) ||
 	    MAC_IS_BROADCAST(mac_entry->addr)) {
-		dev_err(dsaf_dev->dev,
-			"get_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x\n",
-			mac_entry->addr[0], mac_entry->addr[1],
-			mac_entry->addr[2], mac_entry->addr[3],
-			mac_entry->addr[4], mac_entry->addr[5]);
+		dev_err(dsaf_dev->dev, "get_entry failed,addr %pM\n",
+			mac_entry->addr);
 		return -EINVAL;
 	}
 
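
All seven hns_dsaf_main.c hunks above are the same transformation: the kernel's %pM printk extension takes a pointer to a 6-byte MAC address and prints it colon-separated, replacing six %02x specifiers and their error-prone argument lists. A minimal sketch, with dev and addr standing in for a driver's device and u8[ETH_ALEN] buffer:

	#include <linux/device.h>
	#include <linux/etherdevice.h>

	static void report_bad_mac(struct device *dev, const u8 *addr)
	{
		/* %pM dereferences addr and prints aa:bb:cc:dd:ee:ff */
		dev_err(dev, "invalid MAC %pM\n", addr);
	}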
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index b475e1bf2e6f..bdbd80423b17 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -898,7 +898,7 @@
 #define XGMAC_PAUSE_CTL_RSP_MODE_B	2
 #define XGMAC_PAUSE_CTL_TX_XOFF_B	3
 
-static inline void dsaf_write_reg(void *base, u32 reg, u32 value)
+static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value)
 {
 	u8 __iomem *reg_addr = ACCESS_ONCE(base);
 
@@ -908,7 +908,7 @@ static inline void dsaf_write_reg(void *base, u32 reg, u32 value)
 #define dsaf_write_dev(a, reg, value) \
 	dsaf_write_reg((a)->io_base, (reg), (value))
 
-static inline u32 dsaf_read_reg(u8 *base, u32 reg)
+static inline u32 dsaf_read_reg(u8 __iomem *base, u32 reg)
 {
 	u8 __iomem *reg_addr = ACCESS_ONCE(base);
 
@@ -927,8 +927,8 @@ static inline u32 dsaf_read_reg(u8 *base, u32 reg)
 #define dsaf_set_bit(origin, shift, val) \
 	dsaf_set_field((origin), (1ull << (shift)), (shift), (val))
 
-static inline void dsaf_set_reg_field(void *base, u32 reg, u32 mask, u32 shift,
-				      u32 val)
+static inline void dsaf_set_reg_field(void __iomem *base, u32 reg, u32 mask,
+				      u32 shift, u32 val)
 {
 	u32 origin = dsaf_read_reg(base, reg);
 
@@ -947,7 +947,8 @@ static inline void dsaf_set_reg_field(void *base, u32 reg, u32 mask, u32 shift,
 #define dsaf_get_bit(origin, shift) \
 	dsaf_get_field((origin), (1ull << (shift)), (shift))
 
-static inline u32 dsaf_get_reg_field(void *base, u32 reg, u32 mask, u32 shift)
+static inline u32 dsaf_get_reg_field(void __iomem *base, u32 reg, u32 mask,
+				     u32 shift)
 {
 	u32 origin;
 
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 639263d5e833..7781e80896a6 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -627,8 +627,10 @@ static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 
 	/* verify the skb head is not shared */
 	err = skb_cow_head(skb, 0);
-	if (err)
+	if (err) {
+		dev_kfree_skb(skb);
 		return NETDEV_TX_OK;
+	}
 
 	/* locate vlan header */
 	vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
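
The fm10k hunk plugs a leak that follows from the ndo_start_xmit ownership rule: returning NETDEV_TX_OK tells the stack the driver consumed the skb, so any early return taking that path must free the buffer itself. A hedged sketch with a hypothetical foo_xmit handler, not fm10k's:

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		/* skb_cow_head() failing means the header stays shared */
		if (skb_cow_head(skb, 0)) {
			dev_kfree_skb(skb);	/* we own it; free or it leaks */
			return NETDEV_TX_OK;
		}
		/* ... map buffers and post the frame to hardware ... */
		return NETDEV_TX_OK;
	}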
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 0ff8f01e57ee..1fd5ea82a9bc 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -567,10 +567,6 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
 		goto init_adminq_exit;
 	}
 
-	/* initialize locks */
-	mutex_init(&hw->aq.asq_mutex);
-	mutex_init(&hw->aq.arq_mutex);
-
 	/* Set up register offsets */
 	i40e_adminq_init_regs(hw);
 
@@ -664,8 +660,6 @@ i40e_status i40e_shutdown_adminq(struct i40e_hw *hw)
 	i40e_shutdown_asq(hw);
 	i40e_shutdown_arq(hw);
 
-	/* destroy the locks */
-
 	if (hw->nvm_buff.va)
 		i40e_free_virt_mem(hw, &hw->nvm_buff);
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index b825f978d441..4a9873ec28c7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -10295,6 +10295,12 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* set up a default setting for link flow control */
 	pf->hw.fc.requested_mode = I40E_FC_NONE;
 
+	/* set up the locks for the AQ, do this only once in probe
+	 * and destroy them only once in remove
+	 */
+	mutex_init(&hw->aq.asq_mutex);
+	mutex_init(&hw->aq.arq_mutex);
+
 	err = i40e_init_adminq(hw);
 
 	/* provide nvm, fw, api versions */
@@ -10697,7 +10703,6 @@ static void i40e_remove(struct pci_dev *pdev)
 	set_bit(__I40E_DOWN, &pf->state);
 	del_timer_sync(&pf->service_timer);
 	cancel_work_sync(&pf->service_task);
-	i40e_fdir_teardown(pf);
 
 	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
 		i40e_free_vfs(pf);
@@ -10740,6 +10745,10 @@ static void i40e_remove(struct pci_dev *pdev)
10740 "Failed to destroy the Admin Queue resources: %d\n", 10745 "Failed to destroy the Admin Queue resources: %d\n",
10741 ret_code); 10746 ret_code);
10742 10747
10748 /* destroy the locks only once, here */
10749 mutex_destroy(&hw->aq.arq_mutex);
10750 mutex_destroy(&hw->aq.asq_mutex);
10751
10743 /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */ 10752 /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
10744 i40e_clear_interrupt_scheme(pf); 10753 i40e_clear_interrupt_scheme(pf);
10745 for (i = 0; i < pf->num_alloc_vsi; i++) { 10754 for (i = 0; i < pf->num_alloc_vsi; i++) {
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
index fd123ca60761..3f65e39b3fe4 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -551,10 +551,6 @@ i40e_status i40evf_init_adminq(struct i40e_hw *hw)
 		goto init_adminq_exit;
 	}
 
-	/* initialize locks */
-	mutex_init(&hw->aq.asq_mutex);
-	mutex_init(&hw->aq.arq_mutex);
-
 	/* Set up register offsets */
 	i40e_adminq_init_regs(hw);
 
@@ -596,8 +592,6 @@ i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw)
 	i40e_shutdown_asq(hw);
 	i40e_shutdown_arq(hw);
 
-	/* destroy the locks */
-
 	if (hw->nvm_buff.va)
 		i40e_free_virt_mem(hw, &hw->nvm_buff);
 
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index d962164dfb0f..99d2cffae0cd 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -2476,6 +2476,12 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	hw->bus.device = PCI_SLOT(pdev->devfn);
 	hw->bus.func = PCI_FUNC(pdev->devfn);
 
+	/* set up the locks for the AQ, do this only once in probe
+	 * and destroy them only once in remove
+	 */
+	mutex_init(&hw->aq.asq_mutex);
+	mutex_init(&hw->aq.arq_mutex);
+
 	INIT_LIST_HEAD(&adapter->mac_filter_list);
 	INIT_LIST_HEAD(&adapter->vlan_filter_list);
 
@@ -2629,6 +2635,10 @@ static void i40evf_remove(struct pci_dev *pdev)
 	if (hw->aq.asq.count)
 		i40evf_shutdown_adminq(hw);
 
+	/* destroy the locks only once, here */
+	mutex_destroy(&hw->aq.arq_mutex);
+	mutex_destroy(&hw->aq.asq_mutex);
+
 	iounmap(hw->hw_addr);
 	pci_release_regions(pdev);
 
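
The four i40e/i40evf hunks all serve one rule: a lock's lifetime should match the object that owns it, not an operation that can repeat. The admin queue can be torn down and re-initialized across resets, and re-running mutex_init() on a live mutex is undefined, so initialization moves to probe and mutex_destroy() to remove. A sketch of the pattern with a hypothetical foo_hw, not Intel's structures:

	#include <linux/mutex.h>

	struct foo_hw {
		struct mutex cmd_lock;		/* guards the admin queue */
	};

	static void foo_probe(struct foo_hw *hw)
	{
		mutex_init(&hw->cmd_lock);	/* exactly once, at probe */
	}

	static void foo_queue_reinit(struct foo_hw *hw)
	{
		mutex_lock(&hw->cmd_lock);	/* safe on every reset */
		/* ... tear down and rebuild queue memory ... */
		mutex_unlock(&hw->cmd_lock);
	}

	static void foo_remove(struct foo_hw *hw)
	{
		mutex_destroy(&hw->cmd_lock);	/* exactly once, at remove */
	}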
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 47395ff5d908..aed8d029b23d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -7920,6 +7920,9 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
 	 */
 	if (netif_running(dev))
 		ixgbe_close(dev);
+	else
+		ixgbe_reset(adapter);
+
 	ixgbe_clear_interrupt_scheme(adapter);
 
 #ifdef CONFIG_IXGBE_DCB
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index e84c7f2634d3..ed622fa29dfa 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -36,7 +36,7 @@
 
 /* Registers */
 #define MVNETA_RXQ_CONFIG_REG(q)		(0x1400 + ((q) << 2))
-#define      MVNETA_RXQ_HW_BUF_ALLOC		BIT(1)
+#define      MVNETA_RXQ_HW_BUF_ALLOC		BIT(0)
 #define      MVNETA_RXQ_PKT_OFFSET_ALL_MASK	(0xf << 8)
 #define      MVNETA_RXQ_PKT_OFFSET_MASK(offs)	((offs) << 8)
 #define MVNETA_RXQ_THRESHOLD_REG(q)		(0x14c0 + ((q) << 2))
@@ -62,6 +62,7 @@
 #define MVNETA_WIN_SIZE(w)			(0x2204 + ((w) << 3))
 #define MVNETA_WIN_REMAP(w)			(0x2280 + ((w) << 2))
 #define MVNETA_BASE_ADDR_ENABLE			0x2290
+#define MVNETA_ACCESS_PROTECT_ENABLE		0x2294
 #define MVNETA_PORT_CONFIG			0x2400
 #define      MVNETA_UNI_PROMISC_MODE		BIT(0)
 #define      MVNETA_DEF_RXQ(q)			((q) << 1)
@@ -159,7 +160,7 @@
 
 #define MVNETA_INTR_ENABLE			0x25b8
 #define      MVNETA_TXQ_INTR_ENABLE_ALL_MASK	0x0000ff00
-#define      MVNETA_RXQ_INTR_ENABLE_ALL_MASK	0xff000000  // note: neta says it's 0x000000FF
+#define      MVNETA_RXQ_INTR_ENABLE_ALL_MASK	0x000000ff
 
 #define MVNETA_RXQ_CMD				0x2680
 #define      MVNETA_RXQ_DISABLE_SHIFT		8
@@ -242,6 +243,7 @@
 #define MVNETA_VLAN_TAG_LEN			4
 
 #define MVNETA_CPU_D_CACHE_LINE_SIZE		32
+#define MVNETA_TX_CSUM_DEF_SIZE			1600
 #define MVNETA_TX_CSUM_MAX_SIZE			9800
 #define MVNETA_ACC_MODE_EXT			1
 
@@ -1579,12 +1581,16 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
 		}
 
 		skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size);
-		if (!skb)
-			goto err_drop_frame;
 
+		/* After refill old buffer has to be unmapped regardless
+		 * the skb is successfully built or not.
+		 */
 		dma_unmap_single(dev->dev.parent, phys_addr,
 				 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
 
+		if (!skb)
+			goto err_drop_frame;
+
 		rcvd_pkts++;
 		rcvd_bytes += rx_bytes;
 
@@ -3191,6 +3197,7 @@ static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
 	}
 
 	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
+	mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
 }
 
 /* Power up the port */
@@ -3250,6 +3257,7 @@ static int mvneta_probe(struct platform_device *pdev)
 	char hw_mac_addr[ETH_ALEN];
 	const char *mac_from;
 	const char *managed;
+	int tx_csum_limit;
 	int phy_mode;
 	int err;
 	int cpu;
@@ -3350,8 +3358,21 @@ static int mvneta_probe(struct platform_device *pdev)
 		}
 	}
 
-	if (of_device_is_compatible(dn, "marvell,armada-370-neta"))
-		pp->tx_csum_limit = 1600;
+	if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) {
+		if (tx_csum_limit < 0 ||
+		    tx_csum_limit > MVNETA_TX_CSUM_MAX_SIZE) {
+			tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
+			dev_info(&pdev->dev,
+				 "Wrong TX csum limit in DT, set to %dB\n",
+				 MVNETA_TX_CSUM_DEF_SIZE);
+		}
+	} else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) {
+		tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
+	} else {
+		tx_csum_limit = MVNETA_TX_CSUM_MAX_SIZE;
+	}
+
+	pp->tx_csum_limit = tx_csum_limit;
 
 	pp->tx_ring_size = MVNETA_MAX_TXD;
 	pp->rx_ring_size = MVNETA_MAX_RXD;
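
The mvneta probe hunk is the usual optional-DT-property shape: of_property_read_u32() returns 0 only when the property exists and parses, so the fallback chain runs untouched when it is absent. A condensed sketch of that shape (the "vendor,old-soc" compatible and the helper are illustrative, not mvneta's):

	#include <linux/of.h>

	#define FOO_CSUM_DEF	1600
	#define FOO_CSUM_MAX	9800

	static u32 foo_tx_csum_limit(struct device_node *np)
	{
		u32 limit;

		/* 0 return == property present and parsed */
		if (!of_property_read_u32(np, "tx-csum-limit", &limit) &&
		    limit <= FOO_CSUM_MAX)
			return limit;			/* valid DT override */

		if (of_device_is_compatible(np, "vendor,old-soc"))
			return FOO_CSUM_DEF;		/* small legacy window */

		return FOO_CSUM_MAX;			/* everything newer */
	}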
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index d9884fd15b45..a4beccf1fd46 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -3413,16 +3413,23 @@ static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
 }
 
 /* Free all buffers from the pool */
-static void mvpp2_bm_bufs_free(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool)
+static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
+			       struct mvpp2_bm_pool *bm_pool)
 {
 	int i;
 
 	for (i = 0; i < bm_pool->buf_num; i++) {
+		dma_addr_t buf_phys_addr;
 		u32 vaddr;
 
 		/* Get buffer virtual address (indirect access) */
-		mvpp2_read(priv, MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
+		buf_phys_addr = mvpp2_read(priv,
+					   MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
 		vaddr = mvpp2_read(priv, MVPP2_BM_VIRT_ALLOC_REG);
+
+		dma_unmap_single(dev, buf_phys_addr,
+				 bm_pool->buf_size, DMA_FROM_DEVICE);
+
 		if (!vaddr)
 			break;
 		dev_kfree_skb_any((struct sk_buff *)vaddr);
@@ -3439,7 +3446,7 @@ static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
 {
 	u32 val;
 
-	mvpp2_bm_bufs_free(priv, bm_pool);
+	mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool);
 	if (bm_pool->buf_num) {
 		WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
 		return 0;
@@ -3692,7 +3699,8 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
 			MVPP2_BM_LONG_BUF_NUM :
 			MVPP2_BM_SHORT_BUF_NUM;
 		else
-			mvpp2_bm_bufs_free(port->priv, new_pool);
+			mvpp2_bm_bufs_free(port->dev->dev.parent,
+					   port->priv, new_pool);
 
 		new_pool->pkt_size = pkt_size;
 
@@ -3756,7 +3764,7 @@ static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
 	int pkt_size = MVPP2_RX_PKT_SIZE(mtu);
 
 	/* Update BM pool with new buffer size */
-	mvpp2_bm_bufs_free(port->priv, port_pool);
+	mvpp2_bm_bufs_free(dev->dev.parent, port->priv, port_pool);
 	if (port_pool->buf_num) {
 		WARN(1, "cannot free all buffers in pool %d\n", port_pool->id);
 		return -EIO;
@@ -4401,11 +4409,10 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
 
 		mvpp2_txq_inc_get(txq_pcpu);
 
-		if (!skb)
-			continue;
-
 		dma_unmap_single(port->dev->dev.parent, buf_phys_addr,
 				 skb_headlen(skb), DMA_TO_DEVICE);
+		if (!skb)
+			continue;
 		dev_kfree_skb_any(skb);
 	}
 }
@@ -5092,7 +5099,8 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
 			    struct mvpp2_rx_queue *rxq)
 {
 	struct net_device *dev = port->dev;
-	int rx_received, rx_filled, i;
+	int rx_received;
+	int rx_done = 0;
 	u32 rcvd_pkts = 0;
 	u32 rcvd_bytes = 0;
 
@@ -5101,17 +5109,18 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
 	if (rx_todo > rx_received)
 		rx_todo = rx_received;
 
-	rx_filled = 0;
-	for (i = 0; i < rx_todo; i++) {
+	while (rx_done < rx_todo) {
 		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
 		struct mvpp2_bm_pool *bm_pool;
 		struct sk_buff *skb;
+		dma_addr_t phys_addr;
 		u32 bm, rx_status;
 		int pool, rx_bytes, err;
 
-		rx_filled++;
+		rx_done++;
 		rx_status = rx_desc->status;
 		rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE;
+		phys_addr = rx_desc->buf_phys_addr;
 
 		bm = mvpp2_bm_cookie_build(rx_desc);
 		pool = mvpp2_bm_cookie_pool_get(bm);
@@ -5128,8 +5137,10 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
 		 * comprised by the RX descriptor.
 		 */
 		if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
+err_drop_frame:
 			dev->stats.rx_errors++;
 			mvpp2_rx_error(port, rx_desc);
+			/* Return the buffer to the pool */
 			mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
 					  rx_desc->buf_cookie);
 			continue;
@@ -5137,6 +5148,15 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
 
 		skb = (struct sk_buff *)rx_desc->buf_cookie;
 
+		err = mvpp2_rx_refill(port, bm_pool, bm, 0);
+		if (err) {
+			netdev_err(port->dev, "failed to refill BM pools\n");
+			goto err_drop_frame;
+		}
+
+		dma_unmap_single(dev->dev.parent, phys_addr,
+				 bm_pool->buf_size, DMA_FROM_DEVICE);
+
 		rcvd_pkts++;
 		rcvd_bytes += rx_bytes;
 		atomic_inc(&bm_pool->in_use);
@@ -5147,12 +5167,6 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
 		mvpp2_rx_csum(port, rx_status, skb);
 
 		napi_gro_receive(&port->napi, skb);
-
-		err = mvpp2_rx_refill(port, bm_pool, bm, 0);
-		if (err) {
-			netdev_err(port->dev, "failed to refill BM pools\n");
-			rx_filled--;
-		}
 	}
 
 	if (rcvd_pkts) {
@@ -5166,7 +5180,7 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
 
 	/* Update Rx queue management counters */
 	wmb();
-	mvpp2_rxq_status_update(port, rxq->id, rx_todo, rx_filled);
+	mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);
 
 	return rx_todo;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 2177e56ed0be..d48d5793407d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1010,7 +1010,7 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
 	if (!(smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
 	      smp->method == IB_MGMT_METHOD_GET) || network_view) {
 		mlx4_err(dev, "Unprivileged slave %d is trying to execute a Subnet MGMT MAD, class 0x%x, method 0x%x, view=%s for attr 0x%x. Rejecting\n",
-			 slave, smp->method, smp->mgmt_class,
+			 slave, smp->mgmt_class, smp->method,
 			 network_view ? "Network" : "Host",
 			 be16_to_cpu(smp->attr_id));
 		return -EPERM;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index 8a083d73efdb..038f9ce391e6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -242,6 +242,13 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
 	unsigned long flags;
 	u64 ns, zero = 0;
 
+	/* mlx4_en_init_timestamp is called for each netdev.
+	 * mdev->ptp_clock is common for all ports, skip initialization if
+	 * was done for other port.
+	 */
+	if (mdev->ptp_clock)
+		return;
+
 	rwlock_init(&mdev->clock_lock);
 
 	memset(&mdev->cycles, 0, sizeof(mdev->cycles));
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 005f910ec955..e0ec280a7fa1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -232,9 +232,6 @@ static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
 		if (mdev->pndev[i])
 			mlx4_en_destroy_netdev(mdev->pndev[i]);
 
-	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
-		mlx4_en_remove_timestamp(mdev);
-
 	flush_workqueue(mdev->workqueue);
 	destroy_workqueue(mdev->workqueue);
 	(void) mlx4_mr_free(dev, &mdev->mr);
@@ -320,10 +317,6 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
 	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
 		mdev->port_cnt++;
 
-	/* Initialize time stamp mechanism */
-	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
-		mlx4_en_init_timestamp(mdev);
-
 	/* Set default number of RX rings*/
 	mlx4_en_set_num_rx_rings(mdev);
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 886e1bc86374..7869f97de5da 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2072,6 +2072,9 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
 	/* flush any pending task for this netdev */
 	flush_workqueue(mdev->workqueue);
 
+	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
+		mlx4_en_remove_timestamp(mdev);
+
 	/* Detach the netdev so tasks would not attempt to access it */
 	mutex_lock(&mdev->state_lock);
 	mdev->pndev[priv->port] = NULL;
@@ -3058,9 +3061,12 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	}
 	queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
 
+	/* Initialize time stamp mechanism */
 	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
-		queue_delayed_work(mdev->workqueue, &priv->service_task,
-				   SERVICE_TASK_DELAY);
+		mlx4_en_init_timestamp(mdev);
+
+	queue_delayed_work(mdev->workqueue, &priv->service_task,
+			   SERVICE_TASK_DELAY);
 
 	mlx4_en_set_stats_bitmap(mdev->dev, &priv->stats_bitmap,
 				 mdev->profile.prof[priv->port].rx_ppp,
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 6fec3e993d02..cad6c44df91c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -4306,9 +4306,10 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
 		return -EOPNOTSUPP;
 
 	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
-	ctrl->port = mlx4_slave_convert_port(dev, slave, ctrl->port);
-	if (ctrl->port <= 0)
+	err = mlx4_slave_convert_port(dev, slave, ctrl->port);
+	if (err <= 0)
 		return -EINVAL;
+	ctrl->port = err;
 	qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
 	err = get_res(dev, slave, qpn, RES_QP, &rqp);
 	if (err) {
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index b83f7c0fcf99..122c2ee3dfe2 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -1937,6 +1937,12 @@ static void refill_rx(struct net_device *dev)
 			break; /* Better luck next round. */
 		np->rx_dma[entry] = pci_map_single(np->pci_dev,
 			skb->data, buflen, PCI_DMA_FROMDEVICE);
+		if (pci_dma_mapping_error(np->pci_dev,
+					  np->rx_dma[entry])) {
+			dev_kfree_skb_any(skb);
+			np->rx_skbuff[entry] = NULL;
+			break; /* Better luck next round. */
+		}
 		np->rx_ring[entry].addr = cpu_to_le32(np->rx_dma[entry]);
 	}
 	np->rx_ring[entry].cmd_status = cpu_to_le32(np->rx_buf_sz);
@@ -2093,6 +2099,12 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
 	np->tx_skbuff[entry] = skb;
 	np->tx_dma[entry] = pci_map_single(np->pci_dev,
 				skb->data,skb->len, PCI_DMA_TODEVICE);
+	if (pci_dma_mapping_error(np->pci_dev, np->tx_dma[entry])) {
+		np->tx_skbuff[entry] = NULL;
+		dev_kfree_skb_irq(skb);
+		dev->stats.tx_dropped++;
+		return NETDEV_TX_OK;
+	}
 
 	np->tx_ring[entry].addr = cpu_to_le32(np->tx_dma[entry]);
 
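
Both natsemi hunks add the check that should follow every pci_map_single(): under an IOMMU or bounce buffering the mapping can fail, and the returned handle must be validated with pci_dma_mapping_error() before it is written into a descriptor the hardware will chase. A sketch with a hypothetical helper, not natsemi's code:

	#include <linux/pci.h>
	#include <linux/skbuff.h>

	static int foo_map_rx_buf(struct pci_dev *pdev, struct sk_buff *skb,
				  unsigned int len, dma_addr_t *mapping)
	{
		*mapping = pci_map_single(pdev, skb->data, len,
					  PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(pdev, *mapping)) {
			dev_kfree_skb_any(skb);	/* never post a bad handle */
			return -ENOMEM;
		}
		return 0;			/* safe to write to the ring */
	}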
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index b159ef8303cc..057665180f13 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1326,7 +1326,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
 	/* Get platform resources */
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	irq = platform_get_irq(pdev, 0);
-	if ((!res) || (irq < 0) || (irq >= NR_IRQS)) {
+	if (!res || irq < 0) {
 		dev_err(&pdev->dev, "error getting resources.\n");
 		ret = -ENXIO;
 		goto err_exit;
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index ac17d8669b1a..1292c360390c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -299,6 +299,7 @@ struct qed_hwfn {
 
 	/* Flag indicating whether interrupts are enabled or not*/
 	bool				b_int_enabled;
+	bool				b_int_requested;
 
 	struct qed_mcp_info		*mcp_info;
 
@@ -491,6 +492,8 @@ u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
 		   u32 input_len, u8 *input_buf,
 		   u32 max_size, u8 *unzip_buf);
 
+int qed_slowpath_irq_req(struct qed_hwfn *hwfn);
+
 #define QED_ETH_INTERFACE_VERSION       300
 
 #endif /* _QED_H */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 803b190ccada..817bbd5476ff 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -1385,52 +1385,63 @@ err0:
 	return rc;
 }
 
-static u32 qed_hw_bar_size(struct qed_dev *cdev,
+static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
 			   u8 bar_id)
 {
-	u32 size = pci_resource_len(cdev->pdev, (bar_id > 0) ? 2 : 0);
+	u32 bar_reg = (bar_id == 0 ? PGLUE_B_REG_PF_BAR0_SIZE
+		       : PGLUE_B_REG_PF_BAR1_SIZE);
+	u32 val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
 
-	return size / cdev->num_hwfns;
+	/* Get the BAR size(in KB) from hardware given val */
+	return 1 << (val + 15);
 }
 
 int qed_hw_prepare(struct qed_dev *cdev,
 		   int personality)
 {
-	int rc, i;
+	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+	int rc;
 
 	/* Store the precompiled init data ptrs */
 	qed_init_iro_array(cdev);
 
 	/* Initialize the first hwfn - will learn number of hwfns */
-	rc = qed_hw_prepare_single(&cdev->hwfns[0], cdev->regview,
+	rc = qed_hw_prepare_single(p_hwfn,
+				   cdev->regview,
 				   cdev->doorbells, personality);
 	if (rc)
 		return rc;
 
-	personality = cdev->hwfns[0].hw_info.personality;
+	personality = p_hwfn->hw_info.personality;
 
 	/* Initialize the rest of the hwfns */
-	for (i = 1; i < cdev->num_hwfns; i++) {
+	if (cdev->num_hwfns > 1) {
 		void __iomem *p_regview, *p_doorbell;
+		u8 __iomem *addr;
+
+		/* adjust bar offset for second engine */
+		addr = cdev->regview + qed_hw_bar_size(p_hwfn, 0) / 2;
+		p_regview = addr;
 
-		p_regview = cdev->regview +
-			    i * qed_hw_bar_size(cdev, 0);
-		p_doorbell = cdev->doorbells +
-			     i * qed_hw_bar_size(cdev, 1);
-		rc = qed_hw_prepare_single(&cdev->hwfns[i], p_regview,
+		/* adjust doorbell bar offset for second engine */
+		addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, 1) / 2;
+		p_doorbell = addr;
+
+		/* prepare second hw function */
+		rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview,
 					   p_doorbell, personality);
+
+		/* in case of error, need to free the previously
+		 * initiliazed hwfn 0.
+		 */
 		if (rc) {
-			/* Cleanup previously initialized hwfns */
-			while (--i >= 0) {
-				qed_init_free(&cdev->hwfns[i]);
-				qed_mcp_free(&cdev->hwfns[i]);
-				qed_hw_hwfn_free(&cdev->hwfns[i]);
-			}
-			return rc;
+			qed_init_free(p_hwfn);
+			qed_mcp_free(p_hwfn);
+			qed_hw_hwfn_free(p_hwfn);
 		}
 	}
 
-	return 0;
+	return rc;
 }
 
 void qed_hw_remove(struct qed_dev *cdev)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index de50e84902af..9cc9d62c1fec 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -783,22 +783,16 @@ void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
 	qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
 }
 
-void qed_int_igu_enable(struct qed_hwfn *p_hwfn,
-			struct qed_ptt *p_ptt,
-			enum qed_int_mode int_mode)
+int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+		       enum qed_int_mode int_mode)
 {
-	int i;
-
-	p_hwfn->b_int_enabled = 1;
+	int rc, i;
 
 	/* Mask non-link attentions */
 	for (i = 0; i < 9; i++)
 		qed_wr(p_hwfn, p_ptt,
 		       MISC_REG_AEU_ENABLE1_IGU_OUT_0 + (i << 2), 0);
 
-	/* Enable interrupt Generation */
-	qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
-
 	/* Configure AEU signal change to produce attentions for link */
 	qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
 	qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
@@ -808,6 +802,19 @@ void qed_int_igu_enable(struct qed_hwfn *p_hwfn,
 
 	/* Unmask AEU signals toward IGU */
 	qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
+	if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
+		rc = qed_slowpath_irq_req(p_hwfn);
+		if (rc != 0) {
+			DP_NOTICE(p_hwfn, "Slowpath IRQ request failed\n");
+			return -EINVAL;
+		}
+		p_hwfn->b_int_requested = true;
+	}
+	/* Enable interrupt Generation */
+	qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
+	p_hwfn->b_int_enabled = 1;
+
+	return rc;
 }
 
 void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn,
@@ -1127,3 +1134,11 @@ int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
 
 	return info->igu_sb_cnt;
 }
+
+void qed_int_disable_post_isr_release(struct qed_dev *cdev)
+{
+	int i;
+
+	for_each_hwfn(cdev, i)
+		cdev->hwfns[i].b_int_requested = false;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index 16b57518e706..51e0b09a7f47 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -169,10 +169,14 @@ int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
 			int *p_iov_blks);
 
 /**
- * @file
+ * @brief qed_int_disable_post_isr_release - performs the cleanup post ISR
+ * release. The API need to be called after releasing all slowpath IRQs
+ * of the device.
+ *
+ * @param cdev
  *
- * @brief Interrupt handler
  */
+void qed_int_disable_post_isr_release(struct qed_dev *cdev);
 
 #define QED_CAU_DEF_RX_TIMER_RES	0
 #define QED_CAU_DEF_TX_TIMER_RES	0
@@ -366,10 +370,11 @@ void qed_int_setup(struct qed_hwfn *p_hwfn,
  * @param p_hwfn
  * @param p_ptt
  * @param int_mode
+ *
+ * @return int
  */
-void qed_int_igu_enable(struct qed_hwfn *p_hwfn,
-			struct qed_ptt *p_ptt,
-			enum qed_int_mode int_mode);
+int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+		       enum qed_int_mode int_mode);
 
 /**
  * @brief - Initialize CAU status block entry
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 947c7af72b25..174f7341c5c3 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -476,41 +476,22 @@ static irqreturn_t qed_single_int(int irq, void *dev_instance)
 	return rc;
 }
 
-static int qed_slowpath_irq_req(struct qed_dev *cdev)
+int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
 {
-	int i = 0, rc = 0;
+	struct qed_dev *cdev = hwfn->cdev;
+	int rc = 0;
+	u8 id;
 
 	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
-		/* Request all the slowpath MSI-X vectors */
-		for (i = 0; i < cdev->num_hwfns; i++) {
-			snprintf(cdev->hwfns[i].name, NAME_SIZE,
-				 "sp-%d-%02x:%02x.%02x",
-				 i, cdev->pdev->bus->number,
-				 PCI_SLOT(cdev->pdev->devfn),
-				 cdev->hwfns[i].abs_pf_id);
-
-			rc = request_irq(cdev->int_params.msix_table[i].vector,
-					 qed_msix_sp_int, 0,
-					 cdev->hwfns[i].name,
-					 cdev->hwfns[i].sp_dpc);
-			if (rc)
-				break;
-
-			DP_VERBOSE(&cdev->hwfns[i],
-				   (NETIF_MSG_INTR | QED_MSG_SP),
+		id = hwfn->my_id;
+		snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
+			 id, cdev->pdev->bus->number,
+			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
+		rc = request_irq(cdev->int_params.msix_table[id].vector,
+				 qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc);
+		if (!rc)
+			DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
 				   "Requested slowpath MSI-X\n");
-		}
-
-		if (i != cdev->num_hwfns) {
-			/* Free already request MSI-X vectors */
-			for (i--; i >= 0; i--) {
-				unsigned int vec =
-					cdev->int_params.msix_table[i].vector;
-				synchronize_irq(vec);
-				free_irq(cdev->int_params.msix_table[i].vector,
-					 cdev->hwfns[i].sp_dpc);
-			}
-		}
 	} else {
 		unsigned long flags = 0;
 
@@ -534,13 +515,17 @@ static void qed_slowpath_irq_free(struct qed_dev *cdev)
 
 	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
 		for_each_hwfn(cdev, i) {
+			if (!cdev->hwfns[i].b_int_requested)
+				break;
 			synchronize_irq(cdev->int_params.msix_table[i].vector);
 			free_irq(cdev->int_params.msix_table[i].vector,
 				 cdev->hwfns[i].sp_dpc);
 		}
 	} else {
-		free_irq(cdev->pdev->irq, cdev);
+		if (QED_LEADING_HWFN(cdev)->b_int_requested)
+			free_irq(cdev->pdev->irq, cdev);
 	}
+	qed_int_disable_post_isr_release(cdev);
 }
 
 static int qed_nic_stop(struct qed_dev *cdev)
@@ -765,16 +750,11 @@ static int qed_slowpath_start(struct qed_dev *cdev,
 	if (rc)
 		goto err1;
 
-	/* Request the slowpath IRQ */
-	rc = qed_slowpath_irq_req(cdev);
-	if (rc)
-		goto err2;
-
 	/* Allocate stream for unzipping */
 	rc = qed_alloc_stream_mem(cdev);
 	if (rc) {
 		DP_NOTICE(cdev, "Failed to allocate stream memory\n");
-		goto err3;
+		goto err2;
 	}
 
 	/* Start the slowpath */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index 7a5ce5914ace..e8df12335a97 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -363,4 +363,8 @@
 	 0x7 << 0)
 #define MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT \
 	0
+#define PGLUE_B_REG_PF_BAR0_SIZE \
+	0x2aae60UL
+#define PGLUE_B_REG_PF_BAR1_SIZE \
+	0x2aae64UL
 #endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index 31a1f1eb4f56..287fadfab52d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -124,8 +124,12 @@ struct qed_spq {
 	dma_addr_t		p_phys;
 	struct qed_spq_entry	*p_virt;
 
-	/* Used as index for completions (returns on EQ by FW) */
-	u16			echo_idx;
+#define SPQ_RING_SIZE \
+	(CORE_SPQE_PAGE_SIZE_BYTES / sizeof(struct slow_path_element))
+
+	/* Bitmap for handling out-of-order completions */
+	DECLARE_BITMAP(p_comp_bitmap, SPQ_RING_SIZE);
+	u8			comp_bitmap_idx;
 
 	/* Statistics */
 	u32			unlimited_pending_count;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index 7c0b8459666e..3dd548ab8df1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -112,8 +112,6 @@ static int
 qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
 		   struct qed_spq_entry *p_ent)
 {
-	p_ent->elem.hdr.echo = 0;
-	p_hwfn->p_spq->echo_idx++;
 	p_ent->flags = 0;
 
 	switch (p_ent->comp_mode) {
@@ -195,10 +193,12 @@ static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
 		       struct qed_spq *p_spq,
 		       struct qed_spq_entry *p_ent)
 {
 	struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
+	u16 echo = qed_chain_get_prod_idx(p_chain);
 	struct slow_path_element *elem;
 	struct core_db_data db;
 
+	p_ent->elem.hdr.echo = cpu_to_le16(echo);
 	elem = qed_chain_produce(p_chain);
 	if (!elem) {
 		DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
@@ -437,7 +437,9 @@ void qed_spq_setup(struct qed_hwfn *p_hwfn)
 	p_spq->comp_count		= 0;
 	p_spq->comp_sent_count		= 0;
 	p_spq->unlimited_pending_count	= 0;
-	p_spq->echo_idx			= 0;
+
+	bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
+	p_spq->comp_bitmap_idx = 0;
 
 	/* SPQ cid, cannot fail */
 	qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
@@ -582,26 +584,32 @@ qed_spq_add_entry(struct qed_hwfn *p_hwfn,
 	struct qed_spq *p_spq = p_hwfn->p_spq;
 
 	if (p_ent->queue == &p_spq->unlimited_pending) {
-		struct qed_spq_entry *p_en2;
 
 		if (list_empty(&p_spq->free_pool)) {
 			list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
 			p_spq->unlimited_pending_count++;
 
 			return 0;
-		}
+		} else {
+			struct qed_spq_entry *p_en2;
 
 			p_en2 = list_first_entry(&p_spq->free_pool,
 						 struct qed_spq_entry,
 						 list);
 			list_del(&p_en2->list);
+
+			/* Copy the ring element physical pointer to the new
+			 * entry, since we are about to override the entire ring
+			 * entry and don't want to lose the pointer.
+			 */
+			p_ent->elem.data_ptr = p_en2->elem.data_ptr;
 
-			/* Strcut assignment */
-			*p_en2 = *p_ent;
+			*p_en2 = *p_ent;
 
 			kfree(p_ent);
 
 			p_ent = p_en2;
+		}
 	}
 
 	/* entry is to be placed in 'pending' queue */
@@ -777,13 +785,38 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
 	list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending,
 				 list) {
 		if (p_ent->elem.hdr.echo == echo) {
+			u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
+
 			list_del(&p_ent->list);
 
-			qed_chain_return_produced(&p_spq->chain);
+			/* Avoid overriding of SPQ entries when getting
+			 * out-of-order completions, by marking the completions
+			 * in a bitmap and increasing the chain consumer only
+			 * for the first successive completed entries.
+			 */
+			bitmap_set(p_spq->p_comp_bitmap, pos, SPQ_RING_SIZE);
+
+			while (test_bit(p_spq->comp_bitmap_idx,
+					p_spq->p_comp_bitmap)) {
+				bitmap_clear(p_spq->p_comp_bitmap,
+					     p_spq->comp_bitmap_idx,
+					     SPQ_RING_SIZE);
+				p_spq->comp_bitmap_idx++;
+				qed_chain_return_produced(&p_spq->chain);
+			}
+
 			p_spq->comp_count++;
 			found = p_ent;
 			break;
 		}
+
+		/* This is relatively uncommon - depends on scenarios
+		 * which have mutliple per-PF sent ramrods.
+		 */
+		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+			   "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
+			   le16_to_cpu(echo),
+			   le16_to_cpu(p_ent->elem.hdr.echo));
 	}
 
 	/* Release lock before callback, as callback may post
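
The qed_spq.c rework above replaces a single echo index with a completion bitmap: each completion marks its ring slot, but the consumer only advances across an unbroken leading run of marked slots, so an out-of-order completion can never release a slot an older, still-pending command occupies. A standalone toy model of that invariant (plain C, not the driver's API):

	#include <stdbool.h>
	#include <stdio.h>

	#define RING_SIZE 64

	static bool done[RING_SIZE];
	static unsigned int cons;	/* next slot eligible for release */

	static void complete(unsigned int echo)
	{
		done[echo % RING_SIZE] = true;

		/* release only the contiguous run starting at cons */
		while (done[cons % RING_SIZE]) {
			done[cons % RING_SIZE] = false;
			cons++;
		}
	}

	int main(void)
	{
		complete(1);	/* out of order: marked, nothing released */
		complete(0);	/* fills the gap: slots 0 and 1 released */
		printf("consumer index = %u\n", cons);	/* prints 2 */
		return 0;
	}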
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
index be7d7a62cc0d..34906750b7e7 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
@@ -246,12 +246,13 @@ int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *adapter)
 	u32 state;
 
 	state = QLCRDX(ahw, QLC_83XX_VNIC_STATE);
-	while (state != QLCNIC_DEV_NPAR_OPER && idc->vnic_wait_limit--) {
+	while (state != QLCNIC_DEV_NPAR_OPER && idc->vnic_wait_limit) {
+		idc->vnic_wait_limit--;
 		msleep(1000);
 		state = QLCRDX(ahw, QLC_83XX_VNIC_STATE);
 	}
 
-	if (!idc->vnic_wait_limit) {
+	if (state != QLCNIC_DEV_NPAR_OPER) {
 		dev_err(&adapter->pdev->dev,
 			"vNIC mode not operational, state check timed out.\n");
 		return -EIO;
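
The qlcnic fix is the classic timed-wait pitfall: after the loop, test the condition you waited for, not the exhausted counter, or success on the final iteration gets reported as a timeout. A toy model of the corrected shape (not the driver's code):

	#include <stdbool.h>

	static int wait_for(bool (*cond)(void), int retries)
	{
		bool ok = cond();

		while (!ok && retries) {
			retries--;
			/* sleep between polls in real code */
			ok = cond();
		}
		return ok ? 0 : -1;	/* judge the condition, not retries */
	}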
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 02b7115b6aaa..997976426799 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -4211,8 +4211,9 @@ static int ql_change_rx_buffers(struct ql_adapter *qdev)
 
 	/* Wait for an outstanding reset to complete. */
 	if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
-		int i = 3;
-		while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
+		int i = 4;
+
+		while (--i && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
 			netif_err(qdev, ifup, qdev->ndev,
 				  "Waiting for adapter UP...\n");
 			ssleep(1);
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index ddb2c6c6ec94..689a4a5c8dcf 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -736,9 +736,8 @@ qcaspi_netdev_tx_timeout(struct net_device *dev)
 	netdev_info(qca->net_dev, "Transmit timeout at %ld, latency %ld\n",
 		    jiffies, jiffies - dev->trans_start);
 	qca->net_dev->stats.tx_errors++;
-	/* wake the queue if there is room */
-	if (qcaspi_tx_ring_has_space(&qca->txr))
-		netif_wake_queue(dev);
+	/* Trigger tx queue flush and QCA7000 reset */
+	qca->sync = QCASPI_SYNC_UNKNOWN;
 }
 
 static int
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index ee8d1ec61fab..467d41698fd5 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -905,6 +905,9 @@ static int ravb_phy_init(struct net_device *ndev)
 		netdev_info(ndev, "limited PHY to 100Mbit/s\n");
 	}
 
+	/* 10BASE is not supported */
+	phydev->supported &= ~PHY_10BT_FEATURES;
+
 	netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n",
 		    phydev->addr, phydev->irq, phydev->drv->name);
 
@@ -1037,7 +1040,7 @@ static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = {
1037 "rx_queue_1_mcast_packets", 1040 "rx_queue_1_mcast_packets",
1038 "rx_queue_1_errors", 1041 "rx_queue_1_errors",
1039 "rx_queue_1_crc_errors", 1042 "rx_queue_1_crc_errors",
1040 "rx_queue_1_frame_errors_", 1043 "rx_queue_1_frame_errors",
1041 "rx_queue_1_length_errors", 1044 "rx_queue_1_length_errors",
1042 "rx_queue_1_missed_errors", 1045 "rx_queue_1_missed_errors",
1043 "rx_queue_1_over_errors", 1046 "rx_queue_1_over_errors",
@@ -1225,7 +1228,7 @@ static int ravb_open(struct net_device *ndev)
 	/* Device init */
 	error = ravb_dmac_init(ndev);
 	if (error)
-		goto out_free_irq;
+		goto out_free_irq2;
 	ravb_emac_init(ndev);
 
 	/* Initialise PTP Clock driver */
@@ -1243,9 +1246,11 @@ static int ravb_open(struct net_device *ndev)
 out_ptp_stop:
 	/* Stop PTP Clock driver */
 	ravb_ptp_stop(ndev);
+out_free_irq2:
+	if (priv->chip_id == RCAR_GEN3)
+		free_irq(priv->emac_irq, ndev);
 out_free_irq:
 	free_irq(ndev->irq, ndev);
-	free_irq(priv->emac_irq, ndev);
 out_napi_off:
 	napi_disable(&priv->napi[RAVB_NC]);
 	napi_disable(&priv->napi[RAVB_BE]);
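
The ravb_open() fix restores the mirrored-unwind discipline for goto error paths: each label undoes exactly the steps that had succeeded, in reverse order, so a DMA-init failure now frees the extra GEN3 IRQ while earlier failures do not. A self-contained sketch with stubbed steps (hypothetical foo_* helpers, not ravb's):

	/* stubs so the sketch compiles; real code would claim hardware */
	static int  foo_request_irq(void)  { return 0; }
	static void foo_free_irq(void)     { }
	static int  foo_request_irq2(void) { return 0; }
	static void foo_free_irq2(void)    { }
	static int  foo_dma_init(void)     { return -1; }

	static int foo_open(void)
	{
		int err;

		err = foo_request_irq();
		if (err)
			return err;
		err = foo_request_irq2();
		if (err)
			goto out_free_irq;
		err = foo_dma_init();
		if (err)
			goto out_free_irq2;	/* undo both IRQ requests */
		return 0;

	out_free_irq2:
		foo_free_irq2();
	out_free_irq:
		foo_free_irq();
		return err;
	}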
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index e7bab7909ed9..6a8fc0f341ff 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -52,6 +52,8 @@
52 NETIF_MSG_RX_ERR| \ 52 NETIF_MSG_RX_ERR| \
53 NETIF_MSG_TX_ERR) 53 NETIF_MSG_TX_ERR)
54 54
55#define SH_ETH_OFFSET_INVALID ((u16)~0)
56
55#define SH_ETH_OFFSET_DEFAULTS \ 57#define SH_ETH_OFFSET_DEFAULTS \
56 [0 ... SH_ETH_MAX_REGISTER_OFFSET - 1] = SH_ETH_OFFSET_INVALID 58 [0 ... SH_ETH_MAX_REGISTER_OFFSET - 1] = SH_ETH_OFFSET_INVALID
57 59
@@ -404,6 +406,28 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
404static void sh_eth_rcv_snd_disable(struct net_device *ndev); 406static void sh_eth_rcv_snd_disable(struct net_device *ndev);
405static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev); 407static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev);
406 408
409static void sh_eth_write(struct net_device *ndev, u32 data, int enum_index)
410{
411 struct sh_eth_private *mdp = netdev_priv(ndev);
412 u16 offset = mdp->reg_offset[enum_index];
413
414 if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
415 return;
416
417 iowrite32(data, mdp->addr + offset);
418}
419
420static u32 sh_eth_read(struct net_device *ndev, int enum_index)
421{
422 struct sh_eth_private *mdp = netdev_priv(ndev);
423 u16 offset = mdp->reg_offset[enum_index];
424
425 if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
426 return ~0U;
427
428 return ioread32(mdp->addr + offset);
429}
430
407static bool sh_eth_is_gether(struct sh_eth_private *mdp) 431static bool sh_eth_is_gether(struct sh_eth_private *mdp)
408{ 432{
409 return mdp->reg_offset == sh_eth_offset_gigabit; 433 return mdp->reg_offset == sh_eth_offset_gigabit;
@@ -1143,6 +1167,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
1143 int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring; 1167 int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
1144 int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1; 1168 int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
1145 dma_addr_t dma_addr; 1169 dma_addr_t dma_addr;
1170 u32 buf_len;
1146 1171
1147 mdp->cur_rx = 0; 1172 mdp->cur_rx = 0;
1148 mdp->cur_tx = 0; 1173 mdp->cur_tx = 0;
@@ -1163,16 +1188,16 @@ static void sh_eth_ring_format(struct net_device *ndev)
1163 /* RX descriptor */ 1188 /* RX descriptor */
1164 rxdesc = &mdp->rx_ring[i]; 1189 rxdesc = &mdp->rx_ring[i];
1165 /* The size of the buffer is a multiple of 32 bytes. */ 1190 /* The size of the buffer is a multiple of 32 bytes. */
1166 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 32); 1191 buf_len = ALIGN(mdp->rx_buf_sz, 32);
1167 dma_addr = dma_map_single(&ndev->dev, skb->data, 1192 rxdesc->len = cpu_to_edmac(mdp, buf_len << 16);
1168 rxdesc->buffer_length, 1193 dma_addr = dma_map_single(&ndev->dev, skb->data, buf_len,
1169 DMA_FROM_DEVICE); 1194 DMA_FROM_DEVICE);
1170 if (dma_mapping_error(&ndev->dev, dma_addr)) { 1195 if (dma_mapping_error(&ndev->dev, dma_addr)) {
1171 kfree_skb(skb); 1196 kfree_skb(skb);
1172 break; 1197 break;
1173 } 1198 }
1174 mdp->rx_skbuff[i] = skb; 1199 mdp->rx_skbuff[i] = skb;
1175 rxdesc->addr = dma_addr; 1200 rxdesc->addr = cpu_to_edmac(mdp, dma_addr);
1176 rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP); 1201 rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
1177 1202
1178 /* Rx descriptor address set */ 1203 /* Rx descriptor address set */
@@ -1196,7 +1221,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
1196 mdp->tx_skbuff[i] = NULL; 1221 mdp->tx_skbuff[i] = NULL;
1197 txdesc = &mdp->tx_ring[i]; 1222 txdesc = &mdp->tx_ring[i];
1198 txdesc->status = cpu_to_edmac(mdp, TD_TFP); 1223 txdesc->status = cpu_to_edmac(mdp, TD_TFP);
1199 txdesc->buffer_length = 0; 1224 txdesc->len = cpu_to_edmac(mdp, 0);
1200 if (i == 0) { 1225 if (i == 0) {
1201 /* Tx descriptor address set */ 1226 /* Tx descriptor address set */
1202 sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR); 1227 sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
@@ -1403,8 +1428,10 @@ static int sh_eth_txfree(struct net_device *ndev)
1403 entry, edmac_to_cpu(mdp, txdesc->status)); 1428 entry, edmac_to_cpu(mdp, txdesc->status));
1404 /* Free the original skb. */ 1429 /* Free the original skb. */
1405 if (mdp->tx_skbuff[entry]) { 1430 if (mdp->tx_skbuff[entry]) {
1406 dma_unmap_single(&ndev->dev, txdesc->addr, 1431 dma_unmap_single(&ndev->dev,
1407 txdesc->buffer_length, DMA_TO_DEVICE); 1432 edmac_to_cpu(mdp, txdesc->addr),
1433 edmac_to_cpu(mdp, txdesc->len) >> 16,
1434 DMA_TO_DEVICE);
1408 dev_kfree_skb_irq(mdp->tx_skbuff[entry]); 1435 dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
1409 mdp->tx_skbuff[entry] = NULL; 1436 mdp->tx_skbuff[entry] = NULL;
1410 free_num++; 1437 free_num++;
@@ -1414,7 +1441,7 @@ static int sh_eth_txfree(struct net_device *ndev)
1414 txdesc->status |= cpu_to_edmac(mdp, TD_TDLE); 1441 txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
1415 1442
1416 ndev->stats.tx_packets++; 1443 ndev->stats.tx_packets++;
1417 ndev->stats.tx_bytes += txdesc->buffer_length; 1444 ndev->stats.tx_bytes += edmac_to_cpu(mdp, txdesc->len) >> 16;
1418 } 1445 }
1419 return free_num; 1446 return free_num;
1420} 1447}
@@ -1433,6 +1460,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1433 u32 desc_status; 1460 u32 desc_status;
1434 int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1; 1461 int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
1435 dma_addr_t dma_addr; 1462 dma_addr_t dma_addr;
1463 u32 buf_len;
1436 1464
1437 boguscnt = min(boguscnt, *quota); 1465 boguscnt = min(boguscnt, *quota);
1438 limit = boguscnt; 1466 limit = boguscnt;
@@ -1441,7 +1469,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1441 /* RACT bit must be checked before all the following reads */ 1469 /* RACT bit must be checked before all the following reads */
1442 dma_rmb(); 1470 dma_rmb();
1443 desc_status = edmac_to_cpu(mdp, rxdesc->status); 1471 desc_status = edmac_to_cpu(mdp, rxdesc->status);
1444 pkt_len = rxdesc->frame_length; 1472 pkt_len = edmac_to_cpu(mdp, rxdesc->len) & RD_RFL;
1445 1473
1446 if (--boguscnt < 0) 1474 if (--boguscnt < 0)
1447 break; 1475 break;
@@ -1462,6 +1490,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1462 if (mdp->cd->shift_rd0) 1490 if (mdp->cd->shift_rd0)
1463 desc_status >>= 16; 1491 desc_status >>= 16;
1464 1492
1493 skb = mdp->rx_skbuff[entry];
1465 if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 | 1494 if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
1466 RD_RFS5 | RD_RFS6 | RD_RFS10)) { 1495 RD_RFS5 | RD_RFS6 | RD_RFS10)) {
1467 ndev->stats.rx_errors++; 1496 ndev->stats.rx_errors++;
@@ -1477,16 +1506,16 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1477 ndev->stats.rx_missed_errors++; 1506 ndev->stats.rx_missed_errors++;
1478 if (desc_status & RD_RFS10) 1507 if (desc_status & RD_RFS10)
1479 ndev->stats.rx_over_errors++; 1508 ndev->stats.rx_over_errors++;
1480 } else { 1509 } else if (skb) {
1510 dma_addr = edmac_to_cpu(mdp, rxdesc->addr);
1481 if (!mdp->cd->hw_swap) 1511 if (!mdp->cd->hw_swap)
1482 sh_eth_soft_swap( 1512 sh_eth_soft_swap(
1483 phys_to_virt(ALIGN(rxdesc->addr, 4)), 1513 phys_to_virt(ALIGN(dma_addr, 4)),
1484 pkt_len + 2); 1514 pkt_len + 2);
1485 skb = mdp->rx_skbuff[entry];
1486 mdp->rx_skbuff[entry] = NULL; 1515 mdp->rx_skbuff[entry] = NULL;
1487 if (mdp->cd->rpadir) 1516 if (mdp->cd->rpadir)
1488 skb_reserve(skb, NET_IP_ALIGN); 1517 skb_reserve(skb, NET_IP_ALIGN);
1489 dma_unmap_single(&ndev->dev, rxdesc->addr, 1518 dma_unmap_single(&ndev->dev, dma_addr,
1490 ALIGN(mdp->rx_buf_sz, 32), 1519 ALIGN(mdp->rx_buf_sz, 32),
1491 DMA_FROM_DEVICE); 1520 DMA_FROM_DEVICE);
1492 skb_put(skb, pkt_len); 1521 skb_put(skb, pkt_len);
@@ -1506,7 +1535,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1506 entry = mdp->dirty_rx % mdp->num_rx_ring; 1535 entry = mdp->dirty_rx % mdp->num_rx_ring;
1507 rxdesc = &mdp->rx_ring[entry]; 1536 rxdesc = &mdp->rx_ring[entry];
1508 /* The size of the buffer is 32 byte boundary. */ 1537 /* The size of the buffer is 32 byte boundary. */
1509 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 32); 1538 buf_len = ALIGN(mdp->rx_buf_sz, 32);
1539 rxdesc->len = cpu_to_edmac(mdp, buf_len << 16);
1510 1540
1511 if (mdp->rx_skbuff[entry] == NULL) { 1541 if (mdp->rx_skbuff[entry] == NULL) {
1512 skb = netdev_alloc_skb(ndev, skbuff_size); 1542 skb = netdev_alloc_skb(ndev, skbuff_size);
@@ -1514,8 +1544,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1514 break; /* Better luck next round. */ 1544 break; /* Better luck next round. */
1515 sh_eth_set_receive_align(skb); 1545 sh_eth_set_receive_align(skb);
1516 dma_addr = dma_map_single(&ndev->dev, skb->data, 1546 dma_addr = dma_map_single(&ndev->dev, skb->data,
1517 rxdesc->buffer_length, 1547 buf_len, DMA_FROM_DEVICE);
1518 DMA_FROM_DEVICE);
1519 if (dma_mapping_error(&ndev->dev, dma_addr)) { 1548 if (dma_mapping_error(&ndev->dev, dma_addr)) {
1520 kfree_skb(skb); 1549 kfree_skb(skb);
1521 break; 1550 break;
@@ -1523,7 +1552,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1523 mdp->rx_skbuff[entry] = skb; 1552 mdp->rx_skbuff[entry] = skb;
1524 1553
1525 skb_checksum_none_assert(skb); 1554 skb_checksum_none_assert(skb);
1526 rxdesc->addr = dma_addr; 1555 rxdesc->addr = cpu_to_edmac(mdp, dma_addr);
1527 } 1556 }
1528 dma_wmb(); /* RACT bit must be set after all the above writes */ 1557 dma_wmb(); /* RACT bit must be set after all the above writes */
1529 if (entry >= mdp->num_rx_ring - 1) 1558 if (entry >= mdp->num_rx_ring - 1)
@@ -2331,8 +2360,8 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
2331 /* Free all the skbuffs in the Rx queue. */ 2360 /* Free all the skbuffs in the Rx queue. */
2332 for (i = 0; i < mdp->num_rx_ring; i++) { 2361 for (i = 0; i < mdp->num_rx_ring; i++) {
2333 rxdesc = &mdp->rx_ring[i]; 2362 rxdesc = &mdp->rx_ring[i];
2334 rxdesc->status = 0; 2363 rxdesc->status = cpu_to_edmac(mdp, 0);
2335 rxdesc->addr = 0xBADF00D0; 2364 rxdesc->addr = cpu_to_edmac(mdp, 0xBADF00D0);
2336 dev_kfree_skb(mdp->rx_skbuff[i]); 2365 dev_kfree_skb(mdp->rx_skbuff[i]);
2337 mdp->rx_skbuff[i] = NULL; 2366 mdp->rx_skbuff[i] = NULL;
2338 } 2367 }
@@ -2350,6 +2379,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2350{ 2379{
2351 struct sh_eth_private *mdp = netdev_priv(ndev); 2380 struct sh_eth_private *mdp = netdev_priv(ndev);
2352 struct sh_eth_txdesc *txdesc; 2381 struct sh_eth_txdesc *txdesc;
2382 dma_addr_t dma_addr;
2353 u32 entry; 2383 u32 entry;
2354 unsigned long flags; 2384 unsigned long flags;
2355 2385
@@ -2372,15 +2402,15 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2372 txdesc = &mdp->tx_ring[entry]; 2402 txdesc = &mdp->tx_ring[entry];
2373 /* soft swap. */ 2403 /* soft swap. */
2374 if (!mdp->cd->hw_swap) 2404 if (!mdp->cd->hw_swap)
2375 sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)), 2405 sh_eth_soft_swap(PTR_ALIGN(skb->data, 4), skb->len + 2);
2376 skb->len + 2); 2406 dma_addr = dma_map_single(&ndev->dev, skb->data, skb->len,
2377 txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len, 2407 DMA_TO_DEVICE);
2378 DMA_TO_DEVICE); 2408 if (dma_mapping_error(&ndev->dev, dma_addr)) {
2379 if (dma_mapping_error(&ndev->dev, txdesc->addr)) {
2380 kfree_skb(skb); 2409 kfree_skb(skb);
2381 return NETDEV_TX_OK; 2410 return NETDEV_TX_OK;
2382 } 2411 }
2383 txdesc->buffer_length = skb->len; 2412 txdesc->addr = cpu_to_edmac(mdp, dma_addr);
2413 txdesc->len = cpu_to_edmac(mdp, skb->len << 16);
2384 2414
2385 dma_wmb(); /* TACT bit must be set after all the above writes */ 2415 dma_wmb(); /* TACT bit must be set after all the above writes */
2386 if (entry >= mdp->num_tx_ring - 1) 2416 if (entry >= mdp->num_tx_ring - 1)
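sh_eth also moves its `sh_eth_read()`/`sh_eth_write()` accessors out of the header (see the sh_eth.h diff below) while keeping the invalid-offset guard. A standalone sketch of the underlying idea, an enum-indexed register map with an invalid sentinel, using illustrative names rather than the driver's:

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define REG_OFFSET_INVALID ((uint16_t)~0)

enum reg { REG_CTRL, REG_STATUS, REG_TDLAR, REG_MAX };

/* Per-variant offset table; REG_TDLAR does not exist on this variant. */
static const uint16_t offsets[REG_MAX] = {
	[REG_CTRL]   = 0x00,
	[REG_STATUS] = 0x04,
	[REG_TDLAR]  = REG_OFFSET_INVALID,
};

static uint32_t regs[0x100 / 4]; /* fake MMIO window */

static void reg_write(enum reg idx, uint32_t val)
{
	uint16_t off = offsets[idx];

	if (off == REG_OFFSET_INVALID) { /* WARN_ON() in the driver */
		fprintf(stderr, "write to unmapped register %d\n", idx);
		return;
	}
	regs[off / 4] = val;
}

static uint32_t reg_read(enum reg idx)
{
	uint16_t off = offsets[idx];

	if (off == REG_OFFSET_INVALID)
		return ~0U; /* all-ones, like a faulted MMIO read */
	return regs[off / 4];
}

int main(void)
{
	reg_write(REG_CTRL, 0x1);
	assert(reg_read(REG_CTRL) == 0x1);
	reg_write(REG_TDLAR, 0x2);          /* rejected with a warning */
	assert(reg_read(REG_TDLAR) == ~0U); /* sentinel value */
	return 0;
}
```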
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 50382b1c9ddc..72fcfc924589 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -283,7 +283,7 @@ enum DMAC_IM_BIT {
283 DMAC_M_RINT1 = 0x00000001, 283 DMAC_M_RINT1 = 0x00000001,
284}; 284};
285 285
286/* Receive descriptor bit */ 286/* Receive descriptor 0 bits */
287enum RD_STS_BIT { 287enum RD_STS_BIT {
288 RD_RACT = 0x80000000, RD_RDLE = 0x40000000, 288 RD_RACT = 0x80000000, RD_RDLE = 0x40000000,
289 RD_RFP1 = 0x20000000, RD_RFP0 = 0x10000000, 289 RD_RFP1 = 0x20000000, RD_RFP0 = 0x10000000,
@@ -298,6 +298,12 @@ enum RD_STS_BIT {
298#define RDFEND RD_RFP0 298#define RDFEND RD_RFP0
299#define RD_RFP (RD_RFP1|RD_RFP0) 299#define RD_RFP (RD_RFP1|RD_RFP0)
300 300
301/* Receive descriptor 1 bits */
302enum RD_LEN_BIT {
303 RD_RFL = 0x0000ffff, /* receive frame length */
304 RD_RBL = 0xffff0000, /* receive buffer length */
305};
306
301/* FCFTR */ 307/* FCFTR */
302enum FCFTR_BIT { 308enum FCFTR_BIT {
303 FCFTR_RFF2 = 0x00040000, FCFTR_RFF1 = 0x00020000, 309 FCFTR_RFF2 = 0x00040000, FCFTR_RFF1 = 0x00020000,
@@ -307,7 +313,7 @@ enum FCFTR_BIT {
307#define DEFAULT_FIFO_F_D_RFF (FCFTR_RFF2 | FCFTR_RFF1 | FCFTR_RFF0) 313#define DEFAULT_FIFO_F_D_RFF (FCFTR_RFF2 | FCFTR_RFF1 | FCFTR_RFF0)
308#define DEFAULT_FIFO_F_D_RFD (FCFTR_RFD2 | FCFTR_RFD1 | FCFTR_RFD0) 314#define DEFAULT_FIFO_F_D_RFD (FCFTR_RFD2 | FCFTR_RFD1 | FCFTR_RFD0)
309 315
310/* Transmit descriptor bit */ 316/* Transmit descriptor 0 bits */
311enum TD_STS_BIT { 317enum TD_STS_BIT {
312 TD_TACT = 0x80000000, TD_TDLE = 0x40000000, 318 TD_TACT = 0x80000000, TD_TDLE = 0x40000000,
313 TD_TFP1 = 0x20000000, TD_TFP0 = 0x10000000, 319 TD_TFP1 = 0x20000000, TD_TFP0 = 0x10000000,
@@ -317,6 +323,11 @@ enum TD_STS_BIT {
317#define TDFEND TD_TFP0 323#define TDFEND TD_TFP0
318#define TD_TFP (TD_TFP1|TD_TFP0) 324#define TD_TFP (TD_TFP1|TD_TFP0)
319 325
326/* Transmit descriptor 1 bits */
327enum TD_LEN_BIT {
328 TD_TBL = 0xffff0000, /* transmit buffer length */
329};
330
320/* RMCR */ 331/* RMCR */
321enum RMCR_BIT { 332enum RMCR_BIT {
322 RMCR_RNC = 0x00000001, 333 RMCR_RNC = 0x00000001,
@@ -425,15 +436,9 @@ enum TSU_FWSLC_BIT {
425 */ 436 */
426struct sh_eth_txdesc { 437struct sh_eth_txdesc {
427 u32 status; /* TD0 */ 438 u32 status; /* TD0 */
428#if defined(__LITTLE_ENDIAN) 439 u32 len; /* TD1 */
429 u16 pad0; /* TD1 */
430 u16 buffer_length; /* TD1 */
431#else
432 u16 buffer_length; /* TD1 */
433 u16 pad0; /* TD1 */
434#endif
435 u32 addr; /* TD2 */ 440 u32 addr; /* TD2 */
436 u32 pad1; /* padding data */ 441 u32 pad0; /* padding data */
437} __aligned(2) __packed; 442} __aligned(2) __packed;
438 443
439/* The sh ether Rx buffer descriptors. 444/* The sh ether Rx buffer descriptors.
@@ -441,13 +446,7 @@ struct sh_eth_txdesc {
441 */ 446 */
442struct sh_eth_rxdesc { 447struct sh_eth_rxdesc {
443 u32 status; /* RD0 */ 448 u32 status; /* RD0 */
444#if defined(__LITTLE_ENDIAN) 449 u32 len; /* RD1 */
445 u16 frame_length; /* RD1 */
446 u16 buffer_length; /* RD1 */
447#else
448 u16 buffer_length; /* RD1 */
449 u16 frame_length; /* RD1 */
450#endif
451 u32 addr; /* RD2 */ 450 u32 addr; /* RD2 */
452 u32 pad0; /* padding data */ 451 u32 pad0; /* padding data */
453} __aligned(2) __packed; 452} __aligned(2) __packed;
@@ -546,31 +545,6 @@ static inline void sh_eth_soft_swap(char *src, int len)
546#endif 545#endif
547} 546}
548 547
549#define SH_ETH_OFFSET_INVALID ((u16) ~0)
550
551static inline void sh_eth_write(struct net_device *ndev, u32 data,
552 int enum_index)
553{
554 struct sh_eth_private *mdp = netdev_priv(ndev);
555 u16 offset = mdp->reg_offset[enum_index];
556
557 if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
558 return;
559
560 iowrite32(data, mdp->addr + offset);
561}
562
563static inline u32 sh_eth_read(struct net_device *ndev, int enum_index)
564{
565 struct sh_eth_private *mdp = netdev_priv(ndev);
566 u16 offset = mdp->reg_offset[enum_index];
567
568 if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
569 return ~0U;
570
571 return ioread32(mdp->addr + offset);
572}
573
574static inline void *sh_eth_tsu_get_offset(struct sh_eth_private *mdp, 548static inline void *sh_eth_tsu_get_offset(struct sh_eth_private *mdp,
575 int enum_index) 549 int enum_index)
576{ 550{
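The descriptor rework above replaces the endian-dependent `u16` pairs with a single `u32` word: the buffer length occupies the top 16 bits (`TD_TBL`/`RD_RBL`) and the hardware-written frame length the bottom 16 (`RD_RFL`), with the whole word converted via `cpu_to_edmac()`/`edmac_to_cpu()` for descriptor endianness. A small sketch of just the packing arithmetic (endian conversion omitted):

```c
#include <assert.h>
#include <stdint.h>

#define RD_RFL 0x0000ffffu /* receive frame length  */
#define RD_RBL 0xffff0000u /* receive buffer length */

/* Pack a buffer length into the high half of the descriptor word,
 * as sh_eth does with "buf_len << 16". */
static uint32_t pack_buf_len(uint32_t buf_len)
{
	return buf_len << 16;
}

static uint32_t unpack_buf_len(uint32_t len_word)
{
	return (len_word & RD_RBL) >> 16;
}

static uint32_t unpack_frame_len(uint32_t len_word)
{
	return len_word & RD_RFL;
}

int main(void)
{
	/* The hardware writes the frame length into the low half on
	 * completion; ORing it in here simulates that. */
	uint32_t word = pack_buf_len(1536) | 60;

	assert(unpack_buf_len(word) == 1536);
	assert(unpack_frame_len(word) == 60);
	return 0;
}
```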
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index bc6d21b471be..e6a084a6be12 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -3299,7 +3299,8 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
3299 3299
3300 new_spec.priority = EFX_FILTER_PRI_AUTO; 3300 new_spec.priority = EFX_FILTER_PRI_AUTO;
3301 new_spec.flags = (EFX_FILTER_FLAG_RX | 3301 new_spec.flags = (EFX_FILTER_FLAG_RX |
3302 EFX_FILTER_FLAG_RX_RSS); 3302 (efx_rss_enabled(efx) ?
3303 EFX_FILTER_FLAG_RX_RSS : 0));
3303 new_spec.dmaq_id = 0; 3304 new_spec.dmaq_id = 0;
3304 new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT; 3305 new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT;
3305 rc = efx_ef10_filter_push(efx, &new_spec, 3306 rc = efx_ef10_filter_push(efx, &new_spec,
@@ -3921,6 +3922,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
3921{ 3922{
3922 struct efx_ef10_filter_table *table = efx->filter_state; 3923 struct efx_ef10_filter_table *table = efx->filter_state;
3923 struct efx_ef10_dev_addr *addr_list; 3924 struct efx_ef10_dev_addr *addr_list;
3925 enum efx_filter_flags filter_flags;
3924 struct efx_filter_spec spec; 3926 struct efx_filter_spec spec;
3925 u8 baddr[ETH_ALEN]; 3927 u8 baddr[ETH_ALEN];
3926 unsigned int i, j; 3928 unsigned int i, j;
@@ -3935,11 +3937,11 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
3935 addr_count = table->dev_uc_count; 3937 addr_count = table->dev_uc_count;
3936 } 3938 }
3937 3939
3940 filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0;
3941
3938 /* Insert/renew filters */ 3942 /* Insert/renew filters */
3939 for (i = 0; i < addr_count; i++) { 3943 for (i = 0; i < addr_count; i++) {
3940 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, 3944 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
3941 EFX_FILTER_FLAG_RX_RSS,
3942 0);
3943 efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, 3945 efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
3944 addr_list[i].addr); 3946 addr_list[i].addr);
3945 rc = efx_ef10_filter_insert(efx, &spec, true); 3947 rc = efx_ef10_filter_insert(efx, &spec, true);
@@ -3968,9 +3970,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
3968 3970
3969 if (multicast && rollback) { 3971 if (multicast && rollback) {
3970 /* Also need an Ethernet broadcast filter */ 3972 /* Also need an Ethernet broadcast filter */
3971 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, 3973 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
3972 EFX_FILTER_FLAG_RX_RSS,
3973 0);
3974 eth_broadcast_addr(baddr); 3974 eth_broadcast_addr(baddr);
3975 efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, baddr); 3975 efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, baddr);
3976 rc = efx_ef10_filter_insert(efx, &spec, true); 3976 rc = efx_ef10_filter_insert(efx, &spec, true);
@@ -4000,13 +4000,14 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast,
4000{ 4000{
4001 struct efx_ef10_filter_table *table = efx->filter_state; 4001 struct efx_ef10_filter_table *table = efx->filter_state;
4002 struct efx_ef10_nic_data *nic_data = efx->nic_data; 4002 struct efx_ef10_nic_data *nic_data = efx->nic_data;
4003 enum efx_filter_flags filter_flags;
4003 struct efx_filter_spec spec; 4004 struct efx_filter_spec spec;
4004 u8 baddr[ETH_ALEN]; 4005 u8 baddr[ETH_ALEN];
4005 int rc; 4006 int rc;
4006 4007
4007 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, 4008 filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0;
4008 EFX_FILTER_FLAG_RX_RSS, 4009
4009 0); 4010 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
4010 4011
4011 if (multicast) 4012 if (multicast)
4012 efx_filter_set_mc_def(&spec); 4013 efx_filter_set_mc_def(&spec);
@@ -4023,8 +4024,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast,
4023 if (!nic_data->workaround_26807) { 4024 if (!nic_data->workaround_26807) {
4024 /* Also need an Ethernet broadcast filter */ 4025 /* Also need an Ethernet broadcast filter */
4025 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, 4026 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
4026 EFX_FILTER_FLAG_RX_RSS, 4027 filter_flags, 0);
4027 0);
4028 eth_broadcast_addr(baddr); 4028 eth_broadcast_addr(baddr);
4029 efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, 4029 efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
4030 baddr); 4030 baddr);
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 1aaf76c1ace8..10827476bc0b 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -76,6 +76,11 @@ void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
76#define EFX_TXQ_MAX_ENT(efx) (EFX_WORKAROUND_35388(efx) ? \ 76#define EFX_TXQ_MAX_ENT(efx) (EFX_WORKAROUND_35388(efx) ? \
77 EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE) 77 EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE)
78 78
79static inline bool efx_rss_enabled(struct efx_nic *efx)
80{
81 return efx->rss_spread > 1;
82}
83
79/* Filters */ 84/* Filters */
80 85
81void efx_mac_reconfigure(struct efx_nic *efx); 86void efx_mac_reconfigure(struct efx_nic *efx);
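`efx_rss_enabled()` centralizes the "spreading over more than one queue" test so callers can compose the RSS filter flag conditionally, as the ef10.c and farch.c hunks do. A compact model of the idea (constants and struct are illustrative):

```c
#include <stdbool.h>
#include <stdio.h>

#define FILTER_FLAG_RX     0x01u
#define FILTER_FLAG_RX_RSS 0x02u

struct nic { unsigned int rss_spread; };

/* Mirrors efx_rss_enabled(): spreading over a single queue is no
 * spreading at all, so RSS must not be requested. */
static bool rss_enabled(const struct nic *nic)
{
	return nic->rss_spread > 1;
}

static unsigned int filter_flags(const struct nic *nic)
{
	return FILTER_FLAG_RX | (rss_enabled(nic) ? FILTER_FLAG_RX_RSS : 0);
}

int main(void)
{
	struct nic one_q = { .rss_spread = 1 };
	struct nic many_q = { .rss_spread = 8 };

	printf("1 queue:  flags=0x%x\n", filter_flags(&one_q));  /* 0x1 */
	printf("8 queues: flags=0x%x\n", filter_flags(&many_q)); /* 0x3 */
	return 0;
}
```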
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index 5a1c5a8f278a..133e9e35be9e 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -2242,7 +2242,7 @@ efx_farch_filter_init_rx_auto(struct efx_nic *efx,
2242 */ 2242 */
2243 spec->priority = EFX_FILTER_PRI_AUTO; 2243 spec->priority = EFX_FILTER_PRI_AUTO;
2244 spec->flags = (EFX_FILTER_FLAG_RX | 2244 spec->flags = (EFX_FILTER_FLAG_RX |
2245 (efx->n_rx_channels > 1 ? EFX_FILTER_FLAG_RX_RSS : 0) | 2245 (efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0) |
2246 (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0)); 2246 (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0));
2247 spec->dmaq_id = 0; 2247 spec->dmaq_id = 0;
2248} 2248}
diff --git a/drivers/net/ethernet/sfc/txc43128_phy.c b/drivers/net/ethernet/sfc/txc43128_phy.c
index 3d5ee3259885..194f67d9f3bf 100644
--- a/drivers/net/ethernet/sfc/txc43128_phy.c
+++ b/drivers/net/ethernet/sfc/txc43128_phy.c
@@ -418,7 +418,7 @@ static void txc_reset_logic_mmd(struct efx_nic *efx, int mmd)
418 418
419 val |= (1 << TXC_GLCMD_LMTSWRST_LBN); 419 val |= (1 << TXC_GLCMD_LMTSWRST_LBN);
420 efx_mdio_write(efx, mmd, TXC_GLRGS_GLCMD, val); 420 efx_mdio_write(efx, mmd, TXC_GLRGS_GLCMD, val);
421 while (tries--) { 421 while (--tries) {
422 val = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD); 422 val = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD);
423 if (!(val & (1 << TXC_GLCMD_LMTSWRST_LBN))) 423 if (!(val & (1 << TXC_GLCMD_LMTSWRST_LBN)))
424 break; 424 break;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
index 7f6f4a4fcc70..58c05acc2aab 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -299,16 +299,17 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
299 if (IS_PHY_IF_MODE_GBIT(dwmac->interface)) { 299 if (IS_PHY_IF_MODE_GBIT(dwmac->interface)) {
300 const char *rs; 300 const char *rs;
301 301
302 dwmac->tx_retime_src = TX_RETIME_SRC_CLKGEN;
303
302 err = of_property_read_string(np, "st,tx-retime-src", &rs); 304 err = of_property_read_string(np, "st,tx-retime-src", &rs);
303 if (err < 0) { 305 if (err < 0) {
304 dev_warn(dev, "Use internal clock source\n"); 306 dev_warn(dev, "Use internal clock source\n");
305 dwmac->tx_retime_src = TX_RETIME_SRC_CLKGEN; 307 } else {
306 } else if (!strcasecmp(rs, "clk_125")) { 308 if (!strcasecmp(rs, "clk_125"))
307 dwmac->tx_retime_src = TX_RETIME_SRC_CLK_125; 309 dwmac->tx_retime_src = TX_RETIME_SRC_CLK_125;
308 } else if (!strcasecmp(rs, "txclk")) { 310 else if (!strcasecmp(rs, "txclk"))
309 dwmac->tx_retime_src = TX_RETIME_SRC_TXCLK; 311 dwmac->tx_retime_src = TX_RETIME_SRC_TXCLK;
310 } 312 }
311
312 dwmac->speed = SPEED_1000; 313 dwmac->speed = SPEED_1000;
313 } 314 }
314 315
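The dwmac-sti change assigns the CLKGEN default before parsing the optional `st,tx-retime-src` property, so a missing or unrecognized string silently keeps the default instead of leaving the field unset. A userspace sketch where a NULL argument plays the "property missing" case:

```c
#include <stdio.h>
#include <strings.h>

enum retime_src { SRC_CLKGEN, SRC_CLK_125, SRC_TXCLK };

/* Stand-in for of_property_read_string(); NULL models -EINVAL. */
static enum retime_src parse_retime_src(const char *prop)
{
	enum retime_src src = SRC_CLKGEN; /* default set up front */

	if (!prop)
		fprintf(stderr, "Use internal clock source\n");
	else if (!strcasecmp(prop, "clk_125"))
		src = SRC_CLK_125;
	else if (!strcasecmp(prop, "txclk"))
		src = SRC_TXCLK;
	/* Unrecognized strings also fall through to the default. */

	return src;
}

int main(void)
{
	printf("%d %d %d\n",
	       parse_retime_src(NULL),      /* 0: default */
	       parse_retime_src("CLK_125"), /* 1: case-insensitive match */
	       parse_retime_src("bogus"));  /* 0: default kept */
	return 0;
}
```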
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
index 52b8ed9bd87c..adff46375a32 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -153,7 +153,11 @@ static int sun7i_gmac_probe(struct platform_device *pdev)
153 if (ret) 153 if (ret)
154 return ret; 154 return ret;
155 155
156 return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); 156 ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
157 if (ret)
158 sun7i_gmac_exit(pdev, plat_dat->bsp_priv);
159
160 return ret;
157} 161}
158 162
159static const struct of_device_id sun7i_dwmac_match[] = { 163static const struct of_device_id sun7i_dwmac_match[] = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 64d8aa4e0cad..a5b869eb4678 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -185,7 +185,7 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv)
185 priv->clk_csr = STMMAC_CSR_100_150M; 185 priv->clk_csr = STMMAC_CSR_100_150M;
186 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M)) 186 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
187 priv->clk_csr = STMMAC_CSR_150_250M; 187 priv->clk_csr = STMMAC_CSR_150_250M;
188 else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M)) 188 else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
189 priv->clk_csr = STMMAC_CSR_250_300M; 189 priv->clk_csr = STMMAC_CSR_250_300M;
190 } 190 }
191} 191}
@@ -2232,6 +2232,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
2232 2232
2233 frame_len = priv->hw->desc->get_rx_frame_len(p, coe); 2233 frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
2234 2234
2235 /* check if frame_len fits the preallocated memory */
2236 if (frame_len > priv->dma_buf_sz) {
2237 priv->dev->stats.rx_length_errors++;
2238 break;
2239 }
2240
2235 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 2241 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
2236 * Type frames (LLC/LLC-SNAP) 2242 * Type frames (LLC/LLC-SNAP)
2237 */ 2243 */
@@ -3040,8 +3046,6 @@ int stmmac_suspend(struct net_device *ndev)
3040 priv->hw->dma->stop_tx(priv->ioaddr); 3046 priv->hw->dma->stop_tx(priv->ioaddr);
3041 priv->hw->dma->stop_rx(priv->ioaddr); 3047 priv->hw->dma->stop_rx(priv->ioaddr);
3042 3048
3043 stmmac_clear_descriptors(priv);
3044
3045 /* Enable Power down mode by programming the PMT regs */ 3049 /* Enable Power down mode by programming the PMT regs */
3046 if (device_may_wakeup(priv->device)) { 3050 if (device_may_wakeup(priv->device)) {
3047 priv->hw->mac->pmt(priv->hw, priv->wolopts); 3051 priv->hw->mac->pmt(priv->hw, priv->wolopts);
@@ -3099,9 +3103,15 @@ int stmmac_resume(struct net_device *ndev)
3099 3103
3100 netif_device_attach(ndev); 3104 netif_device_attach(ndev);
3101 3105
3102 init_dma_desc_rings(ndev, GFP_ATOMIC); 3106 priv->cur_rx = 0;
3107 priv->dirty_rx = 0;
3108 priv->dirty_tx = 0;
3109 priv->cur_tx = 0;
3110 stmmac_clear_descriptors(priv);
3111
3103 stmmac_hw_setup(ndev, false); 3112 stmmac_hw_setup(ndev, false);
3104 stmmac_init_tx_coalesce(priv); 3113 stmmac_init_tx_coalesce(priv);
3114 stmmac_set_rx_mode(ndev);
3105 3115
3106 napi_enable(&priv->napi); 3116 napi_enable(&priv->napi);
3107 3117
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index ebf6abc4853f..bba670c42e37 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -138,7 +138,6 @@ int stmmac_mdio_reset(struct mii_bus *bus)
138 138
139#ifdef CONFIG_OF 139#ifdef CONFIG_OF
140 if (priv->device->of_node) { 140 if (priv->device->of_node) {
141 int reset_gpio, active_low;
142 141
143 if (data->reset_gpio < 0) { 142 if (data->reset_gpio < 0) {
144 struct device_node *np = priv->device->of_node; 143 struct device_node *np = priv->device->of_node;
@@ -154,24 +153,23 @@ int stmmac_mdio_reset(struct mii_bus *bus)
154 "snps,reset-active-low"); 153 "snps,reset-active-low");
155 of_property_read_u32_array(np, 154 of_property_read_u32_array(np,
156 "snps,reset-delays-us", data->delays, 3); 155 "snps,reset-delays-us", data->delays, 3);
157 }
158 156
159 reset_gpio = data->reset_gpio; 157 if (gpio_request(data->reset_gpio, "mdio-reset"))
160 active_low = data->active_low; 158 return 0;
159 }
161 160
162 if (!gpio_request(reset_gpio, "mdio-reset")) { 161 gpio_direction_output(data->reset_gpio,
163 gpio_direction_output(reset_gpio, active_low ? 1 : 0); 162 data->active_low ? 1 : 0);
164 if (data->delays[0]) 163 if (data->delays[0])
165 msleep(DIV_ROUND_UP(data->delays[0], 1000)); 164 msleep(DIV_ROUND_UP(data->delays[0], 1000));
166 165
167 gpio_set_value(reset_gpio, active_low ? 0 : 1); 166 gpio_set_value(data->reset_gpio, data->active_low ? 0 : 1);
168 if (data->delays[1]) 167 if (data->delays[1])
169 msleep(DIV_ROUND_UP(data->delays[1], 1000)); 168 msleep(DIV_ROUND_UP(data->delays[1], 1000));
170 169
171 gpio_set_value(reset_gpio, active_low ? 1 : 0); 170 gpio_set_value(data->reset_gpio, data->active_low ? 1 : 0);
172 if (data->delays[2]) 171 if (data->delays[2])
173 msleep(DIV_ROUND_UP(data->delays[2], 1000)); 172 msleep(DIV_ROUND_UP(data->delays[2], 1000));
174 }
175 } 173 }
176#endif 174#endif
177 175
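The stmmac_mdio cleanup drops the redundant local copies but keeps the same three-phase reset pulse, with per-phase delays taken from `snps,reset-delays-us` (microseconds, rounded up to milliseconds). A sketch of the sequencing with stubbed GPIO and sleep calls:

```c
#include <stdbool.h>
#include <stdio.h>

/* Stubs for gpio_set_value() and msleep(). */
static void gpio_set(int value) { printf("gpio <- %d\n", value); }
static void sleep_ms(unsigned int ms) { printf("sleep %ums\n", ms); }

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static void mdio_reset_pulse(bool active_low, const unsigned int delays_us[3])
{
	/* Phase 0: drive the line to its initial level. */
	gpio_set(active_low ? 1 : 0);
	if (delays_us[0])
		sleep_ms(DIV_ROUND_UP(delays_us[0], 1000));

	/* Phase 1: toggle to the opposite level for the reset pulse. */
	gpio_set(active_low ? 0 : 1);
	if (delays_us[1])
		sleep_ms(DIV_ROUND_UP(delays_us[1], 1000));

	/* Phase 2: restore the initial level. */
	gpio_set(active_low ? 1 : 0);
	if (delays_us[2])
		sleep_ms(DIV_ROUND_UP(delays_us[2], 1000));
}

int main(void)
{
	const unsigned int delays_us[3] = { 10000, 10000, 10000 };

	mdio_reset_pulse(true, delays_us);
	return 0;
}
```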
diff --git a/drivers/net/ethernet/ti/cpsw-common.c b/drivers/net/ethernet/ti/cpsw-common.c
index c08be62bceba..1562ab4151e1 100644
--- a/drivers/net/ethernet/ti/cpsw-common.c
+++ b/drivers/net/ethernet/ti/cpsw-common.c
@@ -78,6 +78,9 @@ static int cpsw_am33xx_cm_get_macid(struct device *dev, u16 offset, int slave,
78 78
79int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr) 79int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr)
80{ 80{
81 if (of_machine_is_compatible("ti,dm8148"))
82 return cpsw_am33xx_cm_get_macid(dev, 0x630, slave, mac_addr);
83
81 if (of_machine_is_compatible("ti,am33xx")) 84 if (of_machine_is_compatible("ti,am33xx"))
82 return cpsw_am33xx_cm_get_macid(dev, 0x630, slave, mac_addr); 85 return cpsw_am33xx_cm_get_macid(dev, 0x630, slave, mac_addr);
83 86
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 48b92c9de12a..fc958067d10a 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -2026,45 +2026,54 @@ static int cpsw_probe_dt(struct cpsw_priv *priv,
2026 for_each_child_of_node(node, slave_node) { 2026 for_each_child_of_node(node, slave_node) {
2027 struct cpsw_slave_data *slave_data = data->slave_data + i; 2027 struct cpsw_slave_data *slave_data = data->slave_data + i;
2028 const void *mac_addr = NULL; 2028 const void *mac_addr = NULL;
2029 u32 phyid;
2030 int lenp; 2029 int lenp;
2031 const __be32 *parp; 2030 const __be32 *parp;
2032 struct device_node *mdio_node;
2033 struct platform_device *mdio;
2034 2031
2035 /* This is no slave child node, continue */ 2032 /* This is no slave child node, continue */
2036 if (strcmp(slave_node->name, "slave")) 2033 if (strcmp(slave_node->name, "slave"))
2037 continue; 2034 continue;
2038 2035
2039 priv->phy_node = of_parse_phandle(slave_node, "phy-handle", 0); 2036 priv->phy_node = of_parse_phandle(slave_node, "phy-handle", 0);
2037 parp = of_get_property(slave_node, "phy_id", &lenp);
2040 if (of_phy_is_fixed_link(slave_node)) { 2038 if (of_phy_is_fixed_link(slave_node)) {
2041 struct phy_device *pd; 2039 struct device_node *phy_node;
2040 struct phy_device *phy_dev;
2042 2041
2042 /* In the case of a fixed PHY, the DT node associated
2043 * to the PHY is the Ethernet MAC DT node.
2044 */
2043 ret = of_phy_register_fixed_link(slave_node); 2045 ret = of_phy_register_fixed_link(slave_node);
2044 if (ret) 2046 if (ret)
2045 return ret; 2047 return ret;
2046 pd = of_phy_find_device(slave_node); 2048 phy_node = of_node_get(slave_node);
2047 if (!pd) 2049 phy_dev = of_phy_find_device(phy_node);
2050 if (!phy_dev)
2048 return -ENODEV; 2051 return -ENODEV;
2049 snprintf(slave_data->phy_id, sizeof(slave_data->phy_id), 2052 snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
2050 PHY_ID_FMT, pd->bus->id, pd->phy_id); 2053 PHY_ID_FMT, phy_dev->bus->id, phy_dev->addr);
2051 goto no_phy_slave; 2054 } else if (parp) {
2052 } 2055 u32 phyid;
2053 parp = of_get_property(slave_node, "phy_id", &lenp); 2056 struct device_node *mdio_node;
2054 if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) { 2057 struct platform_device *mdio;
2055 dev_err(&pdev->dev, "Missing slave[%d] phy_id property\n", i); 2058
2059 if (lenp != (sizeof(__be32) * 2)) {
2060 dev_err(&pdev->dev, "Invalid slave[%d] phy_id property\n", i);
2061 goto no_phy_slave;
2062 }
2063 mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
2064 phyid = be32_to_cpup(parp+1);
2065 mdio = of_find_device_by_node(mdio_node);
2066 of_node_put(mdio_node);
2067 if (!mdio) {
2068 dev_err(&pdev->dev, "Missing mdio platform device\n");
2069 return -EINVAL;
2070 }
2071 snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
2072 PHY_ID_FMT, mdio->name, phyid);
2073 } else {
2074 dev_err(&pdev->dev, "No slave[%d] phy_id or fixed-link property\n", i);
2056 goto no_phy_slave; 2075 goto no_phy_slave;
2057 } 2076 }
2058 mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
2059 phyid = be32_to_cpup(parp+1);
2060 mdio = of_find_device_by_node(mdio_node);
2061 of_node_put(mdio_node);
2062 if (!mdio) {
2063 dev_err(&pdev->dev, "Missing mdio platform device\n");
2064 return -EINVAL;
2065 }
2066 snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
2067 PHY_ID_FMT, mdio->name, phyid);
2068 slave_data->phy_if = of_get_phy_mode(slave_node); 2077 slave_data->phy_if = of_get_phy_mode(slave_node);
2069 if (slave_data->phy_if < 0) { 2078 if (slave_data->phy_if < 0) {
2070 dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n", 2079 dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
@@ -2418,7 +2427,7 @@ static int cpsw_probe(struct platform_device *pdev)
2418 ndev->irq = platform_get_irq(pdev, 1); 2427 ndev->irq = platform_get_irq(pdev, 1);
2419 if (ndev->irq < 0) { 2428 if (ndev->irq < 0) {
2420 dev_err(priv->dev, "error getting irq resource\n"); 2429 dev_err(priv->dev, "error getting irq resource\n");
2421 ret = -ENOENT; 2430 ret = ndev->irq;
2422 goto clean_ale_ret; 2431 goto clean_ale_ret;
2423 } 2432 }
2424 2433
@@ -2439,8 +2448,10 @@ static int cpsw_probe(struct platform_device *pdev)
2439 2448
2440 /* RX IRQ */ 2449 /* RX IRQ */
2441 irq = platform_get_irq(pdev, 1); 2450 irq = platform_get_irq(pdev, 1);
2442 if (irq < 0) 2451 if (irq < 0) {
2452 ret = irq;
2443 goto clean_ale_ret; 2453 goto clean_ale_ret;
2454 }
2444 2455
2445 priv->irqs_table[0] = irq; 2456 priv->irqs_table[0] = irq;
2446 ret = devm_request_irq(&pdev->dev, irq, cpsw_rx_interrupt, 2457 ret = devm_request_irq(&pdev->dev, irq, cpsw_rx_interrupt,
@@ -2452,8 +2463,10 @@ static int cpsw_probe(struct platform_device *pdev)
2452 2463
2453 /* TX IRQ */ 2464 /* TX IRQ */
2454 irq = platform_get_irq(pdev, 2); 2465 irq = platform_get_irq(pdev, 2);
2455 if (irq < 0) 2466 if (irq < 0) {
2467 ret = irq;
2456 goto clean_ale_ret; 2468 goto clean_ale_ret;
2469 }
2457 2470
2458 priv->irqs_table[1] = irq; 2471 priv->irqs_table[1] = irq;
2459 ret = devm_request_irq(&pdev->dev, irq, cpsw_tx_interrupt, 2472 ret = devm_request_irq(&pdev->dev, irq, cpsw_tx_interrupt,
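The cpsw hunks above stop rewriting `platform_get_irq()` failures to `-ENOENT` and propagate the original errno instead. That matters because `-EPROBE_DEFER` must survive up to the driver core so the probe is retried once the interrupt controller appears. A sketch (the `fake_get_irq()` helper is hypothetical):

```c
#include <errno.h>
#include <stdio.h>

#define EPROBE_DEFER 517 /* kernel-internal errno value */

/* Stand-in for platform_get_irq(): may legitimately return
 * -EPROBE_DEFER when the interrupt parent is not ready yet. */
static int fake_get_irq(int index)
{
	return index == 0 ? 42 : -EPROBE_DEFER;
}

static int probe(void)
{
	int irq, ret;

	irq = fake_get_irq(1);
	if (irq < 0) {
		ret = irq; /* propagate; do not rewrite to -ENOENT */
		goto clean;
	}
	printf("got irq %d\n", irq);
	return 0;

clean:
	return ret;
}

int main(void)
{
	int err = probe();

	/* A deferred probe is retried later; -ENOENT would be fatal. */
	printf("probe returned %d\n", err);
	return err == -EPROBE_DEFER ? 0 : 1;
}
```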
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index de5c30c9f059..58efdec12f30 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -967,8 +967,6 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
967 err = udp_tunnel6_xmit_skb(dst, gs6->sock->sk, skb, dev, 967 err = udp_tunnel6_xmit_skb(dst, gs6->sock->sk, skb, dev,
968 &fl6.saddr, &fl6.daddr, prio, ttl, 968 &fl6.saddr, &fl6.daddr, prio, ttl,
969 sport, geneve->dst_port, !udp_csum); 969 sport, geneve->dst_port, !udp_csum);
970
971 iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
972 return NETDEV_TX_OK; 970 return NETDEV_TX_OK;
973 971
974tx_error: 972tx_error:
@@ -1157,7 +1155,7 @@ static int geneve_configure(struct net *net, struct net_device *dev,
1157 struct geneve_net *gn = net_generic(net, geneve_net_id); 1155 struct geneve_net *gn = net_generic(net, geneve_net_id);
1158 struct geneve_dev *t, *geneve = netdev_priv(dev); 1156 struct geneve_dev *t, *geneve = netdev_priv(dev);
1159 bool tun_collect_md, tun_on_same_port; 1157 bool tun_collect_md, tun_on_same_port;
1160 int err; 1158 int err, encap_len;
1161 1159
1162 if (!remote) 1160 if (!remote)
1163 return -EINVAL; 1161 return -EINVAL;
@@ -1189,6 +1187,14 @@ static int geneve_configure(struct net *net, struct net_device *dev,
1189 if (t) 1187 if (t)
1190 return -EBUSY; 1188 return -EBUSY;
1191 1189
1190 /* make enough headroom for basic scenario */
1191 encap_len = GENEVE_BASE_HLEN + ETH_HLEN;
1192 if (remote->sa.sa_family == AF_INET)
1193 encap_len += sizeof(struct iphdr);
1194 else
1195 encap_len += sizeof(struct ipv6hdr);
1196 dev->needed_headroom = encap_len + ETH_HLEN;
1197
1192 if (metadata) { 1198 if (metadata) {
1193 if (tun_on_same_port) 1199 if (tun_on_same_port)
1194 return -EPERM; 1200 return -EPERM;
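The geneve change precomputes `needed_headroom` at configure time: tunnel header plus inner MAC header, plus the outer IP header for the remote's address family, plus the outer MAC header. The arithmetic, assuming the driver's 16-byte `GENEVE_BASE_HLEN` (outer UDP header plus the optionless Geneve header):

```c
#include <stdio.h>

#define ETH_HLEN         14
#define GENEVE_BASE_HLEN (8 + 8) /* UDP header + base Geneve header */
#define IPV4_HLEN        20
#define IPV6_HLEN        40

static int needed_headroom(int ipv6)
{
	int encap_len = GENEVE_BASE_HLEN + ETH_HLEN; /* tunnel + inner MAC */

	encap_len += ipv6 ? IPV6_HLEN : IPV4_HLEN;   /* outer IP header */
	return encap_len + ETH_HLEN;                 /* outer MAC header */
}

int main(void)
{
	printf("IPv4 headroom: %d bytes\n", needed_headroom(0)); /* 64 */
	printf("IPv6 headroom: %d bytes\n", needed_headroom(1)); /* 84 */
	return 0;
}
```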
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 7c4a4151ef0f..9f0b1c342b77 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -683,14 +683,14 @@ static void sixpack_close(struct tty_struct *tty)
683 if (!atomic_dec_and_test(&sp->refcnt)) 683 if (!atomic_dec_and_test(&sp->refcnt))
684 down(&sp->dead_sem); 684 down(&sp->dead_sem);
685 685
686 unregister_netdev(sp->dev); 686 del_timer_sync(&sp->tx_t);
687 687 del_timer_sync(&sp->resync_t);
688 del_timer(&sp->tx_t);
689 del_timer(&sp->resync_t);
690 688
691 /* Free all 6pack frame buffers. */ 689 /* Free all 6pack frame buffers. */
692 kfree(sp->rbuff); 690 kfree(sp->rbuff);
693 kfree(sp->xbuff); 691 kfree(sp->xbuff);
692
693 unregister_netdev(sp->dev);
694} 694}
695 695
696/* Perform I/O control on an active 6pack channel. */ 696/* Perform I/O control on an active 6pack channel. */
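The 6pack fix (and the matching mkiss one below) reorders teardown: timers are stopped synchronously and the buffers hanging off the private area are freed before `unregister_netdev()`, because unregistering drops the last reference and frees the allocation that contains the private data. A minimal model of that ordering constraint:

```c
#include <stdio.h>
#include <stdlib.h>

struct priv {
	char *rbuff;
	char *xbuff;
};

/* Stand-in for unregister_netdev() with destructor == free_netdev:
 * the private area, which lives inside the netdev allocation, is gone
 * once this returns. */
static void unregister_and_free(struct priv *p)
{
	puts("unregister: freeing netdev + priv");
	free(p);
}

static void close_channel(struct priv *p)
{
	/* Quiesce and release everything reachable through p first... */
	free(p->rbuff);
	free(p->xbuff);

	/* ...and only then drop the allocation that contains p itself.
	 * Doing this first would turn the frees above into
	 * use-after-free accesses. */
	unregister_and_free(p);
}

int main(void)
{
	struct priv *p = calloc(1, sizeof(*p));

	p->rbuff = malloc(64);
	p->xbuff = malloc(64);
	close_channel(p);
	return 0;
}
```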
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 216bfd350169..0b72b9de5207 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -798,13 +798,13 @@ static void mkiss_close(struct tty_struct *tty)
798 if (!atomic_dec_and_test(&ax->refcnt)) 798 if (!atomic_dec_and_test(&ax->refcnt))
799 down(&ax->dead_sem); 799 down(&ax->dead_sem);
800 800
801 unregister_netdev(ax->dev);
802
803 /* Free all AX25 frame buffers. */ 801 /* Free all AX25 frame buffers. */
804 kfree(ax->rbuff); 802 kfree(ax->rbuff);
805 kfree(ax->xbuff); 803 kfree(ax->xbuff);
806 804
807 ax->tty = NULL; 805 ax->tty = NULL;
806
807 unregister_netdev(ax->dev);
808} 808}
809 809
810/* Perform I/O control on an active ax25 channel. */ 810/* Perform I/O control on an active ax25 channel. */
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 54036ae0a388..0fc521941c71 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -498,7 +498,7 @@ static void macvtap_sock_write_space(struct sock *sk)
498 wait_queue_head_t *wqueue; 498 wait_queue_head_t *wqueue;
499 499
500 if (!sock_writeable(sk) || 500 if (!sock_writeable(sk) ||
501 !test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags)) 501 !test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
502 return; 502 return;
503 503
504 wqueue = sk_sleep(sk); 504 wqueue = sk_sleep(sk);
@@ -585,7 +585,7 @@ static unsigned int macvtap_poll(struct file *file, poll_table * wait)
585 mask |= POLLIN | POLLRDNORM; 585 mask |= POLLIN | POLLRDNORM;
586 586
587 if (sock_writeable(&q->sk) || 587 if (sock_writeable(&q->sk) ||
588 (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &q->sock.flags) && 588 (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &q->sock.flags) &&
589 sock_writeable(&q->sk))) 589 sock_writeable(&q->sk)))
590 mask |= POLLOUT | POLLWRNORM; 590 mask |= POLLOUT | POLLWRNORM;
591 591
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 07a6119121c3..3ce5d9514623 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -614,7 +614,7 @@ static struct mdio_device_id __maybe_unused broadcom_tbl[] = {
614 { PHY_ID_BCM5461, 0xfffffff0 }, 614 { PHY_ID_BCM5461, 0xfffffff0 },
615 { PHY_ID_BCM54616S, 0xfffffff0 }, 615 { PHY_ID_BCM54616S, 0xfffffff0 },
616 { PHY_ID_BCM5464, 0xfffffff0 }, 616 { PHY_ID_BCM5464, 0xfffffff0 },
617 { PHY_ID_BCM5482, 0xfffffff0 }, 617 { PHY_ID_BCM5481, 0xfffffff0 },
618 { PHY_ID_BCM5482, 0xfffffff0 }, 618 { PHY_ID_BCM5482, 0xfffffff0 },
619 { PHY_ID_BCM50610, 0xfffffff0 }, 619 { PHY_ID_BCM50610, 0xfffffff0 },
620 { PHY_ID_BCM50610M, 0xfffffff0 }, 620 { PHY_ID_BCM50610M, 0xfffffff0 },
diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c
index 908e8d486342..7f8e7662e28c 100644
--- a/drivers/net/phy/mdio-mux.c
+++ b/drivers/net/phy/mdio-mux.c
@@ -149,9 +149,14 @@ int mdio_mux_init(struct device *dev,
149 } 149 }
150 cb->bus_number = v; 150 cb->bus_number = v;
151 cb->parent = pb; 151 cb->parent = pb;
152
152 cb->mii_bus = mdiobus_alloc(); 153 cb->mii_bus = mdiobus_alloc();
154 if (!cb->mii_bus) {
155 ret_val = -ENOMEM;
156 of_node_put(child_bus_node);
157 break;
158 }
153 cb->mii_bus->priv = cb; 159 cb->mii_bus->priv = cb;
154
155 cb->mii_bus->irq = cb->phy_irq; 160 cb->mii_bus->irq = cb->phy_irq;
156 cb->mii_bus->name = "mdio_mux"; 161 cb->mii_bus->name = "mdio_mux";
157 snprintf(cb->mii_bus->id, MII_BUS_ID_SIZE, "%x.%x", 162 snprintf(cb->mii_bus->id, MII_BUS_ID_SIZE, "%x.%x",
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index cf6312fafea5..e13ad6cdcc22 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -339,9 +339,18 @@ static int ksz9021_config_init(struct phy_device *phydev)
339{ 339{
340 const struct device *dev = &phydev->dev; 340 const struct device *dev = &phydev->dev;
341 const struct device_node *of_node = dev->of_node; 341 const struct device_node *of_node = dev->of_node;
342 const struct device *dev_walker;
342 343
343 if (!of_node && dev->parent->of_node) 344 /* The Micrel driver has a deprecated option to place phy OF
344 of_node = dev->parent->of_node; 345 * properties in the MAC node. Walk up the tree of devices to
346 * find a device with an OF node.
347 */
348 dev_walker = &phydev->dev;
349 do {
350 of_node = dev_walker->of_node;
351 dev_walker = dev_walker->parent;
352
353 } while (!of_node && dev_walker);
345 354
346 if (of_node) { 355 if (of_node) {
347 ksz9021_load_values_from_of(phydev, of_node, 356 ksz9021_load_values_from_of(phydev, of_node,
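The micrel change turns a one-level parent lookup into a walk up the device hierarchy until an ancestor with an OF node is found. A self-contained model of the same do/while walker (the structs are simplified, not the kernel's):

```c
#include <stddef.h>
#include <stdio.h>

struct device_node { const char *name; };
struct device {
	struct device *parent;
	struct device_node *of_node;
};

/* Walk toward the root until some ancestor carries an OF node;
 * mirrors the loop in ksz9021_config_init(). */
static struct device_node *find_of_node(struct device *dev)
{
	struct device_node *node;

	do {
		node = dev->of_node;
		dev = dev->parent;
	} while (!node && dev);

	return node;
}

int main(void)
{
	struct device_node mac_node = { .name = "ethernet-mac" };
	struct device mac  = { .parent = NULL,  .of_node = &mac_node };
	struct device mdio = { .parent = &mac,  .of_node = NULL };
	struct device phy  = { .parent = &mdio, .of_node = NULL };
	struct device_node *n = find_of_node(&phy);

	printf("found: %s\n", n ? n->name : "(none)");
	return 0;
}
```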
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 48ce6ef400fe..47cd306dbb3c 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -448,7 +448,8 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
448 mdiobus_write(phydev->bus, mii_data->phy_id, 448 mdiobus_write(phydev->bus, mii_data->phy_id,
449 mii_data->reg_num, val); 449 mii_data->reg_num, val);
450 450
451 if (mii_data->reg_num == MII_BMCR && 451 if (mii_data->phy_id == phydev->addr &&
452 mii_data->reg_num == MII_BMCR &&
452 val & BMCR_RESET) 453 val & BMCR_RESET)
453 return phy_init_hw(phydev); 454 return phy_init_hw(phydev);
454 455
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 5e0b43283bce..0a37f840fcc5 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -568,6 +568,9 @@ static int pppoe_create(struct net *net, struct socket *sock, int kern)
568 sk->sk_family = PF_PPPOX; 568 sk->sk_family = PF_PPPOX;
569 sk->sk_protocol = PX_PROTO_OE; 569 sk->sk_protocol = PX_PROTO_OE;
570 570
571 INIT_WORK(&pppox_sk(sk)->proto.pppoe.padt_work,
572 pppoe_unbind_sock_work);
573
571 return 0; 574 return 0;
572} 575}
573 576
@@ -632,8 +635,6 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
632 635
633 lock_sock(sk); 636 lock_sock(sk);
634 637
635 INIT_WORK(&po->proto.pppoe.padt_work, pppoe_unbind_sock_work);
636
637 error = -EINVAL; 638 error = -EINVAL;
638 if (sp->sa_protocol != PX_PROTO_OE) 639 if (sp->sa_protocol != PX_PROTO_OE)
639 goto end; 640 goto end;
@@ -663,8 +664,13 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
663 po->pppoe_dev = NULL; 664 po->pppoe_dev = NULL;
664 } 665 }
665 666
666 memset(sk_pppox(po) + 1, 0, 667 po->pppoe_ifindex = 0;
667 sizeof(struct pppox_sock) - sizeof(struct sock)); 668 memset(&po->pppoe_pa, 0, sizeof(po->pppoe_pa));
669 memset(&po->pppoe_relay, 0, sizeof(po->pppoe_relay));
670 memset(&po->chan, 0, sizeof(po->chan));
671 po->next = NULL;
672 po->num = 0;
673
668 sk->sk_state = PPPOX_NONE; 674 sk->sk_state = PPPOX_NONE;
669 } 675 }
670 676
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index fc69e41d0950..597c53e0a2ec 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -419,6 +419,9 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
419 struct pptp_opt *opt = &po->proto.pptp; 419 struct pptp_opt *opt = &po->proto.pptp;
420 int error = 0; 420 int error = 0;
421 421
422 if (sockaddr_len < sizeof(struct sockaddr_pppox))
423 return -EINVAL;
424
422 lock_sock(sk); 425 lock_sock(sk);
423 426
424 opt->src_addr = sp->sa_addr.pptp; 427 opt->src_addr = sp->sa_addr.pptp;
@@ -440,6 +443,9 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
440 struct flowi4 fl4; 443 struct flowi4 fl4;
441 int error = 0; 444 int error = 0;
442 445
446 if (sockaddr_len < sizeof(struct sockaddr_pppox))
447 return -EINVAL;
448
443 if (sp->sa_protocol != PX_PROTO_PPTP) 449 if (sp->sa_protocol != PX_PROTO_PPTP)
444 return -EINVAL; 450 return -EINVAL;
445 451
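Both pptp hunks add the same guard: validate the caller-supplied `sockaddr_len` before reading any field of the address, closing an out-of-bounds read on short buffers. A sketch with a simplified address struct (not the real `struct sockaddr_pppox`):

```c
#include <errno.h>
#include <stdio.h>
#include <string.h>

struct sockaddr_demo {
	unsigned short family;
	unsigned short protocol;
	unsigned int   addr;
};

static int demo_connect(const void *uaddr, int sockaddr_len)
{
	struct sockaddr_demo sa;

	/* Check the length before touching any field: a short buffer
	 * would otherwise expose adjacent stack or heap bytes. */
	if (sockaddr_len < (int)sizeof(struct sockaddr_demo))
		return -EINVAL;

	memcpy(&sa, uaddr, sizeof(sa));
	printf("connecting to %u\n", sa.addr);
	return 0;
}

int main(void)
{
	struct sockaddr_demo sa = { .family = 1, .protocol = 2, .addr = 42 };

	printf("full:  %d\n", demo_connect(&sa, sizeof(sa))); /* 0 */
	printf("short: %d\n", demo_connect(&sa, 2));          /* -EINVAL */
	return 0;
}
```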
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index b1878faea397..f0db770e8b2f 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1040,7 +1040,7 @@ static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
1040 mask |= POLLIN | POLLRDNORM; 1040 mask |= POLLIN | POLLRDNORM;
1041 1041
1042 if (sock_writeable(sk) || 1042 if (sock_writeable(sk) ||
1043 (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags) && 1043 (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
1044 sock_writeable(sk))) 1044 sock_writeable(sk)))
1045 mask |= POLLOUT | POLLWRNORM; 1045 mask |= POLLOUT | POLLWRNORM;
1046 1046
@@ -1488,7 +1488,7 @@ static void tun_sock_write_space(struct sock *sk)
1488 if (!sock_writeable(sk)) 1488 if (!sock_writeable(sk))
1489 return; 1489 return;
1490 1490
1491 if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags)) 1491 if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
1492 return; 1492 return;
1493 1493
1494 wqueue = sk_sleep(sk); 1494 wqueue = sk_sleep(sk);
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index bbde9884ab8a..bdd83d95ec0a 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -100,7 +100,7 @@ static const struct net_device_ops cdc_mbim_netdev_ops = {
100 .ndo_stop = usbnet_stop, 100 .ndo_stop = usbnet_stop,
101 .ndo_start_xmit = usbnet_start_xmit, 101 .ndo_start_xmit = usbnet_start_xmit,
102 .ndo_tx_timeout = usbnet_tx_timeout, 102 .ndo_tx_timeout = usbnet_tx_timeout,
103 .ndo_change_mtu = usbnet_change_mtu, 103 .ndo_change_mtu = cdc_ncm_change_mtu,
104 .ndo_set_mac_address = eth_mac_addr, 104 .ndo_set_mac_address = eth_mac_addr,
105 .ndo_validate_addr = eth_validate_addr, 105 .ndo_validate_addr = eth_validate_addr,
106 .ndo_vlan_rx_add_vid = cdc_mbim_rx_add_vid, 106 .ndo_vlan_rx_add_vid = cdc_mbim_rx_add_vid,
@@ -158,7 +158,7 @@ static int cdc_mbim_bind(struct usbnet *dev, struct usb_interface *intf)
158 if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) 158 if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
159 goto err; 159 goto err;
160 160
161 ret = cdc_ncm_bind_common(dev, intf, data_altsetting, 0); 161 ret = cdc_ncm_bind_common(dev, intf, data_altsetting, dev->driver_info->data);
162 if (ret) 162 if (ret)
163 goto err; 163 goto err;
164 164
@@ -582,6 +582,26 @@ static const struct driver_info cdc_mbim_info_zlp = {
582 .tx_fixup = cdc_mbim_tx_fixup, 582 .tx_fixup = cdc_mbim_tx_fixup,
583}; 583};
584 584
 585/* The specification explicitly allows NDPs to be placed anywhere in the
 586 * frame, but some devices fail unless the NDP is placed after the IP
 587 * packets. Use the CDC_NCM_FLAG_NDP_TO_END flag to force this
 588 * behaviour.
 589 *
 590 * Note: The current implementation of this feature restricts each NTB
 591 * to a single NDP, implying that multiplexed sessions cannot share an
 592 * NTB. This might affect performance for multiplexed sessions.
593 */
594static const struct driver_info cdc_mbim_info_ndp_to_end = {
595 .description = "CDC MBIM",
596 .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN,
597 .bind = cdc_mbim_bind,
598 .unbind = cdc_mbim_unbind,
599 .manage_power = cdc_mbim_manage_power,
600 .rx_fixup = cdc_mbim_rx_fixup,
601 .tx_fixup = cdc_mbim_tx_fixup,
602 .data = CDC_NCM_FLAG_NDP_TO_END,
603};
604
585static const struct usb_device_id mbim_devs[] = { 605static const struct usb_device_id mbim_devs[] = {
586 /* This duplicate NCM entry is intentional. MBIM devices can 606 /* This duplicate NCM entry is intentional. MBIM devices can
587 * be disguised as NCM by default, and this is necessary to 607 * be disguised as NCM by default, and this is necessary to
@@ -597,6 +617,10 @@ static const struct usb_device_id mbim_devs[] = {
597 { USB_VENDOR_AND_INTERFACE_INFO(0x0bdb, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), 617 { USB_VENDOR_AND_INTERFACE_INFO(0x0bdb, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
598 .driver_info = (unsigned long)&cdc_mbim_info, 618 .driver_info = (unsigned long)&cdc_mbim_info,
599 }, 619 },
620 /* Huawei E3372 fails unless NDP comes after the IP packets */
621 { USB_DEVICE_AND_INTERFACE_INFO(0x12d1, 0x157d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
622 .driver_info = (unsigned long)&cdc_mbim_info_ndp_to_end,
623 },
600 /* default entry */ 624 /* default entry */
601 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), 625 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
602 .driver_info = (unsigned long)&cdc_mbim_info_zlp, 626 .driver_info = (unsigned long)&cdc_mbim_info_zlp,
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index a187f08113ec..e8a1144c5a8b 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -41,6 +41,7 @@
41#include <linux/module.h> 41#include <linux/module.h>
42#include <linux/netdevice.h> 42#include <linux/netdevice.h>
43#include <linux/ctype.h> 43#include <linux/ctype.h>
44#include <linux/etherdevice.h>
44#include <linux/ethtool.h> 45#include <linux/ethtool.h>
45#include <linux/workqueue.h> 46#include <linux/workqueue.h>
46#include <linux/mii.h> 47#include <linux/mii.h>
@@ -689,9 +690,35 @@ static void cdc_ncm_free(struct cdc_ncm_ctx *ctx)
689 kfree(ctx); 690 kfree(ctx);
690} 691}
691 692
693/* we need to override the usbnet change_mtu ndo for two reasons:
694 * - respect the negotiated maximum datagram size
695 * - avoid unwanted changes to rx and tx buffers
696 */
697int cdc_ncm_change_mtu(struct net_device *net, int new_mtu)
698{
699 struct usbnet *dev = netdev_priv(net);
700 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
701 int maxmtu = ctx->max_datagram_size - cdc_ncm_eth_hlen(dev);
702
703 if (new_mtu <= 0 || new_mtu > maxmtu)
704 return -EINVAL;
705 net->mtu = new_mtu;
706 return 0;
707}
708EXPORT_SYMBOL_GPL(cdc_ncm_change_mtu);
709
710static const struct net_device_ops cdc_ncm_netdev_ops = {
711 .ndo_open = usbnet_open,
712 .ndo_stop = usbnet_stop,
713 .ndo_start_xmit = usbnet_start_xmit,
714 .ndo_tx_timeout = usbnet_tx_timeout,
715 .ndo_change_mtu = cdc_ncm_change_mtu,
716 .ndo_set_mac_address = eth_mac_addr,
717 .ndo_validate_addr = eth_validate_addr,
718};
719
692int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting, int drvflags) 720int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting, int drvflags)
693{ 721{
694 const struct usb_cdc_union_desc *union_desc = NULL;
695 struct cdc_ncm_ctx *ctx; 722 struct cdc_ncm_ctx *ctx;
696 struct usb_driver *driver; 723 struct usb_driver *driver;
697 u8 *buf; 724 u8 *buf;
@@ -725,15 +752,16 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
725 /* parse through descriptors associated with control interface */ 752 /* parse through descriptors associated with control interface */
726 cdc_parse_cdc_header(&hdr, intf, buf, len); 753 cdc_parse_cdc_header(&hdr, intf, buf, len);
727 754
728 ctx->data = usb_ifnum_to_if(dev->udev, 755 if (hdr.usb_cdc_union_desc)
729 hdr.usb_cdc_union_desc->bSlaveInterface0); 756 ctx->data = usb_ifnum_to_if(dev->udev,
757 hdr.usb_cdc_union_desc->bSlaveInterface0);
730 ctx->ether_desc = hdr.usb_cdc_ether_desc; 758 ctx->ether_desc = hdr.usb_cdc_ether_desc;
731 ctx->func_desc = hdr.usb_cdc_ncm_desc; 759 ctx->func_desc = hdr.usb_cdc_ncm_desc;
732 ctx->mbim_desc = hdr.usb_cdc_mbim_desc; 760 ctx->mbim_desc = hdr.usb_cdc_mbim_desc;
733 ctx->mbim_extended_desc = hdr.usb_cdc_mbim_extended_desc; 761 ctx->mbim_extended_desc = hdr.usb_cdc_mbim_extended_desc;
734 762
735 /* some buggy devices have an IAD but no CDC Union */ 763 /* some buggy devices have an IAD but no CDC Union */
736 if (!union_desc && intf->intf_assoc && intf->intf_assoc->bInterfaceCount == 2) { 764 if (!hdr.usb_cdc_union_desc && intf->intf_assoc && intf->intf_assoc->bInterfaceCount == 2) {
737 ctx->data = usb_ifnum_to_if(dev->udev, intf->cur_altsetting->desc.bInterfaceNumber + 1); 765 ctx->data = usb_ifnum_to_if(dev->udev, intf->cur_altsetting->desc.bInterfaceNumber + 1);
738 dev_dbg(&intf->dev, "CDC Union missing - got slave from IAD\n"); 766 dev_dbg(&intf->dev, "CDC Union missing - got slave from IAD\n");
739 } 767 }
@@ -823,6 +851,9 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
823 /* add our sysfs attrs */ 851 /* add our sysfs attrs */
824 dev->net->sysfs_groups[0] = &cdc_ncm_sysfs_attr_group; 852 dev->net->sysfs_groups[0] = &cdc_ncm_sysfs_attr_group;
825 853
854 /* must handle MTU changes */
855 dev->net->netdev_ops = &cdc_ncm_netdev_ops;
856
826 return 0; 857 return 0;
827 858
828error2: 859error2:
@@ -955,10 +986,18 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_
955 * NTH16 header as we would normally do. NDP isn't written to the SKB yet, and 986 * NTH16 header as we would normally do. NDP isn't written to the SKB yet, and
956 * the wNdpIndex field in the header is actually not consistent with reality. It will be later. 987 * the wNdpIndex field in the header is actually not consistent with reality. It will be later.
957 */ 988 */
958 if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) 989 if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) {
959 if (ctx->delayed_ndp16->dwSignature == sign) 990 if (ctx->delayed_ndp16->dwSignature == sign)
960 return ctx->delayed_ndp16; 991 return ctx->delayed_ndp16;
961 992
993 /* We can only push a single NDP to the end. Return
994 * NULL to send what we've already got and queue this
995 * skb for later.
996 */
997 else if (ctx->delayed_ndp16->dwSignature)
998 return NULL;
999 }
1000
962 /* follow the chain of NDPs, looking for a match */ 1001 /* follow the chain of NDPs, looking for a match */
963 while (ndpoffset) { 1002 while (ndpoffset) {
964 ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb->data + ndpoffset); 1003 ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb->data + ndpoffset);
@@ -1550,6 +1589,24 @@ static const struct usb_device_id cdc_devs[] = {
1550 .driver_info = (unsigned long) &wwan_info, 1589 .driver_info = (unsigned long) &wwan_info,
1551 }, 1590 },
1552 1591
1592 /* DW5812 LTE Verizon Mobile Broadband Card
1593 * Unlike DW5550 this device requires FLAG_NOARP
1594 */
1595 { USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x81bb,
1596 USB_CLASS_COMM,
1597 USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
1598 .driver_info = (unsigned long)&wwan_noarp_info,
1599 },
1600
1601 /* DW5813 LTE AT&T Mobile Broadband Card
1602 * Unlike DW5550 this device requires FLAG_NOARP
1603 */
1604 { USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x81bc,
1605 USB_CLASS_COMM,
1606 USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
1607 .driver_info = (unsigned long)&wwan_noarp_info,
1608 },
1609
1553 /* Dell branded MBM devices like DW5550 */ 1610 /* Dell branded MBM devices like DW5550 */
1554 { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO 1611 { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
1555 | USB_DEVICE_ID_MATCH_VENDOR, 1612 | USB_DEVICE_ID_MATCH_VENDOR,
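Both new Dell IDs match the full class/subclass/protocol triple and route the device to a no-ARP driver_info. A hedged sketch of how such an entry pairs with its driver_info in a usbnet-based driver — the description and flag selection here are illustrative, not cdc_ncm.c's actual wwan_noarp_info:

#include <linux/usb.h>
#include <linux/usb/cdc.h>
#include <linux/usb/usbnet.h>

static const struct driver_info wwan_noarp_sketch = {
	.description = "Mobile Broadband Network Device (NO ARP)",
	/* FLAG_NOARP is the bit these devices need on top of the
	 * usual WWAN flags (illustrative selection).
	 */
	.flags = FLAG_WWAN | FLAG_POINTTOPOINT | FLAG_NOARP,
};

static const struct usb_device_id ids_sketch[] = {
	{ USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x81bb,	/* DW5812 */
		USB_CLASS_COMM, USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
	  .driver_info = (unsigned long)&wwan_noarp_sketch,
	},
	{ }	/* terminating entry */
};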
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 34799eaace41..9a5be8b85186 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -725,6 +725,7 @@ static const struct usb_device_id products[] = {
725 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ 725 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
726 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ 726 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
727 {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */ 727 {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */
728 {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */
728 {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */ 729 {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */
729 {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */ 730 {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */
730 {QMI_FIXED_INTF(0x0b3c, 0xc002, 4)}, /* Olivetti Olicard 140 */ 731 {QMI_FIXED_INTF(0x0b3c, 0xc002, 4)}, /* Olivetti Olicard 140 */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index d9427ca3dba7..2e32c41536ae 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -3067,17 +3067,6 @@ static int rtl8152_open(struct net_device *netdev)
3067 3067
3068 mutex_lock(&tp->control); 3068 mutex_lock(&tp->control);
3069 3069
3070 /* The WORK_ENABLE may be set when autoresume occurs */
3071 if (test_bit(WORK_ENABLE, &tp->flags)) {
3072 clear_bit(WORK_ENABLE, &tp->flags);
3073 usb_kill_urb(tp->intr_urb);
3074 cancel_delayed_work_sync(&tp->schedule);
3075
3076 /* disable the tx/rx, if the workqueue has enabled them. */
3077 if (netif_carrier_ok(netdev))
3078 tp->rtl_ops.disable(tp);
3079 }
3080
3081 tp->rtl_ops.up(tp); 3070 tp->rtl_ops.up(tp);
3082 3071
3083 rtl8152_set_speed(tp, AUTONEG_ENABLE, 3072 rtl8152_set_speed(tp, AUTONEG_ENABLE,
@@ -3124,12 +3113,6 @@ static int rtl8152_close(struct net_device *netdev)
3124 } else { 3113 } else {
3125 mutex_lock(&tp->control); 3114 mutex_lock(&tp->control);
3126 3115
3127 /* The autosuspend may have been enabled and wouldn't
3128 * be disable when autoresume occurs, because the
3129 * netif_running() would be false.
3130 */
3131 rtl_runtime_suspend_enable(tp, false);
3132
3133 tp->rtl_ops.down(tp); 3116 tp->rtl_ops.down(tp);
3134 3117
3135 mutex_unlock(&tp->control); 3118 mutex_unlock(&tp->control);
@@ -3512,7 +3495,7 @@ static int rtl8152_resume(struct usb_interface *intf)
3512 netif_device_attach(tp->netdev); 3495 netif_device_attach(tp->netdev);
3513 } 3496 }
3514 3497
3515 if (netif_running(tp->netdev)) { 3498 if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) {
3516 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { 3499 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
3517 rtl_runtime_suspend_enable(tp, false); 3500 rtl_runtime_suspend_enable(tp, false);
3518 clear_bit(SELECTIVE_SUSPEND, &tp->flags); 3501 clear_bit(SELECTIVE_SUSPEND, &tp->flags);
@@ -3532,6 +3515,8 @@ static int rtl8152_resume(struct usb_interface *intf)
3532 } 3515 }
3533 usb_submit_urb(tp->intr_urb, GFP_KERNEL); 3516 usb_submit_urb(tp->intr_urb, GFP_KERNEL);
3534 } else if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { 3517 } else if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
3518 if (tp->netdev->flags & IFF_UP)
3519 rtl_runtime_suspend_enable(tp, false);
3535 clear_bit(SELECTIVE_SUSPEND, &tp->flags); 3520 clear_bit(SELECTIVE_SUSPEND, &tp->flags);
3536 } 3521 }
3537 3522
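With the IFF_UP tests added, resume only unwinds runtime-suspend state for an interface that is administratively up; otherwise SELECTIVE_SUSPEND handling shrinks to a bare clear_bit(). The gating, reduced to a stand-alone sketch — the struct and bit are stand-ins for the driver's struct r8152 state, and the elided bodies are only hinted at in comments:

#include <linux/netdevice.h>

#define SELECTIVE_SUSPEND_SKETCH 0	/* stand-in for the driver's bit */

struct r8152_sketch {
	struct net_device *netdev;
	unsigned long flags;
};

static void resume_gating_sketch(struct r8152_sketch *tp)
{
	if (netif_running(tp->netdev) && (tp->netdev->flags & IFF_UP)) {
		/* full resume: undo runtime suspend, restart I/O */
	} else if (test_bit(SELECTIVE_SUSPEND_SKETCH, &tp->flags)) {
		/* not running: touch the runtime-suspend bookkeeping
		 * only when the netdev is still marked up
		 */
		if (tp->netdev->flags & IFF_UP)
			;	/* rtl_runtime_suspend_enable(tp, false) */
		clear_bit(SELECTIVE_SUSPEND_SKETCH, &tp->flags);
	}
}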
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 0ef4a5ad5557..ba21d072be31 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -117,12 +117,6 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
117 kfree_skb(skb); 117 kfree_skb(skb);
118 goto drop; 118 goto drop;
119 } 119 }
120 /* don't change ip_summed == CHECKSUM_PARTIAL, as that
121 * will cause bad checksum on forwarded packets
122 */
123 if (skb->ip_summed == CHECKSUM_NONE &&
124 rcv->features & NETIF_F_RXCSUM)
125 skb->ip_summed = CHECKSUM_UNNECESSARY;
126 120
127 if (likely(dev_forward_skb(rcv, skb) == NET_RX_SUCCESS)) { 121 if (likely(dev_forward_skb(rcv, skb) == NET_RX_SUCCESS)) {
128 struct pcpu_vstats *stats = this_cpu_ptr(dev->vstats); 122 struct pcpu_vstats *stats = this_cpu_ptr(dev->vstats);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index d8838dedb7a4..f94ab786088f 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -140,6 +140,12 @@ struct virtnet_info {
140 140
141 /* CPU hot plug notifier */ 141 /* CPU hot plug notifier */
142 struct notifier_block nb; 142 struct notifier_block nb;
143
144 /* Control VQ buffers: protected by the rtnl lock */
145 struct virtio_net_ctrl_hdr ctrl_hdr;
146 virtio_net_ctrl_ack ctrl_status;
147 u8 ctrl_promisc;
148 u8 ctrl_allmulti;
143}; 149};
144 150
145struct padded_vnet_hdr { 151struct padded_vnet_hdr {
@@ -976,31 +982,30 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
976 struct scatterlist *out) 982 struct scatterlist *out)
977{ 983{
978 struct scatterlist *sgs[4], hdr, stat; 984 struct scatterlist *sgs[4], hdr, stat;
979 struct virtio_net_ctrl_hdr ctrl;
980 virtio_net_ctrl_ack status = ~0;
981 unsigned out_num = 0, tmp; 985 unsigned out_num = 0, tmp;
982 986
983 /* Caller should know better */ 987 /* Caller should know better */
984 BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); 988 BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
985 989
986 ctrl.class = class; 990 vi->ctrl_status = ~0;
987 ctrl.cmd = cmd; 991 vi->ctrl_hdr.class = class;
992 vi->ctrl_hdr.cmd = cmd;
988 /* Add header */ 993 /* Add header */
989 sg_init_one(&hdr, &ctrl, sizeof(ctrl)); 994 sg_init_one(&hdr, &vi->ctrl_hdr, sizeof(vi->ctrl_hdr));
990 sgs[out_num++] = &hdr; 995 sgs[out_num++] = &hdr;
991 996
992 if (out) 997 if (out)
993 sgs[out_num++] = out; 998 sgs[out_num++] = out;
994 999
995 /* Add return status. */ 1000 /* Add return status. */
996 sg_init_one(&stat, &status, sizeof(status)); 1001 sg_init_one(&stat, &vi->ctrl_status, sizeof(vi->ctrl_status));
997 sgs[out_num] = &stat; 1002 sgs[out_num] = &stat;
998 1003
999 BUG_ON(out_num + 1 > ARRAY_SIZE(sgs)); 1004 BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
1000 virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC); 1005 virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
1001 1006
1002 if (unlikely(!virtqueue_kick(vi->cvq))) 1007 if (unlikely(!virtqueue_kick(vi->cvq)))
1003 return status == VIRTIO_NET_OK; 1008 return vi->ctrl_status == VIRTIO_NET_OK;
1004 1009
1005 /* Spin for a response, the kick causes an ioport write, trapping 1010 /* Spin for a response, the kick causes an ioport write, trapping
1006 * into the hypervisor, so the request should be handled immediately. 1011 * into the hypervisor, so the request should be handled immediately.
@@ -1009,7 +1014,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
1009 !virtqueue_is_broken(vi->cvq)) 1014 !virtqueue_is_broken(vi->cvq))
1010 cpu_relax(); 1015 cpu_relax();
1011 1016
1012 return status == VIRTIO_NET_OK; 1017 return vi->ctrl_status == VIRTIO_NET_OK;
1013} 1018}
1014 1019
1015static int virtnet_set_mac_address(struct net_device *dev, void *p) 1020static int virtnet_set_mac_address(struct net_device *dev, void *p)
@@ -1151,7 +1156,6 @@ static void virtnet_set_rx_mode(struct net_device *dev)
1151{ 1156{
1152 struct virtnet_info *vi = netdev_priv(dev); 1157 struct virtnet_info *vi = netdev_priv(dev);
1153 struct scatterlist sg[2]; 1158 struct scatterlist sg[2];
1154 u8 promisc, allmulti;
1155 struct virtio_net_ctrl_mac *mac_data; 1159 struct virtio_net_ctrl_mac *mac_data;
1156 struct netdev_hw_addr *ha; 1160 struct netdev_hw_addr *ha;
1157 int uc_count; 1161 int uc_count;
@@ -1163,22 +1167,22 @@ static void virtnet_set_rx_mode(struct net_device *dev)
1163 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) 1167 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
1164 return; 1168 return;
1165 1169
1166 promisc = ((dev->flags & IFF_PROMISC) != 0); 1170 vi->ctrl_promisc = ((dev->flags & IFF_PROMISC) != 0);
1167 allmulti = ((dev->flags & IFF_ALLMULTI) != 0); 1171 vi->ctrl_allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
1168 1172
1169 sg_init_one(sg, &promisc, sizeof(promisc)); 1173 sg_init_one(sg, &vi->ctrl_promisc, sizeof(vi->ctrl_promisc));
1170 1174
1171 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, 1175 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
1172 VIRTIO_NET_CTRL_RX_PROMISC, sg)) 1176 VIRTIO_NET_CTRL_RX_PROMISC, sg))
1173 dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", 1177 dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
1174 promisc ? "en" : "dis"); 1178 vi->ctrl_promisc ? "en" : "dis");
1175 1179
1176 sg_init_one(sg, &allmulti, sizeof(allmulti)); 1180 sg_init_one(sg, &vi->ctrl_allmulti, sizeof(vi->ctrl_allmulti));
1177 1181
1178 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, 1182 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
1179 VIRTIO_NET_CTRL_RX_ALLMULTI, sg)) 1183 VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
1180 dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", 1184 dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
1181 allmulti ? "en" : "dis"); 1185 vi->ctrl_allmulti ? "en" : "dis");
1182 1186
1183 uc_count = netdev_uc_count(dev); 1187 uc_count = netdev_uc_count(dev);
1184 mc_count = netdev_mc_count(dev); 1188 mc_count = netdev_mc_count(dev);
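The common thread in the virtio_net hunks: sg_init_one() hands these buffer addresses to the virtqueue, and a buffer visible to the device must outlive the request and be physically addressable — neither of which an on-stack variable guarantees (with CONFIG_VMAP_STACK the stack is not even linearly mapped). Hence ctrl_hdr, ctrl_status, ctrl_promisc and ctrl_allmulti move into the kzalloc'd virtnet_info. The pattern in miniature, with a hypothetical device struct:

#include <linux/scatterlist.h>
#include <linux/types.h>

struct mydev_sketch {
	u8 cmd_buf;	/* lives as long as the device's private struct */
};

static void fill_cmd_sg_sketch(struct mydev_sketch *d,
			       struct scatterlist *sg, bool on)
{
	/* WRONG: u8 tmp = on; sg_init_one(sg, &tmp, 1);
	 * the device would read from a dead (or vmapped) stack slot.
	 */
	d->cmd_buf = on;
	sg_init_one(sg, &d->cmd_buf, sizeof(d->cmd_buf));
}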
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 899ea4288197..417903715437 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -587,6 +587,12 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
587 &adapter->pdev->dev, 587 &adapter->pdev->dev,
588 rbi->skb->data, rbi->len, 588 rbi->skb->data, rbi->len,
589 PCI_DMA_FROMDEVICE); 589 PCI_DMA_FROMDEVICE);
590 if (dma_mapping_error(&adapter->pdev->dev,
591 rbi->dma_addr)) {
592 dev_kfree_skb_any(rbi->skb);
593 rq->stats.rx_buf_alloc_failure++;
594 break;
595 }
590 } else { 596 } else {
591 /* rx buffer skipped by the device */ 597 /* rx buffer skipped by the device */
592 } 598 }
@@ -605,13 +611,18 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
605 &adapter->pdev->dev, 611 &adapter->pdev->dev,
606 rbi->page, 0, PAGE_SIZE, 612 rbi->page, 0, PAGE_SIZE,
607 PCI_DMA_FROMDEVICE); 613 PCI_DMA_FROMDEVICE);
614 if (dma_mapping_error(&adapter->pdev->dev,
615 rbi->dma_addr)) {
616 put_page(rbi->page);
617 rq->stats.rx_buf_alloc_failure++;
618 break;
619 }
608 } else { 620 } else {
609 /* rx buffers skipped by the device */ 621 /* rx buffers skipped by the device */
610 } 622 }
611 val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT; 623 val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
612 } 624 }
613 625
614 BUG_ON(rbi->dma_addr == 0);
615 gd->rxd.addr = cpu_to_le64(rbi->dma_addr); 626 gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
616 gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT) 627 gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
617 | val | rbi->len); 628 | val | rbi->len);
@@ -655,7 +666,7 @@ vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
655} 666}
656 667
657 668
658static void 669static int
659vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx, 670vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
660 struct vmxnet3_tx_queue *tq, struct pci_dev *pdev, 671 struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
661 struct vmxnet3_adapter *adapter) 672 struct vmxnet3_adapter *adapter)
@@ -715,6 +726,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
715 tbi->dma_addr = dma_map_single(&adapter->pdev->dev, 726 tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
716 skb->data + buf_offset, buf_size, 727 skb->data + buf_offset, buf_size,
717 PCI_DMA_TODEVICE); 728 PCI_DMA_TODEVICE);
729 if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
730 return -EFAULT;
718 731
719 tbi->len = buf_size; 732 tbi->len = buf_size;
720 733
@@ -755,6 +768,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
755 tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag, 768 tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
756 buf_offset, buf_size, 769 buf_offset, buf_size,
757 DMA_TO_DEVICE); 770 DMA_TO_DEVICE);
771 if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
772 return -EFAULT;
758 773
759 tbi->len = buf_size; 774 tbi->len = buf_size;
760 775
@@ -782,6 +797,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
782 /* set the last buf_info for the pkt */ 797 /* set the last buf_info for the pkt */
783 tbi->skb = skb; 798 tbi->skb = skb;
784 tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base; 799 tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
800
801 return 0;
785} 802}
786 803
787 804
@@ -1020,7 +1037,8 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
1020 } 1037 }
1021 1038
1022 /* fill tx descs related to addr & len */ 1039 /* fill tx descs related to addr & len */
1023 vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter); 1040 if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter))
1041 goto unlock_drop_pkt;
1024 1042
1025 /* setup the EOP desc */ 1043 /* setup the EOP desc */
1026 ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP); 1044 ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);
@@ -1231,6 +1249,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1231 struct vmxnet3_rx_buf_info *rbi; 1249 struct vmxnet3_rx_buf_info *rbi;
1232 struct sk_buff *skb, *new_skb = NULL; 1250 struct sk_buff *skb, *new_skb = NULL;
1233 struct page *new_page = NULL; 1251 struct page *new_page = NULL;
1252 dma_addr_t new_dma_addr;
1234 int num_to_alloc; 1253 int num_to_alloc;
1235 struct Vmxnet3_RxDesc *rxd; 1254 struct Vmxnet3_RxDesc *rxd;
1236 u32 idx, ring_idx; 1255 u32 idx, ring_idx;
@@ -1287,6 +1306,21 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1287 skip_page_frags = true; 1306 skip_page_frags = true;
1288 goto rcd_done; 1307 goto rcd_done;
1289 } 1308 }
1309 new_dma_addr = dma_map_single(&adapter->pdev->dev,
1310 new_skb->data, rbi->len,
1311 PCI_DMA_FROMDEVICE);
1312 if (dma_mapping_error(&adapter->pdev->dev,
1313 new_dma_addr)) {
1314 dev_kfree_skb(new_skb);
 1315					/* DMA mapping of the new skb failed, do not
 1316					 * hand this skb over to the stack. Reuse the
 1317					 * old buffer and drop the existing pkt. */
1318 rq->stats.rx_buf_alloc_failure++;
1319 ctx->skb = NULL;
1320 rq->stats.drop_total++;
1321 skip_page_frags = true;
1322 goto rcd_done;
1323 }
1290 1324
1291 dma_unmap_single(&adapter->pdev->dev, rbi->dma_addr, 1325 dma_unmap_single(&adapter->pdev->dev, rbi->dma_addr,
1292 rbi->len, 1326 rbi->len,
@@ -1303,9 +1337,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1303 1337
1304 /* Immediate refill */ 1338 /* Immediate refill */
1305 rbi->skb = new_skb; 1339 rbi->skb = new_skb;
1306 rbi->dma_addr = dma_map_single(&adapter->pdev->dev, 1340 rbi->dma_addr = new_dma_addr;
1307 rbi->skb->data, rbi->len,
1308 PCI_DMA_FROMDEVICE);
1309 rxd->addr = cpu_to_le64(rbi->dma_addr); 1341 rxd->addr = cpu_to_le64(rbi->dma_addr);
1310 rxd->len = rbi->len; 1342 rxd->len = rbi->len;
1311 if (adapter->version == 2 && 1343 if (adapter->version == 2 &&
@@ -1348,6 +1380,19 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1348 skip_page_frags = true; 1380 skip_page_frags = true;
1349 goto rcd_done; 1381 goto rcd_done;
1350 } 1382 }
1383 new_dma_addr = dma_map_page(&adapter->pdev->dev
1384 , rbi->page,
1385 0, PAGE_SIZE,
1386 PCI_DMA_FROMDEVICE);
1387 if (dma_mapping_error(&adapter->pdev->dev,
1388 new_dma_addr)) {
1389 put_page(new_page);
1390 rq->stats.rx_buf_alloc_failure++;
1391 dev_kfree_skb(ctx->skb);
1392 ctx->skb = NULL;
1393 skip_page_frags = true;
1394 goto rcd_done;
1395 }
1351 1396
1352 dma_unmap_page(&adapter->pdev->dev, 1397 dma_unmap_page(&adapter->pdev->dev,
1353 rbi->dma_addr, rbi->len, 1398 rbi->dma_addr, rbi->len,
@@ -1357,10 +1402,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1357 1402
1358 /* Immediate refill */ 1403 /* Immediate refill */
1359 rbi->page = new_page; 1404 rbi->page = new_page;
1360 rbi->dma_addr = dma_map_page(&adapter->pdev->dev 1405 rbi->dma_addr = new_dma_addr;
1361 , rbi->page,
1362 0, PAGE_SIZE,
1363 PCI_DMA_FROMDEVICE);
1364 rxd->addr = cpu_to_le64(rbi->dma_addr); 1406 rxd->addr = cpu_to_le64(rbi->dma_addr);
1365 rxd->len = rbi->len; 1407 rxd->len = rbi->len;
1366 } 1408 }
@@ -2167,7 +2209,8 @@ vmxnet3_set_mc(struct net_device *netdev)
2167 PCI_DMA_TODEVICE); 2209 PCI_DMA_TODEVICE);
2168 } 2210 }
2169 2211
2170 if (new_table_pa) { 2212 if (!dma_mapping_error(&adapter->pdev->dev,
2213 new_table_pa)) {
2171 new_mode |= VMXNET3_RXM_MCAST; 2214 new_mode |= VMXNET3_RXM_MCAST;
2172 rxConf->mfTablePA = cpu_to_le64(new_table_pa); 2215 rxConf->mfTablePA = cpu_to_le64(new_table_pa);
2173 } else { 2216 } else {
@@ -3075,6 +3118,11 @@ vmxnet3_probe_device(struct pci_dev *pdev,
3075 adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter, 3118 adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
3076 sizeof(struct vmxnet3_adapter), 3119 sizeof(struct vmxnet3_adapter),
3077 PCI_DMA_TODEVICE); 3120 PCI_DMA_TODEVICE);
3121 if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) {
3122 dev_err(&pdev->dev, "Failed to map dma\n");
3123 err = -EFAULT;
3124 goto err_dma_map;
3125 }
3078 adapter->shared = dma_alloc_coherent( 3126 adapter->shared = dma_alloc_coherent(
3079 &adapter->pdev->dev, 3127 &adapter->pdev->dev,
3080 sizeof(struct Vmxnet3_DriverShared), 3128 sizeof(struct Vmxnet3_DriverShared),
@@ -3233,6 +3281,7 @@ err_alloc_queue_desc:
3233err_alloc_shared: 3281err_alloc_shared:
3234 dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa, 3282 dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
3235 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE); 3283 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
3284err_dma_map:
3236 free_netdev(netdev); 3285 free_netdev(netdev);
3237 return err; 3286 return err;
3238} 3287}
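Every vmxnet3 hunk above applies the same discipline: a dma_addr_t returned by dma_map_single()/dma_map_page()/skb_frag_dma_map() is only usable after dma_mapping_error() clears it, and on failure the just-acquired resource (skb, page, or the whole tx packet) is released and a counter bumped — which is also why the old BUG_ON(rbi->dma_addr == 0) could go away. The core pattern, condensed into a hypothetical helper:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

static int map_rx_skb_sketch(struct device *dev, struct sk_buff *skb,
			     unsigned int len, dma_addr_t *dma)
{
	*dma = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *dma)) {
		dev_kfree_skb_any(skb);	/* release what we just took */
		return -EFAULT;		/* caller counts the failure */
	}
	return 0;			/* *dma is now safe to post */
}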
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 92fa3e1ea65c..4f9748457f5a 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -907,7 +907,6 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev,
907 struct nlattr *tb[], struct nlattr *data[]) 907 struct nlattr *tb[], struct nlattr *data[])
908{ 908{
909 struct net_vrf *vrf = netdev_priv(dev); 909 struct net_vrf *vrf = netdev_priv(dev);
910 int err;
911 910
912 if (!data || !data[IFLA_VRF_TABLE]) 911 if (!data || !data[IFLA_VRF_TABLE])
913 return -EINVAL; 912 return -EINVAL;
@@ -916,15 +915,7 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev,
916 915
917 dev->priv_flags |= IFF_L3MDEV_MASTER; 916 dev->priv_flags |= IFF_L3MDEV_MASTER;
918 917
919 err = register_netdevice(dev); 918 return register_netdevice(dev);
920 if (err < 0)
921 goto out_fail;
922
923 return 0;
924
925out_fail:
926 free_netdev(dev);
927 return err;
928} 919}
929 920
930static size_t vrf_nl_getsize(const struct net_device *dev) 921static size_t vrf_nl_getsize(const struct net_device *dev)
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 6369a5734d4c..ba363cedef80 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1158,7 +1158,6 @@ static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
1158 struct pcpu_sw_netstats *stats; 1158 struct pcpu_sw_netstats *stats;
1159 union vxlan_addr saddr; 1159 union vxlan_addr saddr;
1160 int err = 0; 1160 int err = 0;
1161 union vxlan_addr *remote_ip;
1162 1161
1163 /* For flow based devices, map all packets to VNI 0 */ 1162 /* For flow based devices, map all packets to VNI 0 */
1164 if (vs->flags & VXLAN_F_COLLECT_METADATA) 1163 if (vs->flags & VXLAN_F_COLLECT_METADATA)
@@ -1169,7 +1168,6 @@ static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
1169 if (!vxlan) 1168 if (!vxlan)
1170 goto drop; 1169 goto drop;
1171 1170
1172 remote_ip = &vxlan->default_dst.remote_ip;
1173 skb_reset_mac_header(skb); 1171 skb_reset_mac_header(skb);
1174 skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev))); 1172 skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev)));
1175 skb->protocol = eth_type_trans(skb, vxlan->dev); 1173 skb->protocol = eth_type_trans(skb, vxlan->dev);
@@ -1179,8 +1177,8 @@ static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
1179 if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr)) 1177 if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
1180 goto drop; 1178 goto drop;
1181 1179
1182 /* Re-examine inner Ethernet packet */ 1180 /* Get data from the outer IP header */
1183 if (remote_ip->sa.sa_family == AF_INET) { 1181 if (vxlan_get_sk_family(vs) == AF_INET) {
1184 oip = ip_hdr(skb); 1182 oip = ip_hdr(skb);
1185 saddr.sin.sin_addr.s_addr = oip->saddr; 1183 saddr.sin.sin_addr.s_addr = oip->saddr;
1186 saddr.sa.sa_family = AF_INET; 1184 saddr.sa.sa_family = AF_INET;
@@ -1848,6 +1846,34 @@ static int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *sk
1848 !(vxflags & VXLAN_F_UDP_CSUM)); 1846 !(vxflags & VXLAN_F_UDP_CSUM));
1849} 1847}
1850 1848
1849#if IS_ENABLED(CONFIG_IPV6)
1850static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
1851 struct sk_buff *skb, int oif,
1852 const struct in6_addr *daddr,
1853 struct in6_addr *saddr)
1854{
1855 struct dst_entry *ndst;
1856 struct flowi6 fl6;
1857 int err;
1858
1859 memset(&fl6, 0, sizeof(fl6));
1860 fl6.flowi6_oif = oif;
1861 fl6.daddr = *daddr;
1862 fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr;
1863 fl6.flowi6_mark = skb->mark;
1864 fl6.flowi6_proto = IPPROTO_UDP;
1865
1866 err = ipv6_stub->ipv6_dst_lookup(vxlan->net,
1867 vxlan->vn6_sock->sock->sk,
1868 &ndst, &fl6);
1869 if (err < 0)
1870 return ERR_PTR(err);
1871
1872 *saddr = fl6.saddr;
1873 return ndst;
1874}
1875#endif
1876
1851/* Bypass encapsulation if the destination is local */ 1877/* Bypass encapsulation if the destination is local */
1852static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, 1878static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
1853 struct vxlan_dev *dst_vxlan) 1879 struct vxlan_dev *dst_vxlan)
@@ -2035,21 +2061,17 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
2035#if IS_ENABLED(CONFIG_IPV6) 2061#if IS_ENABLED(CONFIG_IPV6)
2036 } else { 2062 } else {
2037 struct dst_entry *ndst; 2063 struct dst_entry *ndst;
2038 struct flowi6 fl6; 2064 struct in6_addr saddr;
2039 u32 rt6i_flags; 2065 u32 rt6i_flags;
2040 2066
2041 if (!vxlan->vn6_sock) 2067 if (!vxlan->vn6_sock)
2042 goto drop; 2068 goto drop;
2043 sk = vxlan->vn6_sock->sock->sk; 2069 sk = vxlan->vn6_sock->sock->sk;
2044 2070
2045 memset(&fl6, 0, sizeof(fl6)); 2071 ndst = vxlan6_get_route(vxlan, skb,
2046 fl6.flowi6_oif = rdst ? rdst->remote_ifindex : 0; 2072 rdst ? rdst->remote_ifindex : 0,
2047 fl6.daddr = dst->sin6.sin6_addr; 2073 &dst->sin6.sin6_addr, &saddr);
2048 fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr; 2074 if (IS_ERR(ndst)) {
2049 fl6.flowi6_mark = skb->mark;
2050 fl6.flowi6_proto = IPPROTO_UDP;
2051
2052 if (ipv6_stub->ipv6_dst_lookup(vxlan->net, sk, &ndst, &fl6)) {
2053 netdev_dbg(dev, "no route to %pI6\n", 2075 netdev_dbg(dev, "no route to %pI6\n",
2054 &dst->sin6.sin6_addr); 2076 &dst->sin6.sin6_addr);
2055 dev->stats.tx_carrier_errors++; 2077 dev->stats.tx_carrier_errors++;
@@ -2081,7 +2103,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
2081 } 2103 }
2082 2104
2083 ttl = ttl ? : ip6_dst_hoplimit(ndst); 2105 ttl = ttl ? : ip6_dst_hoplimit(ndst);
2084 err = vxlan6_xmit_skb(ndst, sk, skb, dev, &fl6.saddr, &fl6.daddr, 2106 err = vxlan6_xmit_skb(ndst, sk, skb, dev, &saddr, &dst->sin6.sin6_addr,
2085 0, ttl, src_port, dst_port, htonl(vni << 8), md, 2107 0, ttl, src_port, dst_port, htonl(vni << 8), md,
2086 !net_eq(vxlan->net, dev_net(vxlan->dev)), 2108 !net_eq(vxlan->net, dev_net(vxlan->dev)),
2087 flags); 2109 flags);
@@ -2395,9 +2417,30 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
2395 vxlan->cfg.port_max, true); 2417 vxlan->cfg.port_max, true);
2396 dport = info->key.tp_dst ? : vxlan->cfg.dst_port; 2418 dport = info->key.tp_dst ? : vxlan->cfg.dst_port;
2397 2419
2398 if (ip_tunnel_info_af(info) == AF_INET) 2420 if (ip_tunnel_info_af(info) == AF_INET) {
2421 if (!vxlan->vn4_sock)
2422 return -EINVAL;
2399 return egress_ipv4_tun_info(dev, skb, info, sport, dport); 2423 return egress_ipv4_tun_info(dev, skb, info, sport, dport);
2400 return -EINVAL; 2424 } else {
2425#if IS_ENABLED(CONFIG_IPV6)
2426 struct dst_entry *ndst;
2427
2428 if (!vxlan->vn6_sock)
2429 return -EINVAL;
2430 ndst = vxlan6_get_route(vxlan, skb, 0,
2431 &info->key.u.ipv6.dst,
2432 &info->key.u.ipv6.src);
2433 if (IS_ERR(ndst))
2434 return PTR_ERR(ndst);
2435 dst_release(ndst);
2436
2437 info->key.tp_src = sport;
2438 info->key.tp_dst = dport;
2439#else /* !CONFIG_IPV6 */
2440 return -EPFNOSUPPORT;
2441#endif
2442 }
2443 return 0;
2401} 2444}
2402 2445
2403static const struct net_device_ops vxlan_netdev_ops = { 2446static const struct net_device_ops vxlan_netdev_ops = {
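vxlan6_get_route() follows the ERR_PTR convention, so both call sites test IS_ERR(); the metadata path additionally releases the dst at once, since it only needed to learn the source address and confirm reachability. The consumption pattern, reduced (lookup_fn is a hypothetical callback):

#include <linux/err.h>
#include <net/dst.h>

static int probe_route_sketch(struct dst_entry *(*lookup_fn)(void *),
			      void *arg)
{
	struct dst_entry *ndst = lookup_fn(arg);

	if (IS_ERR(ndst))
		return PTR_ERR(ndst);	/* encoded -errno, nothing held */

	dst_release(ndst);		/* reachable; reference not kept */
	return 0;
}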
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index e92aaf615901..89541cc90e87 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -1075,11 +1075,10 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
1075 1075
1076 used = pvc_is_used(pvc); 1076 used = pvc_is_used(pvc);
1077 1077
1078 if (type == ARPHRD_ETHER) { 1078 if (type == ARPHRD_ETHER)
1079 dev = alloc_netdev(0, "pvceth%d", NET_NAME_UNKNOWN, 1079 dev = alloc_netdev(0, "pvceth%d", NET_NAME_UNKNOWN,
1080 ether_setup); 1080 ether_setup);
1081 dev->priv_flags &= ~IFF_TX_SKB_SHARING; 1081 else
1082 } else
1083 dev = alloc_netdev(0, "pvc%d", NET_NAME_UNKNOWN, pvc_setup); 1082 dev = alloc_netdev(0, "pvc%d", NET_NAME_UNKNOWN, pvc_setup);
1084 1083
1085 if (!dev) { 1084 if (!dev) {
@@ -1088,9 +1087,10 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
1088 return -ENOBUFS; 1087 return -ENOBUFS;
1089 } 1088 }
1090 1089
1091 if (type == ARPHRD_ETHER) 1090 if (type == ARPHRD_ETHER) {
1091 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1092 eth_hw_addr_random(dev); 1092 eth_hw_addr_random(dev);
1093 else { 1093 } else {
1094 *(__be16*)dev->dev_addr = htons(dlci); 1094 *(__be16*)dev->dev_addr = htons(dlci);
1095 dlci_to_q922(dev->broadcast, dlci); 1095 dlci_to_q922(dev->broadcast, dlci);
1096 } 1096 }
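The hdlc_fr change fixes an ordering bug: the old code cleared IFF_TX_SKB_SHARING on dev before the !dev check, so a failed alloc_netdev() was dereferenced. The corrected shape as a generic sketch (ether_setup stands in for pvc_setup in the non-Ethernet branch to keep this self-contained):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static struct net_device *alloc_pvc_sketch(bool is_ether)
{
	struct net_device *dev;

	dev = alloc_netdev(0, is_ether ? "pvceth%d" : "pvc%d",
			   NET_NAME_UNKNOWN, ether_setup);
	if (!dev)
		return NULL;		/* check before any dereference */

	if (is_ether) {
		dev->priv_flags &= ~IFF_TX_SKB_SHARING;	/* now safe */
		eth_hw_addr_random(dev);
	}
	return dev;
}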
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 5c47b011a9d7..cd39025d2abf 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -549,16 +549,12 @@ static void x25_asy_receive_buf(struct tty_struct *tty,
549 549
550static int x25_asy_open_tty(struct tty_struct *tty) 550static int x25_asy_open_tty(struct tty_struct *tty)
551{ 551{
552 struct x25_asy *sl = tty->disc_data; 552 struct x25_asy *sl;
553 int err; 553 int err;
554 554
555 if (tty->ops->write == NULL) 555 if (tty->ops->write == NULL)
556 return -EOPNOTSUPP; 556 return -EOPNOTSUPP;
557 557
558 /* First make sure we're not already connected. */
559 if (sl && sl->magic == X25_ASY_MAGIC)
560 return -EEXIST;
561
562 /* OK. Find a free X.25 channel to use. */ 558 /* OK. Find a free X.25 channel to use. */
563 sl = x25_asy_alloc(); 559 sl = x25_asy_alloc();
564 if (sl == NULL) 560 if (sl == NULL)
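The x25_asy check being deleted read tty->disc_data before this line discipline had ever stored anything there, so sl held whatever a previous ldisc (or nothing) left behind, and the "already connected" test could never be meaningful. Deferring the assignment until after allocation is the fix; in miniature, with stand-in types and a hypothetical allocator:

#include <linux/errno.h>

struct tty_sketch { void *disc_data; };
struct chan_sketch { int in_use; };

extern struct chan_sketch *alloc_chan_sketch(void);	/* hypothetical */

static int open_tty_sketch(struct tty_sketch *tty)
{
	struct chan_sketch *sl;	/* deliberately NOT tty->disc_data */

	sl = alloc_chan_sketch();	/* find a free channel */
	if (!sl)
		return -ENFILE;

	tty->disc_data = sl;		/* publish only once valid */
	return 0;
}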
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index aa9bd92ac4ed..0947cc271e69 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -51,6 +51,7 @@ MODULE_PARM_DESC(rawmode, "Use raw 802.11 frame datapath");
51static const struct ath10k_hw_params ath10k_hw_params_list[] = { 51static const struct ath10k_hw_params ath10k_hw_params_list[] = {
52 { 52 {
53 .id = QCA988X_HW_2_0_VERSION, 53 .id = QCA988X_HW_2_0_VERSION,
54 .dev_id = QCA988X_2_0_DEVICE_ID,
54 .name = "qca988x hw2.0", 55 .name = "qca988x hw2.0",
55 .patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR, 56 .patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR,
56 .uart_pin = 7, 57 .uart_pin = 7,
@@ -69,6 +70,25 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
69 }, 70 },
70 { 71 {
71 .id = QCA6174_HW_2_1_VERSION, 72 .id = QCA6174_HW_2_1_VERSION,
73 .dev_id = QCA6164_2_1_DEVICE_ID,
74 .name = "qca6164 hw2.1",
75 .patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR,
76 .uart_pin = 6,
77 .otp_exe_param = 0,
78 .channel_counters_freq_hz = 88000,
79 .max_probe_resp_desc_thres = 0,
80 .fw = {
81 .dir = QCA6174_HW_2_1_FW_DIR,
82 .fw = QCA6174_HW_2_1_FW_FILE,
83 .otp = QCA6174_HW_2_1_OTP_FILE,
84 .board = QCA6174_HW_2_1_BOARD_DATA_FILE,
85 .board_size = QCA6174_BOARD_DATA_SZ,
86 .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
87 },
88 },
89 {
90 .id = QCA6174_HW_2_1_VERSION,
91 .dev_id = QCA6174_2_1_DEVICE_ID,
72 .name = "qca6174 hw2.1", 92 .name = "qca6174 hw2.1",
73 .patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR, 93 .patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR,
74 .uart_pin = 6, 94 .uart_pin = 6,
@@ -86,6 +106,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
86 }, 106 },
87 { 107 {
88 .id = QCA6174_HW_3_0_VERSION, 108 .id = QCA6174_HW_3_0_VERSION,
109 .dev_id = QCA6174_2_1_DEVICE_ID,
89 .name = "qca6174 hw3.0", 110 .name = "qca6174 hw3.0",
90 .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR, 111 .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
91 .uart_pin = 6, 112 .uart_pin = 6,
@@ -103,6 +124,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
103 }, 124 },
104 { 125 {
105 .id = QCA6174_HW_3_2_VERSION, 126 .id = QCA6174_HW_3_2_VERSION,
127 .dev_id = QCA6174_2_1_DEVICE_ID,
106 .name = "qca6174 hw3.2", 128 .name = "qca6174 hw3.2",
107 .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR, 129 .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
108 .uart_pin = 6, 130 .uart_pin = 6,
@@ -121,6 +143,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
121 }, 143 },
122 { 144 {
123 .id = QCA99X0_HW_2_0_DEV_VERSION, 145 .id = QCA99X0_HW_2_0_DEV_VERSION,
146 .dev_id = QCA99X0_2_0_DEVICE_ID,
124 .name = "qca99x0 hw2.0", 147 .name = "qca99x0 hw2.0",
125 .patch_load_addr = QCA99X0_HW_2_0_PATCH_LOAD_ADDR, 148 .patch_load_addr = QCA99X0_HW_2_0_PATCH_LOAD_ADDR,
126 .uart_pin = 7, 149 .uart_pin = 7,
@@ -139,10 +162,31 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
139 }, 162 },
140 { 163 {
141 .id = QCA9377_HW_1_0_DEV_VERSION, 164 .id = QCA9377_HW_1_0_DEV_VERSION,
165 .dev_id = QCA9377_1_0_DEVICE_ID,
142 .name = "qca9377 hw1.0", 166 .name = "qca9377 hw1.0",
143 .patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR, 167 .patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR,
144 .uart_pin = 7, 168 .uart_pin = 6,
145 .otp_exe_param = 0, 169 .otp_exe_param = 0,
170 .channel_counters_freq_hz = 88000,
171 .max_probe_resp_desc_thres = 0,
172 .fw = {
173 .dir = QCA9377_HW_1_0_FW_DIR,
174 .fw = QCA9377_HW_1_0_FW_FILE,
175 .otp = QCA9377_HW_1_0_OTP_FILE,
176 .board = QCA9377_HW_1_0_BOARD_DATA_FILE,
177 .board_size = QCA9377_BOARD_DATA_SZ,
178 .board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
179 },
180 },
181 {
182 .id = QCA9377_HW_1_1_DEV_VERSION,
183 .dev_id = QCA9377_1_0_DEVICE_ID,
184 .name = "qca9377 hw1.1",
185 .patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR,
186 .uart_pin = 6,
187 .otp_exe_param = 0,
188 .channel_counters_freq_hz = 88000,
189 .max_probe_resp_desc_thres = 0,
146 .fw = { 190 .fw = {
147 .dir = QCA9377_HW_1_0_FW_DIR, 191 .dir = QCA9377_HW_1_0_FW_DIR,
148 .fw = QCA9377_HW_1_0_FW_FILE, 192 .fw = QCA9377_HW_1_0_FW_FILE,
@@ -1263,7 +1307,8 @@ static int ath10k_init_hw_params(struct ath10k *ar)
1263 for (i = 0; i < ARRAY_SIZE(ath10k_hw_params_list); i++) { 1307 for (i = 0; i < ARRAY_SIZE(ath10k_hw_params_list); i++) {
1264 hw_params = &ath10k_hw_params_list[i]; 1308 hw_params = &ath10k_hw_params_list[i];
1265 1309
1266 if (hw_params->id == ar->target_version) 1310 if (hw_params->id == ar->target_version &&
1311 hw_params->dev_id == ar->dev_id)
1267 break; 1312 break;
1268 } 1313 }
1269 1314
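With dev_id in the table, the lookup keys on the (BMI target version, PCI device id) pair — which is exactly what lets two rows share QCA6174_HW_2_1_VERSION (qca6164 vs qca6174) and QCA9377's two revisions share one device id without ambiguity. Reduced form:

#include <linux/types.h>

struct hw_params_sketch {
	u32 id;		/* BMI target version */
	u16 dev_id;	/* PCI device id */
	const char *name;
};

static const struct hw_params_sketch *
find_hw_params_sketch(const struct hw_params_sketch *tbl, size_t n,
		      u32 version, u16 dev_id)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (tbl[i].id == version && tbl[i].dev_id == dev_id)
			return &tbl[i];	/* both keys must match */
	return NULL;
}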
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 018c64f4fd25..858d75f49a9f 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -636,6 +636,7 @@ struct ath10k {
636 636
637 struct ath10k_hw_params { 637 struct ath10k_hw_params {
638 u32 id; 638 u32 id;
639 u16 dev_id;
639 const char *name; 640 const char *name;
640 u32 patch_load_addr; 641 u32 patch_load_addr;
641 int uart_pin; 642 int uart_pin;
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 39966a05c1cc..713c2bcea178 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -22,6 +22,12 @@
22 22
23#define ATH10K_FW_DIR "ath10k" 23#define ATH10K_FW_DIR "ath10k"
24 24
25#define QCA988X_2_0_DEVICE_ID (0x003c)
26#define QCA6164_2_1_DEVICE_ID (0x0041)
27#define QCA6174_2_1_DEVICE_ID (0x003e)
28#define QCA99X0_2_0_DEVICE_ID (0x0040)
29#define QCA9377_1_0_DEVICE_ID (0x0042)
30
25/* QCA988X 1.0 definitions (unsupported) */ 31/* QCA988X 1.0 definitions (unsupported) */
26#define QCA988X_HW_1_0_CHIP_ID_REV 0x0 32#define QCA988X_HW_1_0_CHIP_ID_REV 0x0
27 33
@@ -42,6 +48,10 @@
42#define QCA6174_HW_3_0_VERSION 0x05020000 48#define QCA6174_HW_3_0_VERSION 0x05020000
43#define QCA6174_HW_3_2_VERSION 0x05030000 49#define QCA6174_HW_3_2_VERSION 0x05030000
44 50
51/* QCA9377 target BMI version signatures */
52#define QCA9377_HW_1_0_DEV_VERSION 0x05020000
53#define QCA9377_HW_1_1_DEV_VERSION 0x05020001
54
45enum qca6174_pci_rev { 55enum qca6174_pci_rev {
46 QCA6174_PCI_REV_1_1 = 0x11, 56 QCA6174_PCI_REV_1_1 = 0x11,
47 QCA6174_PCI_REV_1_3 = 0x13, 57 QCA6174_PCI_REV_1_3 = 0x13,
@@ -60,6 +70,11 @@ enum qca6174_chip_id_rev {
60 QCA6174_HW_3_2_CHIP_ID_REV = 10, 70 QCA6174_HW_3_2_CHIP_ID_REV = 10,
61}; 71};
62 72
73enum qca9377_chip_id_rev {
74 QCA9377_HW_1_0_CHIP_ID_REV = 0x0,
75 QCA9377_HW_1_1_CHIP_ID_REV = 0x1,
76};
77
63#define QCA6174_HW_2_1_FW_DIR "ath10k/QCA6174/hw2.1" 78#define QCA6174_HW_2_1_FW_DIR "ath10k/QCA6174/hw2.1"
64#define QCA6174_HW_2_1_FW_FILE "firmware.bin" 79#define QCA6174_HW_2_1_FW_FILE "firmware.bin"
65#define QCA6174_HW_2_1_OTP_FILE "otp.bin" 80#define QCA6174_HW_2_1_OTP_FILE "otp.bin"
@@ -85,8 +100,6 @@ enum qca6174_chip_id_rev {
85#define QCA99X0_HW_2_0_PATCH_LOAD_ADDR 0x1234 100#define QCA99X0_HW_2_0_PATCH_LOAD_ADDR 0x1234
86 101
87/* QCA9377 1.0 definitions */ 102/* QCA9377 1.0 definitions */
88#define QCA9377_HW_1_0_DEV_VERSION 0x05020001
89#define QCA9377_HW_1_0_CHIP_ID_REV 0x1
90#define QCA9377_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9377/hw1.0" 103#define QCA9377_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9377/hw1.0"
91#define QCA9377_HW_1_0_FW_FILE "firmware.bin" 104#define QCA9377_HW_1_0_FW_FILE "firmware.bin"
92#define QCA9377_HW_1_0_OTP_FILE "otp.bin" 105#define QCA9377_HW_1_0_OTP_FILE "otp.bin"
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index a7411fe90cc4..95a55405ebf0 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -4225,7 +4225,7 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
4225 4225
4226static u32 get_nss_from_chainmask(u16 chain_mask) 4226static u32 get_nss_from_chainmask(u16 chain_mask)
4227{ 4227{
4228 if ((chain_mask & 0x15) == 0x15) 4228 if ((chain_mask & 0xf) == 0xf)
4229 return 4; 4229 return 4;
4230 else if ((chain_mask & 0x7) == 0x7) 4230 else if ((chain_mask & 0x7) == 0x7)
4231 return 3; 4231 return 3;
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 3fca200b986c..930785a724e1 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -57,12 +57,6 @@ MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
57#define ATH10K_PCI_TARGET_WAIT 3000 57#define ATH10K_PCI_TARGET_WAIT 3000
58#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3 58#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
59 59
60#define QCA988X_2_0_DEVICE_ID (0x003c)
61#define QCA6164_2_1_DEVICE_ID (0x0041)
62#define QCA6174_2_1_DEVICE_ID (0x003e)
63#define QCA99X0_2_0_DEVICE_ID (0x0040)
64#define QCA9377_1_0_DEVICE_ID (0x0042)
65
66static const struct pci_device_id ath10k_pci_id_table[] = { 60static const struct pci_device_id ath10k_pci_id_table[] = {
67 { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */ 61 { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
68 { PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */ 62 { PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */
@@ -92,7 +86,9 @@ static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
92 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV }, 86 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
93 87
94 { QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV }, 88 { QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV },
89
95 { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV }, 90 { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV },
91 { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_1_CHIP_ID_REV },
96}; 92};
97 93
98static void ath10k_pci_buffer_cleanup(struct ath10k *ar); 94static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
@@ -111,8 +107,9 @@ static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
111static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state); 107static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
112static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state); 108static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
113static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state); 109static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
110static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
114 111
115static const struct ce_attr host_ce_config_wlan[] = { 112static struct ce_attr host_ce_config_wlan[] = {
116 /* CE0: host->target HTC control and raw streams */ 113 /* CE0: host->target HTC control and raw streams */
117 { 114 {
118 .flags = CE_ATTR_FLAGS, 115 .flags = CE_ATTR_FLAGS,
@@ -128,7 +125,7 @@ static const struct ce_attr host_ce_config_wlan[] = {
128 .src_nentries = 0, 125 .src_nentries = 0,
129 .src_sz_max = 2048, 126 .src_sz_max = 2048,
130 .dest_nentries = 512, 127 .dest_nentries = 512,
131 .recv_cb = ath10k_pci_htc_rx_cb, 128 .recv_cb = ath10k_pci_htt_htc_rx_cb,
132 }, 129 },
133 130
134 /* CE2: target->host WMI */ 131 /* CE2: target->host WMI */
@@ -217,7 +214,7 @@ static const struct ce_attr host_ce_config_wlan[] = {
217}; 214};
218 215
219/* Target firmware's Copy Engine configuration. */ 216/* Target firmware's Copy Engine configuration. */
220static const struct ce_pipe_config target_ce_config_wlan[] = { 217static struct ce_pipe_config target_ce_config_wlan[] = {
221 /* CE0: host->target HTC control and raw streams */ 218 /* CE0: host->target HTC control and raw streams */
222 { 219 {
223 .pipenum = __cpu_to_le32(0), 220 .pipenum = __cpu_to_le32(0),
@@ -330,7 +327,7 @@ static const struct ce_pipe_config target_ce_config_wlan[] = {
330 * This table is derived from the CE_PCI TABLE, above. 327 * This table is derived from the CE_PCI TABLE, above.
331 * It is passed to the Target at startup for use by firmware. 328 * It is passed to the Target at startup for use by firmware.
332 */ 329 */
333static const struct service_to_pipe target_service_to_ce_map_wlan[] = { 330static struct service_to_pipe target_service_to_ce_map_wlan[] = {
334 { 331 {
335 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO), 332 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
336 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ 333 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
@@ -1208,6 +1205,16 @@ static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
1208 ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler); 1205 ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
1209} 1206}
1210 1207
1208static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
1209{
 1210	/* CE4 polling needs to be done whenever the CE pipe
 1211	 * that transports HTT Rx (target->host) is processed.
 1212	 */
1213 ath10k_ce_per_engine_service(ce_state->ar, 4);
1214
1215 ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
1216}
1217
1211/* Called by lower (CE) layer when a send to HTT Target completes. */ 1218/* Called by lower (CE) layer when a send to HTT Target completes. */
1212static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state) 1219static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
1213{ 1220{
@@ -2027,6 +2034,29 @@ static int ath10k_pci_init_config(struct ath10k *ar)
2027 return 0; 2034 return 0;
2028} 2035}
2029 2036
2037static void ath10k_pci_override_ce_config(struct ath10k *ar)
2038{
2039 struct ce_attr *attr;
2040 struct ce_pipe_config *config;
2041
2042 /* For QCA6174 we're overriding the Copy Engine 5 configuration,
 2043	 * since it is currently used for another feature.
2044 */
2045
2046 /* Override Host's Copy Engine 5 configuration */
2047 attr = &host_ce_config_wlan[5];
2048 attr->src_sz_max = 0;
2049 attr->dest_nentries = 0;
2050
2051 /* Override Target firmware's Copy Engine configuration */
2052 config = &target_ce_config_wlan[5];
2053 config->pipedir = __cpu_to_le32(PIPEDIR_OUT);
2054 config->nbytes_max = __cpu_to_le32(2048);
2055
2056 /* Map from service/endpoint to Copy Engine */
2057 target_service_to_ce_map_wlan[15].pipenum = __cpu_to_le32(1);
2058}
2059
2030static int ath10k_pci_alloc_pipes(struct ath10k *ar) 2060static int ath10k_pci_alloc_pipes(struct ath10k *ar)
2031{ 2061{
2032 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2062 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -3020,6 +3050,9 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
3020 goto err_core_destroy; 3050 goto err_core_destroy;
3021 } 3051 }
3022 3052
3053 if (QCA_REV_6174(ar))
3054 ath10k_pci_override_ce_config(ar);
3055
3023 ret = ath10k_pci_alloc_pipes(ar); 3056 ret = ath10k_pci_alloc_pipes(ar);
3024 if (ret) { 3057 if (ret) {
3025 ath10k_err(ar, "failed to allocate copy engine pipes: %d\n", 3058 ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
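Note why the earlier hunks dropped const from host_ce_config_wlan, target_ce_config_wlan and target_service_to_ce_map_wlan: ath10k_pci_override_ce_config() writes into them once the chip is identified, before ath10k_pci_alloc_pipes() consumes the tables, and writing through a const object placed in .rodata would be invalid. Minimal illustration:

struct ce_attr_sketch {
	unsigned int src_sz_max;
	unsigned int dest_nentries;
};

/* Were this 'static const', it would land in .rodata and the
 * probe-time override below would be rejected (or fault).
 */
static struct ce_attr_sketch host_ce_sketch[8];

static void override_ce5_sketch(void)
{
	host_ce_sketch[5].src_sz_max = 0;	/* CE5 repurposed */
	host_ce_sketch[5].dest_nentries = 0;
}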
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index 1a73c7a1da77..d9a4aee246a6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -70,12 +70,18 @@
70 70
71/* Highest firmware API version supported */ 71/* Highest firmware API version supported */
72#define IWL7260_UCODE_API_MAX 17 72#define IWL7260_UCODE_API_MAX 17
73#define IWL7265_UCODE_API_MAX 19
74#define IWL7265D_UCODE_API_MAX 19
73 75
74/* Oldest version we won't warn about */ 76/* Oldest version we won't warn about */
75#define IWL7260_UCODE_API_OK 13 77#define IWL7260_UCODE_API_OK 13
78#define IWL7265_UCODE_API_OK 13
79#define IWL7265D_UCODE_API_OK 13
76 80
77/* Lowest firmware API version supported */ 81/* Lowest firmware API version supported */
78#define IWL7260_UCODE_API_MIN 13 82#define IWL7260_UCODE_API_MIN 13
83#define IWL7265_UCODE_API_MIN 13
84#define IWL7265D_UCODE_API_MIN 13
79 85
80/* NVM versions */ 86/* NVM versions */
81#define IWL7260_NVM_VERSION 0x0a1d 87#define IWL7260_NVM_VERSION 0x0a1d
@@ -149,10 +155,7 @@ static const struct iwl_ht_params iwl7000_ht_params = {
149 .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ), 155 .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
150}; 156};
151 157
152#define IWL_DEVICE_7000 \ 158#define IWL_DEVICE_7000_COMMON \
153 .ucode_api_max = IWL7260_UCODE_API_MAX, \
154 .ucode_api_ok = IWL7260_UCODE_API_OK, \
155 .ucode_api_min = IWL7260_UCODE_API_MIN, \
156 .device_family = IWL_DEVICE_FAMILY_7000, \ 159 .device_family = IWL_DEVICE_FAMILY_7000, \
157 .max_inst_size = IWL60_RTC_INST_SIZE, \ 160 .max_inst_size = IWL60_RTC_INST_SIZE, \
158 .max_data_size = IWL60_RTC_DATA_SIZE, \ 161 .max_data_size = IWL60_RTC_DATA_SIZE, \
@@ -163,6 +166,24 @@ static const struct iwl_ht_params iwl7000_ht_params = {
163 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ 166 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
164 .dccm_offset = IWL7000_DCCM_OFFSET 167 .dccm_offset = IWL7000_DCCM_OFFSET
165 168
169#define IWL_DEVICE_7000 \
170 IWL_DEVICE_7000_COMMON, \
171 .ucode_api_max = IWL7260_UCODE_API_MAX, \
172 .ucode_api_ok = IWL7260_UCODE_API_OK, \
173 .ucode_api_min = IWL7260_UCODE_API_MIN
174
175#define IWL_DEVICE_7005 \
176 IWL_DEVICE_7000_COMMON, \
177 .ucode_api_max = IWL7265_UCODE_API_MAX, \
178 .ucode_api_ok = IWL7265_UCODE_API_OK, \
179 .ucode_api_min = IWL7265_UCODE_API_MIN
180
181#define IWL_DEVICE_7005D \
182 IWL_DEVICE_7000_COMMON, \
183 .ucode_api_max = IWL7265D_UCODE_API_MAX, \
184 .ucode_api_ok = IWL7265D_UCODE_API_OK, \
185 .ucode_api_min = IWL7265D_UCODE_API_MIN
186
166const struct iwl_cfg iwl7260_2ac_cfg = { 187const struct iwl_cfg iwl7260_2ac_cfg = {
167 .name = "Intel(R) Dual Band Wireless AC 7260", 188 .name = "Intel(R) Dual Band Wireless AC 7260",
168 .fw_name_pre = IWL7260_FW_PRE, 189 .fw_name_pre = IWL7260_FW_PRE,
@@ -266,7 +287,7 @@ static const struct iwl_ht_params iwl7265_ht_params = {
266const struct iwl_cfg iwl3165_2ac_cfg = { 287const struct iwl_cfg iwl3165_2ac_cfg = {
267 .name = "Intel(R) Dual Band Wireless AC 3165", 288 .name = "Intel(R) Dual Band Wireless AC 3165",
268 .fw_name_pre = IWL7265D_FW_PRE, 289 .fw_name_pre = IWL7265D_FW_PRE,
269 IWL_DEVICE_7000, 290 IWL_DEVICE_7005D,
270 .ht_params = &iwl7000_ht_params, 291 .ht_params = &iwl7000_ht_params,
271 .nvm_ver = IWL3165_NVM_VERSION, 292 .nvm_ver = IWL3165_NVM_VERSION,
272 .nvm_calib_ver = IWL3165_TX_POWER_VERSION, 293 .nvm_calib_ver = IWL3165_TX_POWER_VERSION,
@@ -277,7 +298,7 @@ const struct iwl_cfg iwl3165_2ac_cfg = {
277const struct iwl_cfg iwl7265_2ac_cfg = { 298const struct iwl_cfg iwl7265_2ac_cfg = {
278 .name = "Intel(R) Dual Band Wireless AC 7265", 299 .name = "Intel(R) Dual Band Wireless AC 7265",
279 .fw_name_pre = IWL7265_FW_PRE, 300 .fw_name_pre = IWL7265_FW_PRE,
280 IWL_DEVICE_7000, 301 IWL_DEVICE_7005,
281 .ht_params = &iwl7265_ht_params, 302 .ht_params = &iwl7265_ht_params,
282 .nvm_ver = IWL7265_NVM_VERSION, 303 .nvm_ver = IWL7265_NVM_VERSION,
283 .nvm_calib_ver = IWL7265_TX_POWER_VERSION, 304 .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
@@ -288,7 +309,7 @@ const struct iwl_cfg iwl7265_2ac_cfg = {
288const struct iwl_cfg iwl7265_2n_cfg = { 309const struct iwl_cfg iwl7265_2n_cfg = {
289 .name = "Intel(R) Dual Band Wireless N 7265", 310 .name = "Intel(R) Dual Band Wireless N 7265",
290 .fw_name_pre = IWL7265_FW_PRE, 311 .fw_name_pre = IWL7265_FW_PRE,
291 IWL_DEVICE_7000, 312 IWL_DEVICE_7005,
292 .ht_params = &iwl7265_ht_params, 313 .ht_params = &iwl7265_ht_params,
293 .nvm_ver = IWL7265_NVM_VERSION, 314 .nvm_ver = IWL7265_NVM_VERSION,
294 .nvm_calib_ver = IWL7265_TX_POWER_VERSION, 315 .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
@@ -299,7 +320,7 @@ const struct iwl_cfg iwl7265_2n_cfg = {
299const struct iwl_cfg iwl7265_n_cfg = { 320const struct iwl_cfg iwl7265_n_cfg = {
300 .name = "Intel(R) Wireless N 7265", 321 .name = "Intel(R) Wireless N 7265",
301 .fw_name_pre = IWL7265_FW_PRE, 322 .fw_name_pre = IWL7265_FW_PRE,
302 IWL_DEVICE_7000, 323 IWL_DEVICE_7005,
303 .ht_params = &iwl7265_ht_params, 324 .ht_params = &iwl7265_ht_params,
304 .nvm_ver = IWL7265_NVM_VERSION, 325 .nvm_ver = IWL7265_NVM_VERSION,
305 .nvm_calib_ver = IWL7265_TX_POWER_VERSION, 326 .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
@@ -310,7 +331,7 @@ const struct iwl_cfg iwl7265_n_cfg = {
310const struct iwl_cfg iwl7265d_2ac_cfg = { 331const struct iwl_cfg iwl7265d_2ac_cfg = {
311 .name = "Intel(R) Dual Band Wireless AC 7265", 332 .name = "Intel(R) Dual Band Wireless AC 7265",
312 .fw_name_pre = IWL7265D_FW_PRE, 333 .fw_name_pre = IWL7265D_FW_PRE,
313 IWL_DEVICE_7000, 334 IWL_DEVICE_7005D,
314 .ht_params = &iwl7265_ht_params, 335 .ht_params = &iwl7265_ht_params,
315 .nvm_ver = IWL7265D_NVM_VERSION, 336 .nvm_ver = IWL7265D_NVM_VERSION,
316 .nvm_calib_ver = IWL7265_TX_POWER_VERSION, 337 .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
@@ -321,7 +342,7 @@ const struct iwl_cfg iwl7265d_2ac_cfg = {
321const struct iwl_cfg iwl7265d_2n_cfg = { 342const struct iwl_cfg iwl7265d_2n_cfg = {
322 .name = "Intel(R) Dual Band Wireless N 7265", 343 .name = "Intel(R) Dual Band Wireless N 7265",
323 .fw_name_pre = IWL7265D_FW_PRE, 344 .fw_name_pre = IWL7265D_FW_PRE,
324 IWL_DEVICE_7000, 345 IWL_DEVICE_7005D,
325 .ht_params = &iwl7265_ht_params, 346 .ht_params = &iwl7265_ht_params,
326 .nvm_ver = IWL7265D_NVM_VERSION, 347 .nvm_ver = IWL7265D_NVM_VERSION,
327 .nvm_calib_ver = IWL7265_TX_POWER_VERSION, 348 .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
@@ -332,7 +353,7 @@ const struct iwl_cfg iwl7265d_2n_cfg = {
332const struct iwl_cfg iwl7265d_n_cfg = { 353const struct iwl_cfg iwl7265d_n_cfg = {
333 .name = "Intel(R) Wireless N 7265", 354 .name = "Intel(R) Wireless N 7265",
334 .fw_name_pre = IWL7265D_FW_PRE, 355 .fw_name_pre = IWL7265D_FW_PRE,
335 IWL_DEVICE_7000, 356 IWL_DEVICE_7005D,
336 .ht_params = &iwl7265_ht_params, 357 .ht_params = &iwl7265_ht_params,
337 .nvm_ver = IWL7265D_NVM_VERSION, 358 .nvm_ver = IWL7265D_NVM_VERSION,
338 .nvm_calib_ver = IWL7265_TX_POWER_VERSION, 359 .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
@@ -342,5 +363,5 @@ const struct iwl_cfg iwl7265d_n_cfg = {
342 363
343MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); 364MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
344MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); 365MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
345MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); 366MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7265_UCODE_API_OK));
346MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); 367MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7265D_UCODE_API_OK));
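The 7000-series refactor is macro composition over designated initializers: a shared IWL_DEVICE_7000_COMMON fragment plus thin per-family wrappers that differ only in their ucode API bounds, so the 7265/7265D cfgs can advance to API 19 while 7260 stays capped at 17. The shape of the pattern, stripped to essentials:

struct cfg_sketch {
	int family;
	int api_max, api_ok, api_min;
};

#define DEVICE_COMMON_SKETCH \
	.family = 7000

#define DEVICE_7005_SKETCH \
	DEVICE_COMMON_SKETCH, \
	.api_max = 19, .api_ok = 13, .api_min = 13

static const struct cfg_sketch iwl7265_cfg_sketch = {
	DEVICE_7005_SKETCH,
	/* per-device fields would continue here */
};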
diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c
index 0116e5a4c393..9bcc0bf937d8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-8000.c
@@ -69,7 +69,7 @@
69#include "iwl-agn-hw.h" 69#include "iwl-agn-hw.h"
70 70
71/* Highest firmware API version supported */ 71/* Highest firmware API version supported */
72#define IWL8000_UCODE_API_MAX 17 72#define IWL8000_UCODE_API_MAX 19
73 73
74/* Oldest version we won't warn about */ 74/* Oldest version we won't warn about */
75#define IWL8000_UCODE_API_OK 13 75#define IWL8000_UCODE_API_OK 13
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index 85ae902df7c0..29ae58ebf223 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -309,9 +309,9 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
309 * to transmit packets to the AP, i.e. the PTK. 309 * to transmit packets to the AP, i.e. the PTK.
310 */ 310 */
311 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) { 311 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
312 key->hw_key_idx = 0;
313 mvm->ptk_ivlen = key->iv_len; 312 mvm->ptk_ivlen = key->iv_len;
314 mvm->ptk_icvlen = key->icv_len; 313 mvm->ptk_icvlen = key->icv_len;
314 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 0);
315 } else { 315 } else {
316 /* 316 /*
317 * firmware only supports TSC/RSC for a single key, 317 * firmware only supports TSC/RSC for a single key,
@@ -319,12 +319,11 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
319 * with new ones -- this relies on mac80211 doing 319 * with new ones -- this relies on mac80211 doing
320 * list_add_tail(). 320 * list_add_tail().
321 */ 321 */
322 key->hw_key_idx = 1;
323 mvm->gtk_ivlen = key->iv_len; 322 mvm->gtk_ivlen = key->iv_len;
324 mvm->gtk_icvlen = key->icv_len; 323 mvm->gtk_icvlen = key->icv_len;
324 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 1);
325 } 325 }
326 326
327 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, true);
328 data->error = ret != 0; 327 data->error = ret != 0;
329out_unlock: 328out_unlock:
330 mutex_unlock(&mvm->mutex); 329 mutex_unlock(&mvm->mutex);
@@ -772,9 +771,6 @@ static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm)
772 */ 771 */
773 set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); 772 set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
774 773
775 /* We reprogram keys and shouldn't allocate new key indices */
776 memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
777
778 mvm->ptk_ivlen = 0; 774 mvm->ptk_ivlen = 0;
779 mvm->ptk_icvlen = 0; 775 mvm->ptk_icvlen = 0;
780 mvm->ptk_ivlen = 0; 776 mvm->ptk_ivlen = 0;
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 1fb684693040..e88afac51c5d 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -2941,6 +2941,7 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
2941{ 2941{
2942 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2942 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2943 int ret; 2943 int ret;
2944 u8 key_offset;
2944 2945
2945 if (iwlwifi_mod_params.sw_crypto) { 2946 if (iwlwifi_mod_params.sw_crypto) {
2946 IWL_DEBUG_MAC80211(mvm, "leave - hwcrypto disabled\n"); 2947 IWL_DEBUG_MAC80211(mvm, "leave - hwcrypto disabled\n");
@@ -3006,10 +3007,14 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
3006 break; 3007 break;
3007 } 3008 }
3008 3009
3010 /* in HW restart reuse the index, otherwise request a new one */
3011 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
3012 key_offset = key->hw_key_idx;
3013 else
3014 key_offset = STA_KEY_IDX_INVALID;
3015
3009 IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n"); 3016 IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n");
3010 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 3017 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset);
3011 test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
3012 &mvm->status));
3013 if (ret) { 3018 if (ret) {
3014 IWL_WARN(mvm, "set key failed\n"); 3019 IWL_WARN(mvm, "set key failed\n");
3015 /* 3020 /*
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index 300a249486e4..2b976b110207 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -1201,7 +1201,8 @@ static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
1201 return max_offs; 1201 return max_offs;
1202} 1202}
1203 1203
1204static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif, 1204static u8 iwl_mvm_get_key_sta_id(struct iwl_mvm *mvm,
1205 struct ieee80211_vif *vif,
1205 struct ieee80211_sta *sta) 1206 struct ieee80211_sta *sta)
1206{ 1207{
1207 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1208 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -1218,8 +1219,21 @@ static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif,
1218 * station ID, then use AP's station ID. 1219 * station ID, then use AP's station ID.
1219 */ 1220 */
1220 if (vif->type == NL80211_IFTYPE_STATION && 1221 if (vif->type == NL80211_IFTYPE_STATION &&
1221 mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) 1222 mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
1222 return mvmvif->ap_sta_id; 1223 u8 sta_id = mvmvif->ap_sta_id;
1224
1225 sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
1226 lockdep_is_held(&mvm->mutex));
1227 /*
1228 * It is possible that the 'sta' parameter is NULL,
1229 * for example when a GTK is removed - the sta_id will then
1230 * be the AP ID, and no station was passed by mac80211.
1231 */
1232 if (IS_ERR_OR_NULL(sta))
1233 return IWL_MVM_STATION_COUNT;
1234
1235 return sta_id;
1236 }
1223 1237
1224 return IWL_MVM_STATION_COUNT; 1238 return IWL_MVM_STATION_COUNT;
1225} 1239}
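The rewritten lookup dereferences mvm->fw_id_to_mac_id[sta_id] via rcu_dereference_check() with lockdep_is_held(&mvm->mutex): the access is valid either inside an RCU read-side critical section or with the mutex held, and IS_ERR_OR_NULL() covers both an empty slot and one parked on an error pointer mid-update (the GTK-removal case the comment describes). The general pattern:

#include <linux/err.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>

struct table_sketch {
	struct mutex lock;
	void __rcu *slot[16];
};

/* Sketch: deref is legal under RCU *or* with t->lock held. */
static void *lookup_sketch(struct table_sketch *t, unsigned int i)
{
	void *p = rcu_dereference_check(t->slot[i],
					lockdep_is_held(&t->lock));

	if (IS_ERR_OR_NULL(p))
		return NULL;	/* treat as "no station" */
	return p;
}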
@@ -1227,7 +1241,8 @@ static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif,
1227static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, 1241static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
1228 struct iwl_mvm_sta *mvm_sta, 1242 struct iwl_mvm_sta *mvm_sta,
1229 struct ieee80211_key_conf *keyconf, bool mcast, 1243 struct ieee80211_key_conf *keyconf, bool mcast,
1230 u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags) 1244 u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
1245 u8 key_offset)
1231{ 1246{
1232 struct iwl_mvm_add_sta_key_cmd cmd = {}; 1247 struct iwl_mvm_add_sta_key_cmd cmd = {};
1233 __le16 key_flags; 1248 __le16 key_flags;
@@ -1269,7 +1284,7 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
1269 if (mcast) 1284 if (mcast)
1270 key_flags |= cpu_to_le16(STA_KEY_MULTICAST); 1285 key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
1271 1286
1272 cmd.key_offset = keyconf->hw_key_idx; 1287 cmd.key_offset = key_offset;
1273 cmd.key_flags = key_flags; 1288 cmd.key_flags = key_flags;
1274 cmd.sta_id = sta_id; 1289 cmd.sta_id = sta_id;
1275 1290
@@ -1360,6 +1375,7 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
1360 struct ieee80211_vif *vif, 1375 struct ieee80211_vif *vif,
1361 struct ieee80211_sta *sta, 1376 struct ieee80211_sta *sta,
1362 struct ieee80211_key_conf *keyconf, 1377 struct ieee80211_key_conf *keyconf,
1378 u8 key_offset,
1363 bool mcast) 1379 bool mcast)
1364{ 1380{
1365 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 1381 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
@@ -1375,17 +1391,17 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
1375 ieee80211_get_key_rx_seq(keyconf, 0, &seq); 1391 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
1376 ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k); 1392 ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
1377 ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast, 1393 ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
1378 seq.tkip.iv32, p1k, 0); 1394 seq.tkip.iv32, p1k, 0, key_offset);
1379 break; 1395 break;
1380 case WLAN_CIPHER_SUITE_CCMP: 1396 case WLAN_CIPHER_SUITE_CCMP:
1381 case WLAN_CIPHER_SUITE_WEP40: 1397 case WLAN_CIPHER_SUITE_WEP40:
1382 case WLAN_CIPHER_SUITE_WEP104: 1398 case WLAN_CIPHER_SUITE_WEP104:
1383 ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast, 1399 ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
1384 0, NULL, 0); 1400 0, NULL, 0, key_offset);
1385 break; 1401 break;
1386 default: 1402 default:
1387 ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast, 1403 ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
1388 0, NULL, 0); 1404 0, NULL, 0, key_offset);
1389 } 1405 }
1390 1406
1391 return ret; 1407 return ret;
@@ -1433,7 +1449,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
1433 struct ieee80211_vif *vif, 1449 struct ieee80211_vif *vif,
1434 struct ieee80211_sta *sta, 1450 struct ieee80211_sta *sta,
1435 struct ieee80211_key_conf *keyconf, 1451 struct ieee80211_key_conf *keyconf,
1436 bool have_key_offset) 1452 u8 key_offset)
1437{ 1453{
1438 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); 1454 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
1439 u8 sta_id; 1455 u8 sta_id;
@@ -1443,7 +1459,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
1443 lockdep_assert_held(&mvm->mutex); 1459 lockdep_assert_held(&mvm->mutex);
1444 1460
1445 /* Get the station id from the mvm local station table */ 1461 /* Get the station id from the mvm local station table */
1446 sta_id = iwl_mvm_get_key_sta_id(vif, sta); 1462 sta_id = iwl_mvm_get_key_sta_id(mvm, vif, sta);
1447 if (sta_id == IWL_MVM_STATION_COUNT) { 1463 if (sta_id == IWL_MVM_STATION_COUNT) {
1448 IWL_ERR(mvm, "Failed to find station id\n"); 1464 IWL_ERR(mvm, "Failed to find station id\n");
1449 return -EINVAL; 1465 return -EINVAL;
@@ -1470,18 +1486,25 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
1470 if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif)) 1486 if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
1471 return -EINVAL; 1487 return -EINVAL;
1472 1488
1473 if (!have_key_offset) { 1489 /* If the key_offset is not pre-assigned, we need to find a
1474 /* 1490 * new offset to use. In normal cases, the offset is not
1475 * The D3 firmware hardcodes the PTK offset to 0, so we have to 1491 * pre-assigned, but during HW_RESTART we want to reuse the
1476 * configure it there. As a result, this workaround exists to 1492 * same indices, so we pass them when this function is called.
1477 * let the caller set the key offset (hw_key_idx), see d3.c. 1493 *
 1478 */ 1494 * In D3 entry, we need to hardcode the indices (because the
1479 keyconf->hw_key_idx = iwl_mvm_set_fw_key_idx(mvm); 1495 * firmware hardcodes the PTK offset to 0). In this case, we
1480 if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID) 1496 * need to make sure we don't overwrite the hw_key_idx in the
1497 * keyconf structure, because otherwise we cannot configure
1498 * the original ones back when resuming.
1499 */
1500 if (key_offset == STA_KEY_IDX_INVALID) {
1501 key_offset = iwl_mvm_set_fw_key_idx(mvm);
1502 if (key_offset == STA_KEY_IDX_INVALID)
1481 return -ENOSPC; 1503 return -ENOSPC;
1504 keyconf->hw_key_idx = key_offset;
1482 } 1505 }
1483 1506
1484 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, mcast); 1507 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
1485 if (ret) { 1508 if (ret) {
1486 __clear_bit(keyconf->hw_key_idx, mvm->fw_key_table); 1509 __clear_bit(keyconf->hw_key_idx, mvm->fw_key_table);
1487 goto end; 1510 goto end;
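
A toy allocator modeling the "honor a pre-assigned offset, otherwise claim the first free slot" logic above; iwl_mvm_set_fw_key_idx() is the real allocator, everything here is a user-space stand-in:

#include <stdint.h>

#define STA_KEY_IDX_INVALID 0xff
#define MAX_FW_KEYS 16

/* Pre-assigned offsets (HW restart, D3) are kept as-is; otherwise the
 * first clear bit in the key table is claimed. */
static uint8_t alloc_key_offset(uint16_t *key_table, uint8_t key_offset)
{
	uint8_t i;

	if (key_offset != STA_KEY_IDX_INVALID)
		return key_offset;              /* caller pinned the slot */

	for (i = 0; i < MAX_FW_KEYS; i++) {
		if (!(*key_table & (1u << i))) {
			*key_table |= 1u << i;
			return i;
		}
	}
	return STA_KEY_IDX_INVALID;             /* maps to -ENOSPC above */
}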
@@ -1495,7 +1518,8 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
1495 */ 1518 */
1496 if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 || 1519 if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
1497 keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) { 1520 keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) {
1498 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, !mcast); 1521 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
1522 key_offset, !mcast);
1499 if (ret) { 1523 if (ret) {
1500 __clear_bit(keyconf->hw_key_idx, mvm->fw_key_table); 1524 __clear_bit(keyconf->hw_key_idx, mvm->fw_key_table);
1501 __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast); 1525 __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
@@ -1521,7 +1545,7 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
1521 lockdep_assert_held(&mvm->mutex); 1545 lockdep_assert_held(&mvm->mutex);
1522 1546
1523 /* Get the station id from the mvm local station table */ 1547 /* Get the station id from the mvm local station table */
1524 sta_id = iwl_mvm_get_key_sta_id(vif, sta); 1548 sta_id = iwl_mvm_get_key_sta_id(mvm, vif, sta);
1525 1549
1526 IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n", 1550 IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
1527 keyconf->keyidx, sta_id); 1551 keyconf->keyidx, sta_id);
@@ -1547,24 +1571,6 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
1547 return 0; 1571 return 0;
1548 } 1572 }
1549 1573
1550 /*
1551 * It is possible that the 'sta' parameter is NULL, and thus
1552 * there is a need to retrieve the sta from the local station table,
1553 * for example when a GTK is removed (where the sta_id will then be
1554 * the AP ID, and no station was passed by mac80211.)
1555 */
1556 if (!sta) {
1557 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1558 lockdep_is_held(&mvm->mutex));
1559 if (!sta) {
1560 IWL_ERR(mvm, "Invalid station id\n");
1561 return -EINVAL;
1562 }
1563 }
1564
1565 if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
1566 return -EINVAL;
1567
1568 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast); 1574 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
1569 if (ret) 1575 if (ret)
1570 return ret; 1576 return ret;
@@ -1584,14 +1590,15 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
1584 u16 *phase1key) 1590 u16 *phase1key)
1585{ 1591{
1586 struct iwl_mvm_sta *mvm_sta; 1592 struct iwl_mvm_sta *mvm_sta;
1587 u8 sta_id = iwl_mvm_get_key_sta_id(vif, sta); 1593 u8 sta_id;
1588 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); 1594 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
1589 1595
1590 if (WARN_ON_ONCE(sta_id == IWL_MVM_STATION_COUNT))
1591 return;
1592
1593 rcu_read_lock(); 1596 rcu_read_lock();
1594 1597
1598 sta_id = iwl_mvm_get_key_sta_id(mvm, vif, sta);
1599 if (WARN_ON_ONCE(sta_id == IWL_MVM_STATION_COUNT))
1600 goto unlock;
1601
1595 if (!sta) { 1602 if (!sta) {
1596 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); 1603 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
1597 if (WARN_ON(IS_ERR_OR_NULL(sta))) { 1604 if (WARN_ON(IS_ERR_OR_NULL(sta))) {
@@ -1602,7 +1609,9 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
1602 1609
1603 mvm_sta = iwl_mvm_sta_from_mac80211(sta); 1610 mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1604 iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast, 1611 iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
1605 iv32, phase1key, CMD_ASYNC); 1612 iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx);
1613
1614 unlock:
1606 rcu_read_unlock(); 1615 rcu_read_unlock();
1607} 1616}
1608 1617
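
The shape of the TKIP fix above, sketched with a pthread rwlock standing in for RCU (an assumption of this model, not the kernel API): the station-id lookup now happens inside the read-side critical section, so the table cannot change between lookup and use.

#include <pthread.h>

static pthread_rwlock_t tbl_lock = PTHREAD_RWLOCK_INITIALIZER;

static void update_tkip_key(void)
{
	pthread_rwlock_rdlock(&tbl_lock);
	/* sta_id = lookup(...); validate it; send the key material */
	pthread_rwlock_unlock(&tbl_lock);
}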
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h
index eedb215eba3f..0631cc0a6d3c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.h
@@ -365,8 +365,8 @@ int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
365int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, 365int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
366 struct ieee80211_vif *vif, 366 struct ieee80211_vif *vif,
367 struct ieee80211_sta *sta, 367 struct ieee80211_sta *sta,
368 struct ieee80211_key_conf *key, 368 struct ieee80211_key_conf *keyconf,
369 bool have_key_offset); 369 u8 key_offset);
370int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, 370int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
371 struct ieee80211_vif *vif, 371 struct ieee80211_vif *vif,
372 struct ieee80211_sta *sta, 372 struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 644b58bc5226..639761fb2bfb 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -423,14 +423,21 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
423/* 8000 Series */ 423/* 8000 Series */
424 {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)}, 424 {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)},
425 {IWL_PCI_DEVICE(0x24F3, 0x1010, iwl8260_2ac_cfg)}, 425 {IWL_PCI_DEVICE(0x24F3, 0x1010, iwl8260_2ac_cfg)},
426 {IWL_PCI_DEVICE(0x24F3, 0x0130, iwl8260_2ac_cfg)},
427 {IWL_PCI_DEVICE(0x24F3, 0x1130, iwl8260_2ac_cfg)},
428 {IWL_PCI_DEVICE(0x24F3, 0x0132, iwl8260_2ac_cfg)},
429 {IWL_PCI_DEVICE(0x24F3, 0x1132, iwl8260_2ac_cfg)},
426 {IWL_PCI_DEVICE(0x24F3, 0x0110, iwl8260_2ac_cfg)}, 430 {IWL_PCI_DEVICE(0x24F3, 0x0110, iwl8260_2ac_cfg)},
431 {IWL_PCI_DEVICE(0x24F3, 0x01F0, iwl8260_2ac_cfg)},
432 {IWL_PCI_DEVICE(0x24F3, 0x0012, iwl8260_2ac_cfg)},
433 {IWL_PCI_DEVICE(0x24F3, 0x1012, iwl8260_2ac_cfg)},
427 {IWL_PCI_DEVICE(0x24F3, 0x1110, iwl8260_2ac_cfg)}, 434 {IWL_PCI_DEVICE(0x24F3, 0x1110, iwl8260_2ac_cfg)},
428 {IWL_PCI_DEVICE(0x24F3, 0x0050, iwl8260_2ac_cfg)}, 435 {IWL_PCI_DEVICE(0x24F3, 0x0050, iwl8260_2ac_cfg)},
429 {IWL_PCI_DEVICE(0x24F3, 0x0250, iwl8260_2ac_cfg)}, 436 {IWL_PCI_DEVICE(0x24F3, 0x0250, iwl8260_2ac_cfg)},
430 {IWL_PCI_DEVICE(0x24F3, 0x1050, iwl8260_2ac_cfg)}, 437 {IWL_PCI_DEVICE(0x24F3, 0x1050, iwl8260_2ac_cfg)},
431 {IWL_PCI_DEVICE(0x24F3, 0x0150, iwl8260_2ac_cfg)}, 438 {IWL_PCI_DEVICE(0x24F3, 0x0150, iwl8260_2ac_cfg)},
439 {IWL_PCI_DEVICE(0x24F3, 0x1150, iwl8260_2ac_cfg)},
432 {IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8260_2ac_cfg)}, 440 {IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8260_2ac_cfg)},
433 {IWL_PCI_DEVICE(0x24F4, 0x1130, iwl8260_2ac_cfg)},
434 {IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)}, 441 {IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)},
435 {IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)}, 442 {IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)},
436 {IWL_PCI_DEVICE(0x24F3, 0xC110, iwl8260_2ac_cfg)}, 443 {IWL_PCI_DEVICE(0x24F3, 0xC110, iwl8260_2ac_cfg)},
@@ -438,18 +445,28 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
438 {IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)}, 445 {IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)},
439 {IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)}, 446 {IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)},
440 {IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)}, 447 {IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)},
448 {IWL_PCI_DEVICE(0x24F3, 0x8110, iwl8260_2ac_cfg)},
441 {IWL_PCI_DEVICE(0x24F3, 0x9010, iwl8260_2ac_cfg)}, 449 {IWL_PCI_DEVICE(0x24F3, 0x9010, iwl8260_2ac_cfg)},
450 {IWL_PCI_DEVICE(0x24F3, 0x9110, iwl8260_2ac_cfg)},
442 {IWL_PCI_DEVICE(0x24F4, 0x8030, iwl8260_2ac_cfg)}, 451 {IWL_PCI_DEVICE(0x24F4, 0x8030, iwl8260_2ac_cfg)},
443 {IWL_PCI_DEVICE(0x24F4, 0x9030, iwl8260_2ac_cfg)}, 452 {IWL_PCI_DEVICE(0x24F4, 0x9030, iwl8260_2ac_cfg)},
453 {IWL_PCI_DEVICE(0x24F3, 0x8130, iwl8260_2ac_cfg)},
454 {IWL_PCI_DEVICE(0x24F3, 0x9130, iwl8260_2ac_cfg)},
455 {IWL_PCI_DEVICE(0x24F3, 0x8132, iwl8260_2ac_cfg)},
456 {IWL_PCI_DEVICE(0x24F3, 0x9132, iwl8260_2ac_cfg)},
444 {IWL_PCI_DEVICE(0x24F3, 0x8050, iwl8260_2ac_cfg)}, 457 {IWL_PCI_DEVICE(0x24F3, 0x8050, iwl8260_2ac_cfg)},
458 {IWL_PCI_DEVICE(0x24F3, 0x8150, iwl8260_2ac_cfg)},
445 {IWL_PCI_DEVICE(0x24F3, 0x9050, iwl8260_2ac_cfg)}, 459 {IWL_PCI_DEVICE(0x24F3, 0x9050, iwl8260_2ac_cfg)},
460 {IWL_PCI_DEVICE(0x24F3, 0x9150, iwl8260_2ac_cfg)},
446 {IWL_PCI_DEVICE(0x24F3, 0x0004, iwl8260_2n_cfg)}, 461 {IWL_PCI_DEVICE(0x24F3, 0x0004, iwl8260_2n_cfg)},
462 {IWL_PCI_DEVICE(0x24F3, 0x0044, iwl8260_2n_cfg)},
447 {IWL_PCI_DEVICE(0x24F5, 0x0010, iwl4165_2ac_cfg)}, 463 {IWL_PCI_DEVICE(0x24F5, 0x0010, iwl4165_2ac_cfg)},
448 {IWL_PCI_DEVICE(0x24F6, 0x0030, iwl4165_2ac_cfg)}, 464 {IWL_PCI_DEVICE(0x24F6, 0x0030, iwl4165_2ac_cfg)},
449 {IWL_PCI_DEVICE(0x24F3, 0x0810, iwl8260_2ac_cfg)}, 465 {IWL_PCI_DEVICE(0x24F3, 0x0810, iwl8260_2ac_cfg)},
450 {IWL_PCI_DEVICE(0x24F3, 0x0910, iwl8260_2ac_cfg)}, 466 {IWL_PCI_DEVICE(0x24F3, 0x0910, iwl8260_2ac_cfg)},
451 {IWL_PCI_DEVICE(0x24F3, 0x0850, iwl8260_2ac_cfg)}, 467 {IWL_PCI_DEVICE(0x24F3, 0x0850, iwl8260_2ac_cfg)},
452 {IWL_PCI_DEVICE(0x24F3, 0x0950, iwl8260_2ac_cfg)}, 468 {IWL_PCI_DEVICE(0x24F3, 0x0950, iwl8260_2ac_cfg)},
469 {IWL_PCI_DEVICE(0x24F3, 0x0930, iwl8260_2ac_cfg)},
453#endif /* CONFIG_IWLMVM */ 470#endif /* CONFIG_IWLMVM */
454 471
455 {0} 472 {0}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
index 6e9418ed90c2..bbb789f8990b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
@@ -2272,7 +2272,7 @@ void rtl8821ae_enable_interrupt(struct ieee80211_hw *hw)
2272 struct rtl_priv *rtlpriv = rtl_priv(hw); 2272 struct rtl_priv *rtlpriv = rtl_priv(hw);
2273 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 2273 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
2274 2274
2275 if (!rtlpci->int_clear) 2275 if (rtlpci->int_clear)
2276 rtl8821ae_clear_interrupt(hw);/*clear it here first*/ 2276 rtl8821ae_clear_interrupt(hw);/*clear it here first*/
2277 2277
2278 rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF); 2278 rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
index 8ee141a55bc5..142bdff4ed60 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
@@ -448,7 +448,7 @@ MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
448MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 1)\n"); 448MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 1)\n");
449MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); 449MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
450MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n"); 450MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n");
451MODULE_PARM_DESC(int_clear, "Set to 1 to disable interrupt clear before set (default 0)\n"); 451MODULE_PARM_DESC(int_clear, "Set to 0 to disable interrupt clear before set (default 1)\n");
452 452
453static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume); 453static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
454 454
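
The hw.c and sw.c hunks are one logical fix: the code now matches the corrected help text, so int_clear defaults to true and means "clear pending interrupts before writing the mask". A hedged sketch of the intended gate, with printf standing in for the register writes:

#include <stdbool.h>
#include <stdio.h>

static void enable_interrupt(bool int_clear)
{
	if (int_clear)
		puts("clear pending interrupts");  /* rtl8821ae_clear_interrupt() */
	puts("write interrupt mask");              /* REG_HIMR writes */
}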
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index e481f3710bd3..1049c34e7d43 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -258,18 +258,18 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
258 struct netrx_pending_operations *npo) 258 struct netrx_pending_operations *npo)
259{ 259{
260 struct xenvif_rx_meta *meta; 260 struct xenvif_rx_meta *meta;
261 struct xen_netif_rx_request *req; 261 struct xen_netif_rx_request req;
262 262
263 req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++); 263 RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
264 264
265 meta = npo->meta + npo->meta_prod++; 265 meta = npo->meta + npo->meta_prod++;
266 meta->gso_type = XEN_NETIF_GSO_TYPE_NONE; 266 meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
267 meta->gso_size = 0; 267 meta->gso_size = 0;
268 meta->size = 0; 268 meta->size = 0;
269 meta->id = req->id; 269 meta->id = req.id;
270 270
271 npo->copy_off = 0; 271 npo->copy_off = 0;
272 npo->copy_gref = req->gref; 272 npo->copy_gref = req.gref;
273 273
274 return meta; 274 return meta;
275} 275}
@@ -424,7 +424,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
424 struct xenvif *vif = netdev_priv(skb->dev); 424 struct xenvif *vif = netdev_priv(skb->dev);
425 int nr_frags = skb_shinfo(skb)->nr_frags; 425 int nr_frags = skb_shinfo(skb)->nr_frags;
426 int i; 426 int i;
427 struct xen_netif_rx_request *req; 427 struct xen_netif_rx_request req;
428 struct xenvif_rx_meta *meta; 428 struct xenvif_rx_meta *meta;
429 unsigned char *data; 429 unsigned char *data;
430 int head = 1; 430 int head = 1;
@@ -443,15 +443,15 @@ static int xenvif_gop_skb(struct sk_buff *skb,
443 443
444 /* Set up a GSO prefix descriptor, if necessary */ 444 /* Set up a GSO prefix descriptor, if necessary */
445 if ((1 << gso_type) & vif->gso_prefix_mask) { 445 if ((1 << gso_type) & vif->gso_prefix_mask) {
446 req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++); 446 RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
447 meta = npo->meta + npo->meta_prod++; 447 meta = npo->meta + npo->meta_prod++;
448 meta->gso_type = gso_type; 448 meta->gso_type = gso_type;
449 meta->gso_size = skb_shinfo(skb)->gso_size; 449 meta->gso_size = skb_shinfo(skb)->gso_size;
450 meta->size = 0; 450 meta->size = 0;
451 meta->id = req->id; 451 meta->id = req.id;
452 } 452 }
453 453
454 req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++); 454 RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
455 meta = npo->meta + npo->meta_prod++; 455 meta = npo->meta + npo->meta_prod++;
456 456
457 if ((1 << gso_type) & vif->gso_mask) { 457 if ((1 << gso_type) & vif->gso_mask) {
@@ -463,9 +463,9 @@ static int xenvif_gop_skb(struct sk_buff *skb,
463 } 463 }
464 464
465 meta->size = 0; 465 meta->size = 0;
466 meta->id = req->id; 466 meta->id = req.id;
467 npo->copy_off = 0; 467 npo->copy_off = 0;
468 npo->copy_gref = req->gref; 468 npo->copy_gref = req.gref;
469 469
470 data = skb->data; 470 data = skb->data;
471 while (data < skb_tail_pointer(skb)) { 471 while (data < skb_tail_pointer(skb)) {
@@ -679,9 +679,7 @@ static void tx_add_credit(struct xenvif_queue *queue)
679 * Allow a burst big enough to transmit a jumbo packet of up to 128kB. 679 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
680 * Otherwise the interface can seize up due to insufficient credit. 680 * Otherwise the interface can seize up due to insufficient credit.
681 */ 681 */
682 max_burst = RING_GET_REQUEST(&queue->tx, queue->tx.req_cons)->size; 682 max_burst = max(131072UL, queue->credit_bytes);
683 max_burst = min(max_burst, 131072UL);
684 max_burst = max(max_burst, queue->credit_bytes);
685 683
686 /* Take care that adding a new chunk of credit doesn't wrap to zero. */ 684 /* Take care that adding a new chunk of credit doesn't wrap to zero. */
687 max_credit = queue->remaining_credit + queue->credit_bytes; 685 max_credit = queue->remaining_credit + queue->credit_bytes;
@@ -711,7 +709,7 @@ static void xenvif_tx_err(struct xenvif_queue *queue,
711 spin_unlock_irqrestore(&queue->response_lock, flags); 709 spin_unlock_irqrestore(&queue->response_lock, flags);
712 if (cons == end) 710 if (cons == end)
713 break; 711 break;
714 txp = RING_GET_REQUEST(&queue->tx, cons++); 712 RING_COPY_REQUEST(&queue->tx, cons++, txp);
715 } while (1); 713 } while (1);
716 queue->tx.req_cons = cons; 714 queue->tx.req_cons = cons;
717} 715}
@@ -778,8 +776,7 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
778 if (drop_err) 776 if (drop_err)
779 txp = &dropped_tx; 777 txp = &dropped_tx;
780 778
781 memcpy(txp, RING_GET_REQUEST(&queue->tx, cons + slots), 779 RING_COPY_REQUEST(&queue->tx, cons + slots, txp);
782 sizeof(*txp));
783 780
784 /* If the guest submitted a frame >= 64 KiB then 781 /* If the guest submitted a frame >= 64 KiB then
785 * first->size overflowed and following slots will 782 * first->size overflowed and following slots will
@@ -1112,8 +1109,7 @@ static int xenvif_get_extras(struct xenvif_queue *queue,
1112 return -EBADR; 1109 return -EBADR;
1113 } 1110 }
1114 1111
1115 memcpy(&extra, RING_GET_REQUEST(&queue->tx, cons), 1112 RING_COPY_REQUEST(&queue->tx, cons, &extra);
1116 sizeof(extra));
1117 if (unlikely(!extra.type || 1113 if (unlikely(!extra.type ||
1118 extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) { 1114 extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
1119 queue->tx.req_cons = ++cons; 1115 queue->tx.req_cons = ++cons;
@@ -1322,7 +1318,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
1322 1318
1323 idx = queue->tx.req_cons; 1319 idx = queue->tx.req_cons;
1324 rmb(); /* Ensure that we see the request before we copy it. */ 1320 rmb(); /* Ensure that we see the request before we copy it. */
1325 memcpy(&txreq, RING_GET_REQUEST(&queue->tx, idx), sizeof(txreq)); 1321 RING_COPY_REQUEST(&queue->tx, idx, &txreq);
1326 1322
1327 /* Credit-based scheduling. */ 1323 /* Credit-based scheduling. */
1328 if (txreq.size > queue->remaining_credit && 1324 if (txreq.size > queue->remaining_credit &&
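
The rationale for RING_COPY_REQUEST throughout this file: snapshot the descriptor out of the guest-shared ring once and work only on the local copy, so a malicious frontend cannot change fields between two reads of the same slot (a classic double-fetch). A simplified model, with ring geometry reduced to a power-of-two array:

#include <stdint.h>
#include <string.h>

struct rx_req { uint16_t id; uint32_t gref; };

static struct rx_req copy_request(const volatile struct rx_req *ring,
				  unsigned int cons, unsigned int size)
{
	struct rx_req local;

	/* one copy out of shared memory; all later reads hit 'local' */
	memcpy(&local, (const void *)&ring[cons & (size - 1)], sizeof(local));
	return local;
}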
diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile
index 219dc206fa5f..a5fe23952586 100644
--- a/drivers/nvme/host/Makefile
+++ b/drivers/nvme/host/Makefile
@@ -1,4 +1,5 @@
1 1
2obj-$(CONFIG_BLK_DEV_NVME) += nvme.o 2obj-$(CONFIG_BLK_DEV_NVME) += nvme.o
3 3
4nvme-y += pci.o scsi.o lightnvm.o 4lightnvm-$(CONFIG_NVM) := lightnvm.o
5nvme-y += pci.o scsi.o $(lightnvm-y)
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index e0b7b95813bc..15f2acb4d5cd 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -22,8 +22,6 @@
22 22
23#include "nvme.h" 23#include "nvme.h"
24 24
25#ifdef CONFIG_NVM
26
27#include <linux/nvme.h> 25#include <linux/nvme.h>
28#include <linux/bitops.h> 26#include <linux/bitops.h>
29#include <linux/lightnvm.h> 27#include <linux/lightnvm.h>
@@ -93,7 +91,7 @@ struct nvme_nvm_l2ptbl {
93 __le16 cdw14[6]; 91 __le16 cdw14[6];
94}; 92};
95 93
96struct nvme_nvm_bbtbl { 94struct nvme_nvm_getbbtbl {
97 __u8 opcode; 95 __u8 opcode;
98 __u8 flags; 96 __u8 flags;
99 __u16 command_id; 97 __u16 command_id;
@@ -101,10 +99,23 @@ struct nvme_nvm_bbtbl {
101 __u64 rsvd[2]; 99 __u64 rsvd[2];
102 __le64 prp1; 100 __le64 prp1;
103 __le64 prp2; 101 __le64 prp2;
104 __le32 prp1_len; 102 __le64 spba;
105 __le32 prp2_len; 103 __u32 rsvd4[4];
106 __le32 lbb; 104};
107 __u32 rsvd11[3]; 105
106struct nvme_nvm_setbbtbl {
107 __u8 opcode;
108 __u8 flags;
109 __u16 command_id;
110 __le32 nsid;
111 __le64 rsvd[2];
112 __le64 prp1;
113 __le64 prp2;
114 __le64 spba;
115 __le16 nlb;
116 __u8 value;
117 __u8 rsvd3;
118 __u32 rsvd4[3];
108}; 119};
109 120
110struct nvme_nvm_erase_blk { 121struct nvme_nvm_erase_blk {
@@ -129,8 +140,8 @@ struct nvme_nvm_command {
129 struct nvme_nvm_hb_rw hb_rw; 140 struct nvme_nvm_hb_rw hb_rw;
130 struct nvme_nvm_ph_rw ph_rw; 141 struct nvme_nvm_ph_rw ph_rw;
131 struct nvme_nvm_l2ptbl l2p; 142 struct nvme_nvm_l2ptbl l2p;
132 struct nvme_nvm_bbtbl get_bb; 143 struct nvme_nvm_getbbtbl get_bb;
133 struct nvme_nvm_bbtbl set_bb; 144 struct nvme_nvm_setbbtbl set_bb;
134 struct nvme_nvm_erase_blk erase; 145 struct nvme_nvm_erase_blk erase;
135 }; 146 };
136}; 147};
@@ -142,11 +153,13 @@ struct nvme_nvm_id_group {
142 __u8 num_ch; 153 __u8 num_ch;
143 __u8 num_lun; 154 __u8 num_lun;
144 __u8 num_pln; 155 __u8 num_pln;
156 __u8 rsvd1;
145 __le16 num_blk; 157 __le16 num_blk;
146 __le16 num_pg; 158 __le16 num_pg;
147 __le16 fpg_sz; 159 __le16 fpg_sz;
148 __le16 csecs; 160 __le16 csecs;
149 __le16 sos; 161 __le16 sos;
162 __le16 rsvd2;
150 __le32 trdt; 163 __le32 trdt;
151 __le32 trdm; 164 __le32 trdm;
152 __le32 tprt; 165 __le32 tprt;
@@ -154,8 +167,9 @@ struct nvme_nvm_id_group {
154 __le32 tbet; 167 __le32 tbet;
155 __le32 tbem; 168 __le32 tbem;
156 __le32 mpos; 169 __le32 mpos;
170 __le32 mccap;
157 __le16 cpar; 171 __le16 cpar;
158 __u8 reserved[913]; 172 __u8 reserved[906];
159} __packed; 173} __packed;
160 174
161struct nvme_nvm_addr_format { 175struct nvme_nvm_addr_format {
@@ -178,15 +192,28 @@ struct nvme_nvm_id {
178 __u8 ver_id; 192 __u8 ver_id;
179 __u8 vmnt; 193 __u8 vmnt;
180 __u8 cgrps; 194 __u8 cgrps;
181 __u8 res[5]; 195 __u8 res;
182 __le32 cap; 196 __le32 cap;
183 __le32 dom; 197 __le32 dom;
184 struct nvme_nvm_addr_format ppaf; 198 struct nvme_nvm_addr_format ppaf;
185 __u8 ppat; 199 __u8 resv[228];
186 __u8 resv[223];
187 struct nvme_nvm_id_group groups[4]; 200 struct nvme_nvm_id_group groups[4];
188} __packed; 201} __packed;
189 202
203struct nvme_nvm_bb_tbl {
204 __u8 tblid[4];
205 __le16 verid;
206 __le16 revid;
207 __le32 rvsd1;
208 __le32 tblks;
209 __le32 tfact;
210 __le32 tgrown;
211 __le32 tdresv;
212 __le32 thresv;
213 __le32 rsvd2[8];
214 __u8 blk[0];
215};
216
190/* 217/*
191 * Check we didn't inadvertently grow the command struct 218 * Check we didn't inadvertently grow the command struct
192 */ 219 */
@@ -195,12 +222,14 @@ static inline void _nvme_nvm_check_size(void)
195 BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64); 222 BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64);
196 BUILD_BUG_ON(sizeof(struct nvme_nvm_hb_rw) != 64); 223 BUILD_BUG_ON(sizeof(struct nvme_nvm_hb_rw) != 64);
197 BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64); 224 BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64);
198 BUILD_BUG_ON(sizeof(struct nvme_nvm_bbtbl) != 64); 225 BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64);
226 BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64);
199 BUILD_BUG_ON(sizeof(struct nvme_nvm_l2ptbl) != 64); 227 BUILD_BUG_ON(sizeof(struct nvme_nvm_l2ptbl) != 64);
200 BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64); 228 BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
201 BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960); 229 BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960);
202 BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 128); 230 BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 128);
203 BUILD_BUG_ON(sizeof(struct nvme_nvm_id) != 4096); 231 BUILD_BUG_ON(sizeof(struct nvme_nvm_id) != 4096);
232 BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 512);
204} 233}
205 234
206static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id) 235static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
@@ -234,6 +263,7 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
234 dst->tbet = le32_to_cpu(src->tbet); 263 dst->tbet = le32_to_cpu(src->tbet);
235 dst->tbem = le32_to_cpu(src->tbem); 264 dst->tbem = le32_to_cpu(src->tbem);
236 dst->mpos = le32_to_cpu(src->mpos); 265 dst->mpos = le32_to_cpu(src->mpos);
266 dst->mccap = le32_to_cpu(src->mccap);
237 267
238 dst->cpar = le16_to_cpu(src->cpar); 268 dst->cpar = le16_to_cpu(src->cpar);
239 } 269 }
@@ -241,9 +271,10 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
241 return 0; 271 return 0;
242} 272}
243 273
244static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id) 274static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id)
245{ 275{
246 struct nvme_ns *ns = q->queuedata; 276 struct nvme_ns *ns = nvmdev->q->queuedata;
277 struct nvme_dev *dev = ns->dev;
247 struct nvme_nvm_id *nvme_nvm_id; 278 struct nvme_nvm_id *nvme_nvm_id;
248 struct nvme_nvm_command c = {}; 279 struct nvme_nvm_command c = {};
249 int ret; 280 int ret;
@@ -256,8 +287,8 @@ static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id)
256 if (!nvme_nvm_id) 287 if (!nvme_nvm_id)
257 return -ENOMEM; 288 return -ENOMEM;
258 289
259 ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, nvme_nvm_id, 290 ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c,
260 sizeof(struct nvme_nvm_id)); 291 nvme_nvm_id, sizeof(struct nvme_nvm_id));
261 if (ret) { 292 if (ret) {
262 ret = -EIO; 293 ret = -EIO;
263 goto out; 294 goto out;
@@ -268,6 +299,8 @@ static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id)
268 nvm_id->cgrps = nvme_nvm_id->cgrps; 299 nvm_id->cgrps = nvme_nvm_id->cgrps;
269 nvm_id->cap = le32_to_cpu(nvme_nvm_id->cap); 300 nvm_id->cap = le32_to_cpu(nvme_nvm_id->cap);
270 nvm_id->dom = le32_to_cpu(nvme_nvm_id->dom); 301 nvm_id->dom = le32_to_cpu(nvme_nvm_id->dom);
302 memcpy(&nvm_id->ppaf, &nvme_nvm_id->ppaf,
303 sizeof(struct nvme_nvm_addr_format));
271 304
272 ret = init_grps(nvm_id, nvme_nvm_id); 305 ret = init_grps(nvm_id, nvme_nvm_id);
273out: 306out:
@@ -275,13 +308,13 @@ out:
275 return ret; 308 return ret;
276} 309}
277 310
278static int nvme_nvm_get_l2p_tbl(struct request_queue *q, u64 slba, u32 nlb, 311static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb,
279 nvm_l2p_update_fn *update_l2p, void *priv) 312 nvm_l2p_update_fn *update_l2p, void *priv)
280{ 313{
281 struct nvme_ns *ns = q->queuedata; 314 struct nvme_ns *ns = nvmdev->q->queuedata;
282 struct nvme_dev *dev = ns->dev; 315 struct nvme_dev *dev = ns->dev;
283 struct nvme_nvm_command c = {}; 316 struct nvme_nvm_command c = {};
284 u32 len = queue_max_hw_sectors(q) << 9; 317 u32 len = queue_max_hw_sectors(dev->admin_q) << 9;
285 u32 nlb_pr_rq = len / sizeof(u64); 318 u32 nlb_pr_rq = len / sizeof(u64);
286 u64 cmd_slba = slba; 319 u64 cmd_slba = slba;
287 void *entries; 320 void *entries;
@@ -299,8 +332,8 @@ static int nvme_nvm_get_l2p_tbl(struct request_queue *q, u64 slba, u32 nlb,
299 c.l2p.slba = cpu_to_le64(cmd_slba); 332 c.l2p.slba = cpu_to_le64(cmd_slba);
300 c.l2p.nlb = cpu_to_le32(cmd_nlb); 333 c.l2p.nlb = cpu_to_le32(cmd_nlb);
301 334
302 ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, 335 ret = nvme_submit_sync_cmd(dev->admin_q,
303 entries, len); 336 (struct nvme_command *)&c, entries, len);
304 if (ret) { 337 if (ret) {
305 dev_err(dev->dev, "L2P table transfer failed (%d)\n", 338 dev_err(dev->dev, "L2P table transfer failed (%d)\n",
306 ret); 339 ret);
@@ -322,43 +355,84 @@ out:
322 return ret; 355 return ret;
323} 356}
324 357
325static int nvme_nvm_get_bb_tbl(struct request_queue *q, int lunid, 358static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
326 unsigned int nr_blocks, 359 int nr_blocks, nvm_bb_update_fn *update_bbtbl,
327 nvm_bb_update_fn *update_bbtbl, void *priv) 360 void *priv)
328{ 361{
362 struct request_queue *q = nvmdev->q;
329 struct nvme_ns *ns = q->queuedata; 363 struct nvme_ns *ns = q->queuedata;
330 struct nvme_dev *dev = ns->dev; 364 struct nvme_dev *dev = ns->dev;
331 struct nvme_nvm_command c = {}; 365 struct nvme_nvm_command c = {};
332 void *bb_bitmap; 366 struct nvme_nvm_bb_tbl *bb_tbl;
333 u16 bb_bitmap_size; 367 int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blocks;
334 int ret = 0; 368 int ret = 0;
335 369
336 c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl; 370 c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
337 c.get_bb.nsid = cpu_to_le32(ns->ns_id); 371 c.get_bb.nsid = cpu_to_le32(ns->ns_id);
338 c.get_bb.lbb = cpu_to_le32(lunid); 372 c.get_bb.spba = cpu_to_le64(ppa.ppa);
339 bb_bitmap_size = ((nr_blocks >> 15) + 1) * PAGE_SIZE;
340 bb_bitmap = kmalloc(bb_bitmap_size, GFP_KERNEL);
341 if (!bb_bitmap)
342 return -ENOMEM;
343 373
344 bitmap_zero(bb_bitmap, nr_blocks); 374 bb_tbl = kzalloc(tblsz, GFP_KERNEL);
375 if (!bb_tbl)
376 return -ENOMEM;
345 377
346 ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, bb_bitmap, 378 ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c,
347 bb_bitmap_size); 379 bb_tbl, tblsz);
348 if (ret) { 380 if (ret) {
349 dev_err(dev->dev, "get bad block table failed (%d)\n", ret); 381 dev_err(dev->dev, "get bad block table failed (%d)\n", ret);
350 ret = -EIO; 382 ret = -EIO;
351 goto out; 383 goto out;
352 } 384 }
353 385
354 ret = update_bbtbl(lunid, bb_bitmap, nr_blocks, priv); 386 if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' ||
387 bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') {
388 dev_err(dev->dev, "bbt format mismatch\n");
389 ret = -EINVAL;
390 goto out;
391 }
392
393 if (le16_to_cpu(bb_tbl->verid) != 1) {
394 ret = -EINVAL;
395 dev_err(dev->dev, "bbt version not supported\n");
396 goto out;
397 }
398
399 if (le32_to_cpu(bb_tbl->tblks) != nr_blocks) {
400 ret = -EINVAL;
 401 dev_err(dev->dev, "bbt unexpected blocks returned (%u!=%u)",
402 le32_to_cpu(bb_tbl->tblks), nr_blocks);
403 goto out;
404 }
405
406 ppa = dev_to_generic_addr(nvmdev, ppa);
407 ret = update_bbtbl(ppa, nr_blocks, bb_tbl->blk, priv);
355 if (ret) { 408 if (ret) {
356 ret = -EINTR; 409 ret = -EINTR;
357 goto out; 410 goto out;
358 } 411 }
359 412
360out: 413out:
361 kfree(bb_bitmap); 414 kfree(bb_tbl);
415 return ret;
416}
417
418static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct nvm_rq *rqd,
419 int type)
420{
421 struct nvme_ns *ns = nvmdev->q->queuedata;
422 struct nvme_dev *dev = ns->dev;
423 struct nvme_nvm_command c = {};
424 int ret = 0;
425
426 c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl;
427 c.set_bb.nsid = cpu_to_le32(ns->ns_id);
428 c.set_bb.spba = cpu_to_le64(rqd->ppa_addr.ppa);
429 c.set_bb.nlb = cpu_to_le16(rqd->nr_pages - 1);
430 c.set_bb.value = type;
431
432 ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c,
433 NULL, 0);
434 if (ret)
435 dev_err(dev->dev, "set bad block table failed (%d)\n", ret);
362 return ret; 436 return ret;
363} 437}
364 438
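
The rewritten get_bb_tbl no longer trusts the device blindly: the returned table is checked for its "BBLT" magic, a supported version, and a block count matching the request. A generic sketch of that validation, assuming host endianness (the driver uses le16/le32 conversions):

#include <stdint.h>

struct tbl_hdr { uint8_t id[4]; uint16_t verid; uint32_t tblks; };

static int validate_tbl(const struct tbl_hdr *h, uint32_t expect_blks)
{
	if (h->id[0] != 'B' || h->id[1] != 'B' ||
	    h->id[2] != 'L' || h->id[3] != 'T')
		return -1;              /* format mismatch */
	if (h->verid != 1)
		return -1;              /* unsupported version */
	if (h->tblks != expect_blks)
		return -1;              /* unexpected block count */
	return 0;
}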
@@ -381,7 +455,7 @@ static void nvme_nvm_end_io(struct request *rq, int error)
381 struct nvm_rq *rqd = rq->end_io_data; 455 struct nvm_rq *rqd = rq->end_io_data;
382 struct nvm_dev *dev = rqd->dev; 456 struct nvm_dev *dev = rqd->dev;
383 457
384 if (dev->mt->end_io(rqd, error)) 458 if (dev->mt && dev->mt->end_io(rqd, error))
385 pr_err("nvme: err status: %x result: %lx\n", 459 pr_err("nvme: err status: %x result: %lx\n",
386 rq->errors, (unsigned long)rq->special); 460 rq->errors, (unsigned long)rq->special);
387 461
@@ -389,8 +463,9 @@ static void nvme_nvm_end_io(struct request *rq, int error)
389 blk_mq_free_request(rq); 463 blk_mq_free_request(rq);
390} 464}
391 465
392static int nvme_nvm_submit_io(struct request_queue *q, struct nvm_rq *rqd) 466static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
393{ 467{
468 struct request_queue *q = dev->q;
394 struct nvme_ns *ns = q->queuedata; 469 struct nvme_ns *ns = q->queuedata;
395 struct request *rq; 470 struct request *rq;
396 struct bio *bio = rqd->bio; 471 struct bio *bio = rqd->bio;
@@ -428,8 +503,9 @@ static int nvme_nvm_submit_io(struct request_queue *q, struct nvm_rq *rqd)
428 return 0; 503 return 0;
429} 504}
430 505
431static int nvme_nvm_erase_block(struct request_queue *q, struct nvm_rq *rqd) 506static int nvme_nvm_erase_block(struct nvm_dev *dev, struct nvm_rq *rqd)
432{ 507{
508 struct request_queue *q = dev->q;
433 struct nvme_ns *ns = q->queuedata; 509 struct nvme_ns *ns = q->queuedata;
434 struct nvme_nvm_command c = {}; 510 struct nvme_nvm_command c = {};
435 511
@@ -441,9 +517,9 @@ static int nvme_nvm_erase_block(struct request_queue *q, struct nvm_rq *rqd)
441 return nvme_submit_sync_cmd(q, (struct nvme_command *)&c, NULL, 0); 517 return nvme_submit_sync_cmd(q, (struct nvme_command *)&c, NULL, 0);
442} 518}
443 519
444static void *nvme_nvm_create_dma_pool(struct request_queue *q, char *name) 520static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name)
445{ 521{
446 struct nvme_ns *ns = q->queuedata; 522 struct nvme_ns *ns = nvmdev->q->queuedata;
447 struct nvme_dev *dev = ns->dev; 523 struct nvme_dev *dev = ns->dev;
448 524
449 return dma_pool_create(name, dev->dev, PAGE_SIZE, PAGE_SIZE, 0); 525 return dma_pool_create(name, dev->dev, PAGE_SIZE, PAGE_SIZE, 0);
@@ -456,7 +532,7 @@ static void nvme_nvm_destroy_dma_pool(void *pool)
456 dma_pool_destroy(dma_pool); 532 dma_pool_destroy(dma_pool);
457} 533}
458 534
459static void *nvme_nvm_dev_dma_alloc(struct request_queue *q, void *pool, 535static void *nvme_nvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
460 gfp_t mem_flags, dma_addr_t *dma_handler) 536 gfp_t mem_flags, dma_addr_t *dma_handler)
461{ 537{
462 return dma_pool_alloc(pool, mem_flags, dma_handler); 538 return dma_pool_alloc(pool, mem_flags, dma_handler);
@@ -474,6 +550,7 @@ static struct nvm_dev_ops nvme_nvm_dev_ops = {
474 .get_l2p_tbl = nvme_nvm_get_l2p_tbl, 550 .get_l2p_tbl = nvme_nvm_get_l2p_tbl,
475 551
476 .get_bb_tbl = nvme_nvm_get_bb_tbl, 552 .get_bb_tbl = nvme_nvm_get_bb_tbl,
553 .set_bb_tbl = nvme_nvm_set_bb_tbl,
477 554
478 .submit_io = nvme_nvm_submit_io, 555 .submit_io = nvme_nvm_submit_io,
479 .erase_block = nvme_nvm_erase_block, 556 .erase_block = nvme_nvm_erase_block,
@@ -496,31 +573,27 @@ void nvme_nvm_unregister(struct request_queue *q, char *disk_name)
496 nvm_unregister(disk_name); 573 nvm_unregister(disk_name);
497} 574}
498 575
576/* move to shared place when used in multiple places. */
577#define PCI_VENDOR_ID_CNEX 0x1d1d
578#define PCI_DEVICE_ID_CNEX_WL 0x2807
579#define PCI_DEVICE_ID_CNEX_QEMU 0x1f1f
580
499int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id) 581int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
500{ 582{
501 struct nvme_dev *dev = ns->dev; 583 struct nvme_dev *dev = ns->dev;
502 struct pci_dev *pdev = to_pci_dev(dev->dev); 584 struct pci_dev *pdev = to_pci_dev(dev->dev);
503 585
504 /* QEMU NVMe simulator - PCI ID + Vendor specific bit */ 586 /* QEMU NVMe simulator - PCI ID + Vendor specific bit */
505 if (pdev->vendor == PCI_VENDOR_ID_INTEL && pdev->device == 0x5845 && 587 if (pdev->vendor == PCI_VENDOR_ID_CNEX &&
588 pdev->device == PCI_DEVICE_ID_CNEX_QEMU &&
506 id->vs[0] == 0x1) 589 id->vs[0] == 0x1)
507 return 1; 590 return 1;
508 591
509 /* CNEX Labs - PCI ID + Vendor specific bit */ 592 /* CNEX Labs - PCI ID + Vendor specific bit */
510 if (pdev->vendor == 0x1d1d && pdev->device == 0x2807 && 593 if (pdev->vendor == PCI_VENDOR_ID_CNEX &&
594 pdev->device == PCI_DEVICE_ID_CNEX_WL &&
511 id->vs[0] == 0x1) 595 id->vs[0] == 0x1)
512 return 1; 596 return 1;
513 597
514 return 0; 598 return 0;
515} 599}
516#else
517int nvme_nvm_register(struct request_queue *q, char *disk_name)
518{
519 return 0;
520}
521void nvme_nvm_unregister(struct request_queue *q, char *disk_name) {};
522int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
523{
524 return 0;
525}
526#endif /* CONFIG_NVM */
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index fdb4e5bad9ac..044253dca30a 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -136,8 +136,22 @@ int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr);
136int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg); 136int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg);
137int nvme_sg_get_version_num(int __user *ip); 137int nvme_sg_get_version_num(int __user *ip);
138 138
139#ifdef CONFIG_NVM
139int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id); 140int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id);
140int nvme_nvm_register(struct request_queue *q, char *disk_name); 141int nvme_nvm_register(struct request_queue *q, char *disk_name);
141void nvme_nvm_unregister(struct request_queue *q, char *disk_name); 142void nvme_nvm_unregister(struct request_queue *q, char *disk_name);
143#else
144static inline int nvme_nvm_register(struct request_queue *q, char *disk_name)
145{
146 return 0;
147}
148
149static inline void nvme_nvm_unregister(struct request_queue *q, char *disk_name) {};
150
151static inline int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
152{
153 return 0;
154}
155#endif /* CONFIG_NVM */
142 156
143#endif /* _NVME_H */ 157#endif /* _NVME_H */
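
The nvme.h change follows the common kernel stub pattern sketched here: real prototypes when the option is enabled, empty static inline stubs otherwise, so callers never carry their own #ifdefs. FEATURE_X and the names below are illustrative, not the nvme API:

#ifdef FEATURE_X
int feature_register(void *q, char *disk_name);
void feature_unregister(void *q, char *disk_name);
#else
static inline int feature_register(void *q, char *disk_name)
{
	return 0;               /* feature absent: succeed, do nothing */
}
static inline void feature_unregister(void *q, char *disk_name) {}
#endif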
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 8187df204695..0c67b57be83c 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -896,19 +896,28 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
896 goto retry_cmd; 896 goto retry_cmd;
897 } 897 }
898 if (blk_integrity_rq(req)) { 898 if (blk_integrity_rq(req)) {
899 if (blk_rq_count_integrity_sg(req->q, req->bio) != 1) 899 if (blk_rq_count_integrity_sg(req->q, req->bio) != 1) {
900 dma_unmap_sg(dev->dev, iod->sg, iod->nents,
901 dma_dir);
900 goto error_cmd; 902 goto error_cmd;
903 }
901 904
902 sg_init_table(iod->meta_sg, 1); 905 sg_init_table(iod->meta_sg, 1);
903 if (blk_rq_map_integrity_sg( 906 if (blk_rq_map_integrity_sg(
904 req->q, req->bio, iod->meta_sg) != 1) 907 req->q, req->bio, iod->meta_sg) != 1) {
908 dma_unmap_sg(dev->dev, iod->sg, iod->nents,
909 dma_dir);
905 goto error_cmd; 910 goto error_cmd;
911 }
906 912
907 if (rq_data_dir(req)) 913 if (rq_data_dir(req))
908 nvme_dif_remap(req, nvme_dif_prep); 914 nvme_dif_remap(req, nvme_dif_prep);
909 915
910 if (!dma_map_sg(nvmeq->q_dmadev, iod->meta_sg, 1, dma_dir)) 916 if (!dma_map_sg(nvmeq->q_dmadev, iod->meta_sg, 1, dma_dir)) {
917 dma_unmap_sg(dev->dev, iod->sg, iod->nents,
918 dma_dir);
911 goto error_cmd; 919 goto error_cmd;
920 }
912 } 921 }
913 } 922 }
914 923
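
The shape of the nvme_queue_rq fix above: once dma_map_sg() has succeeded, every early-exit path must undo the mapping or the entry leaks. The helpers below are stand-ins for the real block/DMA calls, not their signatures:

#include <stdbool.h>

static bool dma_map(void)   { return true; }
static void dma_unmap(void) { }

static int map_integrity(bool count_ok, bool integrity_map_ok)
{
	if (!dma_map())
		return -1;
	if (!count_ok || !integrity_map_ok) {
		dma_unmap();            /* the previously missing cleanup */
		return -1;
	}
	return 0;
}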
@@ -968,7 +977,8 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
968 if (head == nvmeq->cq_head && phase == nvmeq->cq_phase) 977 if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
969 return; 978 return;
970 979
971 writel(head, nvmeq->q_db + nvmeq->dev->db_stride); 980 if (likely(nvmeq->cq_vector >= 0))
981 writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
972 nvmeq->cq_head = head; 982 nvmeq->cq_head = head;
973 nvmeq->cq_phase = phase; 983 nvmeq->cq_phase = phase;
974 984
@@ -1727,9 +1737,13 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
1727 u32 aqa; 1737 u32 aqa;
1728 u64 cap = lo_hi_readq(&dev->bar->cap); 1738 u64 cap = lo_hi_readq(&dev->bar->cap);
1729 struct nvme_queue *nvmeq; 1739 struct nvme_queue *nvmeq;
1730 unsigned page_shift = PAGE_SHIFT; 1740 /*
1741 * default to a 4K page size, with the intention to update this
 1742 * path in the future to accommodate architectures with differing
1743 * kernel and IO page sizes.
1744 */
1745 unsigned page_shift = 12;
1731 unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12; 1746 unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12;
1732 unsigned dev_page_max = NVME_CAP_MPSMAX(cap) + 12;
1733 1747
1734 if (page_shift < dev_page_min) { 1748 if (page_shift < dev_page_min) {
1735 dev_err(dev->dev, 1749 dev_err(dev->dev,
@@ -1738,13 +1752,6 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
1738 1 << page_shift); 1752 1 << page_shift);
1739 return -ENODEV; 1753 return -ENODEV;
1740 } 1754 }
1741 if (page_shift > dev_page_max) {
1742 dev_info(dev->dev,
1743 "Device maximum page size (%u) smaller than "
1744 "host (%u); enabling work-around\n",
1745 1 << dev_page_max, 1 << page_shift);
1746 page_shift = dev_page_max;
1747 }
1748 1755
1749 dev->subsystem = readl(&dev->bar->vs) >= NVME_VS(1, 1) ? 1756 dev->subsystem = readl(&dev->bar->vs) >= NVME_VS(1, 1) ?
1750 NVME_CAP_NSSRC(cap) : 0; 1757 NVME_CAP_NSSRC(cap) : 0;
@@ -2268,7 +2275,7 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid)
2268 if (dev->max_hw_sectors) { 2275 if (dev->max_hw_sectors) {
2269 blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors); 2276 blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
2270 blk_queue_max_segments(ns->queue, 2277 blk_queue_max_segments(ns->queue,
2271 ((dev->max_hw_sectors << 9) / dev->page_size) + 1); 2278 (dev->max_hw_sectors / (dev->page_size >> 9)) + 1);
2272 } 2279 }
2273 if (dev->stripe_size) 2280 if (dev->stripe_size)
2274 blk_queue_chunk_sectors(ns->queue, dev->stripe_size >> 9); 2281 blk_queue_chunk_sectors(ns->queue, dev->stripe_size >> 9);
@@ -2533,8 +2540,17 @@ static void nvme_ns_remove(struct nvme_ns *ns)
2533{ 2540{
2534 bool kill = nvme_io_incapable(ns->dev) && !blk_queue_dying(ns->queue); 2541 bool kill = nvme_io_incapable(ns->dev) && !blk_queue_dying(ns->queue);
2535 2542
2536 if (kill) 2543 if (kill) {
2537 blk_set_queue_dying(ns->queue); 2544 blk_set_queue_dying(ns->queue);
2545
2546 /*
2547 * The controller was shutdown first if we got here through
2548 * device removal. The shutdown may requeue outstanding
2549 * requests. These need to be aborted immediately so
2550 * del_gendisk doesn't block indefinitely for their completion.
2551 */
2552 blk_mq_abort_requeue_list(ns->queue);
2553 }
2538 if (ns->disk->flags & GENHD_FL_UP) 2554 if (ns->disk->flags & GENHD_FL_UP)
2539 del_gendisk(ns->disk); 2555 del_gendisk(ns->disk);
2540 if (kill || !blk_queue_dying(ns->queue)) { 2556 if (kill || !blk_queue_dying(ns->queue)) {
@@ -2701,6 +2717,18 @@ static int nvme_dev_map(struct nvme_dev *dev)
2701 dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH); 2717 dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH);
2702 dev->db_stride = 1 << NVME_CAP_STRIDE(cap); 2718 dev->db_stride = 1 << NVME_CAP_STRIDE(cap);
2703 dev->dbs = ((void __iomem *)dev->bar) + 4096; 2719 dev->dbs = ((void __iomem *)dev->bar) + 4096;
2720
2721 /*
2722 * Temporary fix for the Apple controller found in the MacBook8,1 and
2723 * some MacBook7,1 to avoid controller resets and data loss.
2724 */
2725 if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) {
2726 dev->q_depth = 2;
2727 dev_warn(dev->dev, "detected Apple NVMe controller, set "
2728 "queue depth=%u to work around controller resets\n",
2729 dev->q_depth);
2730 }
2731
2704 if (readl(&dev->bar->vs) >= NVME_VS(1, 2)) 2732 if (readl(&dev->bar->vs) >= NVME_VS(1, 2))
2705 dev->cmb = nvme_map_cmb(dev); 2733 dev->cmb = nvme_map_cmb(dev);
2706 2734
@@ -2787,6 +2815,10 @@ static void nvme_del_queue_end(struct nvme_queue *nvmeq)
2787{ 2815{
2788 struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx; 2816 struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx;
2789 nvme_put_dq(dq); 2817 nvme_put_dq(dq);
2818
2819 spin_lock_irq(&nvmeq->q_lock);
2820 nvme_process_cq(nvmeq);
2821 spin_unlock_irq(&nvmeq->q_lock);
2790} 2822}
2791 2823
2792static int adapter_async_del_queue(struct nvme_queue *nvmeq, u8 opcode, 2824static int adapter_async_del_queue(struct nvme_queue *nvmeq, u8 opcode,
@@ -2954,6 +2986,15 @@ static void nvme_dev_remove(struct nvme_dev *dev)
2954{ 2986{
2955 struct nvme_ns *ns, *next; 2987 struct nvme_ns *ns, *next;
2956 2988
2989 if (nvme_io_incapable(dev)) {
2990 /*
2991 * If the device is not capable of IO (surprise hot-removal,
2992 * for example), we need to quiesce prior to deleting the
2993 * namespaces. This will end outstanding requests and prevent
2994 * attempts to sync dirty data.
2995 */
2996 nvme_dev_shutdown(dev);
2997 }
2957 list_for_each_entry_safe(ns, next, &dev->namespaces, list) 2998 list_for_each_entry_safe(ns, next, &dev->namespaces, list)
2958 nvme_ns_remove(ns); 2999 nvme_ns_remove(ns);
2959} 3000}
diff --git a/drivers/of/address.c b/drivers/of/address.c
index cd53fe4a0c86..9582c5703b3c 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -485,9 +485,10 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus,
485 int rone; 485 int rone;
486 u64 offset = OF_BAD_ADDR; 486 u64 offset = OF_BAD_ADDR;
487 487
488 /* Normally, an absence of a "ranges" property means we are 488 /*
489 * Normally, an absence of a "ranges" property means we are
489 * crossing a non-translatable boundary, and thus the addresses 490 * crossing a non-translatable boundary, and thus the addresses
490 * below the current not cannot be converted to CPU physical ones. 491 * below the current cannot be converted to CPU physical ones.
491 * Unfortunately, while this is very clear in the spec, it's not 492 * Unfortunately, while this is very clear in the spec, it's not
492 * what Apple understood, and they do have things like /uni-n or 493 * what Apple understood, and they do have things like /uni-n or
493 * /ht nodes with no "ranges" property and a lot of perfectly 494 * /ht nodes with no "ranges" property and a lot of perfectly
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index d2430298a309..655f79db7899 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -13,6 +13,7 @@
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/initrd.h> 14#include <linux/initrd.h>
15#include <linux/memblock.h> 15#include <linux/memblock.h>
16#include <linux/mutex.h>
16#include <linux/of.h> 17#include <linux/of.h>
17#include <linux/of_fdt.h> 18#include <linux/of_fdt.h>
18#include <linux/of_reserved_mem.h> 19#include <linux/of_reserved_mem.h>
@@ -436,6 +437,8 @@ static void *kernel_tree_alloc(u64 size, u64 align)
436 return kzalloc(size, GFP_KERNEL); 437 return kzalloc(size, GFP_KERNEL);
437} 438}
438 439
440static DEFINE_MUTEX(of_fdt_unflatten_mutex);
441
439/** 442/**
440 * of_fdt_unflatten_tree - create tree of device_nodes from flat blob 443 * of_fdt_unflatten_tree - create tree of device_nodes from flat blob
441 * 444 *
@@ -447,7 +450,9 @@ static void *kernel_tree_alloc(u64 size, u64 align)
447void of_fdt_unflatten_tree(const unsigned long *blob, 450void of_fdt_unflatten_tree(const unsigned long *blob,
448 struct device_node **mynodes) 451 struct device_node **mynodes)
449{ 452{
453 mutex_lock(&of_fdt_unflatten_mutex);
450 __unflatten_device_tree(blob, mynodes, &kernel_tree_alloc); 454 __unflatten_device_tree(blob, mynodes, &kernel_tree_alloc);
455 mutex_unlock(&of_fdt_unflatten_mutex);
451} 456}
452EXPORT_SYMBOL_GPL(of_fdt_unflatten_tree); 457EXPORT_SYMBOL_GPL(of_fdt_unflatten_tree);
453 458
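
Unflattening was not safe against concurrent callers, so the patch serializes them with a single mutex. A sketch of the same shape, with pthreads standing in for the kernel mutex API:

#include <pthread.h>

static pthread_mutex_t unflatten_lock = PTHREAD_MUTEX_INITIALIZER;

static void unflatten_tree(const void *blob)
{
	pthread_mutex_lock(&unflatten_lock);
	/* __unflatten_device_tree(blob, mynodes, kernel_tree_alloc); */
	pthread_mutex_unlock(&unflatten_lock);
}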
@@ -1041,7 +1046,7 @@ void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
1041int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base, 1046int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base,
1042 phys_addr_t size, bool nomap) 1047 phys_addr_t size, bool nomap)
1043{ 1048{
1044 pr_err("Reserved memory not supported, ignoring range 0x%pa - 0x%pa%s\n", 1049 pr_err("Reserved memory not supported, ignoring range %pa - %pa%s\n",
1045 &base, &size, nomap ? " (nomap)" : ""); 1050 &base, &size, nomap ? " (nomap)" : "");
1046 return -ENOSYS; 1051 return -ENOSYS;
1047} 1052}
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 902b89be7217..4fa916dffc91 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -53,7 +53,7 @@ EXPORT_SYMBOL_GPL(irq_of_parse_and_map);
53 * Returns a pointer to the interrupt parent node, or NULL if the interrupt 53 * Returns a pointer to the interrupt parent node, or NULL if the interrupt
54 * parent could not be determined. 54 * parent could not be determined.
55 */ 55 */
56static struct device_node *of_irq_find_parent(struct device_node *child) 56struct device_node *of_irq_find_parent(struct device_node *child)
57{ 57{
58 struct device_node *p; 58 struct device_node *p;
59 const __be32 *parp; 59 const __be32 *parp;
@@ -77,6 +77,7 @@ static struct device_node *of_irq_find_parent(struct device_node *child)
77 77
78 return p; 78 return p;
79} 79}
80EXPORT_SYMBOL_GPL(of_irq_find_parent);
80 81
81/** 82/**
82 * of_irq_parse_raw - Low level interrupt tree parsing 83 * of_irq_parse_raw - Low level interrupt tree parsing
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index be77e75c587d..1a3556a9e9ea 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -206,7 +206,13 @@ static int __init __rmem_cmp(const void *a, const void *b)
206{ 206{
207 const struct reserved_mem *ra = a, *rb = b; 207 const struct reserved_mem *ra = a, *rb = b;
208 208
209 return ra->base - rb->base; 209 if (ra->base < rb->base)
210 return -1;
211
212 if (ra->base > rb->base)
213 return 1;
214
215 return 0;
210} 216}
211 217
212static void __init __rmem_check_for_overlap(void) 218static void __init __rmem_check_for_overlap(void)
diff --git a/drivers/parisc/iommu-helpers.h b/drivers/parisc/iommu-helpers.h
index 761e77bfce5d..e56f1569f6c3 100644
--- a/drivers/parisc/iommu-helpers.h
+++ b/drivers/parisc/iommu-helpers.h
@@ -104,7 +104,11 @@ iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
104 struct scatterlist *contig_sg; /* contig chunk head */ 104 struct scatterlist *contig_sg; /* contig chunk head */
105 unsigned long dma_offset, dma_len; /* start/len of DMA stream */ 105 unsigned long dma_offset, dma_len; /* start/len of DMA stream */
106 unsigned int n_mappings = 0; 106 unsigned int n_mappings = 0;
107 unsigned int max_seg_size = dma_get_max_seg_size(dev); 107 unsigned int max_seg_size = min(dma_get_max_seg_size(dev),
108 (unsigned)DMA_CHUNK_SIZE);
109 unsigned int max_seg_boundary = dma_get_seg_boundary(dev) + 1;
110 if (max_seg_boundary) /* check if the addition above didn't overflow */
111 max_seg_size = min(max_seg_size, max_seg_boundary);
108 112
109 while (nents > 0) { 113 while (nents > 0) {
110 114
@@ -138,14 +142,11 @@ iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
138 142
139 /* 143 /*
140 ** First make sure current dma stream won't 144 ** First make sure current dma stream won't
141 ** exceed DMA_CHUNK_SIZE if we coalesce the 145 ** exceed max_seg_size if we coalesce the
142 ** next entry. 146 ** next entry.
143 */ 147 */
144 if(unlikely(ALIGN(dma_len + dma_offset + startsg->length, 148 if (unlikely(ALIGN(dma_len + dma_offset + startsg->length, IOVP_SIZE) >
145 IOVP_SIZE) > DMA_CHUNK_SIZE)) 149 max_seg_size))
146 break;
147
148 if (startsg->length + dma_len > max_seg_size)
149 break; 150 break;
150 151
151 /* 152 /*
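
A model of the new clamp above, under the assumption of an illustrative DMA_CHUNK_SIZE (not the PA-RISC value): the per-segment limit is the smaller of the device's max segment size and DMA_CHUNK_SIZE, further clamped by segment boundary + 1 unless that addition wrapped to zero.

#include <stdint.h>

#define DMA_CHUNK_SIZE (1u << 20)      /* illustrative */

static unsigned int seg_limit(unsigned int dev_max_seg, unsigned long boundary)
{
	unsigned int limit = dev_max_seg < DMA_CHUNK_SIZE ?
			     dev_max_seg : DMA_CHUNK_SIZE;
	unsigned int bound = (unsigned int)(boundary + 1);

	if (bound)                     /* boundary + 1 did not overflow */
		limit = limit < bound ? limit : bound;
	return limit;
}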
diff --git a/drivers/pci/host/pcie-altera.c b/drivers/pci/host/pcie-altera.c
index e5dda38bdde5..99da549d5d06 100644
--- a/drivers/pci/host/pcie-altera.c
+++ b/drivers/pci/host/pcie-altera.c
@@ -55,8 +55,10 @@
55#define TLP_CFG_DW2(bus, devfn, offset) \ 55#define TLP_CFG_DW2(bus, devfn, offset) \
56 (((bus) << 24) | ((devfn) << 16) | (offset)) 56 (((bus) << 24) | ((devfn) << 16) | (offset))
57#define TLP_REQ_ID(bus, devfn) (((bus) << 8) | (devfn)) 57#define TLP_REQ_ID(bus, devfn) (((bus) << 8) | (devfn))
58#define TLP_COMP_STATUS(s) (((s) >> 12) & 7)
58#define TLP_HDR_SIZE 3 59#define TLP_HDR_SIZE 3
59#define TLP_LOOP 500 60#define TLP_LOOP 500
61#define RP_DEVFN 0
60 62
61#define INTX_NUM 4 63#define INTX_NUM 4
62 64
@@ -166,34 +168,41 @@ static bool altera_pcie_valid_config(struct altera_pcie *pcie,
166 168
167static int tlp_read_packet(struct altera_pcie *pcie, u32 *value) 169static int tlp_read_packet(struct altera_pcie *pcie, u32 *value)
168{ 170{
169 u8 loop; 171 int i;
170 bool sop = 0; 172 bool sop = 0;
171 u32 ctrl; 173 u32 ctrl;
172 u32 reg0, reg1; 174 u32 reg0, reg1;
175 u32 comp_status = 1;
173 176
174 /* 177 /*
175 * Minimum 2 loops to read TLP headers and 1 loop to read data 178 * Minimum 2 loops to read TLP headers and 1 loop to read data
176 * payload. 179 * payload.
177 */ 180 */
178 for (loop = 0; loop < TLP_LOOP; loop++) { 181 for (i = 0; i < TLP_LOOP; i++) {
179 ctrl = cra_readl(pcie, RP_RXCPL_STATUS); 182 ctrl = cra_readl(pcie, RP_RXCPL_STATUS);
180 if ((ctrl & RP_RXCPL_SOP) || (ctrl & RP_RXCPL_EOP) || sop) { 183 if ((ctrl & RP_RXCPL_SOP) || (ctrl & RP_RXCPL_EOP) || sop) {
181 reg0 = cra_readl(pcie, RP_RXCPL_REG0); 184 reg0 = cra_readl(pcie, RP_RXCPL_REG0);
182 reg1 = cra_readl(pcie, RP_RXCPL_REG1); 185 reg1 = cra_readl(pcie, RP_RXCPL_REG1);
183 186
184 if (ctrl & RP_RXCPL_SOP) 187 if (ctrl & RP_RXCPL_SOP) {
185 sop = true; 188 sop = true;
189 comp_status = TLP_COMP_STATUS(reg1);
190 }
186 191
187 if (ctrl & RP_RXCPL_EOP) { 192 if (ctrl & RP_RXCPL_EOP) {
193 if (comp_status)
194 return PCIBIOS_DEVICE_NOT_FOUND;
195
188 if (value) 196 if (value)
189 *value = reg0; 197 *value = reg0;
198
190 return PCIBIOS_SUCCESSFUL; 199 return PCIBIOS_SUCCESSFUL;
191 } 200 }
192 } 201 }
193 udelay(5); 202 udelay(5);
194 } 203 }
195 204
196 return -ENOENT; 205 return PCIBIOS_DEVICE_NOT_FOUND;
197} 206}
198 207
199static void tlp_write_packet(struct altera_pcie *pcie, u32 *headers, 208static void tlp_write_packet(struct altera_pcie *pcie, u32 *headers,
@@ -233,7 +242,7 @@ static int tlp_cfg_dword_read(struct altera_pcie *pcie, u8 bus, u32 devfn,
233 else 242 else
234 headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGRD1); 243 headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGRD1);
235 244
236 headers[1] = TLP_CFG_DW1(TLP_REQ_ID(pcie->root_bus_nr, devfn), 245 headers[1] = TLP_CFG_DW1(TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN),
237 TLP_READ_TAG, byte_en); 246 TLP_READ_TAG, byte_en);
238 headers[2] = TLP_CFG_DW2(bus, devfn, where); 247 headers[2] = TLP_CFG_DW2(bus, devfn, where);
239 248
@@ -253,7 +262,7 @@ static int tlp_cfg_dword_write(struct altera_pcie *pcie, u8 bus, u32 devfn,
253 else 262 else
254 headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGWR1); 263 headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGWR1);
255 264
256 headers[1] = TLP_CFG_DW1(TLP_REQ_ID(pcie->root_bus_nr, devfn), 265 headers[1] = TLP_CFG_DW1(TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN),
257 TLP_WRITE_TAG, byte_en); 266 TLP_WRITE_TAG, byte_en);
258 headers[2] = TLP_CFG_DW2(bus, devfn, where); 267 headers[2] = TLP_CFG_DW2(bus, devfn, where);
259 268
@@ -458,7 +467,7 @@ static int altera_pcie_init_irq_domain(struct altera_pcie *pcie)
458 struct device_node *node = dev->of_node; 467 struct device_node *node = dev->of_node;
459 468
460 /* Setup INTx */ 469 /* Setup INTx */
461 pcie->irq_domain = irq_domain_add_linear(node, INTX_NUM, 470 pcie->irq_domain = irq_domain_add_linear(node, INTX_NUM + 1,
462 &intx_domain_ops, pcie); 471 &intx_domain_ops, pcie);
463 if (!pcie->irq_domain) { 472 if (!pcie->irq_domain) {
 464 dev_err(dev, "Failed to get an INTx IRQ domain\n"); 473 dev_err(dev, "Failed to get an INTx IRQ domain\n");
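
Two things happen in this file: config TLPs are now sent with the root port's own requester ID (RP_DEVFN), and completions are checked for a non-zero completion status before their payload is trusted. TLP_COMP_STATUS() pulls the 3-bit status field out of bits 14:12 of the completion header's second dword; anything other than 000b (Successful Completion) makes the read report PCIBIOS_DEVICE_NOT_FOUND instead of returning stale data. A small sketch of the field extraction, with invented header values:

#include <stdint.h>
#include <stdio.h>

#define TLP_COMP_STATUS(s)  (((s) >> 12) & 7)   /* bits 14:12 of DW1 */

int main(void)
{
    uint32_t dw1_success = 0x00000000;          /* status 000b */
    uint32_t dw1_ur = (uint32_t)1 << 12;        /* 001b: Unsupported Request */

    printf("success -> %u\n", TLP_COMP_STATUS(dw1_success)); /* 0 */
    printf("UR      -> %u\n", TLP_COMP_STATUS(dw1_ur));      /* 1 */
    return 0;
}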
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index 540f077c37ea..02a7452bdf23 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -440,7 +440,6 @@ int dw_pcie_host_init(struct pcie_port *pp)
440 ret, pp->io); 440 ret, pp->io);
441 continue; 441 continue;
442 } 442 }
443 pp->io_base = pp->io->start;
444 break; 443 break;
445 case IORESOURCE_MEM: 444 case IORESOURCE_MEM:
446 pp->mem = win->res; 445 pp->mem = win->res;
diff --git a/drivers/pci/host/pcie-hisi.c b/drivers/pci/host/pcie-hisi.c
index 35457ecd8e70..77f7c669a1b9 100644
--- a/drivers/pci/host/pcie-hisi.c
+++ b/drivers/pci/host/pcie-hisi.c
@@ -61,7 +61,9 @@ static int hisi_pcie_cfg_read(struct pcie_port *pp, int where, int size,
61 *val = *(u8 __force *) walker; 61 *val = *(u8 __force *) walker;
62 else if (size == 2) 62 else if (size == 2)
63 *val = *(u16 __force *) walker; 63 *val = *(u16 __force *) walker;
64 else if (size != 4) 64 else if (size == 4)
65 *val = reg_val;
66 else
65 return PCIBIOS_BAD_REGISTER_NUMBER; 67 return PCIBIOS_BAD_REGISTER_NUMBER;
66 68
67 return PCIBIOS_SUCCESSFUL; 69 return PCIBIOS_SUCCESSFUL;
@@ -111,7 +113,7 @@ static struct pcie_host_ops hisi_pcie_host_ops = {
111 .link_up = hisi_pcie_link_up, 113 .link_up = hisi_pcie_link_up,
112}; 114};
113 115
114static int __init hisi_add_pcie_port(struct pcie_port *pp, 116static int hisi_add_pcie_port(struct pcie_port *pp,
115 struct platform_device *pdev) 117 struct platform_device *pdev)
116{ 118{
117 int ret; 119 int ret;
@@ -139,7 +141,7 @@ static int __init hisi_add_pcie_port(struct pcie_port *pp,
139 return 0; 141 return 0;
140} 142}
141 143
142static int __init hisi_pcie_probe(struct platform_device *pdev) 144static int hisi_pcie_probe(struct platform_device *pdev)
143{ 145{
144 struct hisi_pcie *hisi_pcie; 146 struct hisi_pcie *hisi_pcie;
145 struct pcie_port *pp; 147 struct pcie_port *pp;
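
Before this change a 4-byte config read fell into the "size != 4" error arm and returned PCIBIOS_BAD_REGISTER_NUMBER without ever filling *val; restructuring the chain makes each size explicit. A sketch of the fixed dispatch, where masking stands in for the kernel's sub-word pointer walk:

#include <stdint.h>
#include <stdio.h>

#define PCIBIOS_SUCCESSFUL          0x00
#define PCIBIOS_BAD_REGISTER_NUMBER 0x87

static int cfg_read(uint32_t reg_val, int size, uint32_t *val)
{
    if (size == 1)
        *val = reg_val & 0xff;
    else if (size == 2)
        *val = reg_val & 0xffff;
    else if (size == 4)
        *val = reg_val;     /* previously unreachable */
    else
        return PCIBIOS_BAD_REGISTER_NUMBER;
    return PCIBIOS_SUCCESSFUL;
}

int main(void)
{
    uint32_t val = 0;
    int ret = cfg_read(0x12345678, 4, &val);
    printf("ret=%d val=0x%08x\n", ret, val);
    return 0;
}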
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 53e463244bb7..7eaa4c87fec7 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -54,7 +54,7 @@ static int pci_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
54 struct irq_domain *domain; 54 struct irq_domain *domain;
55 55
56 domain = pci_msi_get_domain(dev); 56 domain = pci_msi_get_domain(dev);
57 if (domain) 57 if (domain && irq_domain_is_hierarchy(domain))
58 return pci_msi_domain_alloc_irqs(domain, dev, nvec, type); 58 return pci_msi_domain_alloc_irqs(domain, dev, nvec, type);
59 59
60 return arch_setup_msi_irqs(dev, nvec, type); 60 return arch_setup_msi_irqs(dev, nvec, type);
@@ -65,7 +65,7 @@ static void pci_msi_teardown_msi_irqs(struct pci_dev *dev)
65 struct irq_domain *domain; 65 struct irq_domain *domain;
66 66
67 domain = pci_msi_get_domain(dev); 67 domain = pci_msi_get_domain(dev);
68 if (domain) 68 if (domain && irq_domain_is_hierarchy(domain))
69 pci_msi_domain_free_irqs(domain, dev); 69 pci_msi_domain_free_irqs(domain, dev);
70 else 70 else
71 arch_teardown_msi_irqs(dev); 71 arch_teardown_msi_irqs(dev);
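
Only irq domains built with the hierarchical domain API provide the generic MSI alloc/free callbacks; a device may still be attached to a legacy, non-hierarchy domain, which must keep using the arch_setup_msi_irqs()/arch_teardown_msi_irqs() fallbacks. A toy model of the dispatch:

#include <stdbool.h>
#include <stdio.h>

struct irq_domain { bool hierarchical; };   /* toy stand-in */

static bool irq_domain_is_hierarchy(const struct irq_domain *d)
{
    return d->hierarchical;
}

static const char *setup_msi(const struct irq_domain *domain)
{
    if (domain && irq_domain_is_hierarchy(domain))
        return "generic MSI domain path";
    return "arch fallback path";
}

int main(void)
{
    struct irq_domain legacy = { .hierarchical = false };
    struct irq_domain hier = { .hierarchical = true };

    printf("NULL:   %s\n", setup_msi(NULL));
    printf("legacy: %s\n", setup_msi(&legacy));
    printf("hier:   %s\n", setup_msi(&hier));
    return 0;
}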
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 4446fcb5effd..d7ffd66814bb 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -1146,9 +1146,21 @@ static int pci_pm_runtime_suspend(struct device *dev)
1146 pci_dev->state_saved = false; 1146 pci_dev->state_saved = false;
1147 pci_dev->no_d3cold = false; 1147 pci_dev->no_d3cold = false;
1148 error = pm->runtime_suspend(dev); 1148 error = pm->runtime_suspend(dev);
1149 suspend_report_result(pm->runtime_suspend, error); 1149 if (error) {
1150 if (error) 1150 /*
1151 * -EBUSY and -EAGAIN are used to request the runtime PM core
1152 * to schedule a new suspend, so log the event only with debug
1153 * log level.
1154 */
1155 if (error == -EBUSY || error == -EAGAIN)
1156 dev_dbg(dev, "can't suspend now (%pf returned %d)\n",
1157 pm->runtime_suspend, error);
1158 else
1159 dev_err(dev, "can't suspend (%pf returned %d)\n",
1160 pm->runtime_suspend, error);
1161
1151 return error; 1162 return error;
1163 }
1152 if (!pci_dev->d3cold_allowed) 1164 if (!pci_dev->d3cold_allowed)
1153 pci_dev->no_d3cold = true; 1165 pci_dev->no_d3cold = true;
1154 1166
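
suspend_report_result() logged every failure the same way, but -EBUSY and -EAGAIN are the runtime PM core's ordinary "try again later" answers, so they are demoted to debug level while real failures stay at error level. The policy in isolation:

#include <errno.h>
#include <stdio.h>

static void report_suspend_error(int error)
{
    if (error == -EBUSY || error == -EAGAIN)
        printf("dbg: can't suspend now (%d), a retry will be scheduled\n",
               error);
    else
        printf("err: can't suspend (%d)\n", error);
}

int main(void)
{
    report_suspend_error(-EBUSY);   /* expected, debug level   */
    report_suspend_error(-EIO);     /* unexpected, error level */
    return 0;
}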
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 92618686604c..eead54cd01b2 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -216,7 +216,10 @@ static ssize_t numa_node_store(struct device *dev,
216 if (ret) 216 if (ret)
217 return ret; 217 return ret;
218 218
219 if (node >= MAX_NUMNODES || !node_online(node)) 219 if ((node < 0 && node != NUMA_NO_NODE) || node >= MAX_NUMNODES)
220 return -EINVAL;
221
222 if (node != NUMA_NO_NODE && !node_online(node))
220 return -EINVAL; 223 return -EINVAL;
221 224
222 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK); 225 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
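
The old check passed any negative value straight into node_online(), which indexes a bitmap, and in doing so also rejected the one negative value that is legitimate, NUMA_NO_NODE (-1). The rewrite accepts exactly NUMA_NO_NODE among negatives and applies the online test only to real node numbers. A compact model:

#include <stdbool.h>
#include <stdio.h>

#define NUMA_NO_NODE (-1)
#define MAX_NUMNODES 64

static bool node_online(int node)   /* toy: only node 0 is online */
{
    return node == 0;
}

static int validate(int node)
{
    if ((node < 0 && node != NUMA_NO_NODE) || node >= MAX_NUMNODES)
        return -1;                  /* -EINVAL */
    if (node != NUMA_NO_NODE && !node_online(node))
        return -1;                  /* -EINVAL */
    return 0;
}

int main(void)
{
    printf("NUMA_NO_NODE: %d\n", validate(NUMA_NO_NODE)); /*  0 */
    printf("-2:           %d\n", validate(-2));           /* -1 */
    printf("0 (online):   %d\n", validate(0));            /*  0 */
    printf("5 (offline):  %d\n", validate(5));            /* -1 */
    return 0;
}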
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index fd2f03fa53f3..d390fc1475ec 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -337,6 +337,4 @@ static inline int pci_dev_specific_reset(struct pci_dev *dev, int probe)
337} 337}
338#endif 338#endif
339 339
340struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus);
341
342#endif /* DRIVERS_PCI_H */ 340#endif /* DRIVERS_PCI_H */
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 7eb5859dd035..03cb3ea2d2c0 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -233,6 +233,7 @@ config PHY_SUN9I_USB
233 tristate "Allwinner sun9i SoC USB PHY driver" 233 tristate "Allwinner sun9i SoC USB PHY driver"
234 depends on ARCH_SUNXI && HAS_IOMEM && OF 234 depends on ARCH_SUNXI && HAS_IOMEM && OF
235 depends on RESET_CONTROLLER 235 depends on RESET_CONTROLLER
236 depends on USB_COMMON
236 select GENERIC_PHY 237 select GENERIC_PHY
237 help 238 help
238 Enable this to support the transceiver that is part of Allwinner 239 Enable this to support the transceiver that is part of Allwinner
diff --git a/drivers/phy/phy-bcm-cygnus-pcie.c b/drivers/phy/phy-bcm-cygnus-pcie.c
index 7ad72b7d2b98..082c03f6438f 100644
--- a/drivers/phy/phy-bcm-cygnus-pcie.c
+++ b/drivers/phy/phy-bcm-cygnus-pcie.c
@@ -128,6 +128,7 @@ static int cygnus_pcie_phy_probe(struct platform_device *pdev)
128 struct phy_provider *provider; 128 struct phy_provider *provider;
129 struct resource *res; 129 struct resource *res;
130 unsigned cnt = 0; 130 unsigned cnt = 0;
131 int ret;
131 132
132 if (of_get_child_count(node) == 0) { 133 if (of_get_child_count(node) == 0) {
133 dev_err(dev, "PHY no child node\n"); 134 dev_err(dev, "PHY no child node\n");
@@ -154,24 +155,28 @@ static int cygnus_pcie_phy_probe(struct platform_device *pdev)
154 if (of_property_read_u32(child, "reg", &id)) { 155 if (of_property_read_u32(child, "reg", &id)) {
155 dev_err(dev, "missing reg property for %s\n", 156 dev_err(dev, "missing reg property for %s\n",
156 child->name); 157 child->name);
157 return -EINVAL; 158 ret = -EINVAL;
159 goto put_child;
158 } 160 }
159 161
160 if (id >= MAX_NUM_PHYS) { 162 if (id >= MAX_NUM_PHYS) {
161 dev_err(dev, "invalid PHY id: %u\n", id); 163 dev_err(dev, "invalid PHY id: %u\n", id);
162 return -EINVAL; 164 ret = -EINVAL;
165 goto put_child;
163 } 166 }
164 167
165 if (core->phys[id].phy) { 168 if (core->phys[id].phy) {
166 dev_err(dev, "duplicated PHY id: %u\n", id); 169 dev_err(dev, "duplicated PHY id: %u\n", id);
167 return -EINVAL; 170 ret = -EINVAL;
171 goto put_child;
168 } 172 }
169 173
170 p = &core->phys[id]; 174 p = &core->phys[id];
171 p->phy = devm_phy_create(dev, child, &cygnus_pcie_phy_ops); 175 p->phy = devm_phy_create(dev, child, &cygnus_pcie_phy_ops);
172 if (IS_ERR(p->phy)) { 176 if (IS_ERR(p->phy)) {
173 dev_err(dev, "failed to create PHY\n"); 177 dev_err(dev, "failed to create PHY\n");
174 return PTR_ERR(p->phy); 178 ret = PTR_ERR(p->phy);
179 goto put_child;
175 } 180 }
176 181
177 p->core = core; 182 p->core = core;
@@ -191,6 +196,9 @@ static int cygnus_pcie_phy_probe(struct platform_device *pdev)
191 dev_dbg(dev, "registered %u PCIe PHY(s)\n", cnt); 196 dev_dbg(dev, "registered %u PCIe PHY(s)\n", cnt);
192 197
193 return 0; 198 return 0;
199put_child:
200 of_node_put(child);
201 return ret;
194} 202}
195 203
196static const struct of_device_id cygnus_pcie_phy_match_table[] = { 204static const struct of_device_id cygnus_pcie_phy_match_table[] = {
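
This is the first of several drivers below (phy-berlin-sata, phy-brcmstb-sata, miphy28lp, miphy365x, phy-mt65xx-usb3, phy-rockchip-usb) receiving the same fix: for_each_child_of_node() holds a reference on the current child and releases it when advancing, so bailing out of the loop with a plain return leaks that reference. Every early exit must funnel through of_node_put() on the current child, hence the shared "put_child" epilogue. A toy refcount model of the leak and the fix:

#include <stdio.h>

struct node { const char *name; int refcount; };

static void node_get(struct node *n) { n->refcount++; }
static void node_put(struct node *n) { n->refcount--; }

int main(void)
{
    struct node kids[] = { {"phy@0", 0}, {"phy@1", 0}, {"phy@2", 0} };
    struct node *child = NULL;
    int i, ret = 0;

    /* Mimics for_each_child_of_node(): a reference is held on the
     * current child for the duration of each iteration. */
    for (i = 0; i < 3; i++) {
        child = &kids[i];
        node_get(child);

        if (i == 1) {           /* simulated mid-loop probe failure */
            ret = -1;
            goto put_child;     /* a bare return would leak kids[1] */
        }
        node_put(child);
    }
    return 0;

put_child:
    node_put(child);            /* balance the iterator's reference */
    printf("ret=%d refcounts: %d %d %d\n", ret,
           kids[0].refcount, kids[1].refcount, kids[2].refcount);
    return 1;
}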
diff --git a/drivers/phy/phy-berlin-sata.c b/drivers/phy/phy-berlin-sata.c
index 77a2e054fdea..f84a33a1bdd9 100644
--- a/drivers/phy/phy-berlin-sata.c
+++ b/drivers/phy/phy-berlin-sata.c
@@ -195,7 +195,7 @@ static int phy_berlin_sata_probe(struct platform_device *pdev)
195 struct phy_provider *phy_provider; 195 struct phy_provider *phy_provider;
196 struct phy_berlin_priv *priv; 196 struct phy_berlin_priv *priv;
197 struct resource *res; 197 struct resource *res;
198 int i = 0; 198 int ret, i = 0;
199 u32 phy_id; 199 u32 phy_id;
200 200
201 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); 201 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -237,22 +237,27 @@ static int phy_berlin_sata_probe(struct platform_device *pdev)
237 if (of_property_read_u32(child, "reg", &phy_id)) { 237 if (of_property_read_u32(child, "reg", &phy_id)) {
238 dev_err(dev, "missing reg property in node %s\n", 238 dev_err(dev, "missing reg property in node %s\n",
239 child->name); 239 child->name);
240 return -EINVAL; 240 ret = -EINVAL;
241 goto put_child;
241 } 242 }
242 243
243 if (phy_id >= ARRAY_SIZE(phy_berlin_power_down_bits)) { 244 if (phy_id >= ARRAY_SIZE(phy_berlin_power_down_bits)) {
244 dev_err(dev, "invalid reg in node %s\n", child->name); 245 dev_err(dev, "invalid reg in node %s\n", child->name);
245 return -EINVAL; 246 ret = -EINVAL;
247 goto put_child;
246 } 248 }
247 249
248 phy_desc = devm_kzalloc(dev, sizeof(*phy_desc), GFP_KERNEL); 250 phy_desc = devm_kzalloc(dev, sizeof(*phy_desc), GFP_KERNEL);
249 if (!phy_desc) 251 if (!phy_desc) {
250 return -ENOMEM; 252 ret = -ENOMEM;
253 goto put_child;
254 }
251 255
252 phy = devm_phy_create(dev, NULL, &phy_berlin_sata_ops); 256 phy = devm_phy_create(dev, NULL, &phy_berlin_sata_ops);
253 if (IS_ERR(phy)) { 257 if (IS_ERR(phy)) {
254 dev_err(dev, "failed to create PHY %d\n", phy_id); 258 dev_err(dev, "failed to create PHY %d\n", phy_id);
255 return PTR_ERR(phy); 259 ret = PTR_ERR(phy);
260 goto put_child;
256 } 261 }
257 262
258 phy_desc->phy = phy; 263 phy_desc->phy = phy;
@@ -269,6 +274,9 @@ static int phy_berlin_sata_probe(struct platform_device *pdev)
269 phy_provider = 274 phy_provider =
270 devm_of_phy_provider_register(dev, phy_berlin_sata_phy_xlate); 275 devm_of_phy_provider_register(dev, phy_berlin_sata_phy_xlate);
271 return PTR_ERR_OR_ZERO(phy_provider); 276 return PTR_ERR_OR_ZERO(phy_provider);
277put_child:
278 of_node_put(child);
279 return ret;
272} 280}
273 281
274static const struct of_device_id phy_berlin_sata_of_match[] = { 282static const struct of_device_id phy_berlin_sata_of_match[] = {
diff --git a/drivers/phy/phy-brcmstb-sata.c b/drivers/phy/phy-brcmstb-sata.c
index 8a2cb16a1937..cd9dba820566 100644
--- a/drivers/phy/phy-brcmstb-sata.c
+++ b/drivers/phy/phy-brcmstb-sata.c
@@ -140,7 +140,7 @@ static int brcm_sata_phy_probe(struct platform_device *pdev)
140 struct brcm_sata_phy *priv; 140 struct brcm_sata_phy *priv;
141 struct resource *res; 141 struct resource *res;
142 struct phy_provider *provider; 142 struct phy_provider *provider;
143 int count = 0; 143 int ret, count = 0;
144 144
145 if (of_get_child_count(dn) == 0) 145 if (of_get_child_count(dn) == 0)
146 return -ENODEV; 146 return -ENODEV;
@@ -163,16 +163,19 @@ static int brcm_sata_phy_probe(struct platform_device *pdev)
163 if (of_property_read_u32(child, "reg", &id)) { 163 if (of_property_read_u32(child, "reg", &id)) {
164 dev_err(dev, "missing reg property in node %s\n", 164 dev_err(dev, "missing reg property in node %s\n",
165 child->name); 165 child->name);
166 return -EINVAL; 166 ret = -EINVAL;
167 goto put_child;
167 } 168 }
168 169
169 if (id >= MAX_PORTS) { 170 if (id >= MAX_PORTS) {
170 dev_err(dev, "invalid reg: %u\n", id); 171 dev_err(dev, "invalid reg: %u\n", id);
171 return -EINVAL; 172 ret = -EINVAL;
173 goto put_child;
172 } 174 }
173 if (priv->phys[id].phy) { 175 if (priv->phys[id].phy) {
174 dev_err(dev, "already registered port %u\n", id); 176 dev_err(dev, "already registered port %u\n", id);
175 return -EINVAL; 177 ret = -EINVAL;
178 goto put_child;
176 } 179 }
177 180
178 port = &priv->phys[id]; 181 port = &priv->phys[id];
@@ -182,7 +185,8 @@ static int brcm_sata_phy_probe(struct platform_device *pdev)
182 port->ssc_en = of_property_read_bool(child, "brcm,enable-ssc"); 185 port->ssc_en = of_property_read_bool(child, "brcm,enable-ssc");
183 if (IS_ERR(port->phy)) { 186 if (IS_ERR(port->phy)) {
184 dev_err(dev, "failed to create PHY\n"); 187 dev_err(dev, "failed to create PHY\n");
185 return PTR_ERR(port->phy); 188 ret = PTR_ERR(port->phy);
189 goto put_child;
186 } 190 }
187 191
188 phy_set_drvdata(port->phy, port); 192 phy_set_drvdata(port->phy, port);
@@ -198,6 +202,9 @@ static int brcm_sata_phy_probe(struct platform_device *pdev)
198 dev_info(dev, "registered %d port(s)\n", count); 202 dev_info(dev, "registered %d port(s)\n", count);
199 203
200 return 0; 204 return 0;
205put_child:
206 of_node_put(child);
207 return ret;
201} 208}
202 209
203static struct platform_driver brcm_sata_phy_driver = { 210static struct platform_driver brcm_sata_phy_driver = {
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index fc48fac003a6..8c7f27db6ad3 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -636,8 +636,9 @@ EXPORT_SYMBOL_GPL(devm_of_phy_get);
636 * @np: node containing the phy 636 * @np: node containing the phy
637 * @index: index of the phy 637 * @index: index of the phy
638 * 638 *
639 * Gets the phy using _of_phy_get(), and associates a device with it using 639 * Gets the phy using _of_phy_get(), then gets a refcount to it,
640 * devres. On driver detach, release function is invoked on the devres data, 640 * and associates a device with it using devres. On driver detach,
641 * release function is invoked on the devres data,
641 * then, devres data is freed. 642 * then, devres data is freed.
642 * 643 *
643 */ 644 */
@@ -651,13 +652,21 @@ struct phy *devm_of_phy_get_by_index(struct device *dev, struct device_node *np,
651 return ERR_PTR(-ENOMEM); 652 return ERR_PTR(-ENOMEM);
652 653
653 phy = _of_phy_get(np, index); 654 phy = _of_phy_get(np, index);
654 if (!IS_ERR(phy)) { 655 if (IS_ERR(phy)) {
655 *ptr = phy;
656 devres_add(dev, ptr);
657 } else {
658 devres_free(ptr); 656 devres_free(ptr);
657 return phy;
659 } 658 }
660 659
660 if (!try_module_get(phy->ops->owner)) {
661 devres_free(ptr);
662 return ERR_PTR(-EPROBE_DEFER);
663 }
664
665 get_device(&phy->dev);
666
667 *ptr = phy;
668 devres_add(dev, ptr);
669
661 return phy; 670 return phy;
662} 671}
663EXPORT_SYMBOL_GPL(devm_of_phy_get_by_index); 672EXPORT_SYMBOL_GPL(devm_of_phy_get_by_index);
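
The rewrite also pins the PHY provider module and the PHY device itself before publishing the devres entry: if try_module_get() fails the provider is unloading, which is reported as -EPROBE_DEFER so the consumer retries later. The ordering matters: take every reference first and record the handle last, so the eventual release callback can drop exactly what was taken. A sketch of that ordering, with toy stand-ins for the kernel calls:

#include <stdbool.h>
#include <stdio.h>

struct phy { int module_refs; int dev_refs; };  /* toy stand-in */

static bool try_module_get(struct phy *p) { p->module_refs++; return true; }
static void get_device(struct phy *p)     { p->dev_refs++; }

static struct phy *devm_phy_get(struct phy *phy, struct phy **devres_slot)
{
    if (!try_module_get(phy))
        return NULL;            /* kernel: ERR_PTR(-EPROBE_DEFER) */

    get_device(phy);

    *devres_slot = phy;         /* publish only once fully owned */
    return phy;
}

int main(void)
{
    struct phy phy = { 0, 0 };
    struct phy *slot = NULL;

    devm_phy_get(&phy, &slot);
    printf("module_refs=%d dev_refs=%d published=%d\n",
           phy.module_refs, phy.dev_refs, slot == &phy);
    return 0;
}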
diff --git a/drivers/phy/phy-miphy28lp.c b/drivers/phy/phy-miphy28lp.c
index c47b56b4a2b8..3acd2a1808df 100644
--- a/drivers/phy/phy-miphy28lp.c
+++ b/drivers/phy/phy-miphy28lp.c
@@ -1226,15 +1226,18 @@ static int miphy28lp_probe(struct platform_device *pdev)
1226 1226
1227 miphy_phy = devm_kzalloc(&pdev->dev, sizeof(*miphy_phy), 1227 miphy_phy = devm_kzalloc(&pdev->dev, sizeof(*miphy_phy),
1228 GFP_KERNEL); 1228 GFP_KERNEL);
1229 if (!miphy_phy) 1229 if (!miphy_phy) {
1230 return -ENOMEM; 1230 ret = -ENOMEM;
1231 goto put_child;
1232 }
1231 1233
1232 miphy_dev->phys[port] = miphy_phy; 1234 miphy_dev->phys[port] = miphy_phy;
1233 1235
1234 phy = devm_phy_create(&pdev->dev, child, &miphy28lp_ops); 1236 phy = devm_phy_create(&pdev->dev, child, &miphy28lp_ops);
1235 if (IS_ERR(phy)) { 1237 if (IS_ERR(phy)) {
1236 dev_err(&pdev->dev, "failed to create PHY\n"); 1238 dev_err(&pdev->dev, "failed to create PHY\n");
1237 return PTR_ERR(phy); 1239 ret = PTR_ERR(phy);
1240 goto put_child;
1238 } 1241 }
1239 1242
1240 miphy_dev->phys[port]->phy = phy; 1243 miphy_dev->phys[port]->phy = phy;
@@ -1242,11 +1245,11 @@ static int miphy28lp_probe(struct platform_device *pdev)
1242 1245
1243 ret = miphy28lp_of_probe(child, miphy_phy); 1246 ret = miphy28lp_of_probe(child, miphy_phy);
1244 if (ret) 1247 if (ret)
1245 return ret; 1248 goto put_child;
1246 1249
1247 ret = miphy28lp_probe_resets(child, miphy_dev->phys[port]); 1250 ret = miphy28lp_probe_resets(child, miphy_dev->phys[port]);
1248 if (ret) 1251 if (ret)
1249 return ret; 1252 goto put_child;
1250 1253
1251 phy_set_drvdata(phy, miphy_dev->phys[port]); 1254 phy_set_drvdata(phy, miphy_dev->phys[port]);
1252 port++; 1255 port++;
@@ -1255,6 +1258,9 @@ static int miphy28lp_probe(struct platform_device *pdev)
1255 1258
1256 provider = devm_of_phy_provider_register(&pdev->dev, miphy28lp_xlate); 1259 provider = devm_of_phy_provider_register(&pdev->dev, miphy28lp_xlate);
1257 return PTR_ERR_OR_ZERO(provider); 1260 return PTR_ERR_OR_ZERO(provider);
1261put_child:
1262 of_node_put(child);
1263 return ret;
1258} 1264}
1259 1265
1260static const struct of_device_id miphy28lp_of_match[] = { 1266static const struct of_device_id miphy28lp_of_match[] = {
diff --git a/drivers/phy/phy-miphy365x.c b/drivers/phy/phy-miphy365x.c
index 00a686a073ed..e661f3b36eaa 100644
--- a/drivers/phy/phy-miphy365x.c
+++ b/drivers/phy/phy-miphy365x.c
@@ -566,22 +566,25 @@ static int miphy365x_probe(struct platform_device *pdev)
566 566
567 miphy_phy = devm_kzalloc(&pdev->dev, sizeof(*miphy_phy), 567 miphy_phy = devm_kzalloc(&pdev->dev, sizeof(*miphy_phy),
568 GFP_KERNEL); 568 GFP_KERNEL);
569 if (!miphy_phy) 569 if (!miphy_phy) {
570 return -ENOMEM; 570 ret = -ENOMEM;
571 goto put_child;
572 }
571 573
572 miphy_dev->phys[port] = miphy_phy; 574 miphy_dev->phys[port] = miphy_phy;
573 575
574 phy = devm_phy_create(&pdev->dev, child, &miphy365x_ops); 576 phy = devm_phy_create(&pdev->dev, child, &miphy365x_ops);
575 if (IS_ERR(phy)) { 577 if (IS_ERR(phy)) {
576 dev_err(&pdev->dev, "failed to create PHY\n"); 578 dev_err(&pdev->dev, "failed to create PHY\n");
577 return PTR_ERR(phy); 579 ret = PTR_ERR(phy);
580 goto put_child;
578 } 581 }
579 582
580 miphy_dev->phys[port]->phy = phy; 583 miphy_dev->phys[port]->phy = phy;
581 584
582 ret = miphy365x_of_probe(child, miphy_phy); 585 ret = miphy365x_of_probe(child, miphy_phy);
583 if (ret) 586 if (ret)
584 return ret; 587 goto put_child;
585 588
586 phy_set_drvdata(phy, miphy_dev->phys[port]); 589 phy_set_drvdata(phy, miphy_dev->phys[port]);
587 590
@@ -591,12 +594,15 @@ static int miphy365x_probe(struct platform_device *pdev)
591 &miphy_phy->ctrlreg); 594 &miphy_phy->ctrlreg);
592 if (ret) { 595 if (ret) {
593 dev_err(&pdev->dev, "No sysconfig offset found\n"); 596 dev_err(&pdev->dev, "No sysconfig offset found\n");
594 return ret; 597 goto put_child;
595 } 598 }
596 } 599 }
597 600
598 provider = devm_of_phy_provider_register(&pdev->dev, miphy365x_xlate); 601 provider = devm_of_phy_provider_register(&pdev->dev, miphy365x_xlate);
599 return PTR_ERR_OR_ZERO(provider); 602 return PTR_ERR_OR_ZERO(provider);
603put_child:
604 of_node_put(child);
605 return ret;
600} 606}
601 607
602static const struct of_device_id miphy365x_of_match[] = { 608static const struct of_device_id miphy365x_of_match[] = {
diff --git a/drivers/phy/phy-mt65xx-usb3.c b/drivers/phy/phy-mt65xx-usb3.c
index f30b28bd41fe..e427c3b788ff 100644
--- a/drivers/phy/phy-mt65xx-usb3.c
+++ b/drivers/phy/phy-mt65xx-usb3.c
@@ -415,7 +415,7 @@ static int mt65xx_u3phy_probe(struct platform_device *pdev)
415 struct resource *sif_res; 415 struct resource *sif_res;
416 struct mt65xx_u3phy *u3phy; 416 struct mt65xx_u3phy *u3phy;
417 struct resource res; 417 struct resource res;
418 int port; 418 int port, retval;
419 419
420 u3phy = devm_kzalloc(dev, sizeof(*u3phy), GFP_KERNEL); 420 u3phy = devm_kzalloc(dev, sizeof(*u3phy), GFP_KERNEL);
421 if (!u3phy) 421 if (!u3phy)
@@ -447,31 +447,34 @@ static int mt65xx_u3phy_probe(struct platform_device *pdev)
447 for_each_child_of_node(np, child_np) { 447 for_each_child_of_node(np, child_np) {
448 struct mt65xx_phy_instance *instance; 448 struct mt65xx_phy_instance *instance;
449 struct phy *phy; 449 struct phy *phy;
450 int retval;
451 450
452 instance = devm_kzalloc(dev, sizeof(*instance), GFP_KERNEL); 451 instance = devm_kzalloc(dev, sizeof(*instance), GFP_KERNEL);
453 if (!instance) 452 if (!instance) {
454 return -ENOMEM; 453 retval = -ENOMEM;
454 goto put_child;
455 }
455 456
456 u3phy->phys[port] = instance; 457 u3phy->phys[port] = instance;
457 458
458 phy = devm_phy_create(dev, child_np, &mt65xx_u3phy_ops); 459 phy = devm_phy_create(dev, child_np, &mt65xx_u3phy_ops);
459 if (IS_ERR(phy)) { 460 if (IS_ERR(phy)) {
460 dev_err(dev, "failed to create phy\n"); 461 dev_err(dev, "failed to create phy\n");
461 return PTR_ERR(phy); 462 retval = PTR_ERR(phy);
463 goto put_child;
462 } 464 }
463 465
464 retval = of_address_to_resource(child_np, 0, &res); 466 retval = of_address_to_resource(child_np, 0, &res);
465 if (retval) { 467 if (retval) {
466 dev_err(dev, "failed to get address resource(id-%d)\n", 468 dev_err(dev, "failed to get address resource(id-%d)\n",
467 port); 469 port);
468 return retval; 470 goto put_child;
469 } 471 }
470 472
471 instance->port_base = devm_ioremap_resource(&phy->dev, &res); 473 instance->port_base = devm_ioremap_resource(&phy->dev, &res);
472 if (IS_ERR(instance->port_base)) { 474 if (IS_ERR(instance->port_base)) {
473 dev_err(dev, "failed to remap phy regs\n"); 475 dev_err(dev, "failed to remap phy regs\n");
474 return PTR_ERR(instance->port_base); 476 retval = PTR_ERR(instance->port_base);
477 goto put_child;
475 } 478 }
476 479
477 instance->phy = phy; 480 instance->phy = phy;
@@ -483,6 +486,9 @@ static int mt65xx_u3phy_probe(struct platform_device *pdev)
483 provider = devm_of_phy_provider_register(dev, mt65xx_phy_xlate); 486 provider = devm_of_phy_provider_register(dev, mt65xx_phy_xlate);
484 487
485 return PTR_ERR_OR_ZERO(provider); 488 return PTR_ERR_OR_ZERO(provider);
489put_child:
490 of_node_put(child_np);
491 return retval;
486} 492}
487 493
488static const struct of_device_id mt65xx_u3phy_id_table[] = { 494static const struct of_device_id mt65xx_u3phy_id_table[] = {
diff --git a/drivers/phy/phy-rockchip-usb.c b/drivers/phy/phy-rockchip-usb.c
index 91d6f342c565..62c43c435194 100644
--- a/drivers/phy/phy-rockchip-usb.c
+++ b/drivers/phy/phy-rockchip-usb.c
@@ -108,13 +108,16 @@ static int rockchip_usb_phy_probe(struct platform_device *pdev)
108 108
109 for_each_available_child_of_node(dev->of_node, child) { 109 for_each_available_child_of_node(dev->of_node, child) {
110 rk_phy = devm_kzalloc(dev, sizeof(*rk_phy), GFP_KERNEL); 110 rk_phy = devm_kzalloc(dev, sizeof(*rk_phy), GFP_KERNEL);
111 if (!rk_phy) 111 if (!rk_phy) {
112 return -ENOMEM; 112 err = -ENOMEM;
113 goto put_child;
114 }
113 115
114 if (of_property_read_u32(child, "reg", &reg_offset)) { 116 if (of_property_read_u32(child, "reg", &reg_offset)) {
115 dev_err(dev, "missing reg property in node %s\n", 117 dev_err(dev, "missing reg property in node %s\n",
116 child->name); 118 child->name);
117 return -EINVAL; 119 err = -EINVAL;
120 goto put_child;
118 } 121 }
119 122
120 rk_phy->reg_offset = reg_offset; 123 rk_phy->reg_offset = reg_offset;
@@ -127,18 +130,22 @@ static int rockchip_usb_phy_probe(struct platform_device *pdev)
127 rk_phy->phy = devm_phy_create(dev, child, &ops); 130 rk_phy->phy = devm_phy_create(dev, child, &ops);
128 if (IS_ERR(rk_phy->phy)) { 131 if (IS_ERR(rk_phy->phy)) {
129 dev_err(dev, "failed to create PHY\n"); 132 dev_err(dev, "failed to create PHY\n");
130 return PTR_ERR(rk_phy->phy); 133 err = PTR_ERR(rk_phy->phy);
134 goto put_child;
131 } 135 }
132 phy_set_drvdata(rk_phy->phy, rk_phy); 136 phy_set_drvdata(rk_phy->phy, rk_phy);
133 137
 134 /* only power up the usb phy when in use, so disable it at init */ 138 /* only power up the usb phy when in use, so disable it at init */
135 err = rockchip_usb_phy_power(rk_phy, 1); 139 err = rockchip_usb_phy_power(rk_phy, 1);
136 if (err) 140 if (err)
137 return err; 141 goto put_child;
138 } 142 }
139 143
140 phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); 144 phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
141 return PTR_ERR_OR_ZERO(phy_provider); 145 return PTR_ERR_OR_ZERO(phy_provider);
146put_child:
147 of_node_put(child);
148 return err;
142} 149}
143 150
144static const struct of_device_id rockchip_usb_phy_dt_ids[] = { 151static const struct of_device_id rockchip_usb_phy_dt_ids[] = {
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index b422e4ed73f4..312c78b27a32 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -5,8 +5,6 @@
5config PINCTRL 5config PINCTRL
6 bool 6 bool
7 7
8if PINCTRL
9
10menu "Pin controllers" 8menu "Pin controllers"
11 depends on PINCTRL 9 depends on PINCTRL
12 10
@@ -274,5 +272,3 @@ config PINCTRL_TB10X
274 select GPIOLIB 272 select GPIOLIB
275 273
276endmenu 274endmenu
277
278endif
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index a1ea565fcd46..2e6ca69635aa 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -342,12 +342,6 @@ static int bcm2835_gpio_get(struct gpio_chip *chip, unsigned offset)
342 return bcm2835_gpio_get_bit(pc, GPLEV0, offset); 342 return bcm2835_gpio_get_bit(pc, GPLEV0, offset);
343} 343}
344 344
345static int bcm2835_gpio_direction_output(struct gpio_chip *chip,
346 unsigned offset, int value)
347{
348 return pinctrl_gpio_direction_output(chip->base + offset);
349}
350
351static void bcm2835_gpio_set(struct gpio_chip *chip, unsigned offset, int value) 345static void bcm2835_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
352{ 346{
353 struct bcm2835_pinctrl *pc = dev_get_drvdata(chip->dev); 347 struct bcm2835_pinctrl *pc = dev_get_drvdata(chip->dev);
@@ -355,6 +349,13 @@ static void bcm2835_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
355 bcm2835_gpio_set_bit(pc, value ? GPSET0 : GPCLR0, offset); 349 bcm2835_gpio_set_bit(pc, value ? GPSET0 : GPCLR0, offset);
356} 350}
357 351
352static int bcm2835_gpio_direction_output(struct gpio_chip *chip,
353 unsigned offset, int value)
354{
355 bcm2835_gpio_set(chip, offset, value);
356 return pinctrl_gpio_direction_output(chip->base + offset);
357}
358
358static int bcm2835_gpio_to_irq(struct gpio_chip *chip, unsigned offset) 359static int bcm2835_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
359{ 360{
360 struct bcm2835_pinctrl *pc = dev_get_drvdata(chip->dev); 361 struct bcm2835_pinctrl *pc = dev_get_drvdata(chip->dev);
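
Moving bcm2835_gpio_set() ahead of the direction change is a glitch fix: the data latch may still hold a stale level from a previous user, and switching the pin to output before programming the new value would briefly drive that stale level onto the pin. Writing the level first makes the transition clean. The essence, with toy registers:

#include <stdio.h>

static int latch;       /* GPSET0/GPCLR0 state, survives direction flips */
static int driving;

static void gpio_set(int value) { latch = value; }
static void dir_output(void)    { driving = 1; }

static void direction_output(int value)
{
    gpio_set(value);    /* program the level first...            */
    dir_output();       /* ...then drive: stale level never seen */
}

int main(void)
{
    latch = 0;          /* stale low from a previous user */
    direction_output(1);
    printf("driving=%d level=%d\n", driving, latch);
    return 0;
}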
diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
index 88a7fac11bd4..acaf84cadca3 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
@@ -538,8 +538,10 @@ static int imx1_pinctrl_parse_functions(struct device_node *np,
538 func->groups[i] = child->name; 538 func->groups[i] = child->name;
539 grp = &info->groups[grp_index++]; 539 grp = &info->groups[grp_index++];
540 ret = imx1_pinctrl_parse_groups(child, grp, info, i++); 540 ret = imx1_pinctrl_parse_groups(child, grp, info, i++);
541 if (ret == -ENOMEM) 541 if (ret == -ENOMEM) {
542 of_node_put(child);
542 return ret; 543 return ret;
544 }
543 } 545 }
544 546
545 return 0; 547 return 0;
@@ -582,8 +584,10 @@ static int imx1_pinctrl_parse_dt(struct platform_device *pdev,
582 584
583 for_each_child_of_node(np, child) { 585 for_each_child_of_node(np, child) {
584 ret = imx1_pinctrl_parse_functions(child, info, ifunc++); 586 ret = imx1_pinctrl_parse_functions(child, info, ifunc++);
585 if (ret == -ENOMEM) 587 if (ret == -ENOMEM) {
588 of_node_put(child);
586 return -ENOMEM; 589 return -ENOMEM;
590 }
587 } 591 }
588 592
589 return 0; 593 return 0;
diff --git a/drivers/pinctrl/freescale/pinctrl-vf610.c b/drivers/pinctrl/freescale/pinctrl-vf610.c
index 37a037543d29..587d1ff6210e 100644
--- a/drivers/pinctrl/freescale/pinctrl-vf610.c
+++ b/drivers/pinctrl/freescale/pinctrl-vf610.c
@@ -299,7 +299,7 @@ static const struct pinctrl_pin_desc vf610_pinctrl_pads[] = {
299static struct imx_pinctrl_soc_info vf610_pinctrl_info = { 299static struct imx_pinctrl_soc_info vf610_pinctrl_info = {
300 .pins = vf610_pinctrl_pads, 300 .pins = vf610_pinctrl_pads,
301 .npins = ARRAY_SIZE(vf610_pinctrl_pads), 301 .npins = ARRAY_SIZE(vf610_pinctrl_pads),
302 .flags = SHARE_MUX_CONF_REG, 302 .flags = SHARE_MUX_CONF_REG | ZERO_OFFSET_VALID,
303}; 303};
304 304
305static const struct of_device_id vf610_pinctrl_of_match[] = { 305static const struct of_device_id vf610_pinctrl_of_match[] = {
diff --git a/drivers/pinctrl/intel/pinctrl-broxton.c b/drivers/pinctrl/intel/pinctrl-broxton.c
index e42d5d4183f5..5979d38c46b2 100644
--- a/drivers/pinctrl/intel/pinctrl-broxton.c
+++ b/drivers/pinctrl/intel/pinctrl-broxton.c
@@ -28,6 +28,7 @@
28 .padcfglock_offset = BXT_PADCFGLOCK, \ 28 .padcfglock_offset = BXT_PADCFGLOCK, \
29 .hostown_offset = BXT_HOSTSW_OWN, \ 29 .hostown_offset = BXT_HOSTSW_OWN, \
30 .ie_offset = BXT_GPI_IE, \ 30 .ie_offset = BXT_GPI_IE, \
31 .gpp_size = 32, \
31 .pin_base = (s), \ 32 .pin_base = (s), \
32 .npins = ((e) - (s) + 1), \ 33 .npins = ((e) - (s) + 1), \
33 } 34 }
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 392e28d3f48d..26f6b6ffea5b 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -25,9 +25,6 @@
25 25
26#include "pinctrl-intel.h" 26#include "pinctrl-intel.h"
27 27
28/* Maximum number of pads in each group */
29#define NPADS_IN_GPP 24
30
31/* Offset from regs */ 28/* Offset from regs */
32#define PADBAR 0x00c 29#define PADBAR 0x00c
33#define GPI_IS 0x100 30#define GPI_IS 0x100
@@ -37,6 +34,7 @@
37#define PADOWN_BITS 4 34#define PADOWN_BITS 4
38#define PADOWN_SHIFT(p) ((p) % 8 * PADOWN_BITS) 35#define PADOWN_SHIFT(p) ((p) % 8 * PADOWN_BITS)
39#define PADOWN_MASK(p) (0xf << PADOWN_SHIFT(p)) 36#define PADOWN_MASK(p) (0xf << PADOWN_SHIFT(p))
37#define PADOWN_GPP(p) ((p) / 8)
40 38
41/* Offset from pad_regs */ 39/* Offset from pad_regs */
42#define PADCFG0 0x000 40#define PADCFG0 0x000
@@ -142,7 +140,7 @@ static void __iomem *intel_get_padcfg(struct intel_pinctrl *pctrl, unsigned pin,
142static bool intel_pad_owned_by_host(struct intel_pinctrl *pctrl, unsigned pin) 140static bool intel_pad_owned_by_host(struct intel_pinctrl *pctrl, unsigned pin)
143{ 141{
144 const struct intel_community *community; 142 const struct intel_community *community;
145 unsigned padno, gpp, gpp_offset, offset; 143 unsigned padno, gpp, offset, group;
146 void __iomem *padown; 144 void __iomem *padown;
147 145
148 community = intel_get_community(pctrl, pin); 146 community = intel_get_community(pctrl, pin);
@@ -152,9 +150,9 @@ static bool intel_pad_owned_by_host(struct intel_pinctrl *pctrl, unsigned pin)
152 return true; 150 return true;
153 151
154 padno = pin_to_padno(community, pin); 152 padno = pin_to_padno(community, pin);
155 gpp = padno / NPADS_IN_GPP; 153 group = padno / community->gpp_size;
156 gpp_offset = padno % NPADS_IN_GPP; 154 gpp = PADOWN_GPP(padno % community->gpp_size);
157 offset = community->padown_offset + gpp * 16 + (gpp_offset / 8) * 4; 155 offset = community->padown_offset + 0x10 * group + gpp * 4;
158 padown = community->regs + offset; 156 padown = community->regs + offset;
159 157
160 return !(readl(padown) & PADOWN_MASK(padno)); 158 return !(readl(padown) & PADOWN_MASK(padno));
@@ -173,11 +171,11 @@ static bool intel_pad_acpi_mode(struct intel_pinctrl *pctrl, unsigned pin)
173 return false; 171 return false;
174 172
175 padno = pin_to_padno(community, pin); 173 padno = pin_to_padno(community, pin);
176 gpp = padno / NPADS_IN_GPP; 174 gpp = padno / community->gpp_size;
177 offset = community->hostown_offset + gpp * 4; 175 offset = community->hostown_offset + gpp * 4;
178 hostown = community->regs + offset; 176 hostown = community->regs + offset;
179 177
180 return !(readl(hostown) & BIT(padno % NPADS_IN_GPP)); 178 return !(readl(hostown) & BIT(padno % community->gpp_size));
181} 179}
182 180
183static bool intel_pad_locked(struct intel_pinctrl *pctrl, unsigned pin) 181static bool intel_pad_locked(struct intel_pinctrl *pctrl, unsigned pin)
@@ -193,7 +191,7 @@ static bool intel_pad_locked(struct intel_pinctrl *pctrl, unsigned pin)
193 return false; 191 return false;
194 192
195 padno = pin_to_padno(community, pin); 193 padno = pin_to_padno(community, pin);
196 gpp = padno / NPADS_IN_GPP; 194 gpp = padno / community->gpp_size;
197 195
198 /* 196 /*
199 * If PADCFGLOCK and PADCFGLOCKTX bits are both clear for this pad, 197 * If PADCFGLOCK and PADCFGLOCKTX bits are both clear for this pad,
@@ -202,12 +200,12 @@ static bool intel_pad_locked(struct intel_pinctrl *pctrl, unsigned pin)
202 */ 200 */
203 offset = community->padcfglock_offset + gpp * 8; 201 offset = community->padcfglock_offset + gpp * 8;
204 value = readl(community->regs + offset); 202 value = readl(community->regs + offset);
205 if (value & BIT(pin % NPADS_IN_GPP)) 203 if (value & BIT(pin % community->gpp_size))
206 return true; 204 return true;
207 205
208 offset = community->padcfglock_offset + 4 + gpp * 8; 206 offset = community->padcfglock_offset + 4 + gpp * 8;
209 value = readl(community->regs + offset); 207 value = readl(community->regs + offset);
210 if (value & BIT(pin % NPADS_IN_GPP)) 208 if (value & BIT(pin % community->gpp_size))
211 return true; 209 return true;
212 210
213 return false; 211 return false;
@@ -663,8 +661,8 @@ static void intel_gpio_irq_ack(struct irq_data *d)
663 community = intel_get_community(pctrl, pin); 661 community = intel_get_community(pctrl, pin);
664 if (community) { 662 if (community) {
665 unsigned padno = pin_to_padno(community, pin); 663 unsigned padno = pin_to_padno(community, pin);
666 unsigned gpp_offset = padno % NPADS_IN_GPP; 664 unsigned gpp_offset = padno % community->gpp_size;
667 unsigned gpp = padno / NPADS_IN_GPP; 665 unsigned gpp = padno / community->gpp_size;
668 666
669 writel(BIT(gpp_offset), community->regs + GPI_IS + gpp * 4); 667 writel(BIT(gpp_offset), community->regs + GPI_IS + gpp * 4);
670 } 668 }
@@ -685,8 +683,8 @@ static void intel_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
685 community = intel_get_community(pctrl, pin); 683 community = intel_get_community(pctrl, pin);
686 if (community) { 684 if (community) {
687 unsigned padno = pin_to_padno(community, pin); 685 unsigned padno = pin_to_padno(community, pin);
688 unsigned gpp_offset = padno % NPADS_IN_GPP; 686 unsigned gpp_offset = padno % community->gpp_size;
689 unsigned gpp = padno / NPADS_IN_GPP; 687 unsigned gpp = padno / community->gpp_size;
690 void __iomem *reg; 688 void __iomem *reg;
691 u32 value; 689 u32 value;
692 690
@@ -780,8 +778,8 @@ static int intel_gpio_irq_wake(struct irq_data *d, unsigned int on)
780 return -EINVAL; 778 return -EINVAL;
781 779
782 padno = pin_to_padno(community, pin); 780 padno = pin_to_padno(community, pin);
783 gpp = padno / NPADS_IN_GPP; 781 gpp = padno / community->gpp_size;
784 gpp_offset = padno % NPADS_IN_GPP; 782 gpp_offset = padno % community->gpp_size;
785 783
786 /* Clear the existing wake status */ 784 /* Clear the existing wake status */
787 writel(BIT(gpp_offset), community->regs + GPI_GPE_STS + gpp * 4); 785 writel(BIT(gpp_offset), community->regs + GPI_GPE_STS + gpp * 4);
@@ -819,14 +817,14 @@ static irqreturn_t intel_gpio_community_irq_handler(struct intel_pinctrl *pctrl,
819 /* Only interrupts that are enabled */ 817 /* Only interrupts that are enabled */
820 pending &= enabled; 818 pending &= enabled;
821 819
822 for_each_set_bit(gpp_offset, &pending, NPADS_IN_GPP) { 820 for_each_set_bit(gpp_offset, &pending, community->gpp_size) {
823 unsigned padno, irq; 821 unsigned padno, irq;
824 822
825 /* 823 /*
 826 * The last group in community can have fewer pins 824 * The last group in community can have fewer pins
 827 * than NPADS_IN_GPP. 825 * than community->gpp_size.
828 */ 826 */
829 padno = gpp_offset + gpp * NPADS_IN_GPP; 827 padno = gpp_offset + gpp * community->gpp_size;
830 if (padno >= community->npins) 828 if (padno >= community->npins)
831 break; 829 break;
832 830
@@ -1002,7 +1000,8 @@ int intel_pinctrl_probe(struct platform_device *pdev,
1002 1000
1003 community->regs = regs; 1001 community->regs = regs;
1004 community->pad_regs = regs + padbar; 1002 community->pad_regs = regs + padbar;
1005 community->ngpps = DIV_ROUND_UP(community->npins, NPADS_IN_GPP); 1003 community->ngpps = DIV_ROUND_UP(community->npins,
1004 community->gpp_size);
1006 } 1005 }
1007 1006
1008 irq = platform_get_irq(pdev, 0); 1007 irq = platform_get_irq(pdev, 0);
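
With the group size now per-community (24 pads on Sunrisepoint, 32 on Broxton), the pad-ownership lookup splits a pad number three ways: which group it belongs to, which 32-bit PADOWN register inside that group (eight 4-bit entries per register, so PADOWN_GPP(p) = p / 8), and which nibble within that register. Worked arithmetic with an illustrative base offset:

#include <stdio.h>

#define PADOWN_BITS     4
#define PADOWN_SHIFT(p) ((p) % 8 * PADOWN_BITS)
#define PADOWN_MASK(p)  (0xf << PADOWN_SHIFT(p))
#define PADOWN_GPP(p)   ((p) / 8)   /* 8 four-bit entries per register */

int main(void)
{
    unsigned gpp_size = 24;         /* Sunrisepoint; Broxton uses 32 */
    unsigned padown_offset = 0x0b0; /* illustrative base */
    unsigned padno = 53;

    unsigned group = padno / gpp_size;              /* 2 */
    unsigned reg = PADOWN_GPP(padno % gpp_size);    /* (53 % 24) / 8 = 0 */
    unsigned offset = padown_offset + 0x10 * group + reg * 4;

    printf("group=%u reg=%u offset=0x%03x mask=0x%08x\n",
           group, reg, offset, (unsigned)PADOWN_MASK(padno));
    return 0;
}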
diff --git a/drivers/pinctrl/intel/pinctrl-intel.h b/drivers/pinctrl/intel/pinctrl-intel.h
index 4ec8b572a288..b60215793017 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.h
+++ b/drivers/pinctrl/intel/pinctrl-intel.h
@@ -55,6 +55,8 @@ struct intel_function {
55 * ACPI). 55 * ACPI).
56 * @ie_offset: Register offset of GPI_IE from @regs. 56 * @ie_offset: Register offset of GPI_IE from @regs.
57 * @pin_base: Starting pin of pins in this community 57 * @pin_base: Starting pin of pins in this community
58 * @gpp_size: Maximum number of pads in each group, such as PADCFGLOCK,
59 * HOSTSW_OWN, GPI_IS, GPI_IE, etc.
58 * @npins: Number of pins in this community 60 * @npins: Number of pins in this community
59 * @regs: Community specific common registers (reserved for core driver) 61 * @regs: Community specific common registers (reserved for core driver)
60 * @pad_regs: Community specific pad registers (reserved for core driver) 62 * @pad_regs: Community specific pad registers (reserved for core driver)
@@ -68,6 +70,7 @@ struct intel_community {
68 unsigned hostown_offset; 70 unsigned hostown_offset;
69 unsigned ie_offset; 71 unsigned ie_offset;
70 unsigned pin_base; 72 unsigned pin_base;
73 unsigned gpp_size;
71 size_t npins; 74 size_t npins;
72 void __iomem *regs; 75 void __iomem *regs;
73 void __iomem *pad_regs; 76 void __iomem *pad_regs;
diff --git a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
index 1de9ae5010db..c725a5313b4e 100644
--- a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
+++ b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
@@ -30,6 +30,7 @@
30 .padcfglock_offset = SPT_PADCFGLOCK, \ 30 .padcfglock_offset = SPT_PADCFGLOCK, \
31 .hostown_offset = SPT_HOSTSW_OWN, \ 31 .hostown_offset = SPT_HOSTSW_OWN, \
32 .ie_offset = SPT_GPI_IE, \ 32 .ie_offset = SPT_GPI_IE, \
33 .gpp_size = 24, \
33 .pin_base = (s), \ 34 .pin_base = (s), \
34 .npins = ((e) - (s) + 1), \ 35 .npins = ((e) - (s) + 1), \
35 } 36 }
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index f307f1d27d64..5c717275a7fa 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -747,7 +747,7 @@ static int mtk_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
747 reg_addr = mtk_get_port(pctl, offset) + pctl->devdata->dir_offset; 747 reg_addr = mtk_get_port(pctl, offset) + pctl->devdata->dir_offset;
748 bit = BIT(offset & 0xf); 748 bit = BIT(offset & 0xf);
749 regmap_read(pctl->regmap1, reg_addr, &read_val); 749 regmap_read(pctl->regmap1, reg_addr, &read_val);
750 return !!(read_val & bit); 750 return !(read_val & bit);
751} 751}
752 752
753static int mtk_gpio_get(struct gpio_chip *chip, unsigned offset) 753static int mtk_gpio_get(struct gpio_chip *chip, unsigned offset)
@@ -757,12 +757,8 @@ static int mtk_gpio_get(struct gpio_chip *chip, unsigned offset)
757 unsigned int read_val = 0; 757 unsigned int read_val = 0;
758 struct mtk_pinctrl *pctl = dev_get_drvdata(chip->dev); 758 struct mtk_pinctrl *pctl = dev_get_drvdata(chip->dev);
759 759
760 if (mtk_gpio_get_direction(chip, offset)) 760 reg_addr = mtk_get_port(pctl, offset) +
761 reg_addr = mtk_get_port(pctl, offset) + 761 pctl->devdata->din_offset;
762 pctl->devdata->dout_offset;
763 else
764 reg_addr = mtk_get_port(pctl, offset) +
765 pctl->devdata->din_offset;
766 762
767 bit = BIT(offset & 0xf); 763 bit = BIT(offset & 0xf);
768 regmap_read(pctl->regmap1, reg_addr, &read_val); 764 regmap_read(pctl->regmap1, reg_addr, &read_val);
@@ -997,6 +993,7 @@ static struct gpio_chip mtk_gpio_chip = {
997 .owner = THIS_MODULE, 993 .owner = THIS_MODULE,
998 .request = gpiochip_generic_request, 994 .request = gpiochip_generic_request,
999 .free = gpiochip_generic_free, 995 .free = gpiochip_generic_free,
996 .get_direction = mtk_gpio_get_direction,
1000 .direction_input = mtk_gpio_direction_input, 997 .direction_input = mtk_gpio_direction_input,
1001 .direction_output = mtk_gpio_direction_output, 998 .direction_output = mtk_gpio_direction_output,
1002 .get = mtk_gpio_get, 999 .get = mtk_gpio_get,
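
Two related fixes here: the MTK direction register flags outputs with a set bit, while gpiolib's get_direction() convention is 0 for output and 1 for input, so the double negation became a single one; and with .get_direction wired into the gpio_chip, mtk_gpio_get() no longer guesses which data register to read but always samples the input register (din), which reflects the actual pad state in either direction. The polarity conversion on its own:

#include <stdio.h>

/* gpiolib convention: return 0 for an output line, 1 for an input line.
 * In the MTK dir register a set bit means "output", hence one negation. */
static int get_direction(unsigned read_val, unsigned bit)
{
    return !(read_val & bit);
}

int main(void)
{
    printf("dir bit set   -> %d (output)\n", get_direction(0x10, 0x10));
    printf("dir bit clear -> %d (input)\n",  get_direction(0x00, 0x10));
    return 0;
}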
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
index d809c9eaa323..19a3c3bc2f1f 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
@@ -672,7 +672,7 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev)
672 return -ENOMEM; 672 return -ENOMEM;
673 673
674 pctrl->dev = &pdev->dev; 674 pctrl->dev = &pdev->dev;
675 pctrl->npins = (unsigned)of_device_get_match_data(&pdev->dev); 675 pctrl->npins = (unsigned long)of_device_get_match_data(&pdev->dev);
676 676
677 pctrl->regmap = dev_get_regmap(pdev->dev.parent, NULL); 677 pctrl->regmap = dev_get_regmap(pdev->dev.parent, NULL);
678 if (!pctrl->regmap) { 678 if (!pctrl->regmap) {
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
index 8982027de8e8..b868ef1766a0 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
@@ -763,7 +763,7 @@ static int pm8xxx_mpp_probe(struct platform_device *pdev)
763 return -ENOMEM; 763 return -ENOMEM;
764 764
765 pctrl->dev = &pdev->dev; 765 pctrl->dev = &pdev->dev;
766 pctrl->npins = (unsigned)of_device_get_match_data(&pdev->dev); 766 pctrl->npins = (unsigned long)of_device_get_match_data(&pdev->dev);
767 767
768 pctrl->regmap = dev_get_regmap(pdev->dev.parent, NULL); 768 pctrl->regmap = dev_get_regmap(pdev->dev.parent, NULL);
769 if (!pctrl->regmap) { 769 if (!pctrl->regmap) {
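
of_device_get_match_data() returns the match table's void *data, which these drivers use to carry a pin count. Casting a pointer to plain unsigned truncates on 64-bit builds and draws a pointer-to-int-cast warning; unsigned long is pointer-sized on the Linux ABIs, so the round trip is lossless:

#include <stdio.h>

int main(void)
{
    /* An integer stored in a pointer-typed match-data field. */
    const void *match_data = (const void *)44UL;     /* e.g. npins */

    unsigned long npins = (unsigned long)match_data; /* lossless */
    printf("npins=%lu\n", npins);
    return 0;
}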
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7734.c b/drivers/pinctrl/sh-pfc/pfc-sh7734.c
index e7deb51de7dc..9842bb106796 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7734.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7734.c
@@ -31,11 +31,11 @@
31 PORT_GP_12(5, fn, sfx) 31 PORT_GP_12(5, fn, sfx)
32 32
33#undef _GP_DATA 33#undef _GP_DATA
34#define _GP_DATA(bank, pin, name, sfx) \ 34#define _GP_DATA(bank, pin, name, sfx, cfg) \
35 PINMUX_DATA(name##_DATA, name##_FN, name##_IN, name##_OUT) 35 PINMUX_DATA(name##_DATA, name##_FN, name##_IN, name##_OUT)
36 36
37#define _GP_INOUTSEL(bank, pin, name, sfx) name##_IN, name##_OUT 37#define _GP_INOUTSEL(bank, pin, name, sfx, cfg) name##_IN, name##_OUT
38#define _GP_INDT(bank, pin, name, sfx) name##_DATA 38#define _GP_INDT(bank, pin, name, sfx, cfg) name##_DATA
39#define GP_INOUTSEL(bank) PORT_GP_32_REV(bank, _GP_INOUTSEL, unused) 39#define GP_INOUTSEL(bank) PORT_GP_32_REV(bank, _GP_INOUTSEL, unused)
40#define GP_INDT(bank) PORT_GP_32_REV(bank, _GP_INDT, unused) 40#define GP_INDT(bank) PORT_GP_32_REV(bank, _GP_INDT, unused)
41 41
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index cc97f0869791..48747c28a43d 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -1341,10 +1341,13 @@ static int rapl_detect_domains(struct rapl_package *rp, int cpu)
1341 1341
1342 for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) { 1342 for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
1343 /* check if the domain is locked by BIOS */ 1343 /* check if the domain is locked by BIOS */
1344 if (rapl_read_data_raw(rd, FW_LOCK, false, &locked)) { 1344 ret = rapl_read_data_raw(rd, FW_LOCK, false, &locked);
1345 if (ret)
1346 return ret;
1347 if (locked) {
1345 pr_info("RAPL package %d domain %s locked by BIOS\n", 1348 pr_info("RAPL package %d domain %s locked by BIOS\n",
1346 rp->id, rd->name); 1349 rp->id, rd->name);
1347 rd->state |= DOMAIN_STATE_BIOS_LOCKED; 1350 rd->state |= DOMAIN_STATE_BIOS_LOCKED;
1348 } 1351 }
1349 } 1352 }
1350 1353
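
The old condition misreported a failed rapl_read_data_raw() call as "locked by BIOS" and never consulted the value actually read. Splitting it checks the return code first and only then inspects the data the call produced. The shape of the fix:

#include <stdbool.h>
#include <stdio.h>

/* Toy reader: returns nonzero on failure, fills *locked on success. */
static int read_fw_lock(int domain, bool *locked)
{
    if (domain < 0)
        return -5;      /* -EIO: simulate a failed register read */
    *locked = (domain == 1);
    return 0;
}

static int detect_domain(int domain)
{
    bool locked;
    int ret = read_fw_lock(domain, &locked);

    if (ret)
        return ret;     /* propagate the error...        */
    if (locked)         /* ...and only then use the data */
        printf("domain %d locked by BIOS\n", domain);
    return 0;
}

int main(void)
{
    detect_domain(0);                       /* unlocked: silent   */
    detect_domain(1);                       /* locked: reported   */
    printf("ret=%d\n", detect_domain(-1));  /* failure propagated */
    return 0;
}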
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 8b3130f22b42..9e03d158f411 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -1478,6 +1478,8 @@ module_init(remoteproc_init);
1478 1478
1479static void __exit remoteproc_exit(void) 1479static void __exit remoteproc_exit(void)
1480{ 1480{
1481 ida_destroy(&rproc_dev_index);
1482
1481 rproc_exit_debugfs(); 1483 rproc_exit_debugfs();
1482} 1484}
1483module_exit(remoteproc_exit); 1485module_exit(remoteproc_exit);
diff --git a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c
index 9d30809bb407..916af5096f57 100644
--- a/drivers/remoteproc/remoteproc_debugfs.c
+++ b/drivers/remoteproc/remoteproc_debugfs.c
@@ -156,7 +156,7 @@ rproc_recovery_write(struct file *filp, const char __user *user_buf,
156 char buf[10]; 156 char buf[10];
157 int ret; 157 int ret;
158 158
159 if (count > sizeof(buf)) 159 if (count < 1 || count > sizeof(buf))
160 return count; 160 return count;
161 161
162 ret = copy_from_user(buf, user_buf, count); 162 ret = copy_from_user(buf, user_buf, count);
diff --git a/drivers/rtc/rtc-da9063.c b/drivers/rtc/rtc-da9063.c
index 284b587da65c..d6c853bbfa9f 100644
--- a/drivers/rtc/rtc-da9063.c
+++ b/drivers/rtc/rtc-da9063.c
@@ -483,24 +483,23 @@ static int da9063_rtc_probe(struct platform_device *pdev)
483 483
484 platform_set_drvdata(pdev, rtc); 484 platform_set_drvdata(pdev, rtc);
485 485
486 rtc->rtc_dev = devm_rtc_device_register(&pdev->dev, DA9063_DRVNAME_RTC,
487 &da9063_rtc_ops, THIS_MODULE);
488 if (IS_ERR(rtc->rtc_dev))
489 return PTR_ERR(rtc->rtc_dev);
490
491 da9063_data_to_tm(data, &rtc->alarm_time, rtc);
492 rtc->rtc_sync = false;
493
486 irq_alarm = platform_get_irq_byname(pdev, "ALARM"); 494 irq_alarm = platform_get_irq_byname(pdev, "ALARM");
487 ret = devm_request_threaded_irq(&pdev->dev, irq_alarm, NULL, 495 ret = devm_request_threaded_irq(&pdev->dev, irq_alarm, NULL,
488 da9063_alarm_event, 496 da9063_alarm_event,
489 IRQF_TRIGGER_LOW | IRQF_ONESHOT, 497 IRQF_TRIGGER_LOW | IRQF_ONESHOT,
490 "ALARM", rtc); 498 "ALARM", rtc);
491 if (ret) { 499 if (ret)
492 dev_err(&pdev->dev, "Failed to request ALARM IRQ %d: %d\n", 500 dev_err(&pdev->dev, "Failed to request ALARM IRQ %d: %d\n",
493 irq_alarm, ret); 501 irq_alarm, ret);
494 return ret;
495 }
496
497 rtc->rtc_dev = devm_rtc_device_register(&pdev->dev, DA9063_DRVNAME_RTC,
498 &da9063_rtc_ops, THIS_MODULE);
499 if (IS_ERR(rtc->rtc_dev))
500 return PTR_ERR(rtc->rtc_dev);
501 502
502 da9063_data_to_tm(data, &rtc->alarm_time, rtc);
503 rtc->rtc_sync = false;
504 return ret; 503 return ret;
505} 504}
506 505
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 188006c55ce0..aa705bb4748c 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -15,9 +15,6 @@
15#include <linux/i2c.h> 15#include <linux/i2c.h>
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/of_device.h>
19#include <linux/of_irq.h>
20#include <linux/pm_wakeirq.h>
21#include <linux/rtc/ds1307.h> 18#include <linux/rtc/ds1307.h>
22#include <linux/rtc.h> 19#include <linux/rtc.h>
23#include <linux/slab.h> 20#include <linux/slab.h>
@@ -117,7 +114,6 @@ struct ds1307 {
117#define HAS_ALARM 1 /* bit 1 == irq claimed */ 114#define HAS_ALARM 1 /* bit 1 == irq claimed */
118 struct i2c_client *client; 115 struct i2c_client *client;
119 struct rtc_device *rtc; 116 struct rtc_device *rtc;
120 int wakeirq;
121 s32 (*read_block_data)(const struct i2c_client *client, u8 command, 117 s32 (*read_block_data)(const struct i2c_client *client, u8 command,
122 u8 length, u8 *values); 118 u8 length, u8 *values);
123 s32 (*write_block_data)(const struct i2c_client *client, u8 command, 119 s32 (*write_block_data)(const struct i2c_client *client, u8 command,
@@ -1138,7 +1134,10 @@ read_rtc:
1138 bin2bcd(tmp)); 1134 bin2bcd(tmp));
1139 } 1135 }
1140 1136
1141 device_set_wakeup_capable(&client->dev, want_irq); 1137 if (want_irq) {
1138 device_set_wakeup_capable(&client->dev, true);
1139 set_bit(HAS_ALARM, &ds1307->flags);
1140 }
1142 ds1307->rtc = devm_rtc_device_register(&client->dev, client->name, 1141 ds1307->rtc = devm_rtc_device_register(&client->dev, client->name,
1143 rtc_ops, THIS_MODULE); 1142 rtc_ops, THIS_MODULE);
1144 if (IS_ERR(ds1307->rtc)) { 1143 if (IS_ERR(ds1307->rtc)) {
@@ -1146,43 +1145,19 @@ read_rtc:
1146 } 1145 }
1147 1146
1148 if (want_irq) { 1147 if (want_irq) {
1149 struct device_node *node = client->dev.of_node;
1150
1151 err = devm_request_threaded_irq(&client->dev, 1148 err = devm_request_threaded_irq(&client->dev,
1152 client->irq, NULL, irq_handler, 1149 client->irq, NULL, irq_handler,
1153 IRQF_SHARED | IRQF_ONESHOT, 1150 IRQF_SHARED | IRQF_ONESHOT,
1154 ds1307->rtc->name, client); 1151 ds1307->rtc->name, client);
1155 if (err) { 1152 if (err) {
1156 client->irq = 0; 1153 client->irq = 0;
1154 device_set_wakeup_capable(&client->dev, false);
1155 clear_bit(HAS_ALARM, &ds1307->flags);
1157 dev_err(&client->dev, "unable to request IRQ!\n"); 1156 dev_err(&client->dev, "unable to request IRQ!\n");
1158 goto no_irq; 1157 } else
1159 } 1158 dev_dbg(&client->dev, "got IRQ %d\n", client->irq);
1160
1161 set_bit(HAS_ALARM, &ds1307->flags);
1162 dev_dbg(&client->dev, "got IRQ %d\n", client->irq);
1163
1164 /* Currently supported by OF code only! */
1165 if (!node)
1166 goto no_irq;
1167
1168 err = of_irq_get(node, 1);
1169 if (err <= 0) {
1170 if (err == -EPROBE_DEFER)
1171 goto exit;
1172 goto no_irq;
1173 }
1174 ds1307->wakeirq = err;
1175
1176 err = dev_pm_set_dedicated_wake_irq(&client->dev,
1177 ds1307->wakeirq);
1178 if (err) {
1179 dev_err(&client->dev, "unable to setup wakeIRQ %d!\n",
1180 err);
1181 goto exit;
1182 }
1183 } 1159 }
1184 1160
1185no_irq:
1186 if (chip->nvram_size) { 1161 if (chip->nvram_size) {
1187 1162
1188 ds1307->nvram = devm_kzalloc(&client->dev, 1163 ds1307->nvram = devm_kzalloc(&client->dev,
@@ -1226,9 +1201,6 @@ static int ds1307_remove(struct i2c_client *client)
1226{ 1201{
1227 struct ds1307 *ds1307 = i2c_get_clientdata(client); 1202 struct ds1307 *ds1307 = i2c_get_clientdata(client);
1228 1203
1229 if (ds1307->wakeirq)
1230 dev_pm_clear_wake_irq(&client->dev);
1231
1232 if (test_and_clear_bit(HAS_NVRAM, &ds1307->flags)) 1204 if (test_and_clear_bit(HAS_NVRAM, &ds1307->flags))
1233 sysfs_remove_bin_file(&client->dev.kobj, ds1307->nvram); 1205 sysfs_remove_bin_file(&client->dev.kobj, ds1307->nvram);
1234 1206
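
The dedicated wake-IRQ plumbing is dropped here, and the remaining IRQ setup gains a rollback: wakeup capability and the HAS_ALARM bit are claimed before the RTC device is registered (so the alarm ops are advertised from the start), then both are revoked if the interrupt cannot actually be requested. The claim-then-rollback shape in miniature:

#include <stdbool.h>
#include <stdio.h>

static bool wakeup_capable, has_alarm;

static int request_irq_stub(bool ok) { return ok ? 0 : -22; }

static void probe(bool irq_ok)
{
    wakeup_capable = true;      /* claimed before registration */
    has_alarm = true;

    if (request_irq_stub(irq_ok)) {
        wakeup_capable = false; /* revoke both on failure */
        has_alarm = false;
    }
}

int main(void)
{
    probe(false);
    printf("after failed IRQ:  wakeup=%d alarm=%d\n", wakeup_capable, has_alarm);
    probe(true);
    printf("after working IRQ: wakeup=%d alarm=%d\n", wakeup_capable, has_alarm);
    return 0;
}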
diff --git a/drivers/rtc/rtc-rk808.c b/drivers/rtc/rtc-rk808.c
index 91ca0bc1b484..35c9aada07c8 100644
--- a/drivers/rtc/rtc-rk808.c
+++ b/drivers/rtc/rtc-rk808.c
@@ -56,6 +56,42 @@ struct rk808_rtc {
56 int irq; 56 int irq;
57}; 57};
58 58
59/*
60 * The Rockchip calendar used by the RK808 counts November with 31 days. We use
61 * these translation functions to convert its dates to/from the Gregorian
62 * calendar used by the rest of the world. We arbitrarily define Jan 1st, 2016
63 * as the day when both calendars were in sync, and treat all other dates
64 * relative to that.
65 * NOTE: Other system software (e.g. firmware) that reads the same hardware must
66 * implement this exact same conversion algorithm, with the same anchor date.
67 */
68static time64_t nov2dec_transitions(struct rtc_time *tm)
69{
70 return (tm->tm_year + 1900) - 2016 + (tm->tm_mon + 1 > 11 ? 1 : 0);
71}
72
73static void rockchip_to_gregorian(struct rtc_time *tm)
74{
75 /* If it's Nov 31st, rtc_tm_to_time64() will count that like Dec 1st */
76 time64_t time = rtc_tm_to_time64(tm);
77 rtc_time64_to_tm(time + nov2dec_transitions(tm) * 86400, tm);
78}
79
80static void gregorian_to_rockchip(struct rtc_time *tm)
81{
82 time64_t extra_days = nov2dec_transitions(tm);
83 time64_t time = rtc_tm_to_time64(tm);
84 rtc_time64_to_tm(time - extra_days * 86400, tm);
85
86 /* Compensate if we went back over Nov 31st (will work up to 2381) */
87 if (nov2dec_transitions(tm) < extra_days) {
88 if (tm->tm_mon + 1 == 11)
89 tm->tm_mday++; /* This may result in 31! */
90 else
91 rtc_time64_to_tm(time - (extra_days - 1) * 86400, tm);
92 }
93}
94
59/* Read current time and date in RTC */ 95/* Read current time and date in RTC */
60static int rk808_rtc_readtime(struct device *dev, struct rtc_time *tm) 96static int rk808_rtc_readtime(struct device *dev, struct rtc_time *tm)
61{ 97{
@@ -101,9 +137,10 @@ static int rk808_rtc_readtime(struct device *dev, struct rtc_time *tm)
101 tm->tm_mon = (bcd2bin(rtc_data[4] & MONTHS_REG_MSK)) - 1; 137 tm->tm_mon = (bcd2bin(rtc_data[4] & MONTHS_REG_MSK)) - 1;
102 tm->tm_year = (bcd2bin(rtc_data[5] & YEARS_REG_MSK)) + 100; 138 tm->tm_year = (bcd2bin(rtc_data[5] & YEARS_REG_MSK)) + 100;
103 tm->tm_wday = bcd2bin(rtc_data[6] & WEEKS_REG_MSK); 139 tm->tm_wday = bcd2bin(rtc_data[6] & WEEKS_REG_MSK);
140 rockchip_to_gregorian(tm);
104 dev_dbg(dev, "RTC date/time %4d-%02d-%02d(%d) %02d:%02d:%02d\n", 141 dev_dbg(dev, "RTC date/time %4d-%02d-%02d(%d) %02d:%02d:%02d\n",
105 1900 + tm->tm_year, tm->tm_mon + 1, tm->tm_mday, 142 1900 + tm->tm_year, tm->tm_mon + 1, tm->tm_mday,
106 tm->tm_wday, tm->tm_hour , tm->tm_min, tm->tm_sec); 143 tm->tm_wday, tm->tm_hour, tm->tm_min, tm->tm_sec);
107 144
108 return ret; 145 return ret;
109} 146}
@@ -116,6 +153,10 @@ static int rk808_rtc_set_time(struct device *dev, struct rtc_time *tm)
116 u8 rtc_data[NUM_TIME_REGS]; 153 u8 rtc_data[NUM_TIME_REGS];
117 int ret; 154 int ret;
118 155
156 dev_dbg(dev, "set RTC date/time %4d-%02d-%02d(%d) %02d:%02d:%02d\n",
157 1900 + tm->tm_year, tm->tm_mon + 1, tm->tm_mday,
158 tm->tm_wday, tm->tm_hour, tm->tm_min, tm->tm_sec);
159 gregorian_to_rockchip(tm);
119 rtc_data[0] = bin2bcd(tm->tm_sec); 160 rtc_data[0] = bin2bcd(tm->tm_sec);
120 rtc_data[1] = bin2bcd(tm->tm_min); 161 rtc_data[1] = bin2bcd(tm->tm_min);
121 rtc_data[2] = bin2bcd(tm->tm_hour); 162 rtc_data[2] = bin2bcd(tm->tm_hour);
@@ -123,9 +164,6 @@ static int rk808_rtc_set_time(struct device *dev, struct rtc_time *tm)
123 rtc_data[4] = bin2bcd(tm->tm_mon + 1); 164 rtc_data[4] = bin2bcd(tm->tm_mon + 1);
124 rtc_data[5] = bin2bcd(tm->tm_year - 100); 165 rtc_data[5] = bin2bcd(tm->tm_year - 100);
125 rtc_data[6] = bin2bcd(tm->tm_wday); 166 rtc_data[6] = bin2bcd(tm->tm_wday);
126 dev_dbg(dev, "set RTC date/time %4d-%02d-%02d(%d) %02d:%02d:%02d\n",
127 1900 + tm->tm_year, tm->tm_mon + 1, tm->tm_mday,
128 tm->tm_wday, tm->tm_hour , tm->tm_min, tm->tm_sec);
129 167
130 /* Stop RTC while updating the RTC registers */ 168 /* Stop RTC while updating the RTC registers */
131 ret = regmap_update_bits(rk808->regmap, RK808_RTC_CTRL_REG, 169 ret = regmap_update_bits(rk808->regmap, RK808_RTC_CTRL_REG,
@@ -170,6 +208,7 @@ static int rk808_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
170 alrm->time.tm_mday = bcd2bin(alrm_data[3] & DAYS_REG_MSK); 208 alrm->time.tm_mday = bcd2bin(alrm_data[3] & DAYS_REG_MSK);
171 alrm->time.tm_mon = (bcd2bin(alrm_data[4] & MONTHS_REG_MSK)) - 1; 209 alrm->time.tm_mon = (bcd2bin(alrm_data[4] & MONTHS_REG_MSK)) - 1;
172 alrm->time.tm_year = (bcd2bin(alrm_data[5] & YEARS_REG_MSK)) + 100; 210 alrm->time.tm_year = (bcd2bin(alrm_data[5] & YEARS_REG_MSK)) + 100;
211 rockchip_to_gregorian(&alrm->time);
173 212
174 ret = regmap_read(rk808->regmap, RK808_RTC_INT_REG, &int_reg); 213 ret = regmap_read(rk808->regmap, RK808_RTC_INT_REG, &int_reg);
175 if (ret) { 214 if (ret) {
@@ -227,6 +266,7 @@ static int rk808_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
227 alrm->time.tm_mday, alrm->time.tm_wday, alrm->time.tm_hour, 266 alrm->time.tm_mday, alrm->time.tm_wday, alrm->time.tm_hour,
228 alrm->time.tm_min, alrm->time.tm_sec); 267 alrm->time.tm_min, alrm->time.tm_sec);
229 268
269 gregorian_to_rockchip(&alrm->time);
230 alrm_data[0] = bin2bcd(alrm->time.tm_sec); 270 alrm_data[0] = bin2bcd(alrm->time.tm_sec);
231 alrm_data[1] = bin2bcd(alrm->time.tm_min); 271 alrm_data[1] = bin2bcd(alrm->time.tm_min);
232 alrm_data[2] = bin2bcd(alrm->time.tm_hour); 272 alrm_data[2] = bin2bcd(alrm->time.tm_hour);
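
A note on the two converters above: nov2dec_transitions(), defined earlier in the patch but outside this excerpt, counts how many of the PMIC's bogus November 31sts lie between the hardware's base date and the given time, and the pair above adds or subtracts that many days. Below is a rough userspace model of the same round trip; the one-transition-per-year count and the 2016 base year are illustrative assumptions, not the driver's exact helper.

/* Hypothetical model of the Rockchip calendar quirk: the hardware gives
 * every November 31 days, so each elapsed November adds one day of skew
 * against the Gregorian calendar. timegm() is a common BSD/glibc
 * extension. */
#include <stdio.h>
#include <time.h>

static int nov2dec_transitions(const struct tm *tm)
{
	/* assumed: one bogus "Nov 31st" per year since a 2016 base date */
	return (tm->tm_year + 1900) - 2016 + (tm->tm_mon + 1 > 11 ? 1 : 0);
}

static void rockchip_to_gregorian(struct tm *tm)
{
	/* timegm() normalizes a hardware "Nov 31st" to Dec 1st, exactly
	 * the behavior the rtc_tm_to_time64() comment above describes */
	time_t t = timegm(tm) + nov2dec_transitions(tm) * 86400;
	gmtime_r(&t, tm);
}

int main(void)
{
	/* hardware reads "2016-11-31"; prints the Gregorian 2016-12-01 */
	struct tm tm = { .tm_year = 116, .tm_mon = 10, .tm_mday = 31 };

	rockchip_to_gregorian(&tm);
	printf("%04d-%02d-%02d\n", tm.tm_year + 1900, tm.tm_mon + 1,
	       tm.tm_mday);
	return 0;
}
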
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 61f768518a34..24ec282e15d8 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -599,8 +599,10 @@ static enum ap_wait ap_sm_read(struct ap_device *ap_dev)
599 status = ap_sm_recv(ap_dev); 599 status = ap_sm_recv(ap_dev);
600 switch (status.response_code) { 600 switch (status.response_code) {
601 case AP_RESPONSE_NORMAL: 601 case AP_RESPONSE_NORMAL:
602 if (ap_dev->queue_count > 0) 602 if (ap_dev->queue_count > 0) {
603 ap_dev->state = AP_STATE_WORKING;
603 return AP_WAIT_AGAIN; 604 return AP_WAIT_AGAIN;
605 }
604 ap_dev->state = AP_STATE_IDLE; 606 ap_dev->state = AP_STATE_IDLE;
605 return AP_WAIT_NONE; 607 return AP_WAIT_NONE;
606 case AP_RESPONSE_NO_PENDING_REPLY: 608 case AP_RESPONSE_NO_PENDING_REPLY:
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index b2a1a81e6fc8..1b831598df7c 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -984,6 +984,36 @@ static struct virtqueue *virtio_ccw_vq_by_ind(struct virtio_ccw_device *vcdev,
984 return vq; 984 return vq;
985} 985}
986 986
987static void virtio_ccw_check_activity(struct virtio_ccw_device *vcdev,
988 __u32 activity)
989{
990 if (vcdev->curr_io & activity) {
991 switch (activity) {
992 case VIRTIO_CCW_DOING_READ_FEAT:
993 case VIRTIO_CCW_DOING_WRITE_FEAT:
994 case VIRTIO_CCW_DOING_READ_CONFIG:
995 case VIRTIO_CCW_DOING_WRITE_CONFIG:
996 case VIRTIO_CCW_DOING_WRITE_STATUS:
997 case VIRTIO_CCW_DOING_SET_VQ:
998 case VIRTIO_CCW_DOING_SET_IND:
999 case VIRTIO_CCW_DOING_SET_CONF_IND:
1000 case VIRTIO_CCW_DOING_RESET:
1001 case VIRTIO_CCW_DOING_READ_VQ_CONF:
1002 case VIRTIO_CCW_DOING_SET_IND_ADAPTER:
1003 case VIRTIO_CCW_DOING_SET_VIRTIO_REV:
1004 vcdev->curr_io &= ~activity;
1005 wake_up(&vcdev->wait_q);
1006 break;
1007 default:
1008 /* don't know what to do... */
1009 dev_warn(&vcdev->cdev->dev,
1010 "Suspicious activity '%08x'\n", activity);
1011 WARN_ON(1);
1012 break;
1013 }
1014 }
1015}
1016
987static void virtio_ccw_int_handler(struct ccw_device *cdev, 1017static void virtio_ccw_int_handler(struct ccw_device *cdev,
988 unsigned long intparm, 1018 unsigned long intparm,
989 struct irb *irb) 1019 struct irb *irb)
@@ -995,6 +1025,12 @@ static void virtio_ccw_int_handler(struct ccw_device *cdev,
995 1025
996 if (!vcdev) 1026 if (!vcdev)
997 return; 1027 return;
1028 if (IS_ERR(irb)) {
1029 vcdev->err = PTR_ERR(irb);
1030 virtio_ccw_check_activity(vcdev, activity);
1031 /* Don't poke around indicators, something's wrong. */
1032 return;
1033 }
998 /* Check if it's a notification from the host. */ 1034 /* Check if it's a notification from the host. */
999 if ((intparm == 0) && 1035 if ((intparm == 0) &&
1000 (scsw_stctl(&irb->scsw) == 1036 (scsw_stctl(&irb->scsw) ==
@@ -1010,31 +1046,7 @@ static void virtio_ccw_int_handler(struct ccw_device *cdev,
1010 /* Map everything else to -EIO. */ 1046 /* Map everything else to -EIO. */
1011 vcdev->err = -EIO; 1047 vcdev->err = -EIO;
1012 } 1048 }
1013 if (vcdev->curr_io & activity) { 1049 virtio_ccw_check_activity(vcdev, activity);
1014 switch (activity) {
1015 case VIRTIO_CCW_DOING_READ_FEAT:
1016 case VIRTIO_CCW_DOING_WRITE_FEAT:
1017 case VIRTIO_CCW_DOING_READ_CONFIG:
1018 case VIRTIO_CCW_DOING_WRITE_CONFIG:
1019 case VIRTIO_CCW_DOING_WRITE_STATUS:
1020 case VIRTIO_CCW_DOING_SET_VQ:
1021 case VIRTIO_CCW_DOING_SET_IND:
1022 case VIRTIO_CCW_DOING_SET_CONF_IND:
1023 case VIRTIO_CCW_DOING_RESET:
1024 case VIRTIO_CCW_DOING_READ_VQ_CONF:
1025 case VIRTIO_CCW_DOING_SET_IND_ADAPTER:
1026 case VIRTIO_CCW_DOING_SET_VIRTIO_REV:
1027 vcdev->curr_io &= ~activity;
1028 wake_up(&vcdev->wait_q);
1029 break;
1030 default:
1031 /* don't know what to do... */
1032 dev_warn(&cdev->dev, "Suspicious activity '%08x'\n",
1033 activity);
1034 WARN_ON(1);
1035 break;
1036 }
1037 }
1038 for_each_set_bit(i, &vcdev->indicators, 1050 for_each_set_bit(i, &vcdev->indicators,
1039 sizeof(vcdev->indicators) * BITS_PER_BYTE) { 1051 sizeof(vcdev->indicators) * BITS_PER_BYTE) {
1040 /* The bit clear must happen before the vring kick. */ 1052 /* The bit clear must happen before the vring kick. */
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 5f692ae40749..64eed87d34a8 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -364,6 +364,7 @@ config SCSI_HPSA
364 tristate "HP Smart Array SCSI driver" 364 tristate "HP Smart Array SCSI driver"
365 depends on PCI && SCSI 365 depends on PCI && SCSI
366 select CHECK_SIGNATURE 366 select CHECK_SIGNATURE
367 select SCSI_SAS_ATTRS
367 help 368 help
368 This driver supports HP Smart Array Controllers (circa 2009). 369 This driver supports HP Smart Array Controllers (circa 2009).
369 It is a SCSI alternative to the cciss driver, which is a block 370 It is a SCSI alternative to the cciss driver, which is a block
@@ -499,6 +500,7 @@ config SCSI_ADVANSYS
499 tristate "AdvanSys SCSI support" 500 tristate "AdvanSys SCSI support"
500 depends on SCSI 501 depends on SCSI
501 depends on ISA || EISA || PCI 502 depends on ISA || EISA || PCI
503 depends on ISA_DMA_API || !ISA
502 help 504 help
503 This is a driver for all SCSI host adapters manufactured by 505 This is a driver for all SCSI host adapters manufactured by
504 AdvanSys. It is documented in the kernel source in 506 AdvanSys. It is documented in the kernel source in
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 519f9a4b3dad..febbd83e2ecd 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -7803,7 +7803,7 @@ adv_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
7803 return ASC_BUSY; 7803 return ASC_BUSY;
7804 } 7804 }
7805 scsiqp->sense_addr = cpu_to_le32(sense_addr); 7805 scsiqp->sense_addr = cpu_to_le32(sense_addr);
7806 scsiqp->sense_len = cpu_to_le32(SCSI_SENSE_BUFFERSIZE); 7806 scsiqp->sense_len = SCSI_SENSE_BUFFERSIZE;
7807 7807
7808 /* Build ADV_SCSI_REQ_Q */ 7808 /* Build ADV_SCSI_REQ_Q */
7809 7809
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 323982fd00c3..82ac1cd818ac 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -333,6 +333,17 @@ static void scsi_host_dev_release(struct device *dev)
333 kfree(queuedata); 333 kfree(queuedata);
334 } 334 }
335 335
336 if (shost->shost_state == SHOST_CREATED) {
337 /*
338 * Free the shost_dev device name here if scsi_host_alloc()
339 * and scsi_host_put() have been called but neither
340 * scsi_host_add() nor scsi_host_remove() has been called.
341 * This prevents the memory allocated for the shost_dev
342 * name from being leaked.
343 */
344 kfree(dev_name(&shost->shost_dev));
345 }
346
336 scsi_destroy_command_freelist(shost); 347 scsi_destroy_command_freelist(shost);
337 if (shost_use_blk_mq(shost)) { 348 if (shost_use_blk_mq(shost)) {
338 if (shost->tag_set.tags) 349 if (shost->tag_set.tags)
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 6a8f95808ee0..a3860367b568 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -8671,7 +8671,7 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h)
8671 if ((rc != 0) || (c->err_info->CommandStatus != 0)) 8671 if ((rc != 0) || (c->err_info->CommandStatus != 0))
8672 goto errout; 8672 goto errout;
8673 8673
8674 if (*options && HPSA_DIAG_OPTS_DISABLE_RLD_CACHING) 8674 if (*options & HPSA_DIAG_OPTS_DISABLE_RLD_CACHING)
8675 goto out; 8675 goto out;
8676 8676
8677errout: 8677errout:
diff --git a/drivers/scsi/mpt3sas/Kconfig b/drivers/scsi/mpt3sas/Kconfig
index 29061467cc17..b736dbc80485 100644
--- a/drivers/scsi/mpt3sas/Kconfig
+++ b/drivers/scsi/mpt3sas/Kconfig
@@ -71,3 +71,12 @@ config SCSI_MPT3SAS_MAX_SGE
71 MAX_PHYS_SEGMENTS in most kernels. However, in SuSE kernels this 71 MAX_PHYS_SEGMENTS in most kernels. However, in SuSE kernels this
72 can be 256. However, it may be decreased down to 16. Decreasing this 72 can be 256. However, it may be decreased down to 16. Decreasing this
73 parameter will reduce memory requirements per controller instance. 73 parameter will reduce memory requirements per controller instance.
74
75config SCSI_MPT2SAS
76 tristate "Legacy MPT2SAS config option"
77 default n
78 select SCSI_MPT3SAS
79 depends on PCI && SCSI
80 ---help---
81 Dummy config option for backwards compatibility: configure the MPT3SAS
82 driver instead.
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index d95206b7e116..9ab77b06434d 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -3905,8 +3905,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
3905 * We do not expose raid functionality to upper layer for warpdrive. 3905 * We do not expose raid functionality to upper layer for warpdrive.
3906 */ 3906 */
3907 if (!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev) 3907 if (!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev)
3908 && (sas_device_priv_data->flags & MPT_DEVICE_TLR_ON) && 3908 && sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32)
3909 scmd->cmd_len != 32)
3910 mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON; 3909 mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
3911 3910
3912 smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd); 3911 smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 90fdf0e859e3..675e7fab0796 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -758,7 +758,7 @@ mvs_store_interrupt_coalescing(struct device *cdev,
758 struct device_attribute *attr, 758 struct device_attribute *attr,
759 const char *buffer, size_t size) 759 const char *buffer, size_t size)
760{ 760{
761 int val = 0; 761 unsigned int val = 0;
762 struct mvs_info *mvi = NULL; 762 struct mvs_info *mvi = NULL;
763 struct Scsi_Host *shost = class_to_shost(cdev); 763 struct Scsi_Host *shost = class_to_shost(cdev);
764 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); 764 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
@@ -766,7 +766,7 @@ mvs_store_interrupt_coalescing(struct device *cdev,
766 if (buffer == NULL) 766 if (buffer == NULL)
767 return size; 767 return size;
768 768
769 if (sscanf(buffer, "%d", &val) != 1) 769 if (sscanf(buffer, "%u", &val) != 1)
770 return -EINVAL; 770 return -EINVAL;
771 771
772 if (val >= 0x10000) { 772 if (val >= 0x10000) {
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index eb0cc5475c45..b6b4cfdd7620 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -433,7 +433,7 @@ qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong off_in,
433 if (off_in < QLA82XX_PCI_CRBSPACE) 433 if (off_in < QLA82XX_PCI_CRBSPACE)
434 return -1; 434 return -1;
435 435
436 *off_out = (void __iomem *)(off_in - QLA82XX_PCI_CRBSPACE); 436 off_in -= QLA82XX_PCI_CRBSPACE;
437 437
438 /* Try direct map */ 438 /* Try direct map */
439 m = &crb_128M_2M_map[CRB_BLK(off_in)].sub_block[CRB_SUBBLK(off_in)]; 439 m = &crb_128M_2M_map[CRB_BLK(off_in)].sub_block[CRB_SUBBLK(off_in)];
@@ -443,6 +443,7 @@ qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong off_in,
443 return 0; 443 return 0;
444 } 444 }
445 /* Not in direct map, use crb window */ 445 /* Not in direct map, use crb window */
446 *off_out = (void __iomem *)off_in;
446 return 1; 447 return 1;
447} 448}
448 449
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 3ba2e9564b9a..81af294f15a7 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -902,7 +902,7 @@ static ssize_t tcm_qla2xxx_tpg_fabric_prot_type_show(struct config_item *item,
902 return sprintf(page, "%d\n", tpg->tpg_attrib.fabric_prot_type); 902 return sprintf(page, "%d\n", tpg->tpg_attrib.fabric_prot_type);
903} 903}
904 904
905CONFIGFS_ATTR_WO(tcm_qla2xxx_tpg_, enable); 905CONFIGFS_ATTR(tcm_qla2xxx_tpg_, enable);
906CONFIGFS_ATTR_RO(tcm_qla2xxx_tpg_, dynamic_sessions); 906CONFIGFS_ATTR_RO(tcm_qla2xxx_tpg_, dynamic_sessions);
907CONFIGFS_ATTR(tcm_qla2xxx_tpg_, fabric_prot_type); 907CONFIGFS_ATTR(tcm_qla2xxx_tpg_, fabric_prot_type);
908 908
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index dfcc45bb03b1..d09d60293c27 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -465,8 +465,9 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
465 0} }, 465 0} },
466 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */ 466 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
467 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, 467 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
468 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* VERIFY */ 468 {0, 0x2f, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, NULL, NULL, /* VERIFY(10) */
469 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, 469 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7,
470 0, 0, 0, 0, 0, 0} },
470 {1, 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_DIRECT_IO, resp_read_dt0, 471 {1, 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_DIRECT_IO, resp_read_dt0,
471 vl_iarr, {32, 0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0x9, 0xfe, 0, 472 vl_iarr, {32, 0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0x9, 0xfe, 0,
472 0xff, 0xff, 0xff, 0xff} },/* VARIABLE LENGTH, READ(32) */ 473 0xff, 0xff, 0xff, 0xff} },/* VARIABLE LENGTH, READ(32) */
@@ -477,8 +478,8 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
477 {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 478 {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
478 0} }, 479 0} },
479/* 20 */ 480/* 20 */
480 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ALLOW REMOVAL */ 481 {0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
481 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, 482 {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
482 {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */ 483 {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
483 {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, 484 {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
484 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */ 485 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index e4b799837948..459abe1dcc87 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -219,13 +219,13 @@ static int sdev_runtime_suspend(struct device *dev)
219 struct scsi_device *sdev = to_scsi_device(dev); 219 struct scsi_device *sdev = to_scsi_device(dev);
220 int err = 0; 220 int err = 0;
221 221
222 if (pm && pm->runtime_suspend) { 222 err = blk_pre_runtime_suspend(sdev->request_queue);
223 err = blk_pre_runtime_suspend(sdev->request_queue); 223 if (err)
224 if (err) 224 return err;
225 return err; 225 if (pm && pm->runtime_suspend)
226 err = pm->runtime_suspend(dev); 226 err = pm->runtime_suspend(dev);
227 blk_post_runtime_suspend(sdev->request_queue, err); 227 blk_post_runtime_suspend(sdev->request_queue, err);
228 } 228
229 return err; 229 return err;
230} 230}
231 231
@@ -248,11 +248,11 @@ static int sdev_runtime_resume(struct device *dev)
248 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 248 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
249 int err = 0; 249 int err = 0;
250 250
251 if (pm && pm->runtime_resume) { 251 blk_pre_runtime_resume(sdev->request_queue);
252 blk_pre_runtime_resume(sdev->request_queue); 252 if (pm && pm->runtime_resume)
253 err = pm->runtime_resume(dev); 253 err = pm->runtime_resume(dev);
254 blk_post_runtime_resume(sdev->request_queue, err); 254 blk_post_runtime_resume(sdev->request_queue, err);
255 } 255
256 return err; 256 return err;
257} 257}
258 258
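
The sdev_runtime_suspend()/sdev_runtime_resume() reordering above preserves one invariant: the block layer's pre hook and its post hook always run as a pair, with the driver's own callback optional in between, so the request queue's runtime-PM accounting stays balanced even for drivers that provide no callback. A miniature of that control flow, with stand-in names:

/*
 * Miniature of the suspend path above; blk_pre()/blk_post() are
 * stand-ins for blk_pre_runtime_suspend()/blk_post_runtime_suspend(),
 * and the callback is optional just as pm->runtime_suspend may be.
 */
#include <stdio.h>

static int blk_pre(void)
{
	puts("pre");
	return 0;
}

static void blk_post(int err)
{
	printf("post(err=%d)\n", err);
}

typedef int (*suspend_cb)(void);

static int runtime_suspend(suspend_cb cb)
{
	int err = blk_pre();

	if (err)
		return err;
	if (cb)			/* the driver callback is optional ... */
		err = cb();
	blk_post(err);		/* ... but post always pairs with pre */
	return err;
}

static int driver_suspend(void)
{
	puts("driver");
	return 0;
}

int main(void)
{
	runtime_suspend(driver_suspend);
	runtime_suspend(NULL);	/* accounting stays balanced either way */
	return 0;
}
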
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 83245391e956..054923e3393c 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -701,9 +701,12 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
701 * strings. 701 * strings.
702 */ 702 */
703 if (sdev->inquiry_len < 36) { 703 if (sdev->inquiry_len < 36) {
704 sdev_printk(KERN_INFO, sdev, 704 if (!sdev->host->short_inquiry) {
705 "scsi scan: INQUIRY result too short (%d)," 705 shost_printk(KERN_INFO, sdev->host,
706 " using 36\n", sdev->inquiry_len); 706 "scsi scan: INQUIRY result too short (%d),"
707 " using 36\n", sdev->inquiry_len);
708 sdev->host->short_inquiry = 1;
709 }
707 sdev->inquiry_len = 36; 710 sdev->inquiry_len = 36;
708 } 711 }
709 712
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 8d2312239ae0..21930c9ac9cd 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1102,6 +1102,14 @@ void __scsi_remove_device(struct scsi_device *sdev)
1102{ 1102{
1103 struct device *dev = &sdev->sdev_gendev; 1103 struct device *dev = &sdev->sdev_gendev;
1104 1104
1105 /*
1106 * This cleanup path is not reentrant and while it is impossible
1107 * to get a new reference with scsi_device_get() someone can still
1108 * hold a previously acquired one.
1109 */
1110 if (sdev->sdev_state == SDEV_DEL)
1111 return;
1112
1105 if (sdev->is_visible) { 1113 if (sdev->is_visible) {
1106 if (scsi_device_set_state(sdev, SDEV_CANCEL) != 0) 1114 if (scsi_device_set_state(sdev, SDEV_CANCEL) != 0)
1107 return; 1115 return;
@@ -1110,7 +1118,9 @@ void __scsi_remove_device(struct scsi_device *sdev)
1110 device_unregister(&sdev->sdev_dev); 1118 device_unregister(&sdev->sdev_dev);
1111 transport_remove_device(dev); 1119 transport_remove_device(dev);
1112 scsi_dh_remove_device(sdev); 1120 scsi_dh_remove_device(sdev);
1113 } 1121 device_del(dev);
1122 } else
1123 put_device(&sdev->sdev_dev);
1114 1124
1115 /* 1125 /*
1116 * Stop accepting new requests and wait until all queuecommand() and 1126 * Stop accepting new requests and wait until all queuecommand() and
@@ -1121,16 +1131,6 @@ void __scsi_remove_device(struct scsi_device *sdev)
1121 blk_cleanup_queue(sdev->request_queue); 1131 blk_cleanup_queue(sdev->request_queue);
1122 cancel_work_sync(&sdev->requeue_work); 1132 cancel_work_sync(&sdev->requeue_work);
1123 1133
1124 /*
1125 * Remove the device after blk_cleanup_queue() has been called such
1126 * a possible bdi_register() call with the same name occurs after
1127 * blk_cleanup_queue() has called bdi_destroy().
1128 */
1129 if (sdev->is_visible)
1130 device_del(dev);
1131 else
1132 put_device(&sdev->sdev_dev);
1133
1134 if (sdev->host->hostt->slave_destroy) 1134 if (sdev->host->hostt->slave_destroy)
1135 sdev->host->hostt->slave_destroy(sdev); 1135 sdev->host->hostt->slave_destroy(sdev);
1136 transport_destroy_device(dev); 1136 transport_destroy_device(dev);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 54519804c46a..3d22fc3e3c1a 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -638,11 +638,24 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
638 unsigned int max_blocks = 0; 638 unsigned int max_blocks = 0;
639 639
640 q->limits.discard_zeroes_data = 0; 640 q->limits.discard_zeroes_data = 0;
641 q->limits.discard_alignment = sdkp->unmap_alignment * 641
642 logical_block_size; 642 /*
643 q->limits.discard_granularity = 643 * When LBPRZ is reported, discard alignment and granularity
644 max(sdkp->physical_block_size, 644 * must be fixed to the logical block size. Otherwise the block
645 sdkp->unmap_granularity * logical_block_size); 645 * layer will drop misaligned portions of the request, which can
646 * lead to data corruption. If LBPRZ is not set, we honor the
647 * device preference.
648 */
649 if (sdkp->lbprz) {
650 q->limits.discard_alignment = 0;
651 q->limits.discard_granularity = 1;
652 } else {
653 q->limits.discard_alignment = sdkp->unmap_alignment *
654 logical_block_size;
655 q->limits.discard_granularity =
656 max(sdkp->physical_block_size,
657 sdkp->unmap_granularity * logical_block_size);
658 }
646 659
647 sdkp->provisioning_mode = mode; 660 sdkp->provisioning_mode = mode;
648 661
@@ -2321,11 +2334,8 @@ got_data:
2321 } 2334 }
2322 } 2335 }
2323 2336
2324 if (sdkp->capacity > 0xffffffff) { 2337 if (sdkp->capacity > 0xffffffff)
2325 sdp->use_16_for_rw = 1; 2338 sdp->use_16_for_rw = 1;
2326 sdkp->max_xfer_blocks = SD_MAX_XFER_BLOCKS;
2327 } else
2328 sdkp->max_xfer_blocks = SD_DEF_XFER_BLOCKS;
2329 2339
2330 /* Rescale capacity to 512-byte units */ 2340 /* Rescale capacity to 512-byte units */
2331 if (sector_size == 4096) 2341 if (sector_size == 4096)
@@ -2642,7 +2652,6 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
2642{ 2652{
2643 unsigned int sector_sz = sdkp->device->sector_size; 2653 unsigned int sector_sz = sdkp->device->sector_size;
2644 const int vpd_len = 64; 2654 const int vpd_len = 64;
2645 u32 max_xfer_length;
2646 unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL); 2655 unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL);
2647 2656
2648 if (!buffer || 2657 if (!buffer ||
@@ -2650,14 +2659,11 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
2650 scsi_get_vpd_page(sdkp->device, 0xb0, buffer, vpd_len)) 2659 scsi_get_vpd_page(sdkp->device, 0xb0, buffer, vpd_len))
2651 goto out; 2660 goto out;
2652 2661
2653 max_xfer_length = get_unaligned_be32(&buffer[8]);
2654 if (max_xfer_length)
2655 sdkp->max_xfer_blocks = max_xfer_length;
2656
2657 blk_queue_io_min(sdkp->disk->queue, 2662 blk_queue_io_min(sdkp->disk->queue,
2658 get_unaligned_be16(&buffer[6]) * sector_sz); 2663 get_unaligned_be16(&buffer[6]) * sector_sz);
2659 blk_queue_io_opt(sdkp->disk->queue, 2664
2660 get_unaligned_be32(&buffer[12]) * sector_sz); 2665 sdkp->max_xfer_blocks = get_unaligned_be32(&buffer[8]);
2666 sdkp->opt_xfer_blocks = get_unaligned_be32(&buffer[12]);
2661 2667
2662 if (buffer[3] == 0x3c) { 2668 if (buffer[3] == 0x3c) {
2663 unsigned int lba_count, desc_count; 2669 unsigned int lba_count, desc_count;
@@ -2806,6 +2812,11 @@ static int sd_try_extended_inquiry(struct scsi_device *sdp)
2806 return 0; 2812 return 0;
2807} 2813}
2808 2814
2815static inline u32 logical_to_sectors(struct scsi_device *sdev, u32 blocks)
2816{
2817 return blocks << (ilog2(sdev->sector_size) - 9);
2818}
2819
2809/** 2820/**
2810 * sd_revalidate_disk - called the first time a new disk is seen, 2821 * sd_revalidate_disk - called the first time a new disk is seen,
2811 * performs disk spin up, read_capacity, etc. 2822 * performs disk spin up, read_capacity, etc.
@@ -2815,8 +2826,9 @@ static int sd_revalidate_disk(struct gendisk *disk)
2815{ 2826{
2816 struct scsi_disk *sdkp = scsi_disk(disk); 2827 struct scsi_disk *sdkp = scsi_disk(disk);
2817 struct scsi_device *sdp = sdkp->device; 2828 struct scsi_device *sdp = sdkp->device;
2829 struct request_queue *q = sdkp->disk->queue;
2818 unsigned char *buffer; 2830 unsigned char *buffer;
2819 unsigned int max_xfer; 2831 unsigned int dev_max, rw_max;
2820 2832
2821 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, 2833 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp,
2822 "sd_revalidate_disk\n")); 2834 "sd_revalidate_disk\n"));
@@ -2864,11 +2876,26 @@ static int sd_revalidate_disk(struct gendisk *disk)
2864 */ 2876 */
2865 sd_set_flush_flag(sdkp); 2877 sd_set_flush_flag(sdkp);
2866 2878
2867 max_xfer = sdkp->max_xfer_blocks; 2879 /* Initial block count limit based on CDB TRANSFER LENGTH field size. */
2868 max_xfer <<= ilog2(sdp->sector_size) - 9; 2880 dev_max = sdp->use_16_for_rw ? SD_MAX_XFER_BLOCKS : SD_DEF_XFER_BLOCKS;
2881
2882 /* Some devices report a maximum block count for READ/WRITE requests. */
2883 dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks);
2884 q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);
2885
2886 /*
2887 * Use the device's preferred I/O size for reads and writes
2888 * unless the reported value is unreasonably large (or garbage).
2889 */
2890 if (sdkp->opt_xfer_blocks && sdkp->opt_xfer_blocks <= dev_max &&
2891 sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS)
2892 rw_max = q->limits.io_opt =
2893 logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
2894 else
2895 rw_max = BLK_DEF_MAX_SECTORS;
2869 2896
2870 sdkp->disk->queue->limits.max_sectors = 2897 /* Combine with controller limits */
2871 min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), max_xfer); 2898 q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
2872 2899
2873 set_capacity(disk, sdkp->capacity); 2900 set_capacity(disk, sdkp->capacity);
2874 sd_config_write_same(sdkp); 2901 sd_config_write_same(sdkp);
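
Taken together, the sd_revalidate_disk() hunks compute the request-size limit in stages: cap by what the CDB's TRANSFER LENGTH field can encode, then by any device-reported maximum, then prefer the device's optimal I/O size when it looks sane, and finally clamp to the controller. A standalone sketch of that arithmetic follows; the SD_*_XFER_BLOCKS values reflect the 16-bit and 32-bit CDB length fields, while the BLK_DEF_MAX_SECTORS value here is only an assumed stand-in for the block layer's default.

/*
 * Standalone sketch of the transfer-limit arithmetic above. "Sectors"
 * are 512-byte units throughout; BLK_DEF_MAX_SECTORS is an assumed
 * value, and __builtin_clz() (GCC/Clang) stands in for ilog2().
 */
#include <stdint.h>
#include <stdio.h>

#define SD_DEF_XFER_BLOCKS  0xffffu      /* READ/WRITE(10): 16-bit length */
#define SD_MAX_XFER_BLOCKS  0xffffffffu  /* READ/WRITE(16): 32-bit length */
#define BLK_DEF_MAX_SECTORS 1024u        /* assumed block-layer default */

static uint32_t min_not_zero(uint32_t a, uint32_t b)
{
	/* like the kernel helper: a zero operand means "no limit" */
	if (!a)
		return b;
	if (!b)
		return a;
	return a < b ? a : b;
}

static uint32_t logical_to_sectors(uint32_t sector_size, uint32_t blocks)
{
	/* logical blocks to 512-byte sectors; sector_size is a power of two */
	return blocks << (31 - __builtin_clz(sector_size) - 9);
}

static uint32_t sd_max_sectors(int use_16_for_rw, uint32_t sector_size,
			       uint32_t dev_reported_max, uint32_t opt_blocks,
			       uint32_t hw_max_sectors)
{
	uint32_t dev_max, rw_max;

	/* 1. cap by what the CDB's TRANSFER LENGTH field can encode */
	dev_max = use_16_for_rw ? SD_MAX_XFER_BLOCKS : SD_DEF_XFER_BLOCKS;
	/* 2. honor a device-reported maximum, if any */
	dev_max = min_not_zero(dev_max, dev_reported_max);
	/* 3. prefer the device's optimal I/O size when it looks sane */
	if (opt_blocks && opt_blocks <= dev_max &&
	    opt_blocks <= SD_DEF_XFER_BLOCKS)
		rw_max = logical_to_sectors(sector_size, opt_blocks);
	else
		rw_max = BLK_DEF_MAX_SECTORS;
	/* 4. never exceed what the controller itself can do */
	return rw_max < hw_max_sectors ? rw_max : hw_max_sectors;
}

int main(void)
{
	/* 4K-native disk advertising a 1 MiB optimal transfer size */
	printf("max_sectors = %u\n",
	       (unsigned)sd_max_sectors(0, 4096, 65535, 256, 32768));
	return 0;
}
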
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 63ba5ca7f9a1..5f2a84aff29f 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -67,6 +67,7 @@ struct scsi_disk {
67 atomic_t openers; 67 atomic_t openers;
68 sector_t capacity; /* size in 512-byte sectors */ 68 sector_t capacity; /* size in 512-byte sectors */
69 u32 max_xfer_blocks; 69 u32 max_xfer_blocks;
70 u32 opt_xfer_blocks;
70 u32 max_ws_blocks; 71 u32 max_ws_blocks;
71 u32 max_unmap_blocks; 72 u32 max_unmap_blocks;
72 u32 unmap_granularity; 73 u32 unmap_granularity;
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index dcb0d76d7312..044d06410d4c 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -84,6 +84,7 @@ static void init_device_slot_control(unsigned char *dest_desc,
84static int ses_recv_diag(struct scsi_device *sdev, int page_code, 84static int ses_recv_diag(struct scsi_device *sdev, int page_code,
85 void *buf, int bufflen) 85 void *buf, int bufflen)
86{ 86{
87 int ret;
87 unsigned char cmd[] = { 88 unsigned char cmd[] = {
88 RECEIVE_DIAGNOSTIC, 89 RECEIVE_DIAGNOSTIC,
89 1, /* Set PCV bit */ 90 1, /* Set PCV bit */
@@ -92,9 +93,26 @@ static int ses_recv_diag(struct scsi_device *sdev, int page_code,
92 bufflen & 0xff, 93 bufflen & 0xff,
93 0 94 0
94 }; 95 };
96 unsigned char recv_page_code;
95 97
96 return scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen, 98 ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen,
97 NULL, SES_TIMEOUT, SES_RETRIES, NULL); 99 NULL, SES_TIMEOUT, SES_RETRIES, NULL);
100 if (unlikely(ret))
101 return ret;
102
103 recv_page_code = ((unsigned char *)buf)[0];
104
105 if (likely(recv_page_code == page_code))
106 return ret;
107
108 /* successful diagnostic but wrong page code. This happens to some
109 * USB devices; just print a message and pretend there was an error */
110
111 sdev_printk(KERN_ERR, sdev,
112 "Wrong diagnostic page; asked for %d got %u\n",
113 page_code, recv_page_code);
114
115 return -EINVAL;
98} 116}
99 117
100static int ses_send_diag(struct scsi_device *sdev, int page_code, 118static int ses_send_diag(struct scsi_device *sdev, int page_code,
@@ -541,7 +559,15 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
541 if (desc_ptr) 559 if (desc_ptr)
542 desc_ptr += len; 560 desc_ptr += len;
543 561
544 if (addl_desc_ptr) 562 if (addl_desc_ptr &&
563 /* only find additional descriptions for specific devices */
564 (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE ||
565 type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE ||
566 type_ptr[0] == ENCLOSURE_COMPONENT_SAS_EXPANDER ||
567 /* these elements are optional */
568 type_ptr[0] == ENCLOSURE_COMPONENT_SCSI_TARGET_PORT ||
569 type_ptr[0] == ENCLOSURE_COMPONENT_SCSI_INITIATOR_PORT ||
570 type_ptr[0] == ENCLOSURE_COMPONENT_CONTROLLER_ELECTRONICS))
545 addl_desc_ptr += addl_desc_ptr[1] + 2; 571 addl_desc_ptr += addl_desc_ptr[1] + 2;
546 572
547 } 573 }
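
The ses_recv_diag() change amounts to: even when the transport reports success, trust the payload only if byte 0 names the page that was requested, since some USB enclosures answer with a different page. A minimal standalone sketch of just that check:

/*
 * Minimal model of the added validation: byte 0 of a RECEIVE
 * DIAGNOSTIC RESULTS response carries the page code, and a mismatch
 * is reported and turned into an error.
 */
#include <errno.h>
#include <stdio.h>

static int check_diag_page(const unsigned char *buf, int asked)
{
	int got = buf[0];

	if (got == asked)
		return 0;
	fprintf(stderr, "Wrong diagnostic page; asked for %d, got %d\n",
		asked, got);
	return -EINVAL;
}

int main(void)
{
	unsigned char good[] = { 0x07 }, bad[] = { 0x01 };

	if (check_diag_page(good, 0x07) != 0)
		return 1;
	if (check_diag_page(bad, 0x07) != -EINVAL)
		return 1;
	return 0;
}
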
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index e0a1e52a04e7..2e522951b619 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4083,6 +4083,7 @@ static int create_one_cdev(struct scsi_tape *tape, int mode, int rew)
4083 } 4083 }
4084 cdev->owner = THIS_MODULE; 4084 cdev->owner = THIS_MODULE;
4085 cdev->ops = &st_fops; 4085 cdev->ops = &st_fops;
4086 STm->cdevs[rew] = cdev;
4086 4087
4087 error = cdev_add(cdev, cdev_devno, 1); 4088 error = cdev_add(cdev, cdev_devno, 1);
4088 if (error) { 4089 if (error) {
@@ -4091,7 +4092,6 @@ static int create_one_cdev(struct scsi_tape *tape, int mode, int rew)
4091 pr_err("st%d: Device not attached.\n", dev_num); 4092 pr_err("st%d: Device not attached.\n", dev_num);
4092 goto out_free; 4093 goto out_free;
4093 } 4094 }
4094 STm->cdevs[rew] = cdev;
4095 4095
4096 i = mode << (4 - ST_NBR_MODE_BITS); 4096 i = mode << (4 - ST_NBR_MODE_BITS);
4097 snprintf(name, 10, "%s%s%s", rew ? "n" : "", 4097 snprintf(name, 10, "%s%s%s", rew ? "n" : "",
@@ -4110,8 +4110,9 @@ static int create_one_cdev(struct scsi_tape *tape, int mode, int rew)
4110 return 0; 4110 return 0;
4111out_free: 4111out_free:
4112 cdev_del(STm->cdevs[rew]); 4112 cdev_del(STm->cdevs[rew]);
4113 STm->cdevs[rew] = NULL;
4114out: 4113out:
4114 STm->cdevs[rew] = NULL;
4115 STm->devs[rew] = NULL;
4115 return error; 4116 return error;
4116} 4117}
4117 4118
diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig
index 9d5068248aa0..0a4ea809a61b 100644
--- a/drivers/soc/mediatek/Kconfig
+++ b/drivers/soc/mediatek/Kconfig
@@ -23,6 +23,7 @@ config MTK_PMIC_WRAP
23config MTK_SCPSYS 23config MTK_SCPSYS
24 bool "MediaTek SCPSYS Support" 24 bool "MediaTek SCPSYS Support"
25 depends on ARCH_MEDIATEK || COMPILE_TEST 25 depends on ARCH_MEDIATEK || COMPILE_TEST
26 default ARM64 && ARCH_MEDIATEK
26 select REGMAP 27 select REGMAP
27 select MTK_INFRACFG 28 select MTK_INFRACFG
28 select PM_GENERIC_DOMAINS if PM 29 select PM_GENERIC_DOMAINS if PM
diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
index f3a0b6a4b54e..8c03a80b482d 100644
--- a/drivers/soc/ti/knav_qmss_queue.c
+++ b/drivers/soc/ti/knav_qmss_queue.c
@@ -1179,7 +1179,7 @@ static int knav_queue_setup_link_ram(struct knav_device *kdev)
1179 1179
1180 block++; 1180 block++;
1181 if (!block->size) 1181 if (!block->size)
1182 return 0; 1182 continue;
1183 1183
1184 dev_dbg(kdev->dev, "linkram1: phys:%x, virt:%p, size:%x\n", 1184 dev_dbg(kdev->dev, "linkram1: phys:%x, virt:%p, size:%x\n",
1185 block->phys, block->virt, block->size); 1185 block->phys, block->virt, block->size);
@@ -1519,9 +1519,9 @@ static int knav_queue_load_pdsp(struct knav_device *kdev,
1519 1519
1520 for (i = 0; i < ARRAY_SIZE(knav_acc_firmwares); i++) { 1520 for (i = 0; i < ARRAY_SIZE(knav_acc_firmwares); i++) {
1521 if (knav_acc_firmwares[i]) { 1521 if (knav_acc_firmwares[i]) {
1522 ret = request_firmware(&fw, 1522 ret = request_firmware_direct(&fw,
1523 knav_acc_firmwares[i], 1523 knav_acc_firmwares[i],
1524 kdev->dev); 1524 kdev->dev);
1525 if (!ret) { 1525 if (!ret) {
1526 found = true; 1526 found = true;
1527 break; 1527 break;
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
index 06858e04ec59..bf9a610e5b89 100644
--- a/drivers/spi/spi-bcm63xx.c
+++ b/drivers/spi/spi-bcm63xx.c
@@ -562,8 +562,8 @@ static int bcm63xx_spi_probe(struct platform_device *pdev)
562 goto out_clk_disable; 562 goto out_clk_disable;
563 } 563 }
564 564
565 dev_info(dev, "at 0x%08x (irq %d, FIFOs size %d)\n", 565 dev_info(dev, "at %pr (irq %d, FIFOs size %d)\n",
566 r->start, irq, bs->fifo_size); 566 r, irq, bs->fifo_size);
567 567
568 return 0; 568 return 0;
569 569
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 59a11437db70..39412c9097c6 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -167,7 +167,7 @@ static inline int is_double_byte_mode(struct fsl_dspi *dspi)
167{ 167{
168 unsigned int val; 168 unsigned int val;
169 169
170 regmap_read(dspi->regmap, SPI_CTAR(dspi->cs), &val); 170 regmap_read(dspi->regmap, SPI_CTAR(0), &val);
171 171
172 return ((val & SPI_FRAME_BITS_MASK) == SPI_FRAME_BITS(8)) ? 0 : 1; 172 return ((val & SPI_FRAME_BITS_MASK) == SPI_FRAME_BITS(8)) ? 0 : 1;
173} 173}
@@ -257,7 +257,7 @@ static u32 dspi_data_to_pushr(struct fsl_dspi *dspi, int tx_word)
257 257
258 return SPI_PUSHR_TXDATA(d16) | 258 return SPI_PUSHR_TXDATA(d16) |
259 SPI_PUSHR_PCS(dspi->cs) | 259 SPI_PUSHR_PCS(dspi->cs) |
260 SPI_PUSHR_CTAS(dspi->cs) | 260 SPI_PUSHR_CTAS(0) |
261 SPI_PUSHR_CONT; 261 SPI_PUSHR_CONT;
262} 262}
263 263
@@ -290,7 +290,7 @@ static int dspi_eoq_write(struct fsl_dspi *dspi)
290 */ 290 */
291 if (tx_word && (dspi->len == 1)) { 291 if (tx_word && (dspi->len == 1)) {
292 dspi->dataflags |= TRAN_STATE_WORD_ODD_NUM; 292 dspi->dataflags |= TRAN_STATE_WORD_ODD_NUM;
293 regmap_update_bits(dspi->regmap, SPI_CTAR(dspi->cs), 293 regmap_update_bits(dspi->regmap, SPI_CTAR(0),
294 SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(8)); 294 SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(8));
295 tx_word = 0; 295 tx_word = 0;
296 } 296 }
@@ -339,7 +339,7 @@ static int dspi_tcfq_write(struct fsl_dspi *dspi)
339 339
340 if (tx_word && (dspi->len == 1)) { 340 if (tx_word && (dspi->len == 1)) {
341 dspi->dataflags |= TRAN_STATE_WORD_ODD_NUM; 341 dspi->dataflags |= TRAN_STATE_WORD_ODD_NUM;
342 regmap_update_bits(dspi->regmap, SPI_CTAR(dspi->cs), 342 regmap_update_bits(dspi->regmap, SPI_CTAR(0),
343 SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(8)); 343 SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(8));
344 tx_word = 0; 344 tx_word = 0;
345 } 345 }
@@ -407,7 +407,7 @@ static int dspi_transfer_one_message(struct spi_master *master,
407 regmap_update_bits(dspi->regmap, SPI_MCR, 407 regmap_update_bits(dspi->regmap, SPI_MCR,
408 SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF, 408 SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF,
409 SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF); 409 SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);
410 regmap_write(dspi->regmap, SPI_CTAR(dspi->cs), 410 regmap_write(dspi->regmap, SPI_CTAR(0),
411 dspi->cur_chip->ctar_val); 411 dspi->cur_chip->ctar_val);
412 412
413 trans_mode = dspi->devtype_data->trans_mode; 413 trans_mode = dspi->devtype_data->trans_mode;
@@ -566,7 +566,7 @@ static irqreturn_t dspi_interrupt(int irq, void *dev_id)
566 if (!dspi->len) { 566 if (!dspi->len) {
567 if (dspi->dataflags & TRAN_STATE_WORD_ODD_NUM) { 567 if (dspi->dataflags & TRAN_STATE_WORD_ODD_NUM) {
568 regmap_update_bits(dspi->regmap, 568 regmap_update_bits(dspi->regmap,
569 SPI_CTAR(dspi->cs), 569 SPI_CTAR(0),
570 SPI_FRAME_BITS_MASK, 570 SPI_FRAME_BITS_MASK,
571 SPI_FRAME_BITS(16)); 571 SPI_FRAME_BITS(16));
572 dspi->dataflags &= ~TRAN_STATE_WORD_ODD_NUM; 572 dspi->dataflags &= ~TRAN_STATE_WORD_ODD_NUM;
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 563954a61424..7840067062a8 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -410,7 +410,7 @@ static int mtk_spi_setup(struct spi_device *spi)
410 if (!spi->controller_data) 410 if (!spi->controller_data)
411 spi->controller_data = (void *)&mtk_default_chip_info; 411 spi->controller_data = (void *)&mtk_default_chip_info;
412 412
413 if (mdata->dev_comp->need_pad_sel) 413 if (mdata->dev_comp->need_pad_sel && gpio_is_valid(spi->cs_gpio))
414 gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH)); 414 gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
415 415
416 return 0; 416 return 0;
@@ -632,13 +632,23 @@ static int mtk_spi_probe(struct platform_device *pdev)
632 goto err_put_master; 632 goto err_put_master;
633 } 633 }
634 634
635 for (i = 0; i < master->num_chipselect; i++) { 635 if (!master->cs_gpios && master->num_chipselect > 1) {
636 ret = devm_gpio_request(&pdev->dev, master->cs_gpios[i], 636 dev_err(&pdev->dev,
637 dev_name(&pdev->dev)); 637 "cs_gpios not specified and num_chipselect > 1\n");
638 if (ret) { 638 ret = -EINVAL;
639 dev_err(&pdev->dev, 639 goto err_put_master;
640 "can't get CS GPIO %i\n", i); 640 }
641 goto err_put_master; 641
642 if (master->cs_gpios) {
643 for (i = 0; i < master->num_chipselect; i++) {
644 ret = devm_gpio_request(&pdev->dev,
645 master->cs_gpios[i],
646 dev_name(&pdev->dev));
647 if (ret) {
648 dev_err(&pdev->dev,
649 "can't get CS GPIO %i\n", i);
650 goto err_put_master;
651 }
642 } 652 }
643 } 653 }
644 } 654 }
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 94af80676684..5e5fd77e2711 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -1171,19 +1171,31 @@ err_no_rxchan:
1171static int pl022_dma_autoprobe(struct pl022 *pl022) 1171static int pl022_dma_autoprobe(struct pl022 *pl022)
1172{ 1172{
1173 struct device *dev = &pl022->adev->dev; 1173 struct device *dev = &pl022->adev->dev;
1174 struct dma_chan *chan;
1175 int err;
1174 1176
1175 /* automatically configure DMA channels from platform, normally using DT */ 1177 /* automatically configure DMA channels from platform, normally using DT */
1176 pl022->dma_rx_channel = dma_request_slave_channel(dev, "rx"); 1178 chan = dma_request_slave_channel_reason(dev, "rx");
1177 if (!pl022->dma_rx_channel) 1179 if (IS_ERR(chan)) {
1180 err = PTR_ERR(chan);
1178 goto err_no_rxchan; 1181 goto err_no_rxchan;
1182 }
1183
1184 pl022->dma_rx_channel = chan;
1179 1185
1180 pl022->dma_tx_channel = dma_request_slave_channel(dev, "tx"); 1186 chan = dma_request_slave_channel_reason(dev, "tx");
1181 if (!pl022->dma_tx_channel) 1187 if (IS_ERR(chan)) {
1188 err = PTR_ERR(chan);
1182 goto err_no_txchan; 1189 goto err_no_txchan;
1190 }
1191
1192 pl022->dma_tx_channel = chan;
1183 1193
1184 pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL); 1194 pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL);
1185 if (!pl022->dummypage) 1195 if (!pl022->dummypage) {
1196 err = -ENOMEM;
1186 goto err_no_dummypage; 1197 goto err_no_dummypage;
1198 }
1187 1199
1188 return 0; 1200 return 0;
1189 1201
@@ -1194,7 +1206,7 @@ err_no_txchan:
1194 dma_release_channel(pl022->dma_rx_channel); 1206 dma_release_channel(pl022->dma_rx_channel);
1195 pl022->dma_rx_channel = NULL; 1207 pl022->dma_rx_channel = NULL;
1196err_no_rxchan: 1208err_no_rxchan:
1197 return -ENODEV; 1209 return err;
1198} 1210}
1199 1211
1200static void terminate_dma(struct pl022 *pl022) 1212static void terminate_dma(struct pl022 *pl022)
@@ -2236,6 +2248,10 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id)
2236 2248
2237 /* Get DMA channels, try autoconfiguration first */ 2249 /* Get DMA channels, try autoconfiguration first */
2238 status = pl022_dma_autoprobe(pl022); 2250 status = pl022_dma_autoprobe(pl022);
2251 if (status == -EPROBE_DEFER) {
2252 dev_dbg(dev, "deferring probe to get DMA channel\n");
2253 goto err_no_irq;
2254 }
2239 2255
2240 /* If that failed, use channels from platform_info */ 2256 /* If that failed, use channels from platform_info */
2241 if (status == 0) 2257 if (status == 0)
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index e2415be209d5..dee1cb87d24f 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -376,6 +376,7 @@ static void spi_drv_shutdown(struct device *dev)
376 376
377/** 377/**
378 * __spi_register_driver - register a SPI driver 378 * __spi_register_driver - register a SPI driver
379 * @owner: owner module of the driver to register
379 * @sdrv: the driver to register 380 * @sdrv: the driver to register
380 * Context: can sleep 381 * Context: can sleep
381 * 382 *
@@ -1704,7 +1705,7 @@ struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
1704 master->bus_num = -1; 1705 master->bus_num = -1;
1705 master->num_chipselect = 1; 1706 master->num_chipselect = 1;
1706 master->dev.class = &spi_master_class; 1707 master->dev.class = &spi_master_class;
1707 master->dev.parent = get_device(dev); 1708 master->dev.parent = dev;
1708 spi_master_set_devdata(master, &master[1]); 1709 spi_master_set_devdata(master, &master[1]);
1709 1710
1710 return master; 1711 return master;
@@ -2130,6 +2131,7 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
2130 * Set transfer tx_nbits and rx_nbits as single transfer default 2131 * Set transfer tx_nbits and rx_nbits as single transfer default
2131 * (SPI_NBITS_SINGLE) if it is not set for this transfer. 2132 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
2132 */ 2133 */
2134 message->frame_length = 0;
2133 list_for_each_entry(xfer, &message->transfers, transfer_list) { 2135 list_for_each_entry(xfer, &message->transfers, transfer_list) {
2134 message->frame_length += xfer->len; 2136 message->frame_length += xfer->len;
2135 if (!xfer->bits_per_word) 2137 if (!xfer->bits_per_word)
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 91a0fcd72423..d0e7dfc647cf 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -651,11 +651,11 @@ static int spidev_release(struct inode *inode, struct file *filp)
651 kfree(spidev->rx_buffer); 651 kfree(spidev->rx_buffer);
652 spidev->rx_buffer = NULL; 652 spidev->rx_buffer = NULL;
653 653
654 spin_lock_irq(&spidev->spi_lock);
654 if (spidev->spi) 655 if (spidev->spi)
655 spidev->speed_hz = spidev->spi->max_speed_hz; 656 spidev->speed_hz = spidev->spi->max_speed_hz;
656 657
657 /* ... after we unbound from the underlying device? */ 658 /* ... after we unbound from the underlying device? */
658 spin_lock_irq(&spidev->spi_lock);
659 dofree = (spidev->spi == NULL); 659 dofree = (spidev->spi == NULL);
660 spin_unlock_irq(&spidev->spi_lock); 660 spin_unlock_irq(&spidev->spi_lock);
661 661
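
The spidev hunk is a lock-scope fix: the NULL test of spidev->spi and the dereference that reads max_speed_hz must sit under the same spi_lock hold that the unbind path takes when it clears the pointer; otherwise the pointer can go away between test and use. The same shape in portable pthreads, with illustrative names:

/*
 * Sketch of the race the hunk above closes: a pointer another thread
 * may NULL out has to be tested and used under one lock hold, not
 * tested first and locked later. All names here are illustrative.
 */
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct dev { int max_speed_hz; };

struct state {
	pthread_mutex_t lock;
	struct dev *dev;	/* the unbind path sets this to NULL */
	int speed_hz;
};

static int do_release(struct state *st)
{
	int dofree;

	pthread_mutex_lock(&st->lock);
	if (st->dev)	/* test ... */
		st->speed_hz = st->dev->max_speed_hz; /* ... and use it */
	dofree = (st->dev == NULL);	/* under the same hold */
	pthread_mutex_unlock(&st->lock);
	return dofree;
}

int main(void)
{
	struct dev d = { 1000000 };
	struct state st = { PTHREAD_MUTEX_INITIALIZER, &d, 0 };

	printf("dofree=%d speed=%d\n", do_release(&st), st.speed_hz);
	return 0;
}
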
diff --git a/drivers/staging/android/ion/ion_chunk_heap.c b/drivers/staging/android/ion/ion_chunk_heap.c
index 195c41d7bd53..0813163f962f 100644
--- a/drivers/staging/android/ion/ion_chunk_heap.c
+++ b/drivers/staging/android/ion/ion_chunk_heap.c
@@ -81,7 +81,7 @@ static int ion_chunk_heap_allocate(struct ion_heap *heap,
81err: 81err:
82 sg = table->sgl; 82 sg = table->sgl;
83 for (i -= 1; i >= 0; i--) { 83 for (i -= 1; i >= 0; i--) {
84 gen_pool_free(chunk_heap->pool, sg_phys(sg) & PAGE_MASK, 84 gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
85 sg->length); 85 sg->length);
86 sg = sg_next(sg); 86 sg = sg_next(sg);
87 } 87 }
@@ -109,7 +109,7 @@ static void ion_chunk_heap_free(struct ion_buffer *buffer)
109 DMA_BIDIRECTIONAL); 109 DMA_BIDIRECTIONAL);
110 110
111 for_each_sg(table->sgl, sg, table->nents, i) { 111 for_each_sg(table->sgl, sg, table->nents, i) {
112 gen_pool_free(chunk_heap->pool, sg_phys(sg) & PAGE_MASK, 112 gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
113 sg->length); 113 sg->length);
114 } 114 }
115 chunk_heap->allocated -= allocated_size; 115 chunk_heap->allocated -= allocated_size;
diff --git a/drivers/staging/iio/iio_simple_dummy_events.c b/drivers/staging/iio/iio_simple_dummy_events.c
index bfbf1c56bd22..6eb600ff7056 100644
--- a/drivers/staging/iio/iio_simple_dummy_events.c
+++ b/drivers/staging/iio/iio_simple_dummy_events.c
@@ -159,7 +159,7 @@ static irqreturn_t iio_simple_dummy_get_timestamp(int irq, void *private)
159 struct iio_dummy_state *st = iio_priv(indio_dev); 159 struct iio_dummy_state *st = iio_priv(indio_dev);
160 160
161 st->event_timestamp = iio_get_time_ns(); 161 st->event_timestamp = iio_get_time_ns();
162 return IRQ_HANDLED; 162 return IRQ_WAKE_THREAD;
163} 163}
164 164
165/** 165/**
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
index f5d741f25ffd..485ab2670918 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
@@ -110,7 +110,6 @@ struct libcfs_ioctl_handler {
110#define IOC_LIBCFS_CLEAR_DEBUG _IOWR('e', 31, long) 110#define IOC_LIBCFS_CLEAR_DEBUG _IOWR('e', 31, long)
111#define IOC_LIBCFS_MARK_DEBUG _IOWR('e', 32, long) 111#define IOC_LIBCFS_MARK_DEBUG _IOWR('e', 32, long)
112#define IOC_LIBCFS_MEMHOG _IOWR('e', 36, long) 112#define IOC_LIBCFS_MEMHOG _IOWR('e', 36, long)
113#define IOC_LIBCFS_PING_TEST _IOWR('e', 37, long)
114/* lnet ioctls */ 113/* lnet ioctls */
115#define IOC_LIBCFS_GET_NI _IOWR('e', 50, long) 114#define IOC_LIBCFS_GET_NI _IOWR('e', 50, long)
116#define IOC_LIBCFS_FAIL_NID _IOWR('e', 51, long) 115#define IOC_LIBCFS_FAIL_NID _IOWR('e', 51, long)
diff --git a/drivers/staging/lustre/lustre/libcfs/module.c b/drivers/staging/lustre/lustre/libcfs/module.c
index 07a68594c279..e7c2b26156b9 100644
--- a/drivers/staging/lustre/lustre/libcfs/module.c
+++ b/drivers/staging/lustre/lustre/libcfs/module.c
@@ -274,23 +274,6 @@ static int libcfs_ioctl_int(struct cfs_psdev_file *pfile, unsigned long cmd,
274 } 274 }
275 break; 275 break;
276 276
277 case IOC_LIBCFS_PING_TEST: {
278 extern void (kping_client)(struct libcfs_ioctl_data *);
279 void (*ping)(struct libcfs_ioctl_data *);
280
281 CDEBUG(D_IOCTL, "doing %d pings to nid %s (%s)\n",
282 data->ioc_count, libcfs_nid2str(data->ioc_nid),
283 libcfs_nid2str(data->ioc_nid));
284 ping = symbol_get(kping_client);
285 if (!ping)
286 CERROR("symbol_get failed\n");
287 else {
288 ping(data);
289 symbol_put(kping_client);
290 }
291 return 0;
292 }
293
294 default: { 277 default: {
295 struct libcfs_ioctl_handler *hand; 278 struct libcfs_ioctl_handler *hand;
296 279
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c
index f61ef669644c..a4a9a763ff02 100644
--- a/drivers/staging/lustre/lustre/obdecho/echo_client.c
+++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c
@@ -1270,6 +1270,7 @@ static int
1270echo_copyout_lsm(struct lov_stripe_md *lsm, void *_ulsm, int ulsm_nob) 1270echo_copyout_lsm(struct lov_stripe_md *lsm, void *_ulsm, int ulsm_nob)
1271{ 1271{
1272 struct lov_stripe_md *ulsm = _ulsm; 1272 struct lov_stripe_md *ulsm = _ulsm;
1273 struct lov_oinfo **p;
1273 int nob, i; 1274 int nob, i;
1274 1275
1275 nob = offsetof(struct lov_stripe_md, lsm_oinfo[lsm->lsm_stripe_count]); 1276 nob = offsetof(struct lov_stripe_md, lsm_oinfo[lsm->lsm_stripe_count]);
@@ -1279,9 +1280,10 @@ echo_copyout_lsm(struct lov_stripe_md *lsm, void *_ulsm, int ulsm_nob)
1279 if (copy_to_user(ulsm, lsm, sizeof(*ulsm))) 1280 if (copy_to_user(ulsm, lsm, sizeof(*ulsm)))
1280 return -EFAULT; 1281 return -EFAULT;
1281 1282
1282 for (i = 0; i < lsm->lsm_stripe_count; i++) { 1283 for (i = 0, p = lsm->lsm_oinfo; i < lsm->lsm_stripe_count; i++, p++) {
1283 if (copy_to_user(ulsm->lsm_oinfo[i], lsm->lsm_oinfo[i], 1284 struct lov_oinfo __user *up;
1284 sizeof(lsm->lsm_oinfo[0]))) 1285 if (get_user(up, ulsm->lsm_oinfo + i) ||
1286 copy_to_user(up, *p, sizeof(struct lov_oinfo)))
1285 return -EFAULT; 1287 return -EFAULT;
1286 } 1288 }
1287 return 0; 1289 return 0;
@@ -1289,9 +1291,10 @@ echo_copyout_lsm(struct lov_stripe_md *lsm, void *_ulsm, int ulsm_nob)
1289 1291
1290static int 1292static int
1291echo_copyin_lsm(struct echo_device *ed, struct lov_stripe_md *lsm, 1293echo_copyin_lsm(struct echo_device *ed, struct lov_stripe_md *lsm,
1292 void *ulsm, int ulsm_nob) 1294 struct lov_stripe_md __user *ulsm, int ulsm_nob)
1293{ 1295{
1294 struct echo_client_obd *ec = ed->ed_ec; 1296 struct echo_client_obd *ec = ed->ed_ec;
1297 struct lov_oinfo **p;
1295 int i; 1298 int i;
1296 1299
1297 if (ulsm_nob < sizeof(*lsm)) 1300 if (ulsm_nob < sizeof(*lsm))
@@ -1306,11 +1309,10 @@ echo_copyin_lsm(struct echo_device *ed, struct lov_stripe_md *lsm,
1306 ((__u64)lsm->lsm_stripe_size * lsm->lsm_stripe_count > ~0UL)) 1309 ((__u64)lsm->lsm_stripe_size * lsm->lsm_stripe_count > ~0UL))
1307 return -EINVAL; 1310 return -EINVAL;
1308 1311
1309 for (i = 0; i < lsm->lsm_stripe_count; i++) { 1312 for (i = 0, p = lsm->lsm_oinfo; i < lsm->lsm_stripe_count; i++, p++) {
1310 if (copy_from_user(lsm->lsm_oinfo[i], 1313 struct lov_oinfo __user *up;
1311 ((struct lov_stripe_md *)ulsm)-> \ 1314 if (get_user(up, ulsm->lsm_oinfo + i) ||
1312 lsm_oinfo[i], 1315 copy_from_user(*p, up, sizeof(struct lov_oinfo)))
1313 sizeof(lsm->lsm_oinfo[0])))
1314 return -EFAULT; 1316 return -EFAULT;
1315 } 1317 }
1316 return 0; 1318 return 0;
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 342a07c58d89..72204fbf2bb1 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -4074,6 +4074,17 @@ reject:
4074 return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); 4074 return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
4075} 4075}
4076 4076
4077static bool iscsi_target_check_conn_state(struct iscsi_conn *conn)
4078{
4079 bool ret;
4080
4081 spin_lock_bh(&conn->state_lock);
4082 ret = (conn->conn_state != TARG_CONN_STATE_LOGGED_IN);
4083 spin_unlock_bh(&conn->state_lock);
4084
4085 return ret;
4086}
4087
4077int iscsi_target_rx_thread(void *arg) 4088int iscsi_target_rx_thread(void *arg)
4078{ 4089{
4079 int ret, rc; 4090 int ret, rc;
@@ -4091,7 +4102,7 @@ int iscsi_target_rx_thread(void *arg)
4091 * incoming iscsi/tcp socket I/O, and/or failing the connection. 4102 * incoming iscsi/tcp socket I/O, and/or failing the connection.
4092 */ 4103 */
4093 rc = wait_for_completion_interruptible(&conn->rx_login_comp); 4104 rc = wait_for_completion_interruptible(&conn->rx_login_comp);
4094 if (rc < 0) 4105 if (rc < 0 || iscsi_target_check_conn_state(conn))
4095 return 0; 4106 return 0;
4096 4107
4097 if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) { 4108 if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 5c964c09c89f..9fc9117d0f22 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -388,6 +388,7 @@ err:
388 if (login->login_complete) { 388 if (login->login_complete) {
389 if (conn->rx_thread && conn->rx_thread_active) { 389 if (conn->rx_thread && conn->rx_thread_active) {
390 send_sig(SIGINT, conn->rx_thread, 1); 390 send_sig(SIGINT, conn->rx_thread, 1);
391 complete(&conn->rx_login_comp);
391 kthread_stop(conn->rx_thread); 392 kthread_stop(conn->rx_thread);
392 } 393 }
393 if (conn->tx_thread && conn->tx_thread_active) { 394 if (conn->tx_thread && conn->tx_thread_active) {
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index 51d1734d5390..2cbea2af7cd0 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -208,7 +208,7 @@ int iscsi_create_default_params(struct iscsi_param_list **param_list_ptr)
208 if (!pl) { 208 if (!pl) {
209 pr_err("Unable to allocate memory for" 209 pr_err("Unable to allocate memory for"
210 " struct iscsi_param_list.\n"); 210 " struct iscsi_param_list.\n");
211 return -1 ; 211 return -ENOMEM;
212 } 212 }
213 INIT_LIST_HEAD(&pl->param_list); 213 INIT_LIST_HEAD(&pl->param_list);
214 INIT_LIST_HEAD(&pl->extra_response_list); 214 INIT_LIST_HEAD(&pl->extra_response_list);
@@ -578,7 +578,7 @@ int iscsi_copy_param_list(
578 param_list = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL); 578 param_list = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL);
579 if (!param_list) { 579 if (!param_list) {
580 pr_err("Unable to allocate memory for struct iscsi_param_list.\n"); 580 pr_err("Unable to allocate memory for struct iscsi_param_list.\n");
581 return -1; 581 return -ENOMEM;
582 } 582 }
583 INIT_LIST_HEAD(&param_list->param_list); 583 INIT_LIST_HEAD(&param_list->param_list);
584 INIT_LIST_HEAD(&param_list->extra_response_list); 584 INIT_LIST_HEAD(&param_list->extra_response_list);
@@ -629,7 +629,7 @@ int iscsi_copy_param_list(
629 629
630err_out: 630err_out:
631 iscsi_release_param_list(param_list); 631 iscsi_release_param_list(param_list);
632 return -1; 632 return -ENOMEM;
633} 633}
634 634
635static void iscsi_release_extra_responses(struct iscsi_param_list *param_list) 635static void iscsi_release_extra_responses(struct iscsi_param_list *param_list)
@@ -729,7 +729,7 @@ static int iscsi_add_notunderstood_response(
729 if (!extra_response) { 729 if (!extra_response) {
730 pr_err("Unable to allocate memory for" 730 pr_err("Unable to allocate memory for"
731 " struct iscsi_extra_response.\n"); 731 " struct iscsi_extra_response.\n");
732 return -1; 732 return -ENOMEM;
733 } 733 }
734 INIT_LIST_HEAD(&extra_response->er_list); 734 INIT_LIST_HEAD(&extra_response->er_list);
735 735
@@ -1370,7 +1370,7 @@ int iscsi_decode_text_input(
1370 tmpbuf = kzalloc(length + 1, GFP_KERNEL); 1370 tmpbuf = kzalloc(length + 1, GFP_KERNEL);
1371 if (!tmpbuf) { 1371 if (!tmpbuf) {
1372 pr_err("Unable to allocate %u + 1 bytes for tmpbuf.\n", length); 1372 pr_err("Unable to allocate %u + 1 bytes for tmpbuf.\n", length);
1373 return -1; 1373 return -ENOMEM;
1374 } 1374 }
1375 1375
1376 memcpy(tmpbuf, textbuf, length); 1376 memcpy(tmpbuf, textbuf, length);
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 0b4b2a67d9f9..98698d875742 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -371,7 +371,8 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
371 return 0; 371 return 0;
372} 372}
373 373
374static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success) 374static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success,
375 int *post_ret)
375{ 376{
376 unsigned char *buf, *addr; 377 unsigned char *buf, *addr;
377 struct scatterlist *sg; 378 struct scatterlist *sg;
@@ -437,7 +438,8 @@ sbc_execute_rw(struct se_cmd *cmd)
437 cmd->data_direction); 438 cmd->data_direction);
438} 439}
439 440
440static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success) 441static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
442 int *post_ret)
441{ 443{
442 struct se_device *dev = cmd->se_dev; 444 struct se_device *dev = cmd->se_dev;
443 445
@@ -447,8 +449,10 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
447 * sent to the backend driver. 449 * sent to the backend driver.
448 */ 450 */
449 spin_lock_irq(&cmd->t_state_lock); 451 spin_lock_irq(&cmd->t_state_lock);
450 if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) 452 if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) {
451 cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST; 453 cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
454 *post_ret = 1;
455 }
452 spin_unlock_irq(&cmd->t_state_lock); 456 spin_unlock_irq(&cmd->t_state_lock);
453 457
454 /* 458 /*
@@ -460,7 +464,8 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
460 return TCM_NO_SENSE; 464 return TCM_NO_SENSE;
461} 465}
462 466
463static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success) 467static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success,
468 int *post_ret)
464{ 469{
465 struct se_device *dev = cmd->se_dev; 470 struct se_device *dev = cmd->se_dev;
466 struct scatterlist *write_sg = NULL, *sg; 471 struct scatterlist *write_sg = NULL, *sg;
@@ -556,11 +561,11 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes
556 561
557 if (block_size < PAGE_SIZE) { 562 if (block_size < PAGE_SIZE) {
558 sg_set_page(&write_sg[i], m.page, block_size, 563 sg_set_page(&write_sg[i], m.page, block_size,
559 block_size); 564 m.piter.sg->offset + block_size);
560 } else { 565 } else {
561 sg_miter_next(&m); 566 sg_miter_next(&m);
562 sg_set_page(&write_sg[i], m.page, block_size, 567 sg_set_page(&write_sg[i], m.page, block_size,
563 0); 568 m.piter.sg->offset);
564 } 569 }
565 len -= block_size; 570 len -= block_size;
566 i++; 571 i++;
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
index 273c72b2b83d..81a6b3e07687 100644
--- a/drivers/target/target_core_stat.c
+++ b/drivers/target/target_core_stat.c
@@ -246,7 +246,7 @@ static ssize_t target_stat_lu_prod_show(struct config_item *item, char *page)
246 char str[sizeof(dev->t10_wwn.model)+1]; 246 char str[sizeof(dev->t10_wwn.model)+1];
247 247
248 /* scsiLuProductId */ 248 /* scsiLuProductId */
249 for (i = 0; i < sizeof(dev->t10_wwn.vendor); i++) 249 for (i = 0; i < sizeof(dev->t10_wwn.model); i++)
250 str[i] = ISPRINT(dev->t10_wwn.model[i]) ? 250 str[i] = ISPRINT(dev->t10_wwn.model[i]) ?
251 dev->t10_wwn.model[i] : ' '; 251 dev->t10_wwn.model[i] : ' ';
252 str[i] = '\0'; 252 str[i] = '\0';
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 5b2820312310..28fb3016370f 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -130,6 +130,9 @@ void core_tmr_abort_task(
 		if (tmr->ref_task_tag != ref_tag)
 			continue;
 
+		if (!kref_get_unless_zero(&se_cmd->cmd_kref))
+			continue;
+
 		printk("ABORT_TASK: Found referenced %s task_tag: %llu\n",
 			se_cmd->se_tfo->get_fabric_name(), ref_tag);
 
@@ -139,13 +142,15 @@ void core_tmr_abort_task(
139 " skipping\n", ref_tag); 142 " skipping\n", ref_tag);
140 spin_unlock(&se_cmd->t_state_lock); 143 spin_unlock(&se_cmd->t_state_lock);
141 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 144 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
145
146 target_put_sess_cmd(se_cmd);
147
142 goto out; 148 goto out;
143 } 149 }
144 se_cmd->transport_state |= CMD_T_ABORTED; 150 se_cmd->transport_state |= CMD_T_ABORTED;
145 spin_unlock(&se_cmd->t_state_lock); 151 spin_unlock(&se_cmd->t_state_lock);
146 152
147 list_del_init(&se_cmd->se_cmd_list); 153 list_del_init(&se_cmd->se_cmd_list);
148 kref_get(&se_cmd->cmd_kref);
149 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 154 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
150 155
151 cancel_work_sync(&se_cmd->work); 156 cancel_work_sync(&se_cmd->work);
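
[Editor's note: the ABORT_TASK fix above replaces a bare kref_get() taken late in the walk with kref_get_unless_zero() taken before the command is touched, so a command whose final reference has already been dropped is skipped rather than resurrected. A standalone C11 sketch of the get-unless-zero rule, illustrative only:]

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int refcount = 1;

	/* Succeeds only while at least one reference is still held;
	 * never revives an object whose count has already hit zero. */
	static int get_unless_zero(atomic_int *ref)
	{
		int old = atomic_load(ref);

		while (old != 0) {
			if (atomic_compare_exchange_weak(ref, &old, old + 1))
				return 1;	/* got a reference */
		}
		return 0;			/* object already dying: skip it */
	}

	int main(void)
	{
		printf("first try: %d (count=%d)\n", get_unless_zero(&refcount),
		       atomic_load(&refcount));
		atomic_store(&refcount, 0);	/* simulate the last put */
		printf("after zero: %d\n", get_unless_zero(&refcount));
		return 0;
	}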
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 5bacc7b5ed6d..4fdcee2006d1 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1658,7 +1658,7 @@ bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags)
 void transport_generic_request_failure(struct se_cmd *cmd,
 		sense_reason_t sense_reason)
 {
-	int ret = 0;
+	int ret = 0, post_ret = 0;
 
 	pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08llx"
 		" CDB: 0x%02x\n", cmd, cmd->tag, cmd->t_task_cdb[0]);
@@ -1680,7 +1680,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
 	 */
 	if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
 	     cmd->transport_complete_callback)
-		cmd->transport_complete_callback(cmd, false);
+		cmd->transport_complete_callback(cmd, false, &post_ret);
 
 	switch (sense_reason) {
 	case TCM_NON_EXISTENT_LUN:
@@ -2068,11 +2068,13 @@ static void target_complete_ok_work(struct work_struct *work)
 	 */
 	if (cmd->transport_complete_callback) {
 		sense_reason_t rc;
+		bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE);
+		bool zero_dl = !(cmd->data_length);
+		int post_ret = 0;
 
-		rc = cmd->transport_complete_callback(cmd, true);
-		if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) {
-			if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
-			    !cmd->data_length)
+		rc = cmd->transport_complete_callback(cmd, true, &post_ret);
+		if (!rc && !post_ret) {
+			if (caw && zero_dl)
 				goto queue_rsp;
 
 			return;
@@ -2507,23 +2509,24 @@ out:
 EXPORT_SYMBOL(target_get_sess_cmd);
 
 static void target_release_cmd_kref(struct kref *kref)
-		__releases(&se_cmd->se_sess->sess_cmd_lock)
 {
 	struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
 	struct se_session *se_sess = se_cmd->se_sess;
+	unsigned long flags;
 
+	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
 	if (list_empty(&se_cmd->se_cmd_list)) {
-		spin_unlock(&se_sess->sess_cmd_lock);
+		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 		se_cmd->se_tfo->release_cmd(se_cmd);
 		return;
 	}
 	if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
-		spin_unlock(&se_sess->sess_cmd_lock);
+		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 		complete(&se_cmd->cmd_wait_comp);
 		return;
 	}
 	list_del(&se_cmd->se_cmd_list);
-	spin_unlock(&se_sess->sess_cmd_lock);
+	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 
 	se_cmd->se_tfo->release_cmd(se_cmd);
 }
@@ -2539,8 +2542,7 @@ int target_put_sess_cmd(struct se_cmd *se_cmd)
 		se_cmd->se_tfo->release_cmd(se_cmd);
 		return 1;
 	}
-	return kref_put_spinlock_irqsave(&se_cmd->cmd_kref, target_release_cmd_kref,
-			&se_sess->sess_cmd_lock);
+	return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
 }
 EXPORT_SYMBOL(target_put_sess_cmd);
 
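
[Editor's note: two patterns from the hunks above are worth spelling out. First, the completion callback now reports through *post_ret whether it already produced a response, so the caller no longer infers that from SCF_COMPARE_AND_WRITE_POST. Second, target_release_cmd_kref() now takes sess_cmd_lock itself, which is what lets plain kref_put() replace the removed kref_put_spinlock_irqsave(). A toy sketch of the out-parameter convention, with hypothetical names:]

	#include <stdio.h>

	/* Hypothetical callback: returns a status and, via *post_ret, tells
	 * the caller whether it already handled the response itself. */
	static int complete_cb(int success, int *post_ret)
	{
		if (success)
			*post_ret = 1;	/* response already queued here */
		return 0;
	}

	int main(void)
	{
		int post_ret = 0;
		int rc = complete_cb(1, &post_ret);

		if (!rc && !post_ret)
			printf("caller must queue the response\n");
		else
			printf("nothing left to do (rc=%d post_ret=%d)\n",
			       rc, post_ret);
		return 0;
	}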
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 937cebf76633..5e6d6cb348fc 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -638,7 +638,7 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
 	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
 		return 0;
 
-	if (!time_after(cmd->deadline, jiffies))
+	if (!time_after(jiffies, cmd->deadline))
 		return 0;
 
 	set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
@@ -1101,8 +1101,6 @@ tcmu_parse_cdb(struct se_cmd *cmd)
 
 static const struct target_backend_ops tcmu_ops = {
 	.name			= "user",
-	.inquiry_prod		= "USER",
-	.inquiry_rev		= TCMU_VERSION,
 	.owner			= THIS_MODULE,
 	.transport_flags	= TRANSPORT_FLAG_PASSTHROUGH,
 	.attach_hba		= tcmu_attach_hba,
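
[Editor's note: the first tcmu hunk swaps the operands of time_after(). time_after(jiffies, cmd->deadline) is true once the deadline has passed; the inverted form could only fire after the counter wrapped, so expired commands were effectively never reaped. A wrap-safe userspace model of the macro:]

	#include <stdio.h>

	/* Wrap-safe "a is after b", modelled on the kernel macro: the signed
	 * difference stays correct across unsigned overflow as long as the two
	 * timestamps are less than half the counter range apart. */
	#define time_after(a, b)	((long)((b) - (a)) < 0)

	int main(void)
	{
		unsigned long jiffies = 1000, deadline = 900;

		printf("expired: %d\n", time_after(jiffies, deadline));	/* 1 */

		/* near the wrap point a plain comparison would get this wrong */
		jiffies = 5;
		deadline = (unsigned long)-10;	/* set just before the wrap */
		printf("expired across wrap: %d\n",
		       time_after(jiffies, deadline));			/* 1 */
		return 0;
	}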
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index c463c89b90ef..8cc4ac64a91c 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -382,7 +382,7 @@ endmenu
 
 config QCOM_SPMI_TEMP_ALARM
 	tristate "Qualcomm SPMI PMIC Temperature Alarm"
-	depends on OF && (SPMI || COMPILE_TEST) && IIO
+	depends on OF && SPMI && IIO
 	select REGMAP_SPMI
 	help
 	  This enables a thermal sysfs driver for Qualcomm plug-and-play (QPNP)
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index c8fe3cac2e0e..c5547bd711db 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -55,6 +55,7 @@
 #define TEMPSENSE2_PANIC_VALUE_SHIFT	16
 #define TEMPSENSE2_PANIC_VALUE_MASK	0xfff0000
 
+#define OCOTP_MEM0			0x0480
 #define OCOTP_ANA1			0x04e0
 
 /* The driver supports 1 passive trip point and 1 critical trip point */
@@ -64,12 +65,6 @@ enum imx_thermal_trip {
 	IMX_TRIP_NUM,
 };
 
-/*
- * It defines the temperature in millicelsius for passive trip point
- * that will trigger cooling action when crossed.
- */
-#define IMX_TEMP_PASSIVE		85000
-
 #define IMX_POLLING_DELAY		2000 /* millisecond */
 #define IMX_PASSIVE_DELAY		1000
 
@@ -100,12 +95,14 @@ struct imx_thermal_data {
 	u32 c1, c2; /* See formula in imx_get_sensor_data() */
 	int temp_passive;
 	int temp_critical;
+	int temp_max;
 	int alarm_temp;
 	int last_temp;
 	bool irq_enabled;
 	int irq;
 	struct clk *thermal_clk;
 	const struct thermal_soc_data *socdata;
+	const char *temp_grade;
 };
 
 static void imx_set_panic_temp(struct imx_thermal_data *data,
@@ -285,10 +282,12 @@ static int imx_set_trip_temp(struct thermal_zone_device *tz, int trip,
 {
 	struct imx_thermal_data *data = tz->devdata;
 
+	/* do not allow changing critical threshold */
 	if (trip == IMX_TRIP_CRITICAL)
 		return -EPERM;
 
-	if (temp < 0 || temp > IMX_TEMP_PASSIVE)
+	/* do not allow passive to be set higher than critical */
+	if (temp < 0 || temp > data->temp_critical)
 		return -EINVAL;
 
 	data->temp_passive = temp;
@@ -404,17 +403,39 @@ static int imx_get_sensor_data(struct platform_device *pdev)
 	data->c1 = temp64;
 	data->c2 = n1 * data->c1 + 1000 * t1;
 
-	/*
-	 * Set the default passive cooling trip point,
-	 * can be changed from userspace.
-	 */
-	data->temp_passive = IMX_TEMP_PASSIVE;
+	/* use OTP for thermal grade */
+	ret = regmap_read(map, OCOTP_MEM0, &val);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to read temp grade: %d\n", ret);
+		return ret;
+	}
+
+	/* The maximum die temp is specified by the Temperature Grade */
+	switch ((val >> 6) & 0x3) {
+	case 0: /* Commercial (0 to 95C) */
+		data->temp_grade = "Commercial";
+		data->temp_max = 95000;
+		break;
+	case 1: /* Extended Commercial (-20 to 105C) */
+		data->temp_grade = "Extended Commercial";
+		data->temp_max = 105000;
+		break;
+	case 2: /* Industrial (-40 to 105C) */
+		data->temp_grade = "Industrial";
+		data->temp_max = 105000;
+		break;
+	case 3: /* Automotive (-40 to 125C) */
+		data->temp_grade = "Automotive";
+		data->temp_max = 125000;
+		break;
+	}
 
 	/*
-	 * The maximum die temperature set to 20 C higher than
-	 * IMX_TEMP_PASSIVE.
+	 * Set the critical trip point at 5C under max
+	 * Set the passive trip point at 10C under max (can change via sysfs)
 	 */
-	data->temp_critical = 1000 * 20 + data->temp_passive;
+	data->temp_critical = data->temp_max - (1000 * 5);
+	data->temp_passive = data->temp_max - (1000 * 10);
 
 	return 0;
 }
@@ -551,6 +572,11 @@ static int imx_thermal_probe(struct platform_device *pdev)
 		return ret;
 	}
 
+	dev_info(&pdev->dev, "%s CPU temperature grade - max:%dC"
+		 " critical:%dC passive:%dC\n", data->temp_grade,
+		 data->temp_max / 1000, data->temp_critical / 1000,
+		 data->temp_passive / 1000);
+
 	/* Enable measurements at ~ 10 Hz */
 	regmap_write(map, TEMPSENSE1 + REG_CLR, TEMPSENSE1_MEASURE_FREQ);
 	measure_freq = DIV_ROUND_UP(32768, 10); /* 10 Hz */
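
[Editor's note: instead of a hard-coded 85C passive trip, the driver now reads the part's temperature grade from OTP fuse word OCOTP_MEM0 (bits 7:6) and derives the critical and passive trips from the grade's maximum die temperature. A decoding sketch using the same values as the hunk above; the fuse word fed to it here is made up for illustration:]

	#include <stdio.h>

	/* Decode the two-bit temperature grade from an OTP fuse word, as the
	 * patch above does with (val >> 6) & 0x3. */
	static int grade_to_temp_max(unsigned int fuse, const char **name)
	{
		switch ((fuse >> 6) & 0x3) {
		case 0:  *name = "Commercial";          return  95000;
		case 1:  *name = "Extended Commercial"; return 105000;
		case 2:  *name = "Industrial";          return 105000;
		default: *name = "Automotive";          return 125000;
		}
	}

	int main(void)
	{
		const char *name;
		int max = grade_to_temp_max(0x2 << 6, &name);	/* industrial part */

		/* trips derived exactly as in the hunk above */
		printf("%s: max=%dmC critical=%dmC passive=%dmC\n",
		       name, max, max - 5000, max - 10000);
		return 0;
	}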
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index 42b7d4253b94..be4eedcb839a 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -964,7 +964,7 @@ void of_thermal_destroy_zones(void)
 
 	np = of_find_node_by_name(NULL, "thermal-zones");
 	if (!np) {
-		pr_err("unable to find thermal zones\n");
+		pr_debug("unable to find thermal zones\n");
 		return;
 	}
 
diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c
index f0fbea386869..1246aa6fcab0 100644
--- a/drivers/thermal/power_allocator.c
+++ b/drivers/thermal/power_allocator.c
@@ -174,7 +174,6 @@ static void estimate_pid_constants(struct thermal_zone_device *tz,
 /**
  * pid_controller() - PID controller
  * @tz:	thermal zone we are operating in
- * @current_temp:	the current temperature in millicelsius
  * @control_temp:	the target temperature in millicelsius
  * @max_allocatable_power:	maximum allocatable power for this thermal zone
  *
@@ -191,7 +190,6 @@ static void estimate_pid_constants(struct thermal_zone_device *tz,
  * Return: The power budget for the next period.
  */
 static u32 pid_controller(struct thermal_zone_device *tz,
-			  int current_temp,
 			  int control_temp,
 			  u32 max_allocatable_power)
 {
@@ -211,7 +209,7 @@ static u32 pid_controller(struct thermal_zone_device *tz,
 				       true);
 	}
 
-	err = control_temp - current_temp;
+	err = control_temp - tz->temperature;
 	err = int_to_frac(err);
 
 	/* Calculate the proportional term */
@@ -332,7 +330,6 @@ static void divvy_up_power(u32 *req_power, u32 *max_power, int num_actors,
 }
 
 static int allocate_power(struct thermal_zone_device *tz,
-			  int current_temp,
 			  int control_temp)
 {
 	struct thermal_instance *instance;
@@ -418,8 +415,7 @@ static int allocate_power(struct thermal_zone_device *tz,
 		i++;
 	}
 
-	power_range = pid_controller(tz, current_temp, control_temp,
-				     max_allocatable_power);
+	power_range = pid_controller(tz, control_temp, max_allocatable_power);
 
 	divvy_up_power(weighted_req_power, max_power, num_actors,
 		       total_weighted_req_power, power_range, granted_power,
@@ -444,8 +440,8 @@ static int allocate_power(struct thermal_zone_device *tz,
 	trace_thermal_power_allocator(tz, req_power, total_req_power,
 				      granted_power, total_granted_power,
 				      num_actors, power_range,
-				      max_allocatable_power, current_temp,
-				      control_temp - current_temp);
+				      max_allocatable_power, tz->temperature,
+				      control_temp - tz->temperature);
 
 	kfree(req_power);
 unlock:
@@ -612,7 +608,7 @@ static void power_allocator_unbind(struct thermal_zone_device *tz)
 static int power_allocator_throttle(struct thermal_zone_device *tz, int trip)
 {
 	int ret;
-	int switch_on_temp, control_temp, current_temp;
+	int switch_on_temp, control_temp;
 	struct power_allocator_params *params = tz->governor_data;
 
 	/*
@@ -622,15 +618,9 @@ static int power_allocator_throttle(struct thermal_zone_device *tz, int trip)
 	if (trip != params->trip_max_desired_temperature)
 		return 0;
 
-	ret = thermal_zone_get_temp(tz, &current_temp);
-	if (ret) {
-		dev_warn(&tz->device, "Failed to get temperature: %d\n", ret);
-		return ret;
-	}
-
 	ret = tz->ops->get_trip_temp(tz, params->trip_switch_on,
 				     &switch_on_temp);
-	if (!ret && (current_temp < switch_on_temp)) {
+	if (!ret && (tz->temperature < switch_on_temp)) {
 		tz->passive = 0;
 		reset_pid_controller(params);
 		allow_maximum_power(tz);
@@ -648,7 +638,7 @@ static int power_allocator_throttle(struct thermal_zone_device *tz, int trip)
 		return ret;
 	}
 
-	return allocate_power(tz, current_temp, control_temp);
+	return allocate_power(tz, control_temp);
 }
 
 static struct thermal_governor thermal_gov_power_allocator = {
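
[Editor's note: the governor previously re-read the sensor inside the throttle path; it now reuses tz->temperature, the value cached by the enclosing thermal-zone update, so every term of one PID evaluation sees the same reading. A toy proportional-only controller showing how the error term feeds the power budget; the constants are invented for illustration and are not the driver's:]

	#include <stdio.h>

	/* Toy proportional-only step: the budget shrinks as the cached zone
	 * temperature closes in on the control temperature. */
	static long power_budget(int cached_temp, int control_temp)
	{
		const long k_po = 200, sustainable_power = 2000;	/* made up */
		long err = control_temp - cached_temp;			/* millicelsius */
		long budget = sustainable_power + (k_po * err) / 1000;

		return budget > 0 ? budget : 0;
	}

	int main(void)
	{
		/* one cached reading per polling cycle, reused for every term */
		printf("budget at 70C (target 85C): %ld mW\n",
		       power_budget(70000, 85000));
		printf("budget at 90C (target 85C): %ld mW\n",
		       power_budget(90000, 85000));
		return 0;
	}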
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 5d4ae7d705e0..13d01edc7a04 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -361,6 +361,24 @@ static irqreturn_t rcar_thermal_irq(int irq, void *data)
 /*
  *		platform functions
  */
+static int rcar_thermal_remove(struct platform_device *pdev)
+{
+	struct rcar_thermal_common *common = platform_get_drvdata(pdev);
+	struct device *dev = &pdev->dev;
+	struct rcar_thermal_priv *priv;
+
+	rcar_thermal_for_each_priv(priv, common) {
+		if (rcar_has_irq_support(priv))
+			rcar_thermal_irq_disable(priv);
+		thermal_zone_device_unregister(priv->zone);
+	}
+
+	pm_runtime_put(dev);
+	pm_runtime_disable(dev);
+
+	return 0;
+}
+
 static int rcar_thermal_probe(struct platform_device *pdev)
 {
 	struct rcar_thermal_common *common;
@@ -377,6 +395,8 @@ static int rcar_thermal_probe(struct platform_device *pdev)
 	if (!common)
 		return -ENOMEM;
 
+	platform_set_drvdata(pdev, common);
+
 	INIT_LIST_HEAD(&common->head);
 	spin_lock_init(&common->lock);
 	common->dev = dev;
@@ -454,43 +474,16 @@ static int rcar_thermal_probe(struct platform_device *pdev)
 		rcar_thermal_common_write(common, ENR, enr_bits);
 	}
 
-	platform_set_drvdata(pdev, common);
-
 	dev_info(dev, "%d sensor probed\n", i);
 
 	return 0;
 
 error_unregister:
-	rcar_thermal_for_each_priv(priv, common) {
-		if (rcar_has_irq_support(priv))
-			rcar_thermal_irq_disable(priv);
-		thermal_zone_device_unregister(priv->zone);
-	}
-
-	pm_runtime_put(dev);
-	pm_runtime_disable(dev);
+	rcar_thermal_remove(pdev);
 
 	return ret;
 }
 
-static int rcar_thermal_remove(struct platform_device *pdev)
-{
-	struct rcar_thermal_common *common = platform_get_drvdata(pdev);
-	struct device *dev = &pdev->dev;
-	struct rcar_thermal_priv *priv;
-
-	rcar_thermal_for_each_priv(priv, common) {
-		if (rcar_has_irq_support(priv))
-			rcar_thermal_irq_disable(priv);
-		thermal_zone_device_unregister(priv->zone);
-	}
-
-	pm_runtime_put(dev);
-	pm_runtime_disable(dev);
-
-	return 0;
-}
-
 static const struct of_device_id rcar_thermal_dt_ids[] = {
 	{ .compatible = "renesas,rcar-thermal", },
 	{},
diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
index 9787e8aa509f..e845841ab036 100644
--- a/drivers/thermal/rockchip_thermal.c
+++ b/drivers/thermal/rockchip_thermal.c
@@ -1,6 +1,9 @@
 /*
  * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd
  *
+ * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
+ * Caesar Wang <wxt@rock-chips.com>
+ *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
  * version 2, as published by the Free Software Foundation.
@@ -45,17 +48,50 @@ enum tshut_polarity {
 };
 
 /**
- * The system has three Temperature Sensors.  channel 0 is reserved,
- * channel 1 is for CPU, and channel 2 is for GPU.
+ * The system has two Temperature Sensors.
+ * sensor0 is for CPU, and sensor1 is for GPU.
  */
 enum sensor_id {
-	SENSOR_CPU = 1,
+	SENSOR_CPU = 0,
 	SENSOR_GPU,
 };
 
+/**
+ * The conversion table has the adc value and temperature.
+ * ADC_DECREMENT is the adc value decrement (e.g. v2_code_table).
+ * ADC_INCREMENT is the adc value increment (e.g. v3_code_table).
+ */
+enum adc_sort_mode {
+	ADC_DECREMENT = 0,
+	ADC_INCREMENT,
+};
+
+/**
+ * The max number of sensors is two in rockchip SoCs.
+ * Two sensors: CPU and GPU sensor.
+ */
+#define SOC_MAX_SENSORS	2
+
+struct chip_tsadc_table {
+	const struct tsadc_table *id;
+
+	/* the array table size */
+	unsigned int length;
+
+	/* the analog data mask */
+	u32 data_mask;
+
+	/* the sort mode of the adc values in the table: increment or decrement */
+	enum adc_sort_mode mode;
+};
+
 struct rockchip_tsadc_chip {
+	/* The sensor id of the chip corresponds to the ADC channel */
+	int chn_id[SOC_MAX_SENSORS];
+	int chn_num;
+
 	/* The hardware-controlled tshut property */
-	long tshut_temp;
+	int tshut_temp;
 	enum tshut_mode tshut_mode;
 	enum tshut_polarity tshut_polarity;
 
@@ -65,37 +101,40 @@ struct rockchip_tsadc_chip {
 	void (*control)(void __iomem *reg, bool on);
 
 	/* Per-sensor methods */
-	int (*get_temp)(int chn, void __iomem *reg, int *temp);
-	void (*set_tshut_temp)(int chn, void __iomem *reg, long temp);
+	int (*get_temp)(struct chip_tsadc_table table,
+			int chn, void __iomem *reg, int *temp);
+	void (*set_tshut_temp)(struct chip_tsadc_table table,
+			       int chn, void __iomem *reg, int temp);
 	void (*set_tshut_mode)(int chn, void __iomem *reg, enum tshut_mode m);
+
+	/* Per-table methods */
+	struct chip_tsadc_table table;
 };
 
 struct rockchip_thermal_sensor {
 	struct rockchip_thermal_data *thermal;
 	struct thermal_zone_device *tzd;
-	enum sensor_id id;
+	int id;
 };
 
-#define NUM_SENSORS	2 /* Ignore unused sensor 0 */
-
 struct rockchip_thermal_data {
 	const struct rockchip_tsadc_chip *chip;
 	struct platform_device *pdev;
 	struct reset_control *reset;
 
-	struct rockchip_thermal_sensor sensors[NUM_SENSORS];
+	struct rockchip_thermal_sensor sensors[SOC_MAX_SENSORS];
 
 	struct clk *clk;
 	struct clk *pclk;
 
 	void __iomem *regs;
 
-	long tshut_temp;
+	int tshut_temp;
 	enum tshut_mode tshut_mode;
 	enum tshut_polarity tshut_polarity;
 };
 
-/* TSADC V2 Sensor info define: */
+/* TSADC Sensor info define: */
 #define TSADCV2_AUTO_CON			0x04
 #define TSADCV2_INT_EN				0x08
 #define TSADCV2_INT_PD				0x0c
@@ -117,6 +156,8 @@ struct rockchip_thermal_data {
 #define TSADCV2_INT_PD_CLEAR_MASK		~BIT(8)
 
 #define TSADCV2_DATA_MASK			0xfff
+#define TSADCV3_DATA_MASK			0x3ff
+
 #define TSADCV2_HIGHT_INT_DEBOUNCE_COUNT	4
 #define TSADCV2_HIGHT_TSHUT_DEBOUNCE_COUNT	4
 #define TSADCV2_AUTO_PERIOD_TIME		250 /* msec */
@@ -124,7 +165,7 @@ struct rockchip_thermal_data {
 
 struct tsadc_table {
 	u32 code;
-	long temp;
+	int temp;
 };
 
 static const struct tsadc_table v2_code_table[] = {
@@ -165,21 +206,61 @@ static const struct tsadc_table v2_code_table[] = {
 	{3421, 125000},
 };
 
-static u32 rk_tsadcv2_temp_to_code(long temp)
+static const struct tsadc_table v3_code_table[] = {
+	{0, -40000},
+	{106, -40000},
+	{108, -35000},
+	{110, -30000},
+	{112, -25000},
+	{114, -20000},
+	{116, -15000},
+	{118, -10000},
+	{120, -5000},
+	{122, 0},
+	{124, 5000},
+	{126, 10000},
+	{128, 15000},
+	{130, 20000},
+	{132, 25000},
+	{134, 30000},
+	{136, 35000},
+	{138, 40000},
+	{140, 45000},
+	{142, 50000},
+	{144, 55000},
+	{146, 60000},
+	{148, 65000},
+	{150, 70000},
+	{152, 75000},
+	{154, 80000},
+	{156, 85000},
+	{158, 90000},
+	{160, 95000},
+	{162, 100000},
+	{163, 105000},
+	{165, 110000},
+	{167, 115000},
+	{169, 120000},
+	{171, 125000},
+	{TSADCV3_DATA_MASK, 125000},
+};
+
+static u32 rk_tsadcv2_temp_to_code(struct chip_tsadc_table table,
+				   int temp)
 {
 	int high, low, mid;
 
 	low = 0;
-	high = ARRAY_SIZE(v2_code_table) - 1;
+	high = table.length - 1;
 	mid = (high + low) / 2;
 
-	if (temp < v2_code_table[low].temp || temp > v2_code_table[high].temp)
+	if (temp < table.id[low].temp || temp > table.id[high].temp)
 		return 0;
 
 	while (low <= high) {
-		if (temp == v2_code_table[mid].temp)
-			return v2_code_table[mid].code;
-		else if (temp < v2_code_table[mid].temp)
+		if (temp == table.id[mid].temp)
+			return table.id[mid].code;
+		else if (temp < table.id[mid].temp)
 			high = mid - 1;
 		else
 			low = mid + 1;
@@ -189,29 +270,54 @@ static u32 rk_tsadcv2_temp_to_code(long temp)
 	return 0;
 }
 
-static int rk_tsadcv2_code_to_temp(u32 code, int *temp)
+static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code,
+				   int *temp)
 {
 	unsigned int low = 1;
-	unsigned int high = ARRAY_SIZE(v2_code_table) - 1;
+	unsigned int high = table.length - 1;
 	unsigned int mid = (low + high) / 2;
 	unsigned int num;
 	unsigned long denom;
 
-	BUILD_BUG_ON(ARRAY_SIZE(v2_code_table) < 2);
+	WARN_ON(table.length < 2);
 
-	code &= TSADCV2_DATA_MASK;
-	if (code < v2_code_table[high].code)
-		return -EAGAIN;		/* Incorrect reading */
-
-	while (low <= high) {
-		if (code >= v2_code_table[mid].code &&
-		    code < v2_code_table[mid - 1].code)
-			break;
-		else if (code < v2_code_table[mid].code)
-			low = mid + 1;
-		else
-			high = mid - 1;
-		mid = (low + high) / 2;
+	switch (table.mode) {
+	case ADC_DECREMENT:
+		code &= table.data_mask;
+		if (code < table.id[high].code)
+			return -EAGAIN;		/* Incorrect reading */
+
+		while (low <= high) {
+			if (code >= table.id[mid].code &&
+			    code < table.id[mid - 1].code)
+				break;
+			else if (code < table.id[mid].code)
+				low = mid + 1;
+			else
+				high = mid - 1;
+
+			mid = (low + high) / 2;
+		}
+		break;
+	case ADC_INCREMENT:
+		code &= table.data_mask;
+		if (code < table.id[low].code)
+			return -EAGAIN;		/* Incorrect reading */
+
+		while (low <= high) {
+			if (code >= table.id[mid - 1].code &&
+			    code < table.id[mid].code)
+				break;
+			else if (code > table.id[mid].code)
+				low = mid + 1;
+			else
+				high = mid - 1;
+
+			mid = (low + high) / 2;
+		}
+		break;
+	default:
+		pr_err("Invalid the conversion table\n");
 	}
 
 	/*
@@ -220,24 +326,28 @@ static int rk_tsadcv2_code_to_temp(u32 code, int *temp)
 	 * temperature between 2 table entries is linear and interpolate
 	 * to produce less granular result.
 	 */
-	num = v2_code_table[mid].temp - v2_code_table[mid - 1].temp;
-	num *= v2_code_table[mid - 1].code - code;
-	denom = v2_code_table[mid - 1].code - v2_code_table[mid].code;
-	*temp = v2_code_table[mid - 1].temp + (num / denom);
+	num = table.id[mid].temp - v2_code_table[mid - 1].temp;
+	num *= abs(table.id[mid - 1].code - code);
+	denom = abs(table.id[mid - 1].code - table.id[mid].code);
+	*temp = table.id[mid - 1].temp + (num / denom);
 
 	return 0;
 }
 
 /**
- * rk_tsadcv2_initialize - initialize TASDC Controller
- * (1) Set TSADCV2_AUTO_PERIOD, configure the interleave between
- * every two accessing of TSADC in normal operation.
- * (2) Set TSADCV2_AUTO_PERIOD_HT, configure the interleave between
- * every two accessing of TSADC after the temperature is higher
- * than COM_SHUT or COM_INT.
- * (3) Set TSADCV2_HIGH_INT_DEBOUNCE and TSADC_HIGHT_TSHUT_DEBOUNCE,
- * if the temperature is higher than COMP_INT or COMP_SHUT for
- * "debounce" times, TSADC controller will generate interrupt or TSHUT.
+ * rk_tsadcv2_initialize - initialize TASDC Controller.
+ *
+ * (1) Set TSADC_V2_AUTO_PERIOD:
+ *     Configure the interleave between every two accessing of
+ *     TSADC in normal operation.
+ *
+ * (2) Set TSADCV2_AUTO_PERIOD_HT:
+ *     Configure the interleave between every two accessing of
+ *     TSADC after the temperature is higher than COM_SHUT or COM_INT.
+ *
+ * (3) Set TSADCV2_HIGH_INT_DEBOUNCE and TSADC_HIGHT_TSHUT_DEBOUNCE:
+ *     If the temperature is higher than COMP_INT or COMP_SHUT for
+ *     "debounce" times, TSADC controller will generate interrupt or TSHUT.
  */
 static void rk_tsadcv2_initialize(void __iomem *regs,
 				  enum tshut_polarity tshut_polarity)
@@ -279,20 +389,22 @@ static void rk_tsadcv2_control(void __iomem *regs, bool enable)
 	writel_relaxed(val, regs + TSADCV2_AUTO_CON);
 }
 
-static int rk_tsadcv2_get_temp(int chn, void __iomem *regs, int *temp)
+static int rk_tsadcv2_get_temp(struct chip_tsadc_table table,
+			       int chn, void __iomem *regs, int *temp)
 {
 	u32 val;
 
 	val = readl_relaxed(regs + TSADCV2_DATA(chn));
 
-	return rk_tsadcv2_code_to_temp(val, temp);
+	return rk_tsadcv2_code_to_temp(table, val, temp);
 }
 
-static void rk_tsadcv2_tshut_temp(int chn, void __iomem *regs, long temp)
+static void rk_tsadcv2_tshut_temp(struct chip_tsadc_table table,
+				  int chn, void __iomem *regs, int temp)
 {
 	u32 tshut_value, val;
 
-	tshut_value = rk_tsadcv2_temp_to_code(temp);
+	tshut_value = rk_tsadcv2_temp_to_code(table, temp);
 	writel_relaxed(tshut_value, regs + TSADCV2_COMP_SHUT(chn));
 
 	/* TSHUT will be valid */
@@ -318,6 +430,10 @@ static void rk_tsadcv2_tshut_mode(int chn, void __iomem *regs,
 }
 
 static const struct rockchip_tsadc_chip rk3288_tsadc_data = {
+	.chn_id[SENSOR_CPU] = 1, /* cpu sensor is channel 1 */
+	.chn_id[SENSOR_GPU] = 2, /* gpu sensor is channel 2 */
+	.chn_num = 2, /* two channels for tsadc */
+
 	.tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO give PMIC */
 	.tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */
 	.tshut_temp = 95000,
@@ -328,6 +444,37 @@ static const struct rockchip_tsadc_chip rk3288_tsadc_data = {
 	.get_temp = rk_tsadcv2_get_temp,
 	.set_tshut_temp = rk_tsadcv2_tshut_temp,
 	.set_tshut_mode = rk_tsadcv2_tshut_mode,
+
+	.table = {
+		.id = v2_code_table,
+		.length = ARRAY_SIZE(v2_code_table),
+		.data_mask = TSADCV2_DATA_MASK,
+		.mode = ADC_DECREMENT,
+	},
+};
+
+static const struct rockchip_tsadc_chip rk3368_tsadc_data = {
+	.chn_id[SENSOR_CPU] = 0, /* cpu sensor is channel 0 */
+	.chn_id[SENSOR_GPU] = 1, /* gpu sensor is channel 1 */
+	.chn_num = 2, /* two channels for tsadc */
+
+	.tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO give PMIC */
+	.tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */
+	.tshut_temp = 95000,
+
+	.initialize = rk_tsadcv2_initialize,
+	.irq_ack = rk_tsadcv2_irq_ack,
+	.control = rk_tsadcv2_control,
+	.get_temp = rk_tsadcv2_get_temp,
+	.set_tshut_temp = rk_tsadcv2_tshut_temp,
+	.set_tshut_mode = rk_tsadcv2_tshut_mode,
+
+	.table = {
+		.id = v3_code_table,
+		.length = ARRAY_SIZE(v3_code_table),
+		.data_mask = TSADCV3_DATA_MASK,
+		.mode = ADC_INCREMENT,
+	},
 };
 
 static const struct of_device_id of_rockchip_thermal_match[] = {
@@ -335,6 +482,10 @@ static const struct of_device_id of_rockchip_thermal_match[] = {
 	{
 		.compatible = "rockchip,rk3288-tsadc",
 		.data = (void *)&rk3288_tsadc_data,
 	},
+	{
+		.compatible = "rockchip,rk3368-tsadc",
+		.data = (void *)&rk3368_tsadc_data,
+	},
 	{ /* end */ },
 };
 MODULE_DEVICE_TABLE(of, of_rockchip_thermal_match);
@@ -357,7 +508,7 @@ static irqreturn_t rockchip_thermal_alarm_irq_thread(int irq, void *dev)
 
 	thermal->chip->irq_ack(thermal->regs);
 
-	for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++)
+	for (i = 0; i < thermal->chip->chn_num; i++)
 		thermal_zone_device_update(thermal->sensors[i].tzd);
 
 	return IRQ_HANDLED;
@@ -370,7 +521,8 @@ static int rockchip_thermal_get_temp(void *_sensor, int *out_temp)
 	const struct rockchip_tsadc_chip *tsadc = sensor->thermal->chip;
 	int retval;
 
-	retval = tsadc->get_temp(sensor->id, thermal->regs, out_temp);
+	retval = tsadc->get_temp(tsadc->table,
+				 sensor->id, thermal->regs, out_temp);
 	dev_dbg(&thermal->pdev->dev, "sensor %d - temp: %d, retval: %d\n",
 		sensor->id, *out_temp, retval);
 
@@ -389,7 +541,7 @@ static int rockchip_configure_from_dt(struct device *dev,
 
 	if (of_property_read_u32(np, "rockchip,hw-tshut-temp", &shut_temp)) {
 		dev_warn(dev,
-			 "Missing tshut temp property, using default %ld\n",
+			 "Missing tshut temp property, using default %d\n",
 			 thermal->chip->tshut_temp);
 		thermal->tshut_temp = thermal->chip->tshut_temp;
 	} else {
@@ -397,7 +549,7 @@ static int rockchip_configure_from_dt(struct device *dev,
 	}
 
 	if (thermal->tshut_temp > INT_MAX) {
-		dev_err(dev, "Invalid tshut temperature specified: %ld\n",
+		dev_err(dev, "Invalid tshut temperature specified: %d\n",
 			thermal->tshut_temp);
 		return -ERANGE;
 	}
@@ -442,13 +594,14 @@ static int
 rockchip_thermal_register_sensor(struct platform_device *pdev,
 				 struct rockchip_thermal_data *thermal,
 				 struct rockchip_thermal_sensor *sensor,
-				 enum sensor_id id)
+				 int id)
 {
 	const struct rockchip_tsadc_chip *tsadc = thermal->chip;
 	int error;
 
 	tsadc->set_tshut_mode(id, thermal->regs, thermal->tshut_mode);
-	tsadc->set_tshut_temp(id, thermal->regs, thermal->tshut_temp);
+	tsadc->set_tshut_temp(tsadc->table, id, thermal->regs,
+			      thermal->tshut_temp);
 
 	sensor->thermal = thermal;
 	sensor->id = id;
@@ -481,7 +634,7 @@ static int rockchip_thermal_probe(struct platform_device *pdev)
 	const struct of_device_id *match;
 	struct resource *res;
 	int irq;
-	int i;
+	int i, j;
 	int error;
 
 	match = of_match_node(of_rockchip_thermal_match, np);
@@ -556,22 +709,19 @@ static int rockchip_thermal_probe(struct platform_device *pdev)
 
 	thermal->chip->initialize(thermal->regs, thermal->tshut_polarity);
 
-	error = rockchip_thermal_register_sensor(pdev, thermal,
-						 &thermal->sensors[0],
-						 SENSOR_CPU);
-	if (error) {
-		dev_err(&pdev->dev,
-			"failed to register CPU thermal sensor: %d\n", error);
-		goto err_disable_pclk;
-	}
-
-	error = rockchip_thermal_register_sensor(pdev, thermal,
-						 &thermal->sensors[1],
-						 SENSOR_GPU);
-	if (error) {
-		dev_err(&pdev->dev,
-			"failed to register GPU thermal sensor: %d\n", error);
-		goto err_unregister_cpu_sensor;
+	for (i = 0; i < thermal->chip->chn_num; i++) {
+		error = rockchip_thermal_register_sensor(pdev, thermal,
+						&thermal->sensors[i],
+						thermal->chip->chn_id[i]);
+		if (error) {
+			dev_err(&pdev->dev,
+				"failed to register sensor[%d] : error = %d\n",
+				i, error);
+			for (j = 0; j < i; j++)
+				thermal_zone_of_sensor_unregister(&pdev->dev,
+						thermal->sensors[j].tzd);
+			goto err_disable_pclk;
+		}
 	}
 
 	error = devm_request_threaded_irq(&pdev->dev, irq, NULL,
@@ -581,22 +731,23 @@ static int rockchip_thermal_probe(struct platform_device *pdev)
 	if (error) {
 		dev_err(&pdev->dev,
 			"failed to request tsadc irq: %d\n", error);
-		goto err_unregister_gpu_sensor;
+		goto err_unregister_sensor;
 	}
 
 	thermal->chip->control(thermal->regs, true);
 
-	for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++)
+	for (i = 0; i < thermal->chip->chn_num; i++)
 		rockchip_thermal_toggle_sensor(&thermal->sensors[i], true);
 
 	platform_set_drvdata(pdev, thermal);
 
 	return 0;
 
-err_unregister_gpu_sensor:
-	thermal_zone_of_sensor_unregister(&pdev->dev, thermal->sensors[1].tzd);
-err_unregister_cpu_sensor:
-	thermal_zone_of_sensor_unregister(&pdev->dev, thermal->sensors[0].tzd);
+err_unregister_sensor:
+	while (i--)
+		thermal_zone_of_sensor_unregister(&pdev->dev,
+						  thermal->sensors[i].tzd);
+
 err_disable_pclk:
 	clk_disable_unprepare(thermal->pclk);
 err_disable_clk:
@@ -610,7 +761,7 @@ static int rockchip_thermal_remove(struct platform_device *pdev)
 	struct rockchip_thermal_data *thermal = platform_get_drvdata(pdev);
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++) {
+	for (i = 0; i < thermal->chip->chn_num; i++) {
 		struct rockchip_thermal_sensor *sensor = &thermal->sensors[i];
 
 		rockchip_thermal_toggle_sensor(sensor, false);
@@ -631,7 +782,7 @@ static int __maybe_unused rockchip_thermal_suspend(struct device *dev)
 	struct rockchip_thermal_data *thermal = platform_get_drvdata(pdev);
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++)
+	for (i = 0; i < thermal->chip->chn_num; i++)
 		rockchip_thermal_toggle_sensor(&thermal->sensors[i], false);
 
 	thermal->chip->control(thermal->regs, false);
@@ -663,18 +814,19 @@ static int __maybe_unused rockchip_thermal_resume(struct device *dev)
 
 	thermal->chip->initialize(thermal->regs, thermal->tshut_polarity);
 
-	for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++) {
-		enum sensor_id id = thermal->sensors[i].id;
+	for (i = 0; i < thermal->chip->chn_num; i++) {
+		int id = thermal->sensors[i].id;
 
 		thermal->chip->set_tshut_mode(id, thermal->regs,
 					      thermal->tshut_mode);
-		thermal->chip->set_tshut_temp(id, thermal->regs,
+		thermal->chip->set_tshut_temp(thermal->chip->table,
+					      id, thermal->regs,
 					      thermal->tshut_temp);
 	}
 
 	thermal->chip->control(thermal->regs, true);
 
-	for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++)
+	for (i = 0; i < thermal->chip->chn_num; i++)
 		rockchip_thermal_toggle_sensor(&thermal->sensors[i], true);
 
 	pinctrl_pm_select_default_state(dev);
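
[Editor's note: the driver now describes each SoC with a chip_tsadc_table (code table, length, data mask, sort direction) and converts readings by bracketing the raw code between two table rows, then interpolating linearly between them. A standalone sketch of that conversion for an increasing table, in the spirit of the ADC_INCREMENT branch; the table here is abbreviated, and a linear scan stands in for the driver's binary search:]

	#include <stdio.h>

	struct row { unsigned int code; int temp; /* millicelsius */ };

	/* Abbreviated increasing code table in the style of v3_code_table. */
	static const struct row table[] = {
		{106, -40000}, {122, 0}, {138, 40000}, {154, 80000}, {171, 125000},
	};

	static int code_to_temp(unsigned int code, int *temp)
	{
		unsigned int i;

		if (code < table[0].code || code > table[4].code)
			return -1;	/* out of range: bad reading */

		for (i = 1; i < 5; i++) {
			if (code <= table[i].code) {
				/* interpolate between the bracketing rows */
				int num = (table[i].temp - table[i - 1].temp) *
					  (int)(code - table[i - 1].code);
				int denom = (int)(table[i].code - table[i - 1].code);

				*temp = table[i - 1].temp + num / denom;
				return 0;
			}
		}
		return -1;
	}

	int main(void)
	{
		int temp;

		if (!code_to_temp(130, &temp))
			printf("code 130 -> %d mC\n", temp);	/* 20000 */
		return 0;
	}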
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index ed776149261e..e49c2bce551d 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -2054,13 +2054,13 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
 	size_t eol;
 	size_t tail;
 	int ret, found = 0;
-	bool eof_push = 0;
 
 	/* N.B. avoid overrun if nr == 0 */
-	n = min(*nr, smp_load_acquire(&ldata->canon_head) - ldata->read_tail);
-	if (!n)
+	if (!*nr)
 		return 0;
 
+	n = min(*nr + 1, smp_load_acquire(&ldata->canon_head) - ldata->read_tail);
+
 	tail = ldata->read_tail & (N_TTY_BUF_SIZE - 1);
 	size = min_t(size_t, tail + n, N_TTY_BUF_SIZE);
 
@@ -2081,12 +2081,11 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
 	n = eol - tail;
 	if (n > N_TTY_BUF_SIZE)
 		n += N_TTY_BUF_SIZE;
-	n += found;
-	c = n;
+	c = n + found;
 
-	if (found && !ldata->push && read_buf(ldata, eol) == __DISABLED_CHAR) {
-		n--;
-		eof_push = !n && ldata->read_tail != ldata->line_start;
+	if (!found || read_buf(ldata, eol) != __DISABLED_CHAR) {
+		c = min(*nr, c);
+		n = c;
 	}
 
 	n_tty_trace("%s: eol:%zu found:%d n:%zu c:%zu size:%zu more:%zu\n",
@@ -2116,7 +2115,7 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
 		ldata->push = 0;
 		tty_audit_push(tty);
 	}
-	return eof_push ? -EAGAIN : 0;
+	return 0;
 }
 
 extern ssize_t redirected_tty_write(struct file *, const char __user *,
@@ -2273,10 +2272,7 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
 
 		if (ldata->icanon && !L_EXTPROC(tty)) {
 			retval = canon_copy_from_read_buf(tty, &b, &nr);
-			if (retval == -EAGAIN) {
-				retval = 0;
-				continue;
-			} else if (retval)
+			if (retval)
 				break;
 		} else {
 			int uncopied;
diff --git a/drivers/tty/serial/8250/8250_uniphier.c b/drivers/tty/serial/8250/8250_uniphier.c
index d11621e2cf1d..245edbb68d4b 100644
--- a/drivers/tty/serial/8250/8250_uniphier.c
+++ b/drivers/tty/serial/8250/8250_uniphier.c
@@ -115,12 +115,16 @@ static void uniphier_serial_out(struct uart_port *p, int offset, int value)
  */
 static int uniphier_serial_dl_read(struct uart_8250_port *up)
 {
-	return readl(up->port.membase + UNIPHIER_UART_DLR);
+	int offset = UNIPHIER_UART_DLR << up->port.regshift;
+
+	return readl(up->port.membase + offset);
 }
 
 static void uniphier_serial_dl_write(struct uart_8250_port *up, int value)
 {
-	writel(value, up->port.membase + UNIPHIER_UART_DLR);
+	int offset = UNIPHIER_UART_DLR << up->port.regshift;
+
+	writel(value, up->port.membase + offset);
 }
 
 static int uniphier_of_serial_setup(struct device *dev, struct uart_port *port,
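
[Editor's note: the uniphier accessors now scale the divisor-latch register index by port.regshift, the standard 8250 convention for buses that space byte-wide registers on wider strides. The arithmetic in isolation; the register index used here is made up:]

	#include <stdio.h>

	/* Register index -> byte offset, as readl(membase + (reg << regshift))
	 * resolves it. A regshift of 2 places byte registers on 4-byte strides. */
	static unsigned long reg_offset(unsigned int reg, unsigned int regshift)
	{
		return (unsigned long)reg << regshift;
	}

	int main(void)
	{
		unsigned int dlr = 9;	/* illustrative register index */

		printf("regshift 0: offset 0x%lx\n", reg_offset(dlr, 0));
		printf("regshift 2: offset 0x%lx\n", reg_offset(dlr, 2));
		return 0;
	}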
diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c
index f09636083426..b5b2f2be6be7 100644
--- a/drivers/tty/serial/earlycon.c
+++ b/drivers/tty/serial/earlycon.c
@@ -115,6 +115,7 @@ static int __init register_earlycon(char *buf, const struct earlycon_id *match)
 	if (buf && !parse_options(&early_console_dev, buf))
 		buf = NULL;
 
+	spin_lock_init(&port->lock);
 	port->uartclk = BASE_BAUD * 16;
 	if (port->mapbase)
 		port->membase = earlycon_map(port->mapbase, 64);
@@ -202,6 +203,7 @@ int __init of_setup_earlycon(unsigned long addr,
 	int err;
 	struct uart_port *port = &early_console_dev.port;
 
+	spin_lock_init(&port->lock);
 	port->iotype = UPIO_MEM;
 	port->mapbase = addr;
 	port->uartclk = BASE_BAUD * 16;
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 960e50a97558..51c7507b0444 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1437,7 +1437,7 @@ static void sci_request_dma(struct uart_port *port)
 		sg_init_table(sg, 1);
 		s->rx_buf[i] = buf;
 		sg_dma_address(sg) = dma;
-		sg->length = s->buf_len_rx;
+		sg_dma_len(sg) = s->buf_len_rx;
 
 		buf += s->buf_len_rx;
 		dma += s->buf_len_rx;
diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c
index 064031870ba0..ca0d3802f2af 100644
--- a/drivers/tty/serial/sunhv.c
+++ b/drivers/tty/serial/sunhv.c
@@ -148,8 +148,10 @@ static int receive_chars_read(struct uart_port *port)
 			uart_handle_dcd_change(port, 1);
 		}
 
-		for (i = 0; i < bytes_read; i++)
-			uart_handle_sysrq_char(port, con_read_page[i]);
+		if (port->sysrq != 0 && *con_read_page) {
+			for (i = 0; i < bytes_read; i++)
+				uart_handle_sysrq_char(port, con_read_page[i]);
+		}
 
 		if (port->state == NULL)
 			continue;
@@ -168,17 +170,17 @@ struct sunhv_ops {
 	int (*receive_chars)(struct uart_port *port);
 };
 
-static struct sunhv_ops bychar_ops = {
+static const struct sunhv_ops bychar_ops = {
 	.transmit_chars = transmit_chars_putchar,
 	.receive_chars = receive_chars_getchar,
 };
 
-static struct sunhv_ops bywrite_ops = {
+static const struct sunhv_ops bywrite_ops = {
 	.transmit_chars = transmit_chars_write,
 	.receive_chars = receive_chars_read,
 };
 
-static struct sunhv_ops *sunhv_ops = &bychar_ops;
+static const struct sunhv_ops *sunhv_ops = &bychar_ops;
 
 static struct tty_port *receive_chars(struct uart_port *port)
 {
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index 9a479e61791a..3cd31e0d4bd9 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -450,7 +450,7 @@ receive_buf(struct tty_struct *tty, struct tty_buffer *head, int count)
 		count = disc->ops->receive_buf2(tty, p, f, count);
 	else {
 		count = min_t(int, count, tty->receive_room);
-		if (count)
+		if (count && disc->ops->receive_buf)
 			disc->ops->receive_buf(tty, p, f, count);
 	}
 	return count;
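
[Editor's note: receive_buf() is an optional hook in a line-discipline ops table, so the fallback path must test the pointer before calling through it; the fix above adds exactly that guard. The general shape of the pattern, with hypothetical names:]

	#include <stdio.h>

	struct ops {
		/* optional hook: implementations may leave this NULL */
		int (*receive_buf)(const char *p, int count);
	};

	static int dispatch(const struct ops *ops, const char *p, int count)
	{
		/* guard the optional callback, as the tty_buffer fix does */
		if (count && ops->receive_buf)
			return ops->receive_buf(p, count);
		return 0;	/* nothing consumed */
	}

	static int echo_buf(const char *p, int count)
	{
		printf("got %.*s\n", count, p);
		return count;
	}

	int main(void)
	{
		struct ops with = { .receive_buf = echo_buf };
		struct ops without = { 0 };

		dispatch(&with, "abc", 3);
		dispatch(&without, "abc", 3);	/* safe no-op instead of a crash */
		return 0;
	}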
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index b30e7423549b..26ca4f910cb0 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1838,6 +1838,11 @@ static const struct usb_device_id acm_ids[] = {
 	},
 #endif
 
+	/* Exclude Infineon Flash Loader utility */
+	{ USB_DEVICE(0x058b, 0x0041),
+	.driver_info = IGNORE_DEVICE,
+	},
+
 	/* control interfaces without any protocol set */
 	{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
 		USB_CDC_PROTO_NONE) },
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 7caff020106e..5050760f5e17 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -115,7 +115,8 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
 	    USB_SS_MULT(desc->bmAttributes) > 3) {
 		dev_warn(ddev, "Isoc endpoint has Mult of %d in "
 				"config %d interface %d altsetting %d ep %d: "
-				"setting to 3\n", desc->bmAttributes + 1,
+				"setting to 3\n",
+				USB_SS_MULT(desc->bmAttributes),
 				cfgno, inum, asnum, ep->desc.bEndpointAddress);
 		ep->ss_ep_comp.bmAttributes = 2;
 	}
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index bdeadc112d29..ddbf32d599cb 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -124,6 +124,10 @@ struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev)
 
 int usb_device_supports_lpm(struct usb_device *udev)
 {
+	/* Some devices have trouble with LPM */
+	if (udev->quirks & USB_QUIRK_NO_LPM)
+		return 0;
+
 	/* USB 2.1 (and greater) devices indicate LPM support through
 	 * their USB 2.0 Extended Capabilities BOS descriptor.
 	 */
@@ -1031,10 +1035,20 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
 	unsigned delay;
 
 	/* Continue a partial initialization */
-	if (type == HUB_INIT2)
-		goto init2;
-	if (type == HUB_INIT3)
+	if (type == HUB_INIT2 || type == HUB_INIT3) {
+		device_lock(hub->intfdev);
+
+		/* Was the hub disconnected while we were waiting? */
+		if (hub->disconnected) {
+			device_unlock(hub->intfdev);
+			kref_put(&hub->kref, hub_release);
+			return;
+		}
+		if (type == HUB_INIT2)
+			goto init2;
 		goto init3;
+	}
+	kref_get(&hub->kref);
 
 	/* The superspeed hub except for root hub has to use Hub Depth
 	 * value as an offset into the route string to locate the bits
@@ -1232,6 +1246,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
 			queue_delayed_work(system_power_efficient_wq,
 					&hub->init_work,
 					msecs_to_jiffies(delay));
+			device_unlock(hub->intfdev);
 			return;		/* Continues at init3: below */
 		} else {
 			msleep(delay);
@@ -1253,6 +1268,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
 	/* Allow autosuspend if it was suppressed */
 	if (type <= HUB_INIT3)
 		usb_autopm_put_interface_async(to_usb_interface(hub->intfdev));
+
+	if (type == HUB_INIT2 || type == HUB_INIT3)
+		device_unlock(hub->intfdev);
+
+	kref_put(&hub->kref, hub_release);
 }
 
 /* Implement the continuations for the delays above */
@@ -4512,6 +4532,8 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
 			goto fail;
 		}
 
+		usb_detect_quirks(udev);
+
 		if (udev->wusb == 0 && le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0201) {
 			retval = usb_get_bos_descriptor(udev);
 			if (!retval) {
@@ -4710,7 +4732,6 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
4710 if (status < 0) 4732 if (status < 0)
4711 goto loop; 4733 goto loop;
4712 4734
4713 usb_detect_quirks(udev);
4714 if (udev->quirks & USB_QUIRK_DELAY_INIT) 4735 if (udev->quirks & USB_QUIRK_DELAY_INIT)
4715 msleep(1000); 4736 msleep(1000);
4716 4737
@@ -5326,9 +5347,6 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
5326 if (udev->usb2_hw_lpm_enabled == 1) 5347 if (udev->usb2_hw_lpm_enabled == 1)
5327 usb_set_usb2_hardware_lpm(udev, 0); 5348 usb_set_usb2_hardware_lpm(udev, 0);
5328 5349
5329 bos = udev->bos;
5330 udev->bos = NULL;
5331
5332 /* Disable LPM and LTM while we reset the device and reinstall the alt 5350 /* Disable LPM and LTM while we reset the device and reinstall the alt
5333 * settings. Device-initiated LPM settings, and system exit latency 5351 * settings. Device-initiated LPM settings, and system exit latency
5334 * settings are cleared when the device is reset, so we have to set 5352 * settings are cleared when the device is reset, so we have to set
@@ -5337,15 +5355,18 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
5337 ret = usb_unlocked_disable_lpm(udev); 5355 ret = usb_unlocked_disable_lpm(udev);
5338 if (ret) { 5356 if (ret) {
5339 dev_err(&udev->dev, "%s Failed to disable LPM.\n", __func__); 5357 dev_err(&udev->dev, "%s Failed to disable LPM.\n", __func__);
5340 goto re_enumerate; 5358 goto re_enumerate_no_bos;
5341 } 5359 }
5342 ret = usb_disable_ltm(udev); 5360 ret = usb_disable_ltm(udev);
5343 if (ret) { 5361 if (ret) {
5344 dev_err(&udev->dev, "%s Failed to disable LTM.\n", 5362 dev_err(&udev->dev, "%s Failed to disable LTM.\n",
5345 __func__); 5363 __func__);
5346 goto re_enumerate; 5364 goto re_enumerate_no_bos;
5347 } 5365 }
5348 5366
5367 bos = udev->bos;
5368 udev->bos = NULL;
5369
5349 for (i = 0; i < SET_CONFIG_TRIES; ++i) { 5370 for (i = 0; i < SET_CONFIG_TRIES; ++i) {
5350 5371
5351 /* ep0 maxpacket size may change; let the HCD know about it. 5372 /* ep0 maxpacket size may change; let the HCD know about it.
@@ -5442,10 +5463,11 @@ done:
5442 return 0; 5463 return 0;
5443 5464
5444re_enumerate: 5465re_enumerate:
5445 /* LPM state doesn't matter when we're about to destroy the device. */
5446 hub_port_logical_disconnect(parent_hub, port1);
5447 usb_release_bos_descriptor(udev); 5466 usb_release_bos_descriptor(udev);
5448 udev->bos = bos; 5467 udev->bos = bos;
5468re_enumerate_no_bos:
5469 /* LPM state doesn't matter when we're about to destroy the device. */
5470 hub_port_logical_disconnect(parent_hub, port1);
5449 return -ENODEV; 5471 return -ENODEV;
5450} 5472}
5451 5473
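The usb_reset_and_verify_device() hunks above defer detaching udev->bos until after the LPM/LTM disables succeed, and split the failure path into re_enumerate (restore the saved descriptor) and re_enumerate_no_bos (nothing to restore yet). A minimal userspace sketch of that staged-unwind shape; all names and failure flags here are illustrative, not kernel APIs:

/* Staged unwind: only undo what has actually been done so far. */
#include <stdio.h>
#include <stdlib.h>

struct dev { char *bos; };

static int disable_lpm(int fail) { return fail ? -1 : 0; }

static int reset_dev(struct dev *d, int fail_lpm, int fail_reinit)
{
    char *saved;

    if (disable_lpm(fail_lpm))
        goto fail_no_bos;   /* bos still attached: nothing to restore */

    saved = d->bos;         /* detach only after the early steps pass */
    d->bos = NULL;

    if (fail_reinit)
        goto fail;

    free(saved);
    return 0;

fail:
    d->bos = saved;         /* put back what we detached */
fail_no_bos:
    return -1;
}

int main(void)
{
    struct dev d = { .bos = malloc(4) };
    printf("early fail: %d, bos kept: %d\n",
           reset_dev(&d, 1, 0), d.bos != NULL);
    printf("late fail:  %d, bos kept: %d\n",
           reset_dev(&d, 0, 1), d.bos != NULL);
    free(d.bos);
    return 0;
}

Either failure leaves d.bos intact, which is the point of moving the save below the early-exit paths.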
diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
index 210618319f10..5487fe308f01 100644
--- a/drivers/usb/core/port.c
+++ b/drivers/usb/core/port.c
@@ -206,7 +206,7 @@ static int link_peers(struct usb_port *left, struct usb_port *right)
206 else 206 else
207 method = "default"; 207 method = "default";
208 208
209 pr_warn("usb: failed to peer %s and %s by %s (%s:%s) (%s:%s)\n", 209 pr_debug("usb: failed to peer %s and %s by %s (%s:%s) (%s:%s)\n",
210 dev_name(&left->dev), dev_name(&right->dev), method, 210 dev_name(&left->dev), dev_name(&right->dev), method,
211 dev_name(&left->dev), 211 dev_name(&left->dev),
212 lpeer ? dev_name(&lpeer->dev) : "none", 212 lpeer ? dev_name(&lpeer->dev) : "none",
@@ -265,7 +265,7 @@ static void link_peers_report(struct usb_port *left, struct usb_port *right)
265 if (rc == 0) { 265 if (rc == 0) {
266 dev_dbg(&left->dev, "peered to %s\n", dev_name(&right->dev)); 266 dev_dbg(&left->dev, "peered to %s\n", dev_name(&right->dev));
267 } else { 267 } else {
268 dev_warn(&left->dev, "failed to peer to %s (%d)\n", 268 dev_dbg(&left->dev, "failed to peer to %s (%d)\n",
269 dev_name(&right->dev), rc); 269 dev_name(&right->dev), rc);
270 pr_warn_once("usb: port power management may be unreliable\n"); 270 pr_warn_once("usb: port power management may be unreliable\n");
271 usb_port_block_power_off = 1; 271 usb_port_block_power_off = 1;
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index f5a381945db2..6dc810bce295 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -125,6 +125,9 @@ static const struct usb_device_id usb_quirk_list[] = {
125 { USB_DEVICE(0x04f3, 0x016f), .driver_info = 125 { USB_DEVICE(0x04f3, 0x016f), .driver_info =
126 USB_QUIRK_DEVICE_QUALIFIER }, 126 USB_QUIRK_DEVICE_QUALIFIER },
127 127
128 { USB_DEVICE(0x04f3, 0x21b8), .driver_info =
129 USB_QUIRK_DEVICE_QUALIFIER },
130
128 /* Roland SC-8820 */ 131 /* Roland SC-8820 */
129 { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME }, 132 { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME },
130 133
@@ -199,6 +202,12 @@ static const struct usb_device_id usb_quirk_list[] = {
199 { USB_DEVICE(0x1a0a, 0x0200), .driver_info = 202 { USB_DEVICE(0x1a0a, 0x0200), .driver_info =
200 USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL }, 203 USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
201 204
205 /* Blackmagic Design Intensity Shuttle */
206 { USB_DEVICE(0x1edb, 0xbd3b), .driver_info = USB_QUIRK_NO_LPM },
207
208 /* Blackmagic Design UltraStudio SDI */
209 { USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM },
210
202 { } /* terminating entry must be last */ 211 { } /* terminating entry must be last */
203}; 212};
204 213
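The quirks.c additions extend a sentinel-terminated device-ID table. A compact userspace model of such a table and its lookup; the IDs mirror the new Blackmagic entries, while the flag value and helper are made up for the sketch:

#include <stdio.h>

#define QUIRK_NO_LPM 0x01   /* illustrative flag value */

struct quirk { unsigned short vid, pid; unsigned flags; };

static const struct quirk quirk_list[] = {
    { 0x1edb, 0xbd3b, QUIRK_NO_LPM },   /* Blackmagic Intensity Shuttle */
    { 0x1edb, 0xbd4f, QUIRK_NO_LPM },   /* Blackmagic UltraStudio SDI */
    { 0, 0, 0 }                         /* terminating entry must be last */
};

static unsigned lookup_quirks(unsigned short vid, unsigned short pid)
{
    const struct quirk *q;

    for (q = quirk_list; q->vid; q++)   /* stop at the sentinel */
        if (q->vid == vid && q->pid == pid)
            return q->flags;
    return 0;
}

int main(void)
{
    printf("flags: %#x\n", lookup_quirks(0x1edb, 0xbd4f));
    return 0;
}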
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index e61d773cf65e..39c1cbf0e75d 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -125,9 +125,11 @@ static int __dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg)
125 if (ret) 125 if (ret)
126 return ret; 126 return ret;
127 127
128 ret = clk_prepare_enable(hsotg->clk); 128 if (hsotg->clk) {
129 if (ret) 129 ret = clk_prepare_enable(hsotg->clk);
130 return ret; 130 if (ret)
131 return ret;
132 }
131 133
132 if (hsotg->uphy) 134 if (hsotg->uphy)
133 ret = usb_phy_init(hsotg->uphy); 135 ret = usb_phy_init(hsotg->uphy);
@@ -175,7 +177,8 @@ static int __dwc2_lowlevel_hw_disable(struct dwc2_hsotg *hsotg)
175 if (ret) 177 if (ret)
176 return ret; 178 return ret;
177 179
178 clk_disable_unprepare(hsotg->clk); 180 if (hsotg->clk)
181 clk_disable_unprepare(hsotg->clk);
179 182
180 ret = regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies), 183 ret = regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies),
181 hsotg->supplies); 184 hsotg->supplies);
@@ -212,14 +215,41 @@ static int dwc2_lowlevel_hw_init(struct dwc2_hsotg *hsotg)
212 */ 215 */
213 hsotg->phy = devm_phy_get(hsotg->dev, "usb2-phy"); 216 hsotg->phy = devm_phy_get(hsotg->dev, "usb2-phy");
214 if (IS_ERR(hsotg->phy)) { 217 if (IS_ERR(hsotg->phy)) {
215 hsotg->phy = NULL; 218 ret = PTR_ERR(hsotg->phy);
219 switch (ret) {
220 case -ENODEV:
221 case -ENOSYS:
222 hsotg->phy = NULL;
223 break;
224 case -EPROBE_DEFER:
225 return ret;
226 default:
227 dev_err(hsotg->dev, "error getting phy %d\n", ret);
228 return ret;
229 }
230 }
231
232 if (!hsotg->phy) {
216 hsotg->uphy = devm_usb_get_phy(hsotg->dev, USB_PHY_TYPE_USB2); 233 hsotg->uphy = devm_usb_get_phy(hsotg->dev, USB_PHY_TYPE_USB2);
217 if (IS_ERR(hsotg->uphy)) 234 if (IS_ERR(hsotg->uphy)) {
218 hsotg->uphy = NULL; 235 ret = PTR_ERR(hsotg->uphy);
219 else 236 switch (ret) {
220 hsotg->plat = dev_get_platdata(hsotg->dev); 237 case -ENODEV:
238 case -ENXIO:
239 hsotg->uphy = NULL;
240 break;
241 case -EPROBE_DEFER:
242 return ret;
243 default:
244 dev_err(hsotg->dev, "error getting usb phy %d\n",
245 ret);
246 return ret;
247 }
248 }
221 } 249 }
222 250
251 hsotg->plat = dev_get_platdata(hsotg->dev);
252
223 if (hsotg->phy) { 253 if (hsotg->phy) {
224 /* 254 /*
225 * If using the generic PHY framework, check if the PHY bus 255 * If using the generic PHY framework, check if the PHY bus
@@ -229,11 +259,6 @@ static int dwc2_lowlevel_hw_init(struct dwc2_hsotg *hsotg)
229 hsotg->phyif = GUSBCFG_PHYIF8; 259 hsotg->phyif = GUSBCFG_PHYIF8;
230 } 260 }
231 261
232 if (!hsotg->phy && !hsotg->uphy && !hsotg->plat) {
233 dev_err(hsotg->dev, "no platform data or transceiver defined\n");
234 return -EPROBE_DEFER;
235 }
236
237 /* Clock */ 262 /* Clock */
238 hsotg->clk = devm_clk_get(hsotg->dev, "otg"); 263 hsotg->clk = devm_clk_get(hsotg->dev, "otg");
239 if (IS_ERR(hsotg->clk)) { 264 if (IS_ERR(hsotg->clk)) {
@@ -342,20 +367,6 @@ static int dwc2_driver_probe(struct platform_device *dev)
342 if (retval) 367 if (retval)
343 return retval; 368 return retval;
344 369
345 irq = platform_get_irq(dev, 0);
346 if (irq < 0) {
347 dev_err(&dev->dev, "missing IRQ resource\n");
348 return irq;
349 }
350
351 dev_dbg(hsotg->dev, "registering common handler for irq%d\n",
352 irq);
353 retval = devm_request_irq(hsotg->dev, irq,
354 dwc2_handle_common_intr, IRQF_SHARED,
355 dev_name(hsotg->dev), hsotg);
356 if (retval)
357 return retval;
358
359 res = platform_get_resource(dev, IORESOURCE_MEM, 0); 370 res = platform_get_resource(dev, IORESOURCE_MEM, 0);
360 hsotg->regs = devm_ioremap_resource(&dev->dev, res); 371 hsotg->regs = devm_ioremap_resource(&dev->dev, res);
361 if (IS_ERR(hsotg->regs)) 372 if (IS_ERR(hsotg->regs))
@@ -390,6 +401,20 @@ static int dwc2_driver_probe(struct platform_device *dev)
390 401
391 dwc2_set_all_params(hsotg->core_params, -1); 402 dwc2_set_all_params(hsotg->core_params, -1);
392 403
404 irq = platform_get_irq(dev, 0);
405 if (irq < 0) {
406 dev_err(&dev->dev, "missing IRQ resource\n");
407 return irq;
408 }
409
410 dev_dbg(hsotg->dev, "registering common handler for irq%d\n",
411 irq);
412 retval = devm_request_irq(hsotg->dev, irq,
413 dwc2_handle_common_intr, IRQF_SHARED,
414 dev_name(hsotg->dev), hsotg);
415 if (retval)
416 return retval;
417
393 retval = dwc2_lowlevel_hw_enable(hsotg); 418 retval = dwc2_lowlevel_hw_enable(hsotg);
394 if (retval) 419 if (retval)
395 return retval; 420 return retval;
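The dwc2_lowlevel_hw_init() rework distinguishes "no PHY described" codes (-ENODEV/-ENOSYS for the generic PHY, -ENODEV/-ENXIO for the legacy one) from -EPROBE_DEFER and from hard errors. A sketch of that triage with plain errno values; EPROBE_DEFER is kernel-internal and re-declared here only for the sketch, and get_phy() stands in for devm_phy_get():

#include <errno.h>
#include <stdio.h>

#define EPROBE_DEFER 517    /* kernel-internal code, redeclared for the sketch */

static int get_phy(int rc) { return rc; }

static int probe(int phy_rc)
{
    int ret = get_phy(phy_rc);

    if (ret < 0) {
        switch (ret) {
        case -ENODEV:
        case -ENOSYS:
            ret = 0;        /* optional resource: continue without a PHY */
            break;
        case -EPROBE_DEFER:
            return ret;     /* dependency not ready: let probe retry later */
        default:
            fprintf(stderr, "error getting phy %d\n", ret);
            return ret;
        }
    }
    return ret;
}

int main(void)
{
    printf("%d %d %d\n", probe(0), probe(-ENODEV), probe(-EPROBE_DEFER));
    return 0;
}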
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index e24a01cc98df..a58376fd65fe 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1078,6 +1078,7 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
1078 * little bit faster. 1078 * little bit faster.
1079 */ 1079 */
1080 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc) && 1080 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
1081 !usb_endpoint_xfer_int(dep->endpoint.desc) &&
1081 !(dep->flags & DWC3_EP_BUSY)) { 1082 !(dep->flags & DWC3_EP_BUSY)) {
1082 ret = __dwc3_gadget_kick_transfer(dep, 0, true); 1083 ret = __dwc3_gadget_kick_transfer(dep, 0, true);
1083 goto out; 1084 goto out;
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index adc6d52efa46..cf43e9e18368 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -423,7 +423,7 @@ static ssize_t __ffs_ep0_read_events(struct ffs_data *ffs, char __user *buf,
423 spin_unlock_irq(&ffs->ev.waitq.lock); 423 spin_unlock_irq(&ffs->ev.waitq.lock);
424 mutex_unlock(&ffs->mutex); 424 mutex_unlock(&ffs->mutex);
425 425
426 return unlikely(__copy_to_user(buf, events, size)) ? -EFAULT : size; 426 return unlikely(copy_to_user(buf, events, size)) ? -EFAULT : size;
427} 427}
428 428
429static ssize_t ffs_ep0_read(struct file *file, char __user *buf, 429static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
@@ -513,7 +513,7 @@ static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
513 513
514 /* unlocks spinlock */ 514 /* unlocks spinlock */
515 ret = __ffs_ep0_queue_wait(ffs, data, len); 515 ret = __ffs_ep0_queue_wait(ffs, data, len);
516 if (likely(ret > 0) && unlikely(__copy_to_user(buf, data, len))) 516 if (likely(ret > 0) && unlikely(copy_to_user(buf, data, len)))
517 ret = -EFAULT; 517 ret = -EFAULT;
518 goto done_mutex; 518 goto done_mutex;
519 519
@@ -3493,7 +3493,7 @@ static char *ffs_prepare_buffer(const char __user *buf, size_t len)
3493 if (unlikely(!data)) 3493 if (unlikely(!data))
3494 return ERR_PTR(-ENOMEM); 3494 return ERR_PTR(-ENOMEM);
3495 3495
3496 if (unlikely(__copy_from_user(data, buf, len))) { 3496 if (unlikely(copy_from_user(data, buf, len))) {
3497 kfree(data); 3497 kfree(data);
3498 return ERR_PTR(-EFAULT); 3498 return ERR_PTR(-EFAULT);
3499 } 3499 }
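The f_fs.c hunks swap the double-underscore copy helpers for the checked copy_to_user()/copy_from_user(), which validate the user-space range instead of assuming the caller already did. A rough userspace model of that distinction; the buffer and helper are stand-ins, not the kernel functions:

#include <stdio.h>
#include <string.h>

static char user_buf[16];   /* pretend this is the user mapping */

static int checked_copy_to_user(size_t off, const void *src, size_t n)
{
    if (off > sizeof(user_buf) || n > sizeof(user_buf) - off)
        return -1;          /* out of range: -EFAULT in the kernel */
    memcpy(user_buf + off, src, n);     /* the raw copy, after the check */
    return 0;
}

int main(void)
{
    printf("in range: %d\n", checked_copy_to_user(0, "hi", 3));
    printf("too big:  %d\n", checked_copy_to_user(8, "x", 32));
    return 0;
}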
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index 42acb45e1ab4..898a570319f1 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -370,6 +370,7 @@ static int f_midi_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
370 if (err) { 370 if (err) {
371 ERROR(midi, "%s queue req: %d\n", 371 ERROR(midi, "%s queue req: %d\n",
372 midi->out_ep->name, err); 372 midi->out_ep->name, err);
373 free_ep_req(midi->out_ep, req);
373 } 374 }
374 } 375 }
375 376
@@ -545,7 +546,7 @@ static void f_midi_transmit(struct f_midi *midi, struct usb_request *req)
545 } 546 }
546 } 547 }
547 548
548 if (req->length > 0) { 549 if (req->length > 0 && ep->enabled) {
549 int err; 550 int err;
550 551
551 err = usb_ep_queue(ep, req, GFP_ATOMIC); 552 err = usb_ep_queue(ep, req, GFP_ATOMIC);
diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c
index 289ebca316d3..ad8c9b05572d 100644
--- a/drivers/usb/gadget/function/uvc_configfs.c
+++ b/drivers/usb/gadget/function/uvc_configfs.c
@@ -20,7 +20,7 @@
20#define UVC_ATTR(prefix, cname, aname) \ 20#define UVC_ATTR(prefix, cname, aname) \
21static struct configfs_attribute prefix##attr_##cname = { \ 21static struct configfs_attribute prefix##attr_##cname = { \
22 .ca_name = __stringify(aname), \ 22 .ca_name = __stringify(aname), \
23 .ca_mode = S_IRUGO, \ 23 .ca_mode = S_IRUGO | S_IWUGO, \
24 .ca_owner = THIS_MODULE, \ 24 .ca_owner = THIS_MODULE, \
25 .show = prefix##cname##_show, \ 25 .show = prefix##cname##_show, \
26 .store = prefix##cname##_store, \ 26 .store = prefix##cname##_store, \
diff --git a/drivers/usb/gadget/udc/pxa27x_udc.c b/drivers/usb/gadget/udc/pxa27x_udc.c
index 670ac0b12f00..001a3b74a993 100644
--- a/drivers/usb/gadget/udc/pxa27x_udc.c
+++ b/drivers/usb/gadget/udc/pxa27x_udc.c
@@ -2536,6 +2536,9 @@ static int pxa_udc_suspend(struct platform_device *_dev, pm_message_t state)
2536 udc->pullup_resume = udc->pullup_on; 2536 udc->pullup_resume = udc->pullup_on;
2537 dplus_pullup(udc, 0); 2537 dplus_pullup(udc, 0);
2538 2538
2539 if (udc->driver)
2540 udc->driver->disconnect(&udc->gadget);
2541
2539 return 0; 2542 return 0;
2540} 2543}
2541 2544
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index 342ffd140122..8c6e15bd6ff0 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -473,6 +473,8 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
473 if (!pdata) 473 if (!pdata)
474 return -ENOMEM; 474 return -ENOMEM;
475 475
476 pdev->dev.platform_data = pdata;
477
476 if (!of_property_read_u32(np, "num-ports", &ports)) 478 if (!of_property_read_u32(np, "num-ports", &ports))
477 pdata->ports = ports; 479 pdata->ports = ports;
478 480
@@ -483,6 +485,7 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
483 */ 485 */
484 if (i >= pdata->ports) { 486 if (i >= pdata->ports) {
485 pdata->vbus_pin[i] = -EINVAL; 487 pdata->vbus_pin[i] = -EINVAL;
488 pdata->overcurrent_pin[i] = -EINVAL;
486 continue; 489 continue;
487 } 490 }
488 491
@@ -513,10 +516,8 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
513 } 516 }
514 517
515 at91_for_each_port(i) { 518 at91_for_each_port(i) {
516 if (i >= pdata->ports) { 519 if (i >= pdata->ports)
517 pdata->overcurrent_pin[i] = -EINVAL; 520 break;
518 continue;
519 }
520 521
521 pdata->overcurrent_pin[i] = 522 pdata->overcurrent_pin[i] =
522 of_get_named_gpio_flags(np, "atmel,oc-gpio", i, &flags); 523 of_get_named_gpio_flags(np, "atmel,oc-gpio", i, &flags);
@@ -552,8 +553,6 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
552 } 553 }
553 } 554 }
554 555
555 pdev->dev.platform_data = pdata;
556
557 device_init_wakeup(&pdev->dev, 1); 556 device_init_wakeup(&pdev->dev, 1);
558 return usb_hcd_at91_probe(&ohci_at91_hc_driver, pdev); 557 return usb_hcd_at91_probe(&ohci_at91_hc_driver, pdev);
559} 558}
diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c
index dc31c425ce01..9f1c0538b211 100644
--- a/drivers/usb/host/whci/qset.c
+++ b/drivers/usb/host/whci/qset.c
@@ -377,6 +377,10 @@ static int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_f
377 if (std->pl_virt == NULL) 377 if (std->pl_virt == NULL)
378 return -ENOMEM; 378 return -ENOMEM;
379 std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt, pl_len, DMA_TO_DEVICE); 379 std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt, pl_len, DMA_TO_DEVICE);
380 if (dma_mapping_error(whc->wusbhc.dev, std->dma_addr)) {
381 kfree(std->pl_virt);
382 return -EFAULT;
383 }
380 384
381 for (p = 0; p < std->num_pointers; p++) { 385 for (p = 0; p < std->num_pointers; p++) {
382 std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr); 386 std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
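The qset_fill_page_list() hunk adds the mandatory dma_mapping_error() check and frees the just-allocated page list when the mapping fails. A stubbed sketch of that shape; map_single() here fails on demand and is not the real DMA API:

#include <stdio.h>
#include <stdlib.h>

#define MAP_ERR ((unsigned long)-1)

static unsigned long map_single(void *buf, int fail)
{
    (void)buf;
    return fail ? MAP_ERR : 0x1000;     /* pretend bus address */
}

static int fill_page_list(int fail_map)
{
    void *pl = malloc(64);
    unsigned long dma;

    if (!pl)
        return -1;                      /* -ENOMEM in the kernel */

    dma = map_single(pl, fail_map);
    if (dma == MAP_ERR) {
        free(pl);                       /* don't leak on mapping failure */
        return -2;                      /* -EFAULT in the kernel */
    }
    /* ... would program descriptors with dma here ... */
    free(pl);
    return 0;
}

int main(void)
{
    printf("%d %d\n", fill_page_list(0), fill_page_list(1));
    return 0;
}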
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 0230965fb78c..f980c239eded 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -733,8 +733,30 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
733 if ((raw_port_status & PORT_RESET) || 733 if ((raw_port_status & PORT_RESET) ||
734 !(raw_port_status & PORT_PE)) 734 !(raw_port_status & PORT_PE))
735 return 0xffffffff; 735 return 0xffffffff;
736 if (time_after_eq(jiffies, 736 /* did port event handler already start resume timing? */
737 bus_state->resume_done[wIndex])) { 737 if (!bus_state->resume_done[wIndex]) {
738 /* If not, maybe we are in a host initiated resume? */
739 if (test_bit(wIndex, &bus_state->resuming_ports)) {
740 /* Host initiated resume doesn't time the resume
741 * signalling using resume_done[].
742 * It manually sets RESUME state, sleeps 20ms
743 * and sets U0 state. This should probably be
744 * changed, but not right now.
745 */
746 } else {
747 /* port resume was discovered now and here,
748 * start resume timing
749 */
750 unsigned long timeout = jiffies +
751 msecs_to_jiffies(USB_RESUME_TIMEOUT);
752
753 set_bit(wIndex, &bus_state->resuming_ports);
754 bus_state->resume_done[wIndex] = timeout;
755 mod_timer(&hcd->rh_timer, timeout);
756 }
757 /* Has resume been signalled for USB_RESUME_TIMEOUT yet? */
758 } else if (time_after_eq(jiffies,
759 bus_state->resume_done[wIndex])) {
738 int time_left; 760 int time_left;
739 761
740 xhci_dbg(xhci, "Resume USB2 port %d\n", 762 xhci_dbg(xhci, "Resume USB2 port %d\n",
@@ -775,13 +797,26 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
775 } else { 797 } else {
776 /* 798 /*
777 * The resume has been signaling for less than 799 * The resume has been signaling for less than
778 * 20ms. Report the port status as SUSPEND, 800 * USB_RESUME_TIMEOUT. Report the port status as SUSPEND,
779 * let the usbcore check port status again 801 * let the usbcore check port status again and clear
780 * and clear resume signaling later. 802 * resume signaling later.
781 */ 803 */
782 status |= USB_PORT_STAT_SUSPEND; 804 status |= USB_PORT_STAT_SUSPEND;
783 } 805 }
784 } 806 }
807 /*
808 * Clear stale usb2 resume signalling variables in case port changed
809 * state during resume signalling. For example on error
810 */
811 if ((bus_state->resume_done[wIndex] ||
812 test_bit(wIndex, &bus_state->resuming_ports)) &&
813 (raw_port_status & PORT_PLS_MASK) != XDEV_U3 &&
814 (raw_port_status & PORT_PLS_MASK) != XDEV_RESUME) {
815 bus_state->resume_done[wIndex] = 0;
816 clear_bit(wIndex, &bus_state->resuming_ports);
817 }
818
819
785 if ((raw_port_status & PORT_PLS_MASK) == XDEV_U0 && 820 if ((raw_port_status & PORT_PLS_MASK) == XDEV_U0 &&
786 (raw_port_status & PORT_POWER)) { 821 (raw_port_status & PORT_POWER)) {
787 if (bus_state->suspended_ports & (1 << wIndex)) { 822 if (bus_state->suspended_ports & (1 << wIndex)) {
@@ -1115,6 +1150,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
1115 if ((temp & PORT_PE) == 0) 1150 if ((temp & PORT_PE) == 0)
1116 goto error; 1151 goto error;
1117 1152
1153 set_bit(wIndex, &bus_state->resuming_ports);
1118 xhci_set_link_state(xhci, port_array, wIndex, 1154 xhci_set_link_state(xhci, port_array, wIndex,
1119 XDEV_RESUME); 1155 XDEV_RESUME);
1120 spin_unlock_irqrestore(&xhci->lock, flags); 1156 spin_unlock_irqrestore(&xhci->lock, flags);
@@ -1122,6 +1158,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
1122 spin_lock_irqsave(&xhci->lock, flags); 1158 spin_lock_irqsave(&xhci->lock, flags);
1123 xhci_set_link_state(xhci, port_array, wIndex, 1159 xhci_set_link_state(xhci, port_array, wIndex,
1124 XDEV_U0); 1160 XDEV_U0);
1161 clear_bit(wIndex, &bus_state->resuming_ports);
1125 } 1162 }
1126 bus_state->port_c_suspend |= 1 << wIndex; 1163 bus_state->port_c_suspend |= 1 << wIndex;
1127 1164
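The xhci_get_port_status() rework treats resume_done[wIndex] == 0 as "resume timing not started", starts the timer exactly once, and otherwise compares against jiffies with time_after_eq(), which stays correct when the counter wraps. A standalone demonstration of the wrap-safe comparison, using a fake jiffies counter:

#include <stdio.h>

#define time_after_eq(a, b) ((long)((a) - (b)) >= 0)

int main(void)
{
    unsigned long jiffies = (unsigned long)-5;  /* about to wrap */
    unsigned long deadline = jiffies + 10;      /* wraps past zero */

    for (int i = 0; i < 20; i++, jiffies++)
        if (time_after_eq(jiffies, deadline)) {
            printf("deadline hit at offset %d\n", i);    /* offset 10 */
            break;
        }
    return 0;
}

A naive jiffies >= deadline test would report the deadline as already passed before the wrap; the signed subtraction does not.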
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 17f6897acde2..c62109091d12 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -188,10 +188,14 @@ static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev)
188 0xb7, 0x0c, 0x34, 0xac, 0x01, 0xe9, 0xbf, 0x45, 188 0xb7, 0x0c, 0x34, 0xac, 0x01, 0xe9, 0xbf, 0x45,
189 0xb7, 0xe6, 0x2b, 0x34, 0xec, 0x93, 0x1e, 0x23, 189 0xb7, 0xe6, 0x2b, 0x34, 0xec, 0x93, 0x1e, 0x23,
190 }; 190 };
191 acpi_evaluate_dsm(ACPI_HANDLE(&dev->dev), intel_dsm_uuid, 3, 1, NULL); 191 union acpi_object *obj;
192
193 obj = acpi_evaluate_dsm(ACPI_HANDLE(&dev->dev), intel_dsm_uuid, 3, 1,
194 NULL);
195 ACPI_FREE(obj);
192} 196}
193#else 197#else
194 static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) { } 198static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) { }
195#endif /* CONFIG_ACPI */ 199#endif /* CONFIG_ACPI */
196 200
197/* called during probe() after chip reset completes */ 201/* called during probe() after chip reset completes */
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 6c5e8133cf87..eeaa6c6bd540 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1583,7 +1583,8 @@ static void handle_port_status(struct xhci_hcd *xhci,
1583 */ 1583 */
1584 bogus_port_status = true; 1584 bogus_port_status = true;
1585 goto cleanup; 1585 goto cleanup;
1586 } else { 1586 } else if (!test_bit(faked_port_index,
1587 &bus_state->resuming_ports)) {
1587 xhci_dbg(xhci, "resume HS port %d\n", port_id); 1588 xhci_dbg(xhci, "resume HS port %d\n", port_id);
1588 bus_state->resume_done[faked_port_index] = jiffies + 1589 bus_state->resume_done[faked_port_index] = jiffies +
1589 msecs_to_jiffies(USB_RESUME_TIMEOUT); 1590 msecs_to_jiffies(USB_RESUME_TIMEOUT);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index dfa44d3e8eee..3f912705dcef 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -4778,8 +4778,16 @@ int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
4778 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); 4778 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
4779 slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx); 4779 slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
4780 slot_ctx->dev_info |= cpu_to_le32(DEV_HUB); 4780 slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
4781 /*
4782 * refer to section 6.2.2: MTT should be 0 for a full-speed hub,
4783 * but it may already be set to 1 when setting up an xHCI virtual
4784 * device, so clear it anyway.
4785 */
4781 if (tt->multi) 4786 if (tt->multi)
4782 slot_ctx->dev_info |= cpu_to_le32(DEV_MTT); 4787 slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
4788 else if (hdev->speed == USB_SPEED_FULL)
4789 slot_ctx->dev_info &= cpu_to_le32(~DEV_MTT);
4790
4783 if (xhci->hci_version > 0x95) { 4791 if (xhci->hci_version > 0x95) {
4784 xhci_dbg(xhci, "xHCI version %x needs hub " 4792 xhci_dbg(xhci, "xHCI version %x needs hub "
4785 "TT think time and number of ports\n", 4793 "TT think time and number of ports\n",
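The xhci_update_hub_device() hunk clears DEV_MTT for full-speed hubs rather than assuming the bit starts out zero. A small sketch of the set-or-clear idiom; the bit position is illustrative and cpu_to_le32() is modelled as the identity for a little-endian host:

#include <stdint.h>
#include <stdio.h>

#define DEV_MTT (1u << 25)      /* illustrative bit position */

static uint32_t cpu_to_le32(uint32_t v) { return v; /* LE host */ }

int main(void)
{
    uint32_t dev_info = cpu_to_le32(DEV_MTT);   /* stale bit already set */
    int multi_tt = 0;                           /* full-speed hub: no MTT */

    if (multi_tt)
        dev_info |= cpu_to_le32(DEV_MTT);
    else
        dev_info &= cpu_to_le32(~DEV_MTT);      /* clear it explicitly */

    printf("MTT set: %d\n", !!(dev_info & cpu_to_le32(DEV_MTT)));
    return 0;
}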
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 1f2037bbeb0d..45c83baf675d 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -159,7 +159,7 @@ config USB_TI_CPPI_DMA
159 159
160config USB_TI_CPPI41_DMA 160config USB_TI_CPPI41_DMA
161 bool 'TI CPPI 4.1 (AM335x)' 161 bool 'TI CPPI 4.1 (AM335x)'
162 depends on ARCH_OMAP 162 depends on ARCH_OMAP && DMADEVICES
163 select TI_CPPI41 163 select TI_CPPI41
164 164
165config USB_TUSB_OMAP_DMA 165config USB_TUSB_OMAP_DMA
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 18cfc0a361cb..ee9ff7028b92 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -2017,7 +2017,6 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
2017 /* We need musb_read/write functions initialized for PM */ 2017 /* We need musb_read/write functions initialized for PM */
2018 pm_runtime_use_autosuspend(musb->controller); 2018 pm_runtime_use_autosuspend(musb->controller);
2019 pm_runtime_set_autosuspend_delay(musb->controller, 200); 2019 pm_runtime_set_autosuspend_delay(musb->controller, 200);
2020 pm_runtime_irq_safe(musb->controller);
2021 pm_runtime_enable(musb->controller); 2020 pm_runtime_enable(musb->controller);
2022 2021
2023 /* The musb_platform_init() call: 2022 /* The musb_platform_init() call:
@@ -2095,6 +2094,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
2095#ifndef CONFIG_MUSB_PIO_ONLY 2094#ifndef CONFIG_MUSB_PIO_ONLY
2096 if (!musb->ops->dma_init || !musb->ops->dma_exit) { 2095 if (!musb->ops->dma_init || !musb->ops->dma_exit) {
2097 dev_err(dev, "DMA controller not set\n"); 2096 dev_err(dev, "DMA controller not set\n");
2097 status = -ENODEV;
2098 goto fail2; 2098 goto fail2;
2099 } 2099 }
2100 musb_dma_controller_create = musb->ops->dma_init; 2100 musb_dma_controller_create = musb->ops->dma_init;
@@ -2218,6 +2218,12 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
2218 2218
2219 pm_runtime_put(musb->controller); 2219 pm_runtime_put(musb->controller);
2220 2220
2221 /*
2222 * For why this is currently needed, see commit 3e43a0725637
2223 * ("usb: musb: core: add pm_runtime_irq_safe()")
2224 */
2225 pm_runtime_irq_safe(musb->controller);
2226
2221 return 0; 2227 return 0;
2222 2228
2223fail5: 2229fail5:
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
index 80eb991c2506..0d19a6d61a71 100644
--- a/drivers/usb/phy/phy-msm-usb.c
+++ b/drivers/usb/phy/phy-msm-usb.c
@@ -1506,7 +1506,6 @@ static int msm_otg_read_dt(struct platform_device *pdev, struct msm_otg *motg)
1506{ 1506{
1507 struct msm_otg_platform_data *pdata; 1507 struct msm_otg_platform_data *pdata;
1508 struct extcon_dev *ext_id, *ext_vbus; 1508 struct extcon_dev *ext_id, *ext_vbus;
1509 const struct of_device_id *id;
1510 struct device_node *node = pdev->dev.of_node; 1509 struct device_node *node = pdev->dev.of_node;
1511 struct property *prop; 1510 struct property *prop;
1512 int len, ret, words; 1511 int len, ret, words;
@@ -1518,8 +1517,9 @@ static int msm_otg_read_dt(struct platform_device *pdev, struct msm_otg *motg)
1518 1517
1519 motg->pdata = pdata; 1518 motg->pdata = pdata;
1520 1519
1521 id = of_match_device(msm_otg_dt_match, &pdev->dev); 1520 pdata->phy_type = (enum msm_usb_phy_type)of_device_get_match_data(&pdev->dev);
1522 pdata->phy_type = (enum msm_usb_phy_type) id->data; 1521 if (!pdata->phy_type)
1522 return 1;
1523 1523
1524 motg->link_rst = devm_reset_control_get(&pdev->dev, "link"); 1524 motg->link_rst = devm_reset_control_get(&pdev->dev, "link");
1525 if (IS_ERR(motg->link_rst)) 1525 if (IS_ERR(motg->link_rst))
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
index b7536af777ab..c2936dc48ca7 100644
--- a/drivers/usb/phy/phy-mxs-usb.c
+++ b/drivers/usb/phy/phy-mxs-usb.c
@@ -143,12 +143,17 @@ static const struct mxs_phy_data imx6sx_phy_data = {
143 .flags = MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS, 143 .flags = MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS,
144}; 144};
145 145
146static const struct mxs_phy_data imx6ul_phy_data = {
147 .flags = MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS,
148};
149
146static const struct of_device_id mxs_phy_dt_ids[] = { 150static const struct of_device_id mxs_phy_dt_ids[] = {
147 { .compatible = "fsl,imx6sx-usbphy", .data = &imx6sx_phy_data, }, 151 { .compatible = "fsl,imx6sx-usbphy", .data = &imx6sx_phy_data, },
148 { .compatible = "fsl,imx6sl-usbphy", .data = &imx6sl_phy_data, }, 152 { .compatible = "fsl,imx6sl-usbphy", .data = &imx6sl_phy_data, },
149 { .compatible = "fsl,imx6q-usbphy", .data = &imx6q_phy_data, }, 153 { .compatible = "fsl,imx6q-usbphy", .data = &imx6q_phy_data, },
150 { .compatible = "fsl,imx23-usbphy", .data = &imx23_phy_data, }, 154 { .compatible = "fsl,imx23-usbphy", .data = &imx23_phy_data, },
151 { .compatible = "fsl,vf610-usbphy", .data = &vf610_phy_data, }, 155 { .compatible = "fsl,vf610-usbphy", .data = &vf610_phy_data, },
156 { .compatible = "fsl,imx6ul-usbphy", .data = &imx6ul_phy_data, },
152 { /* sentinel */ } 157 { /* sentinel */ }
153}; 158};
154MODULE_DEVICE_TABLE(of, mxs_phy_dt_ids); 159MODULE_DEVICE_TABLE(of, mxs_phy_dt_ids);
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index de4f97d84a82..8f7a78e70975 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -131,7 +131,8 @@ static void __usbhsg_queue_pop(struct usbhsg_uep *uep,
131 struct device *dev = usbhsg_gpriv_to_dev(gpriv); 131 struct device *dev = usbhsg_gpriv_to_dev(gpriv);
132 struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv); 132 struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
133 133
134 dev_dbg(dev, "pipe %d : queue pop\n", usbhs_pipe_number(pipe)); 134 if (pipe)
135 dev_dbg(dev, "pipe %d : queue pop\n", usbhs_pipe_number(pipe));
135 136
136 ureq->req.status = status; 137 ureq->req.status = status;
137 spin_unlock(usbhs_priv_to_lock(priv)); 138 spin_unlock(usbhs_priv_to_lock(priv));
@@ -685,7 +686,13 @@ static int usbhsg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
685 struct usbhsg_request *ureq = usbhsg_req_to_ureq(req); 686 struct usbhsg_request *ureq = usbhsg_req_to_ureq(req);
686 struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep); 687 struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
687 688
688 usbhs_pkt_pop(pipe, usbhsg_ureq_to_pkt(ureq)); 689 if (pipe)
690 usbhs_pkt_pop(pipe, usbhsg_ureq_to_pkt(ureq));
691
692 /*
693 * To dequeue a request, this driver should call usbhsg_queue_pop()
694 * even if the pipe is NULL.
695 */
689 usbhsg_queue_pop(uep, ureq, -ECONNRESET); 696 usbhsg_queue_pop(uep, ureq, -ECONNRESET);
690 697
691 return 0; 698 return 0;
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index eac7ccaa3c85..7d4f51a32e66 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -132,7 +132,6 @@ static const struct usb_device_id id_table[] = {
132 { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ 132 { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
133 { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ 133 { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
134 { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */ 134 { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
135 { USB_DEVICE(0x10C4, 0xEA80) }, /* Silicon Labs factory default */
136 { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */ 135 { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */
137 { USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */ 136 { USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */
138 { USB_DEVICE(0x10C4, 0xF002) }, /* Elan Digital Systems USBwave12 */ 137 { USB_DEVICE(0x10C4, 0xF002) }, /* Elan Digital Systems USBwave12 */
diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c
index f51a5d52c0ed..ec1b8f2c1183 100644
--- a/drivers/usb/serial/ipaq.c
+++ b/drivers/usb/serial/ipaq.c
@@ -531,7 +531,8 @@ static int ipaq_open(struct tty_struct *tty,
531 * through. Since this has a reasonably high failure rate, we retry 531 * through. Since this has a reasonably high failure rate, we retry
532 * several times. 532 * several times.
533 */ 533 */
534 while (retries--) { 534 while (retries) {
535 retries--;
535 result = usb_control_msg(serial->dev, 536 result = usb_control_msg(serial->dev,
536 usb_sndctrlpipe(serial->dev, 0), 0x22, 0x21, 537 usb_sndctrlpipe(serial->dev, 0), 0x22, 0x21,
537 0x1, 0, NULL, 0, 100); 538 0x1, 0, NULL, 0, 100);
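The ipaq_open() change replaces while (retries--) with an explicit decrement inside the body, so the counter ends at 0 rather than -1 once all attempts fail, presumably keeping a later !retries test meaningful. A standalone comparison of the two shapes:

#include <stdio.h>

static int attempts_exhausted(int postdec)
{
    int retries = 3;

    if (postdec) {
        while (retries--)
            ;               /* pretend every attempt failed */
    } else {
        while (retries) {
            retries--;      /* pretend every attempt failed */
        }
    }
    return retries;         /* what a "!retries" test would look at */
}

int main(void)
{
    printf("post-decrement: %d, rewrite: %d\n",
           attempts_exhausted(1), attempts_exhausted(0));  /* -1 vs 0 */
    return 0;
}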
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index 3658662898fc..a204782ae530 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -53,6 +53,7 @@ DEVICE(funsoft, FUNSOFT_IDS);
53 53
54/* Infineon Flashloader driver */ 54/* Infineon Flashloader driver */
55#define FLASHLOADER_IDS() \ 55#define FLASHLOADER_IDS() \
56 { USB_DEVICE_INTERFACE_CLASS(0x058b, 0x0041, USB_CLASS_CDC_DATA) }, \
56 { USB_DEVICE(0x8087, 0x0716) } 57 { USB_DEVICE(0x8087, 0x0716) }
57DEVICE(flashloader, FLASHLOADER_IDS); 58DEVICE(flashloader, FLASHLOADER_IDS);
58 59
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index e69151664436..5c66d3f7a6d0 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -796,6 +796,10 @@ static int uas_slave_configure(struct scsi_device *sdev)
796 if (devinfo->flags & US_FL_NO_REPORT_OPCODES) 796 if (devinfo->flags & US_FL_NO_REPORT_OPCODES)
797 sdev->no_report_opcodes = 1; 797 sdev->no_report_opcodes = 1;
798 798
799 /* A few buggy USB-ATA bridges don't understand FUA */
800 if (devinfo->flags & US_FL_BROKEN_FUA)
801 sdev->broken_fua = 1;
802
799 scsi_change_queue_depth(sdev, devinfo->qdepth - 2); 803 scsi_change_queue_depth(sdev, devinfo->qdepth - 2);
800 return 0; 804 return 0;
801} 805}
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 6b2479123de7..7ffe4209067b 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1987,7 +1987,7 @@ UNUSUAL_DEV( 0x14cd, 0x6600, 0x0201, 0x0201,
1987 US_FL_IGNORE_RESIDUE ), 1987 US_FL_IGNORE_RESIDUE ),
1988 1988
1989/* Reported by Michael Büsch <m@bues.ch> */ 1989/* Reported by Michael Büsch <m@bues.ch> */
1990UNUSUAL_DEV( 0x152d, 0x0567, 0x0114, 0x0114, 1990UNUSUAL_DEV( 0x152d, 0x0567, 0x0114, 0x0116,
1991 "JMicron", 1991 "JMicron",
1992 "USB to ATA/ATAPI Bridge", 1992 "USB to ATA/ATAPI Bridge",
1993 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 1993 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index c85ea530085f..ccc113e83d88 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -132,7 +132,7 @@ UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999,
132 "JMicron", 132 "JMicron",
133 "JMS567", 133 "JMS567",
134 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 134 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
135 US_FL_NO_REPORT_OPCODES), 135 US_FL_BROKEN_FUA | US_FL_NO_REPORT_OPCODES),
136 136
137/* Reported-by: Hans de Goede <hdegoede@redhat.com> */ 137/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
138UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999, 138UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig
index da6e2ce77495..850d86ca685b 100644
--- a/drivers/vfio/Kconfig
+++ b/drivers/vfio/Kconfig
@@ -31,21 +31,6 @@ menuconfig VFIO
31 31
32 If you don't know what to do here, say N. 32 If you don't know what to do here, say N.
33 33
34menuconfig VFIO_NOIOMMU
35 bool "VFIO No-IOMMU support"
36 depends on VFIO
37 help
38 VFIO is built on the ability to isolate devices using the IOMMU.
39 Only with an IOMMU can userspace access to DMA capable devices be
40 considered secure. VFIO No-IOMMU mode enables IOMMU groups for
41 devices without IOMMU backing for the purpose of re-using the VFIO
42 infrastructure in a non-secure mode. Use of this mode will result
43 in an unsupportable kernel and will therefore taint the kernel.
44 Device assignment to virtual machines is also not possible with
45 this mode since there is no IOMMU to provide DMA translation.
46
47 If you don't know what to do here, say N.
48
49source "drivers/vfio/pci/Kconfig" 34source "drivers/vfio/pci/Kconfig"
50source "drivers/vfio/platform/Kconfig" 35source "drivers/vfio/platform/Kconfig"
51source "virt/lib/Kconfig" 36source "virt/lib/Kconfig"
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 32b88bd2c82c..56bf6dbb93db 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -940,13 +940,13 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
940 if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL) 940 if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
941 return -EINVAL; 941 return -EINVAL;
942 942
943 group = vfio_iommu_group_get(&pdev->dev); 943 group = iommu_group_get(&pdev->dev);
944 if (!group) 944 if (!group)
945 return -EINVAL; 945 return -EINVAL;
946 946
947 vdev = kzalloc(sizeof(*vdev), GFP_KERNEL); 947 vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
948 if (!vdev) { 948 if (!vdev) {
949 vfio_iommu_group_put(group, &pdev->dev); 949 iommu_group_put(group);
950 return -ENOMEM; 950 return -ENOMEM;
951 } 951 }
952 952
@@ -957,7 +957,7 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
957 957
958 ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev); 958 ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
959 if (ret) { 959 if (ret) {
960 vfio_iommu_group_put(group, &pdev->dev); 960 iommu_group_put(group);
961 kfree(vdev); 961 kfree(vdev);
962 return ret; 962 return ret;
963 } 963 }
@@ -993,7 +993,7 @@ static void vfio_pci_remove(struct pci_dev *pdev)
993 if (!vdev) 993 if (!vdev)
994 return; 994 return;
995 995
996 vfio_iommu_group_put(pdev->dev.iommu_group, &pdev->dev); 996 iommu_group_put(pdev->dev.iommu_group);
997 kfree(vdev); 997 kfree(vdev);
998 998
999 if (vfio_pci_is_vga(pdev)) { 999 if (vfio_pci_is_vga(pdev)) {
@@ -1035,7 +1035,7 @@ static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev,
1035 return PCI_ERS_RESULT_CAN_RECOVER; 1035 return PCI_ERS_RESULT_CAN_RECOVER;
1036} 1036}
1037 1037
1038static struct pci_error_handlers vfio_err_handlers = { 1038static const struct pci_error_handlers vfio_err_handlers = {
1039 .error_detected = vfio_pci_aer_err_detected, 1039 .error_detected = vfio_pci_aer_err_detected,
1040}; 1040};
1041 1041
diff --git a/drivers/vfio/platform/vfio_platform.c b/drivers/vfio/platform/vfio_platform.c
index f1625dcfbb23..b1cc3a768784 100644
--- a/drivers/vfio/platform/vfio_platform.c
+++ b/drivers/vfio/platform/vfio_platform.c
@@ -92,7 +92,6 @@ static struct platform_driver vfio_platform_driver = {
92 .remove = vfio_platform_remove, 92 .remove = vfio_platform_remove,
93 .driver = { 93 .driver = {
94 .name = "vfio-platform", 94 .name = "vfio-platform",
95 .owner = THIS_MODULE,
96 }, 95 },
97}; 96};
98 97
diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
index a1c50d630792..418cdd9ba3f4 100644
--- a/drivers/vfio/platform/vfio_platform_common.c
+++ b/drivers/vfio/platform/vfio_platform_common.c
@@ -51,13 +51,10 @@ static vfio_platform_reset_fn_t vfio_platform_lookup_reset(const char *compat,
51 51
52static void vfio_platform_get_reset(struct vfio_platform_device *vdev) 52static void vfio_platform_get_reset(struct vfio_platform_device *vdev)
53{ 53{
54 char modname[256];
55
56 vdev->reset = vfio_platform_lookup_reset(vdev->compat, 54 vdev->reset = vfio_platform_lookup_reset(vdev->compat,
57 &vdev->reset_module); 55 &vdev->reset_module);
58 if (!vdev->reset) { 56 if (!vdev->reset) {
59 snprintf(modname, 256, "vfio-reset:%s", vdev->compat); 57 request_module("vfio-reset:%s", vdev->compat);
60 request_module(modname);
61 vdev->reset = vfio_platform_lookup_reset(vdev->compat, 58 vdev->reset = vfio_platform_lookup_reset(vdev->compat,
62 &vdev->reset_module); 59 &vdev->reset_module);
63 } 60 }
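The vfio_platform_get_reset() hunk drops the 256-byte stack buffer and hands the format string straight to request_module(), which is printf-like. A userspace sketch with a variadic stand-in for request_module():

#include <stdarg.h>
#include <stdio.h>

static void request_module(const char *fmt, ...)    /* stand-in only */
{
    va_list ap;

    va_start(ap, fmt);
    vprintf(fmt, ap);       /* the real call forwards to modprobe */
    va_end(ap);
    putchar('\n');
}

int main(void)
{
    const char *compat = "fsl,imx6q-foo";   /* hypothetical compatible */

    /* before: snprintf(modname, 256, "vfio-reset:%s", compat);
     *         request_module(modname);
     * after: */
    request_module("vfio-reset:%s", compat);
    return 0;
}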
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index de632da2e22f..6070b793cbcb 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -62,7 +62,6 @@ struct vfio_container {
62 struct rw_semaphore group_lock; 62 struct rw_semaphore group_lock;
63 struct vfio_iommu_driver *iommu_driver; 63 struct vfio_iommu_driver *iommu_driver;
64 void *iommu_data; 64 void *iommu_data;
65 bool noiommu;
66}; 65};
67 66
68struct vfio_unbound_dev { 67struct vfio_unbound_dev {
@@ -85,7 +84,6 @@ struct vfio_group {
85 struct list_head unbound_list; 84 struct list_head unbound_list;
86 struct mutex unbound_lock; 85 struct mutex unbound_lock;
87 atomic_t opened; 86 atomic_t opened;
88 bool noiommu;
89}; 87};
90 88
91struct vfio_device { 89struct vfio_device {
@@ -97,147 +95,6 @@ struct vfio_device {
97 void *device_data; 95 void *device_data;
98}; 96};
99 97
100#ifdef CONFIG_VFIO_NOIOMMU
101static bool noiommu __read_mostly;
102module_param_named(enable_unsafe_noiommu_support,
103 noiommu, bool, S_IRUGO | S_IWUSR);
104MODULE_PARM_DESC(enable_unsafe_noiommu_mode, "Enable UNSAFE, no-IOMMU mode. This mode provides no device isolation, no DMA translation, no host kernel protection, cannot be used for device assignment to virtual machines, requires RAWIO permissions, and will taint the kernel. If you do not know what this is for, step away. (default: false)");
105#endif
106
107/*
108 * vfio_iommu_group_{get,put} are only intended for VFIO bus driver probe
109 * and remove functions, any use cases other than acquiring the first
110 * reference for the purpose of calling vfio_add_group_dev() or removing
111 * that symmetric reference after vfio_del_group_dev() should use the raw
112 * iommu_group_{get,put} functions. In particular, vfio_iommu_group_put()
113 * removes the device from the dummy group and cannot be nested.
114 */
115struct iommu_group *vfio_iommu_group_get(struct device *dev)
116{
117 struct iommu_group *group;
118 int __maybe_unused ret;
119
120 group = iommu_group_get(dev);
121
122#ifdef CONFIG_VFIO_NOIOMMU
123 /*
124 * With noiommu enabled, an IOMMU group will be created for a device
125 * that doesn't already have one and doesn't have an iommu_ops on their
126 * bus. We use iommu_present() again in the main code to detect these
127 * fake groups.
128 */
129 if (group || !noiommu || iommu_present(dev->bus))
130 return group;
131
132 group = iommu_group_alloc();
133 if (IS_ERR(group))
134 return NULL;
135
136 iommu_group_set_name(group, "vfio-noiommu");
137 ret = iommu_group_add_device(group, dev);
138 iommu_group_put(group);
139 if (ret)
140 return NULL;
141
142 /*
143 * Where to taint? At this point we've added an IOMMU group for a
144 * device that is not backed by iommu_ops, therefore any iommu_
145 * callback using iommu_ops can legitimately Oops. So, while we may
146 * be about to give a DMA capable device to a user without IOMMU
147 * protection, which is clearly taint-worthy, let's go ahead and do
148 * it here.
149 */
150 add_taint(TAINT_USER, LOCKDEP_STILL_OK);
151 dev_warn(dev, "Adding kernel taint for vfio-noiommu group on device\n");
152#endif
153
154 return group;
155}
156EXPORT_SYMBOL_GPL(vfio_iommu_group_get);
157
158void vfio_iommu_group_put(struct iommu_group *group, struct device *dev)
159{
160#ifdef CONFIG_VFIO_NOIOMMU
161 if (!iommu_present(dev->bus))
162 iommu_group_remove_device(dev);
163#endif
164
165 iommu_group_put(group);
166}
167EXPORT_SYMBOL_GPL(vfio_iommu_group_put);
168
169#ifdef CONFIG_VFIO_NOIOMMU
170static void *vfio_noiommu_open(unsigned long arg)
171{
172 if (arg != VFIO_NOIOMMU_IOMMU)
173 return ERR_PTR(-EINVAL);
174 if (!capable(CAP_SYS_RAWIO))
175 return ERR_PTR(-EPERM);
176
177 return NULL;
178}
179
180static void vfio_noiommu_release(void *iommu_data)
181{
182}
183
184static long vfio_noiommu_ioctl(void *iommu_data,
185 unsigned int cmd, unsigned long arg)
186{
187 if (cmd == VFIO_CHECK_EXTENSION)
188 return arg == VFIO_NOIOMMU_IOMMU ? 1 : 0;
189
190 return -ENOTTY;
191}
192
193static int vfio_iommu_present(struct device *dev, void *unused)
194{
195 return iommu_present(dev->bus) ? 1 : 0;
196}
197
198static int vfio_noiommu_attach_group(void *iommu_data,
199 struct iommu_group *iommu_group)
200{
201 return iommu_group_for_each_dev(iommu_group, NULL,
202 vfio_iommu_present) ? -EINVAL : 0;
203}
204
205static void vfio_noiommu_detach_group(void *iommu_data,
206 struct iommu_group *iommu_group)
207{
208}
209
210static struct vfio_iommu_driver_ops vfio_noiommu_ops = {
211 .name = "vfio-noiommu",
212 .owner = THIS_MODULE,
213 .open = vfio_noiommu_open,
214 .release = vfio_noiommu_release,
215 .ioctl = vfio_noiommu_ioctl,
216 .attach_group = vfio_noiommu_attach_group,
217 .detach_group = vfio_noiommu_detach_group,
218};
219
220static struct vfio_iommu_driver vfio_noiommu_driver = {
221 .ops = &vfio_noiommu_ops,
222};
223
224/*
225 * Wrap IOMMU drivers, the noiommu driver is the one and only driver for
226 * noiommu groups (and thus containers) and not available for normal groups.
227 */
228#define vfio_for_each_iommu_driver(con, pos) \
229 for (pos = con->noiommu ? &vfio_noiommu_driver : \
230 list_first_entry(&vfio.iommu_drivers_list, \
231 struct vfio_iommu_driver, vfio_next); \
232 (con->noiommu ? pos != NULL : \
233 &pos->vfio_next != &vfio.iommu_drivers_list); \
234 pos = con->noiommu ? NULL : list_next_entry(pos, vfio_next))
235#else
236#define vfio_for_each_iommu_driver(con, pos) \
237 list_for_each_entry(pos, &vfio.iommu_drivers_list, vfio_next)
238#endif
239
240
241/** 98/**
242 * IOMMU driver registration 99 * IOMMU driver registration
243 */ 100 */
@@ -342,8 +199,7 @@ static void vfio_group_unlock_and_free(struct vfio_group *group)
342/** 199/**
343 * Group objects - create, release, get, put, search 200 * Group objects - create, release, get, put, search
344 */ 201 */
345static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group, 202static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
346 bool noiommu)
347{ 203{
348 struct vfio_group *group, *tmp; 204 struct vfio_group *group, *tmp;
349 struct device *dev; 205 struct device *dev;
@@ -361,7 +217,6 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group,
361 atomic_set(&group->container_users, 0); 217 atomic_set(&group->container_users, 0);
362 atomic_set(&group->opened, 0); 218 atomic_set(&group->opened, 0);
363 group->iommu_group = iommu_group; 219 group->iommu_group = iommu_group;
364 group->noiommu = noiommu;
365 220
366 group->nb.notifier_call = vfio_iommu_group_notifier; 221 group->nb.notifier_call = vfio_iommu_group_notifier;
367 222
@@ -397,8 +252,7 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group,
397 252
398 dev = device_create(vfio.class, NULL, 253 dev = device_create(vfio.class, NULL,
399 MKDEV(MAJOR(vfio.group_devt), minor), 254 MKDEV(MAJOR(vfio.group_devt), minor),
400 group, "%s%d", noiommu ? "noiommu-" : "", 255 group, "%d", iommu_group_id(iommu_group));
401 iommu_group_id(iommu_group));
402 if (IS_ERR(dev)) { 256 if (IS_ERR(dev)) {
403 vfio_free_group_minor(minor); 257 vfio_free_group_minor(minor);
404 vfio_group_unlock_and_free(group); 258 vfio_group_unlock_and_free(group);
@@ -682,7 +536,7 @@ static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
682 return 0; 536 return 0;
683 537
684 /* TODO Prevent device auto probing */ 538 /* TODO Prevent device auto probing */
685 WARN("Device %s added to live group %d!\n", dev_name(dev), 539 WARN(1, "Device %s added to live group %d!\n", dev_name(dev),
686 iommu_group_id(group->iommu_group)); 540 iommu_group_id(group->iommu_group));
687 541
688 return 0; 542 return 0;
@@ -786,8 +640,7 @@ int vfio_add_group_dev(struct device *dev,
786 640
787 group = vfio_group_get_from_iommu(iommu_group); 641 group = vfio_group_get_from_iommu(iommu_group);
788 if (!group) { 642 if (!group) {
789 group = vfio_create_group(iommu_group, 643 group = vfio_create_group(iommu_group);
790 !iommu_present(dev->bus));
791 if (IS_ERR(group)) { 644 if (IS_ERR(group)) {
792 iommu_group_put(iommu_group); 645 iommu_group_put(iommu_group);
793 return PTR_ERR(group); 646 return PTR_ERR(group);
@@ -999,7 +852,8 @@ static long vfio_ioctl_check_extension(struct vfio_container *container,
999 */ 852 */
1000 if (!driver) { 853 if (!driver) {
1001 mutex_lock(&vfio.iommu_drivers_lock); 854 mutex_lock(&vfio.iommu_drivers_lock);
1002 vfio_for_each_iommu_driver(container, driver) { 855 list_for_each_entry(driver, &vfio.iommu_drivers_list,
856 vfio_next) {
1003 if (!try_module_get(driver->ops->owner)) 857 if (!try_module_get(driver->ops->owner))
1004 continue; 858 continue;
1005 859
@@ -1068,7 +922,7 @@ static long vfio_ioctl_set_iommu(struct vfio_container *container,
1068 } 922 }
1069 923
1070 mutex_lock(&vfio.iommu_drivers_lock); 924 mutex_lock(&vfio.iommu_drivers_lock);
1071 vfio_for_each_iommu_driver(container, driver) { 925 list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) {
1072 void *data; 926 void *data;
1073 927
1074 if (!try_module_get(driver->ops->owner)) 928 if (!try_module_get(driver->ops->owner))
@@ -1333,9 +1187,6 @@ static int vfio_group_set_container(struct vfio_group *group, int container_fd)
1333 if (atomic_read(&group->container_users)) 1187 if (atomic_read(&group->container_users))
1334 return -EINVAL; 1188 return -EINVAL;
1335 1189
1336 if (group->noiommu && !capable(CAP_SYS_RAWIO))
1337 return -EPERM;
1338
1339 f = fdget(container_fd); 1190 f = fdget(container_fd);
1340 if (!f.file) 1191 if (!f.file)
1341 return -EBADF; 1192 return -EBADF;
@@ -1351,13 +1202,6 @@ static int vfio_group_set_container(struct vfio_group *group, int container_fd)
1351 1202
1352 down_write(&container->group_lock); 1203 down_write(&container->group_lock);
1353 1204
1354 /* Real groups and fake groups cannot mix */
1355 if (!list_empty(&container->group_list) &&
1356 container->noiommu != group->noiommu) {
1357 ret = -EPERM;
1358 goto unlock_out;
1359 }
1360
1361 driver = container->iommu_driver; 1205 driver = container->iommu_driver;
1362 if (driver) { 1206 if (driver) {
1363 ret = driver->ops->attach_group(container->iommu_data, 1207 ret = driver->ops->attach_group(container->iommu_data,
@@ -1367,7 +1211,6 @@ static int vfio_group_set_container(struct vfio_group *group, int container_fd)
1367 } 1211 }
1368 1212
1369 group->container = container; 1213 group->container = container;
1370 container->noiommu = group->noiommu;
1371 list_add(&group->container_next, &container->group_list); 1214 list_add(&group->container_next, &container->group_list);
1372 1215
1373 /* Get a reference on the container and mark a user within the group */ 1216 /* Get a reference on the container and mark a user within the group */
@@ -1398,9 +1241,6 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
1398 !group->container->iommu_driver || !vfio_group_viable(group)) 1241 !group->container->iommu_driver || !vfio_group_viable(group))
1399 return -EINVAL; 1242 return -EINVAL;
1400 1243
1401 if (group->noiommu && !capable(CAP_SYS_RAWIO))
1402 return -EPERM;
1403
1404 device = vfio_device_get_from_name(group, buf); 1244 device = vfio_device_get_from_name(group, buf);
1405 if (!device) 1245 if (!device)
1406 return -ENODEV; 1246 return -ENODEV;
@@ -1443,10 +1283,6 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
1443 1283
1444 fd_install(ret, filep); 1284 fd_install(ret, filep);
1445 1285
1446 if (group->noiommu)
1447 dev_warn(device->dev, "vfio-noiommu device opened by user "
1448 "(%s:%d)\n", current->comm, task_pid_nr(current));
1449
1450 return ret; 1286 return ret;
1451} 1287}
1452 1288
@@ -1535,11 +1371,6 @@ static int vfio_group_fops_open(struct inode *inode, struct file *filep)
1535 if (!group) 1371 if (!group)
1536 return -ENODEV; 1372 return -ENODEV;
1537 1373
1538 if (group->noiommu && !capable(CAP_SYS_RAWIO)) {
1539 vfio_group_put(group);
1540 return -EPERM;
1541 }
1542
1543 /* Do we need multiple instances of the group open? Seems not. */ 1374 /* Do we need multiple instances of the group open? Seems not. */
1544 opened = atomic_cmpxchg(&group->opened, 0, 1); 1375 opened = atomic_cmpxchg(&group->opened, 0, 1);
1545 if (opened) { 1376 if (opened) {
@@ -1702,11 +1533,6 @@ struct vfio_group *vfio_group_get_external_user(struct file *filep)
1702 if (!atomic_inc_not_zero(&group->container_users)) 1533 if (!atomic_inc_not_zero(&group->container_users))
1703 return ERR_PTR(-EINVAL); 1534 return ERR_PTR(-EINVAL);
1704 1535
1705 if (group->noiommu) {
1706 atomic_dec(&group->container_users);
1707 return ERR_PTR(-EPERM);
1708 }
1709
1710 if (!group->container->iommu_driver || 1536 if (!group->container->iommu_driver ||
1711 !vfio_group_viable(group)) { 1537 !vfio_group_viable(group)) {
1712 atomic_dec(&group->container_users); 1538 atomic_dec(&group->container_users);
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index eec2f11809ff..ad2146a9ab2d 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -819,7 +819,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
819 BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE); 819 BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE);
820 if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) || 820 if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) ||
821 (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) || 821 (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) ||
822 (a.log_guest_addr & (sizeof(u64) - 1))) { 822 (a.log_guest_addr & (VRING_USED_ALIGN_SIZE - 1))) {
823 r = -EINVAL; 823 r = -EINVAL;
824 break; 824 break;
825 } 825 }
@@ -1369,7 +1369,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
1369 /* Grab the next descriptor number they're advertising, and increment 1369 /* Grab the next descriptor number they're advertising, and increment
1370 * the index we've seen. */ 1370 * the index we've seen. */
1371 if (unlikely(__get_user(ring_head, 1371 if (unlikely(__get_user(ring_head,
1372 &vq->avail->ring[last_avail_idx % vq->num]))) { 1372 &vq->avail->ring[last_avail_idx & (vq->num - 1)]))) {
1373 vq_err(vq, "Failed to read head: idx %d address %p\n", 1373 vq_err(vq, "Failed to read head: idx %d address %p\n",
1374 last_avail_idx, 1374 last_avail_idx,
1375 &vq->avail->ring[last_avail_idx % vq->num]); 1375 &vq->avail->ring[last_avail_idx % vq->num]);
@@ -1489,7 +1489,7 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
1489 u16 old, new; 1489 u16 old, new;
1490 int start; 1490 int start;
1491 1491
1492 start = vq->last_used_idx % vq->num; 1492 start = vq->last_used_idx & (vq->num - 1);
1493 used = vq->used->ring + start; 1493 used = vq->used->ring + start;
1494 if (count == 1) { 1494 if (count == 1) {
1495 if (__put_user(heads[0].id, &used->id)) { 1495 if (__put_user(heads[0].id, &used->id)) {
@@ -1531,7 +1531,7 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
1531{ 1531{
1532 int start, n, r; 1532 int start, n, r;
1533 1533
1534 start = vq->last_used_idx % vq->num; 1534 start = vq->last_used_idx & (vq->num - 1);
1535 n = vq->num - start; 1535 n = vq->num - start;
1536 if (n < count) { 1536 if (n < count) {
1537 r = __vhost_add_used_n(vq, heads, n); 1537 r = __vhost_add_used_n(vq, heads, n);
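The vhost hunks index the rings with last_used_idx & (vq->num - 1) instead of % vq->num: virtqueue sizes are powers of two and the indexes are free-running 16-bit counters, so the mask is equivalent and avoids a division. A standalone check of that equivalence across the 16-bit wrap:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint16_t num = 256;   /* ring size, power of two */
    uint16_t idx = 65530;       /* free-running index, about to wrap */

    for (int i = 0; i < 16; i++, idx++)
        assert((uint16_t)(idx & (num - 1)) == idx % num);

    printf("mask and modulo agree across the wrap\n");
    return 0;
}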
diff --git a/drivers/video/fbdev/fsl-diu-fb.c b/drivers/video/fbdev/fsl-diu-fb.c
index b335c1ae8625..fe00a07c122e 100644
--- a/drivers/video/fbdev/fsl-diu-fb.c
+++ b/drivers/video/fbdev/fsl-diu-fb.c
@@ -479,7 +479,10 @@ static enum fsl_diu_monitor_port fsl_diu_name_to_port(const char *s)
479 port = FSL_DIU_PORT_DLVDS; 479 port = FSL_DIU_PORT_DLVDS;
480 } 480 }
481 481
482 return diu_ops.valid_monitor_port(port); 482 if (diu_ops.valid_monitor_port)
483 port = diu_ops.valid_monitor_port(port);
484
485 return port;
483} 486}
484 487
485/* 488/*
@@ -1915,6 +1918,14 @@ static int __init fsl_diu_init(void)
1915#else 1918#else
1916 monitor_port = fsl_diu_name_to_port(monitor_string); 1919 monitor_port = fsl_diu_name_to_port(monitor_string);
1917#endif 1920#endif
1921
1922 /*
1923 * We must verify set_pixel_clock. If it is not implemented on the
1924 * platform, that means there is no platform support for the DIU.
1925 */
1926 if (!diu_ops.set_pixel_clock)
1927 return -ENODEV;
1928
1918 pr_info("Freescale Display Interface Unit (DIU) framebuffer driver\n"); 1929 pr_info("Freescale Display Interface Unit (DIU) framebuffer driver\n");
1919 1930
1920#ifdef CONFIG_NOT_COHERENT_CACHE 1931#ifdef CONFIG_NOT_COHERENT_CACHE
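The fsl_diu_name_to_port() fix only calls diu_ops.valid_monitor_port when the platform actually provides it. A minimal sketch of guarding an optional ops hook; the struct, enum, and names are placeholders:

#include <stdio.h>

enum port { PORT_DVI, PORT_LVDS };

struct diu_ops {
    enum port (*valid_monitor_port)(enum port);     /* may be NULL */
};

static struct diu_ops ops;      /* hook deliberately left unset */

static enum port pick_port(enum port want)
{
    if (ops.valid_monitor_port)         /* only call if provided */
        want = ops.valid_monitor_port(want);
    return want;                        /* otherwise keep the request */
}

int main(void)
{
    printf("port: %d\n", pick_port(PORT_LVDS));
    return 0;
}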
diff --git a/drivers/video/fbdev/omap2/dss/venc.c b/drivers/video/fbdev/omap2/dss/venc.c
index 99ca268c1cdd..d05a54922ba6 100644
--- a/drivers/video/fbdev/omap2/dss/venc.c
+++ b/drivers/video/fbdev/omap2/dss/venc.c
@@ -275,6 +275,12 @@ const struct omap_video_timings omap_dss_pal_timings = {
 	.vbp = 41,
 
 	.interlace = true,
+
+	.hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+	.vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+	.data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+	.de_level = OMAPDSS_SIG_ACTIVE_HIGH,
+	.sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
 };
 EXPORT_SYMBOL(omap_dss_pal_timings);
 
@@ -290,6 +296,12 @@ const struct omap_video_timings omap_dss_ntsc_timings = {
 	.vbp = 31,
 
 	.interlace = true,
+
+	.hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+	.vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+	.data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+	.de_level = OMAPDSS_SIG_ACTIVE_HIGH,
+	.sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
 };
 EXPORT_SYMBOL(omap_dss_ntsc_timings);
 
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index b1877d73fa56..7062bb0975a5 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -412,6 +412,7 @@ static int virtio_init(void)
 static void __exit virtio_exit(void)
 {
 	bus_unregister(&virtio_bus);
+	ida_destroy(&virtio_index_ida);
 }
 core_initcall(virtio_init);
 module_exit(virtio_exit);
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 096b857e7b75..ee663c458b20 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -80,6 +80,12 @@ struct vring_virtqueue {
 	/* Last used index we've seen. */
 	u16 last_used_idx;
 
+	/* Last written value to avail->flags */
+	u16 avail_flags_shadow;
+
+	/* Last written value to avail->idx in guest byte order */
+	u16 avail_idx_shadow;
+
 	/* How to notify other side. FIXME: commonalize hcalls! */
 	bool (*notify)(struct virtqueue *vq);
 
@@ -109,7 +115,7 @@ static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
 	 * otherwise virt_to_phys will give us bogus addresses in the
 	 * virtqueue.
 	 */
-	gfp &= ~(__GFP_HIGHMEM | __GFP_HIGH);
+	gfp &= ~__GFP_HIGHMEM;
 
 	desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
 	if (!desc)
@@ -235,13 +241,14 @@ static inline int virtqueue_add(struct virtqueue *_vq,
 
 	/* Put entry in available array (but don't update avail->idx until they
 	 * do sync). */
-	avail = virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) & (vq->vring.num - 1);
+	avail = vq->avail_idx_shadow & (vq->vring.num - 1);
 	vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);
 
 	/* Descriptors and available array need to be set before we expose the
 	 * new available array entries. */
 	virtio_wmb(vq->weak_barriers);
-	vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) + 1);
+	vq->avail_idx_shadow++;
+	vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
 	vq->num_added++;
 
 	pr_debug("Added buffer head %i to %p\n", head, vq);
@@ -354,8 +361,8 @@ bool virtqueue_kick_prepare(struct virtqueue *_vq)
 	 * event. */
 	virtio_mb(vq->weak_barriers);
 
-	old = virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) - vq->num_added;
-	new = virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx);
+	old = vq->avail_idx_shadow - vq->num_added;
+	new = vq->avail_idx_shadow;
 	vq->num_added = 0;
 
 #ifdef DEBUG
@@ -510,7 +517,7 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
 	/* If we expect an interrupt for the next entry, tell host
 	 * by writing event index and flush out the write before
 	 * the read in the next get_buf call. */
-	if (!(vq->vring.avail->flags & cpu_to_virtio16(_vq->vdev, VRING_AVAIL_F_NO_INTERRUPT))) {
+	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
 		vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx);
 		virtio_mb(vq->weak_barriers);
 	}
@@ -537,7 +544,11 @@ void virtqueue_disable_cb(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 
-	vq->vring.avail->flags |= cpu_to_virtio16(_vq->vdev, VRING_AVAIL_F_NO_INTERRUPT);
+	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
+		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
+		vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
+	}
+
 }
 EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
 
@@ -565,7 +576,10 @@ unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
 	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
 	 * either clear the flags bit or point the event index at the next
 	 * entry. Always do both to keep code simple. */
-	vq->vring.avail->flags &= cpu_to_virtio16(_vq->vdev, ~VRING_AVAIL_F_NO_INTERRUPT);
+	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
+		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
+		vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
+	}
 	vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
 	END_USE(vq);
 	return last_used_idx;
@@ -633,9 +647,12 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
 	/* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to
 	 * either clear the flags bit or point the event index at the next
 	 * entry. Always do both to keep code simple. */
-	vq->vring.avail->flags &= cpu_to_virtio16(_vq->vdev, ~VRING_AVAIL_F_NO_INTERRUPT);
+	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
+		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
+		vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
+	}
 	/* TODO: tune this threshold */
-	bufs = (u16)(virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) - vq->last_used_idx) * 3 / 4;
+	bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;
 	vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs);
 	virtio_mb(vq->weak_barriers);
 	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) {
@@ -670,7 +687,8 @@ void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
 		/* detach_buf clears data, so grab it now. */
 		buf = vq->data[i];
 		detach_buf(vq, i);
-		vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) - 1);
+		vq->avail_idx_shadow--;
+		vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
 		END_USE(vq);
 		return buf;
 	}
@@ -735,6 +753,8 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
 	vq->weak_barriers = weak_barriers;
 	vq->broken = false;
 	vq->last_used_idx = 0;
+	vq->avail_flags_shadow = 0;
+	vq->avail_idx_shadow = 0;
 	vq->num_added = 0;
 	list_add_tail(&vq->vq.list, &vdev->vqs);
 #ifdef DEBUG
@@ -746,8 +766,10 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
 	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
 
 	/* No callback? Tell other side not to bother us. */
-	if (!callback)
-		vq->vring.avail->flags |= cpu_to_virtio16(vdev, VRING_AVAIL_F_NO_INTERRUPT);
+	if (!callback) {
+		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
+		vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
+	}
 
 	/* Put everything in free lists. */
 	vq->free_head = 0;
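
The new avail_flags_shadow and avail_idx_shadow fields cache the last values the driver wrote into the shared available ring, so the hot path can test or update them without re-reading (and byte-swapping) device-visible memory. A minimal sketch of this shadow-register pattern, with a hypothetical publish16() standing in for the cpu_to_virtio16() store (illustrative names, not kernel API):

#include <stdint.h>

struct shared_ring { volatile uint16_t flags; };

struct vq_state {
	struct shared_ring *sh;
	uint16_t flags_shadow;	/* last value written to sh->flags */
};

static void publish16(volatile uint16_t *p, uint16_t v)
{
	*p = v;			/* stand-in for the shared-memory store */
}

static void ring_disable_cb(struct vq_state *vq, uint16_t no_interrupt_bit)
{
	/* Touch shared memory only when the cached value actually changes. */
	if (!(vq->flags_shadow & no_interrupt_bit)) {
		vq->flags_shadow |= no_interrupt_bit;
		publish16(&vq->sh->flags, vq->flags_shadow);
	}
}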
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 7a8a6c6952e9..1c427beffadd 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -446,7 +446,7 @@ config MAX63XX_WATCHDOG
 
 config IMX2_WDT
 	tristate "IMX2+ Watchdog"
-	depends on ARCH_MXC
+	depends on ARCH_MXC || ARCH_LAYERSCAPE
 	select REGMAP_MMIO
 	select WATCHDOG_CORE
 	help
diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c
index 6ad9df948711..b751f43d76ed 100644
--- a/drivers/watchdog/mtk_wdt.c
+++ b/drivers/watchdog/mtk_wdt.c
@@ -123,6 +123,7 @@ static int mtk_wdt_stop(struct watchdog_device *wdt_dev)
 
 	reg = readl(wdt_base + WDT_MODE);
 	reg &= ~WDT_MODE_EN;
+	reg |= WDT_MODE_KEY;
 	iowrite32(reg, wdt_base + WDT_MODE);
 
 	return 0;
diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c
index d96bee017fd3..6f17c935a6cf 100644
--- a/drivers/watchdog/omap_wdt.c
+++ b/drivers/watchdog/omap_wdt.c
@@ -205,7 +205,7 @@ static int omap_wdt_set_timeout(struct watchdog_device *wdog,
 
 static unsigned int omap_wdt_get_timeleft(struct watchdog_device *wdog)
 {
-	struct omap_wdt_dev *wdev = watchdog_get_drvdata(wdog);
+	struct omap_wdt_dev *wdev = to_omap_wdt_dev(wdog);
 	void __iomem *base = wdev->base;
 	u32 value;
 
diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c
index 4224b3ec83a5..313cd1c6fda0 100644
--- a/drivers/watchdog/pnx4008_wdt.c
+++ b/drivers/watchdog/pnx4008_wdt.c
@@ -80,7 +80,7 @@ static unsigned int heartbeat = DEFAULT_HEARTBEAT;
 
 static DEFINE_SPINLOCK(io_lock);
 static void __iomem *wdt_base;
-struct clk *wdt_clk;
+static struct clk *wdt_clk;
 
 static int pnx4008_wdt_start(struct watchdog_device *wdd)
 {
@@ -161,7 +161,7 @@ static int pnx4008_wdt_probe(struct platform_device *pdev)
 	if (IS_ERR(wdt_clk))
 		return PTR_ERR(wdt_clk);
 
-	ret = clk_enable(wdt_clk);
+	ret = clk_prepare_enable(wdt_clk);
 	if (ret)
 		return ret;
 
@@ -184,7 +184,7 @@ static int pnx4008_wdt_probe(struct platform_device *pdev)
 	return 0;
 
 disable_clk:
-	clk_disable(wdt_clk);
+	clk_disable_unprepare(wdt_clk);
 	return ret;
 }
 
@@ -192,7 +192,7 @@ static int pnx4008_wdt_remove(struct platform_device *pdev)
 {
 	watchdog_unregister_device(&pnx4008_wdd);
 
-	clk_disable(wdt_clk);
+	clk_disable_unprepare(wdt_clk);
 
 	return 0;
 }
diff --git a/drivers/watchdog/tegra_wdt.c b/drivers/watchdog/tegra_wdt.c
index 7f97cdd53f29..9ec57608da82 100644
--- a/drivers/watchdog/tegra_wdt.c
+++ b/drivers/watchdog/tegra_wdt.c
@@ -140,8 +140,10 @@ static int tegra_wdt_set_timeout(struct watchdog_device *wdd,
 {
 	wdd->timeout = timeout;
 
-	if (watchdog_active(wdd))
+	if (watchdog_active(wdd)) {
+		tegra_wdt_stop(wdd);
 		return tegra_wdt_start(wdd);
+	}
 
 	return 0;
 }
diff --git a/drivers/watchdog/w83977f_wdt.c b/drivers/watchdog/w83977f_wdt.c
index 91bf55a20024..20e2bba10400 100644
--- a/drivers/watchdog/w83977f_wdt.c
+++ b/drivers/watchdog/w83977f_wdt.c
@@ -224,7 +224,7 @@ static int wdt_keepalive(void)
 
 static int wdt_set_timeout(int t)
 {
-	int tmrval;
+	unsigned int tmrval;
 
 	/*
 	 * Convert seconds to watchdog counter time units, rounding up.
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 849500e4e14d..524c22146429 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -39,6 +39,7 @@
 #include <asm/irq.h>
 #include <asm/idle.h>
 #include <asm/io_apic.h>
+#include <asm/i8259.h>
 #include <asm/xen/pci.h>
 #endif
 #include <asm/sync_bitops.h>
@@ -420,7 +421,7 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi)
 		return xen_allocate_irq_dynamic();
 
 	/* Legacy IRQ descriptors are already allocated by the arch. */
-	if (gsi < NR_IRQS_LEGACY)
+	if (gsi < nr_legacy_irqs())
 		irq = gsi;
 	else
 		irq = irq_alloc_desc_at(gsi, -1);
@@ -446,7 +447,7 @@ static void xen_free_irq(unsigned irq)
 	kfree(info);
 
 	/* Legacy IRQ descriptors are managed by the arch. */
-	if (irq < NR_IRQS_LEGACY)
+	if (irq < nr_legacy_irqs())
 		return;
 
 	irq_free_desc(irq);
diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c
index e3e9e3d46d1b..96a1b8da5371 100644
--- a/drivers/xen/events/events_fifo.c
+++ b/drivers/xen/events/events_fifo.c
@@ -281,7 +281,8 @@ static void handle_irq_for_port(unsigned port)
 
 static void consume_one_event(unsigned cpu,
 			      struct evtchn_fifo_control_block *control_block,
-			      unsigned priority, unsigned long *ready)
+			      unsigned priority, unsigned long *ready,
+			      bool drop)
 {
 	struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
 	uint32_t head;
@@ -313,13 +314,17 @@ static void consume_one_event(unsigned cpu,
 	if (head == 0)
 		clear_bit(priority, ready);
 
-	if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port))
-		handle_irq_for_port(port);
+	if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port)) {
+		if (unlikely(drop))
+			pr_warn("Dropping pending event for port %u\n", port);
+		else
+			handle_irq_for_port(port);
+	}
 
 	q->head[priority] = head;
 }
 
-static void evtchn_fifo_handle_events(unsigned cpu)
+static void __evtchn_fifo_handle_events(unsigned cpu, bool drop)
 {
 	struct evtchn_fifo_control_block *control_block;
 	unsigned long ready;
@@ -331,11 +336,16 @@ static void evtchn_fifo_handle_events(unsigned cpu)
 
 	while (ready) {
 		q = find_first_bit(&ready, EVTCHN_FIFO_MAX_QUEUES);
-		consume_one_event(cpu, control_block, q, &ready);
+		consume_one_event(cpu, control_block, q, &ready, drop);
 		ready |= xchg(&control_block->ready, 0);
 	}
 }
 
+static void evtchn_fifo_handle_events(unsigned cpu)
+{
+	__evtchn_fifo_handle_events(cpu, false);
+}
+
 static void evtchn_fifo_resume(void)
 {
 	unsigned cpu;
@@ -420,6 +430,9 @@ static int evtchn_fifo_cpu_notification(struct notifier_block *self,
 		if (!per_cpu(cpu_control_block, cpu))
 			ret = evtchn_fifo_alloc_control_block(cpu);
 		break;
+	case CPU_DEAD:
+		__evtchn_fifo_handle_events(cpu, true);
+		break;
 	default:
 		break;
 	}
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index 00f40f051d95..38272ad24551 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -49,6 +49,8 @@
 #include <linux/init.h>
 #include <linux/mutex.h>
 #include <linux/cpu.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
 
 #include <xen/xen.h>
 #include <xen/events.h>
@@ -58,10 +60,10 @@
 struct per_user_data {
 	struct mutex bind_mutex; /* serialize bind/unbind operations */
 	struct rb_root evtchns;
+	unsigned int nr_evtchns;
 
 	/* Notification ring, accessed via /dev/xen/evtchn. */
-#define EVTCHN_RING_SIZE (PAGE_SIZE / sizeof(evtchn_port_t))
-#define EVTCHN_RING_MASK(_i) ((_i)&(EVTCHN_RING_SIZE-1))
+	unsigned int ring_size;
 	evtchn_port_t *ring;
 	unsigned int ring_cons, ring_prod, ring_overflow;
 	struct mutex ring_cons_mutex; /* protect against concurrent readers */
@@ -80,10 +82,41 @@ struct user_evtchn {
 	bool enabled;
 };
 
+static evtchn_port_t *evtchn_alloc_ring(unsigned int size)
+{
+	evtchn_port_t *ring;
+	size_t s = size * sizeof(*ring);
+
+	ring = kmalloc(s, GFP_KERNEL);
+	if (!ring)
+		ring = vmalloc(s);
+
+	return ring;
+}
+
+static void evtchn_free_ring(evtchn_port_t *ring)
+{
+	kvfree(ring);
+}
+
+static unsigned int evtchn_ring_offset(struct per_user_data *u,
+				       unsigned int idx)
+{
+	return idx & (u->ring_size - 1);
+}
+
+static evtchn_port_t *evtchn_ring_entry(struct per_user_data *u,
+					unsigned int idx)
+{
+	return u->ring + evtchn_ring_offset(u, idx);
+}
+
 static int add_evtchn(struct per_user_data *u, struct user_evtchn *evtchn)
 {
 	struct rb_node **new = &(u->evtchns.rb_node), *parent = NULL;
 
+	u->nr_evtchns++;
+
 	while (*new) {
 		struct user_evtchn *this;
 
@@ -107,6 +140,7 @@ static int add_evtchn(struct per_user_data *u, struct user_evtchn *evtchn)
 
 static void del_evtchn(struct per_user_data *u, struct user_evtchn *evtchn)
 {
+	u->nr_evtchns--;
 	rb_erase(&evtchn->node, &u->evtchns);
 	kfree(evtchn);
 }
@@ -144,8 +178,8 @@ static irqreturn_t evtchn_interrupt(int irq, void *data)
 
 	spin_lock(&u->ring_prod_lock);
 
-	if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) {
-		u->ring[EVTCHN_RING_MASK(u->ring_prod)] = evtchn->port;
+	if ((u->ring_prod - u->ring_cons) < u->ring_size) {
+		*evtchn_ring_entry(u, u->ring_prod) = evtchn->port;
 		wmb(); /* Ensure ring contents visible */
 		if (u->ring_cons == u->ring_prod++) {
 			wake_up_interruptible(&u->evtchn_wait);
@@ -200,10 +234,10 @@ static ssize_t evtchn_read(struct file *file, char __user *buf,
 	}
 
 	/* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
-	if (((c ^ p) & EVTCHN_RING_SIZE) != 0) {
-		bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) *
+	if (((c ^ p) & u->ring_size) != 0) {
+		bytes1 = (u->ring_size - evtchn_ring_offset(u, c)) *
 			sizeof(evtchn_port_t);
-		bytes2 = EVTCHN_RING_MASK(p) * sizeof(evtchn_port_t);
+		bytes2 = evtchn_ring_offset(u, p) * sizeof(evtchn_port_t);
 	} else {
 		bytes1 = (p - c) * sizeof(evtchn_port_t);
 		bytes2 = 0;
@@ -219,7 +253,7 @@ static ssize_t evtchn_read(struct file *file, char __user *buf,
 
 	rc = -EFAULT;
 	rmb(); /* Ensure that we see the port before we copy it. */
-	if (copy_to_user(buf, &u->ring[EVTCHN_RING_MASK(c)], bytes1) ||
+	if (copy_to_user(buf, evtchn_ring_entry(u, c), bytes1) ||
 	    ((bytes2 != 0) &&
 	     copy_to_user(&buf[bytes1], &u->ring[0], bytes2)))
 		goto unlock_out;
@@ -278,6 +312,66 @@ static ssize_t evtchn_write(struct file *file, const char __user *buf,
 	return rc;
 }
 
+static int evtchn_resize_ring(struct per_user_data *u)
+{
+	unsigned int new_size;
+	evtchn_port_t *new_ring, *old_ring;
+	unsigned int p, c;
+
+	/*
+	 * Ensure the ring is large enough to capture all possible
+	 * events. i.e., one free slot for each bound event.
+	 */
+	if (u->nr_evtchns <= u->ring_size)
+		return 0;
+
+	if (u->ring_size == 0)
+		new_size = 64;
+	else
+		new_size = 2 * u->ring_size;
+
+	new_ring = evtchn_alloc_ring(new_size);
+	if (!new_ring)
+		return -ENOMEM;
+
+	old_ring = u->ring;
+
+	/*
+	 * Access to the ring contents is serialized by either the
+	 * prod /or/ cons lock so take both when resizing.
+	 */
+	mutex_lock(&u->ring_cons_mutex);
+	spin_lock_irq(&u->ring_prod_lock);
+
+	/*
+	 * Copy the old ring contents to the new ring.
+	 *
+	 * If the ring contents crosses the end of the current ring,
+	 * it needs to be copied in two chunks.
+	 *
+	 * +---------+    +------------------+
+	 * |34567  12| -> |       1234567    |
+	 * +-----p-c-+    +------------------+
+	 */
+	p = evtchn_ring_offset(u, u->ring_prod);
+	c = evtchn_ring_offset(u, u->ring_cons);
+	if (p < c) {
+		memcpy(new_ring + c, u->ring + c, (u->ring_size - c) * sizeof(*u->ring));
+		memcpy(new_ring + u->ring_size, u->ring, p * sizeof(*u->ring));
+	} else
+		memcpy(new_ring + c, u->ring + c, (p - c) * sizeof(*u->ring));
+
+	u->ring = new_ring;
+	u->ring_size = new_size;
+
+	spin_unlock_irq(&u->ring_prod_lock);
+	mutex_unlock(&u->ring_cons_mutex);
+
+	evtchn_free_ring(old_ring);
+
+	return 0;
+}
+
 static int evtchn_bind_to_user(struct per_user_data *u, int port)
 {
 	struct user_evtchn *evtchn;
@@ -305,6 +399,10 @@ static int evtchn_bind_to_user(struct per_user_data *u, int port)
 	if (rc < 0)
 		goto err;
 
+	rc = evtchn_resize_ring(u);
+	if (rc < 0)
+		goto err;
+
 	rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, 0,
 				       u->name, evtchn);
 	if (rc < 0)
@@ -503,13 +601,6 @@ static int evtchn_open(struct inode *inode, struct file *filp)
 
 	init_waitqueue_head(&u->evtchn_wait);
 
-	u->ring = (evtchn_port_t *)__get_free_page(GFP_KERNEL);
-	if (u->ring == NULL) {
-		kfree(u->name);
-		kfree(u);
-		return -ENOMEM;
-	}
-
 	mutex_init(&u->bind_mutex);
 	mutex_init(&u->ring_cons_mutex);
 	spin_lock_init(&u->ring_prod_lock);
@@ -532,7 +623,7 @@ static int evtchn_release(struct inode *inode, struct file *filp)
 		evtchn_unbind_from_user(u, evtchn);
 	}
 
-	free_page((unsigned long)u->ring);
+	evtchn_free_ring(u->ring);
 	kfree(u->name);
 	kfree(u);
 
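
evtchn_resize_ring() above doubles a power-of-two ring and, when the live contents wrap past the end of the old buffer, copies them in two chunks so they land contiguously in the new one. A self-contained sketch of that copy step under the same power-of-two assumption (simplified types, not the kernel code itself):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Copy the occupied span [cons, prod) of a ring of `size` entries into
 * a ring of 2 * size entries, preserving offsets modulo the old size. */
static uint32_t *grow_ring(const uint32_t *old, unsigned int size,
			   unsigned int prod, unsigned int cons)
{
	unsigned int p = prod & (size - 1);
	unsigned int c = cons & (size - 1);
	uint32_t *ring = calloc(2 * size, sizeof(*ring));

	if (!ring)
		return NULL;
	if (p < c) {		/* contents wrap: tail chunk, then head chunk */
		memcpy(ring + c, old + c, (size - c) * sizeof(*old));
		memcpy(ring + size, old, p * sizeof(*old));
	} else {		/* contents already contiguous */
		memcpy(ring + c, old + c, (p - c) * sizeof(*old));
	}
	return ring;
}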
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 2ea0b3b2a91d..1be5dd048622 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -804,7 +804,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
 
 	vma->vm_ops = &gntdev_vmops;
 
-	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
 
 	if (use_ptemod)
 		vma->vm_flags |= VM_DONTCOPY;
diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h
index 58e38d586f52..4d529f3e40df 100644
--- a/drivers/xen/xen-pciback/pciback.h
+++ b/drivers/xen/xen-pciback/pciback.h
@@ -37,6 +37,7 @@ struct xen_pcibk_device {
 	struct xen_pci_sharedinfo *sh_info;
 	unsigned long flags;
 	struct work_struct op_work;
+	struct xen_pci_op op;
 };
 
 struct xen_pcibk_dev_data {
diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
index c4a0666de6f5..73dafdc494aa 100644
--- a/drivers/xen/xen-pciback/pciback_ops.c
+++ b/drivers/xen/xen-pciback/pciback_ops.c
@@ -70,6 +70,13 @@ static void xen_pcibk_control_isr(struct pci_dev *dev, int reset)
70 enable ? "enable" : "disable"); 70 enable ? "enable" : "disable");
71 71
72 if (enable) { 72 if (enable) {
73 /*
74 * The MSI or MSI-X should not have an IRQ handler. Otherwise
75 * if the guest terminates we BUG_ON in free_msi_irqs.
76 */
77 if (dev->msi_enabled || dev->msix_enabled)
78 goto out;
79
73 rc = request_irq(dev_data->irq, 80 rc = request_irq(dev_data->irq,
74 xen_pcibk_guest_interrupt, IRQF_SHARED, 81 xen_pcibk_guest_interrupt, IRQF_SHARED,
75 dev_data->irq_name, dev); 82 dev_data->irq_name, dev);
@@ -144,7 +151,12 @@ int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev,
 	if (unlikely(verbose_request))
 		printk(KERN_DEBUG DRV_NAME ": %s: enable MSI\n", pci_name(dev));
 
-	status = pci_enable_msi(dev);
+	if (dev->msi_enabled)
+		status = -EALREADY;
+	else if (dev->msix_enabled)
+		status = -ENXIO;
+	else
+		status = pci_enable_msi(dev);
 
 	if (status) {
 		pr_warn_ratelimited("%s: error enabling MSI for guest %u: err %d\n",
@@ -173,20 +185,23 @@ static
 int xen_pcibk_disable_msi(struct xen_pcibk_device *pdev,
 			  struct pci_dev *dev, struct xen_pci_op *op)
 {
-	struct xen_pcibk_dev_data *dev_data;
-
 	if (unlikely(verbose_request))
 		printk(KERN_DEBUG DRV_NAME ": %s: disable MSI\n",
 		       pci_name(dev));
-	pci_disable_msi(dev);
 
+	if (dev->msi_enabled) {
+		struct xen_pcibk_dev_data *dev_data;
+
+		pci_disable_msi(dev);
+
+		dev_data = pci_get_drvdata(dev);
+		if (dev_data)
+			dev_data->ack_intr = 1;
+	}
 	op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
 	if (unlikely(verbose_request))
 		printk(KERN_DEBUG DRV_NAME ": %s: MSI: %d\n", pci_name(dev),
 		       op->value);
-	dev_data = pci_get_drvdata(dev);
-	if (dev_data)
-		dev_data->ack_intr = 1;
 	return 0;
 }
 
@@ -197,13 +212,26 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
 	struct xen_pcibk_dev_data *dev_data;
 	int i, result;
 	struct msix_entry *entries;
+	u16 cmd;
 
 	if (unlikely(verbose_request))
 		printk(KERN_DEBUG DRV_NAME ": %s: enable MSI-X\n",
 		       pci_name(dev));
+
 	if (op->value > SH_INFO_MAX_VEC)
 		return -EINVAL;
 
+	if (dev->msix_enabled)
+		return -EALREADY;
+
+	/*
+	 * PCI_COMMAND_MEMORY must be enabled, otherwise we may not be able
+	 * to access the BARs where the MSI-X entries reside.
+	 */
+	pci_read_config_word(dev, PCI_COMMAND, &cmd);
+	if (dev->msi_enabled || !(cmd & PCI_COMMAND_MEMORY))
+		return -ENXIO;
+
 	entries = kmalloc(op->value * sizeof(*entries), GFP_KERNEL);
 	if (entries == NULL)
 		return -ENOMEM;
@@ -245,23 +273,27 @@ static
 int xen_pcibk_disable_msix(struct xen_pcibk_device *pdev,
 			   struct pci_dev *dev, struct xen_pci_op *op)
 {
-	struct xen_pcibk_dev_data *dev_data;
 	if (unlikely(verbose_request))
 		printk(KERN_DEBUG DRV_NAME ": %s: disable MSI-X\n",
 		       pci_name(dev));
-	pci_disable_msix(dev);
 
+	if (dev->msix_enabled) {
+		struct xen_pcibk_dev_data *dev_data;
+
+		pci_disable_msix(dev);
+
+		dev_data = pci_get_drvdata(dev);
+		if (dev_data)
+			dev_data->ack_intr = 1;
+	}
 	/*
 	 * SR-IOV devices (which don't have any legacy IRQ) have
 	 * an undefined IRQ value of zero.
 	 */
 	op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
 	if (unlikely(verbose_request))
-		printk(KERN_DEBUG DRV_NAME ": %s: MSI-X: %d\n", pci_name(dev),
-		       op->value);
-	dev_data = pci_get_drvdata(dev);
-	if (dev_data)
-		dev_data->ack_intr = 1;
+		printk(KERN_DEBUG DRV_NAME ": %s: MSI-X: %d\n",
+		       pci_name(dev), op->value);
 	return 0;
 }
 #endif
@@ -298,9 +330,11 @@ void xen_pcibk_do_op(struct work_struct *data)
 		container_of(data, struct xen_pcibk_device, op_work);
 	struct pci_dev *dev;
 	struct xen_pcibk_dev_data *dev_data = NULL;
-	struct xen_pci_op *op = &pdev->sh_info->op;
+	struct xen_pci_op *op = &pdev->op;
 	int test_intx = 0;
 
+	*op = pdev->sh_info->op;
+	barrier();
 	dev = xen_pcibk_get_pci_dev(pdev, op->domain, op->bus, op->devfn);
 
 	if (dev == NULL)
@@ -342,6 +376,17 @@ void xen_pcibk_do_op(struct work_struct *data)
 		if ((dev_data->enable_intx != test_intx))
 			xen_pcibk_control_isr(dev, 0 /* no reset */);
 	}
+	pdev->sh_info->op.err = op->err;
+	pdev->sh_info->op.value = op->value;
+#ifdef CONFIG_PCI_MSI
+	if (op->cmd == XEN_PCI_OP_enable_msix && op->err == 0) {
+		unsigned int i;
+
+		for (i = 0; i < op->value; i++)
+			pdev->sh_info->op.msix_entries[i].vector =
+				op->msix_entries[i].vector;
+	}
+#endif
 	/* Tell the driver domain that we're done. */
 	wmb();
 	clear_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index 98bc345f296e..4843741e703a 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -44,7 +44,6 @@ static struct xen_pcibk_device *alloc_pdev(struct xenbus_device *xdev)
 	dev_dbg(&xdev->dev, "allocated pdev @ 0x%p\n", pdev);
 
 	pdev->xdev = xdev;
-	dev_set_drvdata(&xdev->dev, pdev);
 
 	mutex_init(&pdev->dev_lock);
 
@@ -58,6 +57,9 @@ static struct xen_pcibk_device *alloc_pdev(struct xenbus_device *xdev)
 		kfree(pdev);
 		pdev = NULL;
 	}
+
+	dev_set_drvdata(&xdev->dev, pdev);
+
 out:
 	return pdev;
 }
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index 43bcae852546..ad4eb1024d1f 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -726,7 +726,7 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
 	if (!pending_req)
 		return 1;
 
-	ring_req = *RING_GET_REQUEST(ring, rc);
+	RING_COPY_REQUEST(ring, rc, &ring_req);
 	ring->req_cons = ++rc;
 
 	err = prepare_pending_reqs(info, &ring_req, pending_req);
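
This change, like the pciback snapshot of sh_info->op further up, guards against a double-fetch race: a request living in memory the other domain can still write must be copied out once, then validated and used only from the private copy, or the peer can change it between the check and the use. A hedged sketch of the pattern with illustrative types (not the Xen ring macros themselves):

#include <string.h>

struct request { unsigned int op; unsigned int len; };

static int handle_request(volatile struct request *shared,
			  char *buf, unsigned int bufsz)
{
	struct request req;

	/* Single fetch from guest-writable memory... */
	memcpy(&req, (const void *)shared, sizeof(req));
	__asm__ __volatile__("" ::: "memory");	/* compiler barrier, as barrier() */

	/* ...then validate and use only the snapshot. */
	if (req.len > bufsz)
		return -1;
	memset(buf, 0, req.len);
	return 0;
}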