Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/ec.c | 2
-rw-r--r--  drivers/acpi/pci_irq.c | 1
-rw-r--r--  drivers/acpi/pci_link.c | 16
-rw-r--r--  drivers/atm/he.c | 7
-rw-r--r--  drivers/atm/solos-pci.c | 12
-rw-r--r--  drivers/base/cacheinfo.c | 10
-rw-r--r--  drivers/base/power/opp.c | 17
-rw-r--r--  drivers/block/loop.c | 11
-rw-r--r--  drivers/block/null_blk.c | 2
-rw-r--r--  drivers/block/nvme-core.c | 52
-rw-r--r--  drivers/block/virtio_blk.c | 2
-rw-r--r--  drivers/block/xen-blkback/xenbus.c | 38
-rw-r--r--  drivers/block/xen-blkfront.c | 19
-rw-r--r--  drivers/char/hw_random/xgene-rng.c | 7
-rw-r--r--  drivers/clocksource/rockchip_timer.c | 2
-rw-r--r--  drivers/clocksource/timer-keystone.c | 2
-rw-r--r--  drivers/crypto/marvell/cesa.h | 27
-rw-r--r--  drivers/crypto/marvell/cipher.c | 7
-rw-r--r--  drivers/crypto/marvell/hash.c | 8
-rw-r--r--  drivers/crypto/qat/qat_common/adf_aer.c | 3
-rw-r--r--  drivers/dma/at_xdmac.c | 15
-rw-r--r--  drivers/dma/dmaengine.c | 10
-rw-r--r--  drivers/dma/dw/core.c | 4
-rw-r--r--  drivers/dma/idma64.c | 16
-rw-r--r--  drivers/dma/pxa_dma.c | 31
-rw-r--r--  drivers/dma/sun4i-dma.c | 6
-rw-r--r--  drivers/dma/xgene-dma.c | 46
-rw-r--r--  drivers/dma/zx296702_dma.c | 2
-rw-r--r--  drivers/extcon/extcon.c | 2
-rw-r--r--  drivers/firmware/Kconfig | 8
-rw-r--r--  drivers/firmware/Makefile | 3
-rw-r--r--  drivers/firmware/efi/libstub/arm-stub.c | 88
-rw-r--r--  drivers/firmware/efi/libstub/efistub.h | 4
-rw-r--r--  drivers/firmware/qcom_scm-64.c | 63
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 44
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 140
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c | 47
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c | 65
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 27
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_test.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 80
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 45
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atombios_encoders.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_smc.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/fiji_smc.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 74
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 79
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/iceland_smc.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/tonga_smc.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c | 3
-rw-r--r--  drivers/gpu/drm/amd/include/cgs_linux.h | 17
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h | 41
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 155
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.h | 41
-rw-r--r--  drivers/gpu/drm/amd/scheduler/sched_fence.c | 4
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 85
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 6
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 3
-rw-r--r--  drivers/gpu/drm/drm_probe_helper.c | 19
-rw-r--r--  drivers/gpu/drm/exynos/exynos7_drm_decon.c | 12
-rw-r--r--  drivers/gpu/drm/exynos/exynos_dp_core.c | 23
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_core.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.c | 15
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.h | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimc.c | 36
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 14
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c | 3
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.c | 94
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.h | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_rotator.c | 2
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 26
-rw-r--r--  drivers/gpu/drm/i915/intel_audio.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_mst.c | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_hotplug.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 39
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 3
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_fb.c | 31
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_main.c | 36
-rw-r--r--  drivers/gpu/drm/qxl/qxl_display.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_dp_mst.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 32
-rw-r--r--  drivers/gpu/drm/radeon/si_dpm.c | 1
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 28
-rw-r--r--  drivers/gpu/drm/vmwgfx/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c | 8
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 10
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 7
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 6
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 29
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_shader.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 11
-rw-r--r--  drivers/hv/channel_mgmt.c | 17
-rw-r--r--  drivers/hwmon/abx500.c | 1
-rw-r--r--  drivers/hwmon/gpio-fan.c | 1
-rw-r--r--  drivers/hwmon/pwm-fan.c | 1
-rw-r--r--  drivers/idle/intel_idle.c | 12
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 67
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h | 2
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c | 4
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h | 4
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c | 18
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 26
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.c | 5
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.h | 1
-rw-r--r--  drivers/infiniband/ulp/iser/iser_memory.c | 18
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c | 21
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c | 293
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.h | 21
-rw-r--r--  drivers/input/joystick/Kconfig | 1
-rw-r--r--  drivers/input/joystick/walkera0701.c | 4
-rw-r--r--  drivers/input/keyboard/omap4-keypad.c | 2
-rw-r--r--  drivers/input/misc/pm8941-pwrkey.c | 2
-rw-r--r--  drivers/input/misc/uinput.c | 2
-rw-r--r--  drivers/input/mouse/elan_i2c.h | 2
-rw-r--r--  drivers/input/mouse/elan_i2c_core.c | 26
-rw-r--r--  drivers/input/mouse/elan_i2c_i2c.c | 4
-rw-r--r--  drivers/input/mouse/elan_i2c_smbus.c | 4
-rw-r--r--  drivers/input/mouse/synaptics.c | 12
-rw-r--r--  drivers/input/serio/libps2.c | 22
-rw-r--r--  drivers/input/serio/parkbd.c | 1
-rw-r--r--  drivers/input/touchscreen/imx6ul_tsc.c | 34
-rw-r--r--  drivers/input/touchscreen/mms114.c | 4
-rw-r--r--  drivers/iommu/Kconfig | 2
-rw-r--r--  drivers/iommu/intel-iommu.c | 8
-rw-r--r--  drivers/iommu/iova.c | 120
-rw-r--r--  drivers/irqchip/irq-atmel-aic5.c | 24
-rw-r--r--  drivers/irqchip/irq-gic-v3-its-pci-msi.c | 2
-rw-r--r--  drivers/irqchip/irq-gic-v3-its.c | 3
-rw-r--r--  drivers/irqchip/irq-mips-gic.c | 12
-rw-r--r--  drivers/md/bitmap.c | 3
-rw-r--r--  drivers/md/dm-crypt.c | 17
-rw-r--r--  drivers/md/dm-thin.c | 4
-rw-r--r--  drivers/md/md.c | 5
-rw-r--r--  drivers/md/multipath.c | 3
-rw-r--r--  drivers/md/raid0.c | 12
-rw-r--r--  drivers/md/raid1.c | 11
-rw-r--r--  drivers/md/raid10.c | 9
-rw-r--r--  drivers/md/raid5.c | 11
-rw-r--r--  drivers/misc/cxl/sysfs.c | 2
-rw-r--r--  drivers/misc/mei/debugfs.c | 3
-rw-r--r--  drivers/mmc/core/core.c | 6
-rw-r--r--  drivers/mmc/core/host.c | 4
-rw-r--r--  drivers/mmc/host/pxamci.c | 66
-rw-r--r--  drivers/mmc/host/sunxi-mmc.c | 53
-rw-r--r--  drivers/mtd/ubi/io.c | 5
-rw-r--r--  drivers/mtd/ubi/vtbl.c | 1
-rw-r--r--  drivers/mtd/ubi/wl.c | 1
-rw-r--r--  drivers/net/arcnet/arcnet.c | 2
-rw-r--r--  drivers/net/dsa/mv88e6xxx.c | 3
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | 24
-rw-r--r--  drivers/net/ethernet/arc/emac_arc.c | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 20
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 12
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 1
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfa_ioc.c | 13
-rw-r--r--  drivers/net/ethernet/brocade/bna/bna_tx_rx.c | 2
-rw-r--r--  drivers/net/ethernet/brocade/bna/bna_types.h | 1
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.c | 29
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.h | 2
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | 5
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h | 1
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 10
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 15
-rw-r--r--  drivers/net/ethernet/freescale/gianfar_ptp.c | 1
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth.c | 8
-rw-r--r--  drivers/net/ethernet/hisilicon/hip04_eth.c | 2
-rw-r--r--  drivers/net/ethernet/ibm/emac/core.h | 6
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.c | 9
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 3
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq.c | 9
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mcg.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw.c | 22
-rw-r--r--  drivers/net/ethernet/micrel/ks8851.c | 1
-rw-r--r--  drivers/net/ethernet/moxa/moxart_ether.c | 1
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 1
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 18
-rw-r--r--  drivers/net/ethernet/realtek/8139cp.c | 111
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 11
-rw-r--r--  drivers/net/ethernet/sun/sunvnet.c | 17
-rw-r--r--  drivers/net/ethernet/ti/netcp_core.c | 74
-rw-r--r--  drivers/net/ethernet/ti/netcp_ethss.c | 47
-rw-r--r--  drivers/net/ethernet/via/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_emaclite.c | 2
-rw-r--r--  drivers/net/fjes/fjes_hw.c | 8
-rw-r--r--  drivers/net/geneve.c | 32
-rw-r--r--  drivers/net/irda/ali-ircc.c | 6
-rw-r--r--  drivers/net/macvtap.c | 4
-rw-r--r--  drivers/net/phy/fixed_phy.c | 2
-rw-r--r--  drivers/net/phy/marvell.c | 9
-rw-r--r--  drivers/net/phy/mdio-bcm-unimac.c | 1
-rw-r--r--  drivers/net/phy/mdio-gpio.c | 1
-rw-r--r--  drivers/net/phy/mdio-mux.c | 19
-rw-r--r--  drivers/net/phy/mdio_bus.c | 31
-rw-r--r--  drivers/net/phy/phy_device.c | 62
-rw-r--r--  drivers/net/phy/vitesse.c | 14
-rw-r--r--  drivers/net/ppp/ppp_generic.c | 4
-rw-r--r--  drivers/net/usb/Kconfig | 11
-rw-r--r--  drivers/net/usb/Makefile | 2
-rw-r--r--  drivers/net/usb/ch9200.c | 432
-rw-r--r--  drivers/net/vrf.c | 3
-rw-r--r--  drivers/net/vxlan.c | 15
-rw-r--r--  drivers/of/of_mdio.c | 27
-rw-r--r--  drivers/of/of_pci_irq.c | 22
-rw-r--r--  drivers/parisc/dino.c | 3
-rw-r--r--  drivers/parisc/lba_pci.c | 1
-rw-r--r--  drivers/pci/access.c | 27
-rw-r--r--  drivers/pci/bus.c | 2
-rw-r--r--  drivers/pci/host/pci-rcar-gen2.c | 1
-rw-r--r--  drivers/pci/pci-driver.c | 7
-rw-r--r--  drivers/pci/probe.c | 23
-rw-r--r--  drivers/pci/quirks.c | 20
-rw-r--r--  drivers/regulator/anatop-regulator.c | 1
-rw-r--r--  drivers/regulator/core.c | 21
-rw-r--r--  drivers/regulator/gpio-regulator.c | 1
-rw-r--r--  drivers/regulator/pbias-regulator.c | 56
-rw-r--r--  drivers/regulator/tps65218-regulator.c | 2
-rw-r--r--  drivers/regulator/vexpress.c | 1
-rw-r--r--  drivers/scsi/scsi_lib.c | 2
-rw-r--r--  drivers/spi/spi-atmel.c | 2
-rw-r--r--  drivers/spi/spi-bcm2835.c | 6
-rw-r--r--  drivers/spi/spi-meson-spifc.c | 1
-rw-r--r--  drivers/spi/spi-mt65xx.c | 53
-rw-r--r--  drivers/spi/spi-pxa2xx.c | 4
-rw-r--r--  drivers/spi/spi-xtensa-xtfpga.c | 4
-rw-r--r--  drivers/spi/spi.c | 3
-rw-r--r--  drivers/spi/spidev.c | 3
-rw-r--r--  drivers/staging/android/TODO | 20
-rw-r--r--  drivers/staging/android/ion/ion.c | 6
-rw-r--r--  drivers/staging/fbtft/fb_uc1611.c | 2
-rw-r--r--  drivers/staging/fbtft/fb_watterott.c | 4
-rw-r--r--  drivers/staging/fbtft/fbtft-core.c | 10
-rw-r--r--  drivers/staging/fbtft/flexfb.c | 11
-rw-r--r--  drivers/staging/lustre/README.txt | 16
-rw-r--r--  drivers/staging/most/Kconfig | 1
-rw-r--r--  drivers/staging/most/hdm-dim2/Kconfig | 1
-rw-r--r--  drivers/staging/most/hdm-usb/Kconfig | 2
-rw-r--r--  drivers/staging/most/mostcore/Kconfig | 1
-rw-r--r--  drivers/staging/unisys/visorbus/Makefile | 1
-rw-r--r--  drivers/staging/unisys/visorbus/visorbus_main.c | 13
-rw-r--r--  drivers/staging/unisys/visornic/visornic_main.c | 18
-rw-r--r--  drivers/target/iscsi/iscsi_target_parameters.c | 5
-rw-r--r--  drivers/target/target_core_device.c | 45
-rw-r--r--  drivers/target/target_core_hba.c | 2
-rw-r--r--  drivers/target/target_core_iblock.c | 2
-rw-r--r--  drivers/target/target_core_pr.c | 91
-rw-r--r--  drivers/target/target_core_tpg.c | 5
-rw-r--r--  drivers/thermal/Kconfig | 17
-rw-r--r--  drivers/thermal/cpu_cooling.c | 52
-rw-r--r--  drivers/thermal/db8500_cpufreq_cooling.c | 1
-rw-r--r--  drivers/thermal/power_allocator.c | 253
-rw-r--r--  drivers/thermal/thermal_core.c | 28
-rw-r--r--  drivers/thermal/ti-soc-thermal/Kconfig | 8
-rw-r--r--  drivers/thunderbolt/nhi.c | 2
-rw-r--r--  drivers/tty/serial/8250/8250_port.c | 2
-rw-r--r--  drivers/usb/chipidea/ci_hdrc_imx.c | 2
-rw-r--r--  drivers/usb/chipidea/ci_hdrc_usb2.c | 25
-rw-r--r--  drivers/usb/chipidea/udc.c | 84
-rw-r--r--  drivers/usb/core/config.c | 5
-rw-r--r--  drivers/usb/dwc3/dwc3-omap.c | 4
-rw-r--r--  drivers/usb/dwc3/gadget.c | 4
-rw-r--r--  drivers/usb/gadget/epautoconf.c | 1
-rw-r--r--  drivers/usb/gadget/udc/amd5536udc.c | 43
-rw-r--r--  drivers/usb/gadget/udc/atmel_usba_udc.c | 11
-rw-r--r--  drivers/usb/gadget/udc/bdc/bdc_core.c | 3
-rw-r--r--  drivers/usb/gadget/udc/dummy_hcd.c | 46
-rw-r--r--  drivers/usb/gadget/udc/gr_udc.c | 3
-rw-r--r--  drivers/usb/gadget/udc/mv_u3d_core.c | 3
-rw-r--r--  drivers/usb/gadget/udc/mv_udc_core.c | 3
-rw-r--r--  drivers/usb/host/xhci-mem.c | 17
-rw-r--r--  drivers/usb/host/xhci-pci.c | 90
-rw-r--r--  drivers/usb/host/xhci-ring.c | 13
-rw-r--r--  drivers/usb/host/xhci.c | 24
-rw-r--r--  drivers/usb/musb/musb_core.c | 7
-rw-r--r--  drivers/usb/musb/musb_cppi41.c | 3
-rw-r--r--  drivers/usb/musb/musb_dsps.c | 7
-rw-r--r--  drivers/usb/musb/ux500.c | 2
-rw-r--r--  drivers/usb/phy/Kconfig | 2
-rw-r--r--  drivers/usb/phy/phy-generic.c | 3
-rw-r--r--  drivers/usb/phy/phy-isp1301.c | 1
-rw-r--r--  drivers/usb/serial/option.c | 24
-rw-r--r--  drivers/usb/serial/whiteheat.c | 31
-rw-r--r--  drivers/watchdog/Kconfig | 3
-rw-r--r--  drivers/watchdog/bcm2835_wdt.c | 10
-rw-r--r--  drivers/watchdog/gef_wdt.c | 1
-rw-r--r--  drivers/watchdog/mena21_wdt.c | 1
-rw-r--r--  drivers/watchdog/moxart_wdt.c | 1
326 files changed, 3677 insertions(+), 2195 deletions(-)
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 2614a839c60d..42c66b64c12c 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1044,8 +1044,10 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
 		goto err_exit;
 
 	mutex_lock(&ec->mutex);
+	result = -ENODATA;
 	list_for_each_entry(handler, &ec->list, node) {
 		if (value == handler->query_bit) {
+			result = 0;
 			q->handler = acpi_ec_get_query_handler(handler);
 			ec_dbg_evt("Query(0x%02x) scheduled",
 				   q->handler->query_bit);
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 6da0f9beab19..c9336751e5e3 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -372,6 +372,7 @@ static int acpi_isa_register_gsi(struct pci_dev *dev)
 
 	/* Interrupt Line values above 0xF are forbidden */
 	if (dev->irq > 0 && (dev->irq <= 0xF) &&
+	    acpi_isa_irq_available(dev->irq) &&
 	    (acpi_isa_irq_to_gsi(dev->irq, &dev_gsi) == 0)) {
 		dev_warn(&dev->dev, "PCI INT %c: no GSI - using ISA IRQ %d\n",
 			 pin_name(dev->pin), dev->irq);
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index 3b4ea98e3ea0..7c8408b946ca 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -498,8 +498,7 @@ int __init acpi_irq_penalty_init(void)
 				PIRQ_PENALTY_PCI_POSSIBLE;
 		}
 	}
-	/* Add a penalty for the SCI */
-	acpi_irq_penalty[acpi_gbl_FADT.sci_interrupt] += PIRQ_PENALTY_PCI_USING;
+
 	return 0;
 }
 
505 504
@@ -553,6 +552,13 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
 			irq = link->irq.possible[i];
 		}
 	}
+	if (acpi_irq_penalty[irq] >= PIRQ_PENALTY_ISA_ALWAYS) {
+		printk(KERN_ERR PREFIX "No IRQ available for %s [%s]. "
+		       "Try pci=noacpi or acpi=off\n",
+		       acpi_device_name(link->device),
+		       acpi_device_bid(link->device));
+		return -ENODEV;
+	}
 
 	/* Attempt to enable the link device at this IRQ. */
 	if (acpi_pci_link_set(link, irq)) {
@@ -821,6 +827,12 @@ void acpi_penalize_isa_irq(int irq, int active)
 	}
 }
 
+bool acpi_isa_irq_available(int irq)
+{
+	return irq >= 0 && (irq >= ARRAY_SIZE(acpi_irq_penalty) ||
+			    acpi_irq_penalty[irq] < PIRQ_PENALTY_ISA_ALWAYS);
+}
+
 /*
  * Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict with
  * PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be use for
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index a8da3a50e374..0f5cb37636bc 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -1578,9 +1578,7 @@ he_stop(struct he_dev *he_dev)
 
 	kfree(he_dev->rbpl_virt);
 	kfree(he_dev->rbpl_table);
-
-	if (he_dev->rbpl_pool)
-		dma_pool_destroy(he_dev->rbpl_pool);
+	dma_pool_destroy(he_dev->rbpl_pool);
 
 	if (he_dev->rbrq_base)
 		dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
@@ -1594,8 +1592,7 @@ he_stop(struct he_dev *he_dev)
 	dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
 			  he_dev->tpdrq_base, he_dev->tpdrq_phys);
 
-	if (he_dev->tpd_pool)
-		dma_pool_destroy(he_dev->tpd_pool);
+	dma_pool_destroy(he_dev->tpd_pool);
 
 	if (he_dev->pci_dev) {
 		pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index 74e18b0a6d89..3d7fb6516f74 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -805,7 +805,12 @@ static void solos_bh(unsigned long card_arg)
 			continue;
 		}
 
-		skb = alloc_skb(size + 1, GFP_ATOMIC);
+		/* Use netdev_alloc_skb() because it adds NET_SKB_PAD of
+		 * headroom, and ensures we can route packets back out an
+		 * Ethernet interface (for example) without having to
+		 * reallocate. Adding NET_IP_ALIGN also ensures that both
+		 * PPPoATM and PPPoEoBR2684 packets end up aligned. */
+		skb = netdev_alloc_skb_ip_align(NULL, size + 1);
 		if (!skb) {
 			if (net_ratelimit())
 				dev_warn(&card->dev->dev, "Failed to allocate sk_buff for RX\n");
@@ -869,7 +874,10 @@ static void solos_bh(unsigned long card_arg)
 		/* Allocate RX skbs for any ports which need them */
 		if (card->using_dma && card->atmdev[port] &&
 		    !card->rx_skb[port]) {
-			struct sk_buff *skb = alloc_skb(RX_DMA_SIZE, GFP_ATOMIC);
+			/* Unlike the MMIO case (qv) we can't add NET_IP_ALIGN
+			 * here; the FPGA can only DMA to addresses which are
+			 * aligned to 4 bytes. */
+			struct sk_buff *skb = dev_alloc_skb(RX_DMA_SIZE);
 			if (skb) {
 				SKB_CB(skb)->dma_addr =
 					dma_map_single(&card->dev->dev, skb->data,
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index 764280a91776..e9fd32e91668 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -148,7 +148,11 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
 
 		if (sibling == cpu) /* skip itself */
 			continue;
+
 		sib_cpu_ci = get_cpu_cacheinfo(sibling);
+		if (!sib_cpu_ci->info_list)
+			continue;
+
 		sib_leaf = sib_cpu_ci->info_list + index;
 		cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
 		cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
@@ -159,6 +163,9 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
 
 static void free_cache_attributes(unsigned int cpu)
 {
+	if (!per_cpu_cacheinfo(cpu))
+		return;
+
 	cache_shared_cpu_map_remove(cpu);
 
 	kfree(per_cpu_cacheinfo(cpu));
@@ -514,8 +521,7 @@ static int cacheinfo_cpu_callback(struct notifier_block *nfb,
 		break;
 	case CPU_DEAD:
 		cache_remove_dev(cpu);
-		if (per_cpu_cacheinfo(cpu))
-			free_cache_attributes(cpu);
+		free_cache_attributes(cpu);
 		break;
 	}
 	return notifier_from_errno(rc);
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 28cd75c535b0..7ae7cd990fbf 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -892,10 +892,17 @@ static int opp_get_microvolt(struct dev_pm_opp *opp, struct device *dev)
 	u32 microvolt[3] = {0};
 	int count, ret;
 
-	count = of_property_count_u32_elems(opp->np, "opp-microvolt");
-	if (!count)
+	/* Missing property isn't a problem, but an invalid entry is */
+	if (!of_find_property(opp->np, "opp-microvolt", NULL))
 		return 0;
 
+	count = of_property_count_u32_elems(opp->np, "opp-microvolt");
+	if (count < 0) {
+		dev_err(dev, "%s: Invalid opp-microvolt property (%d)\n",
+			__func__, count);
+		return count;
+	}
+
 	/* There can be one or three elements here */
 	if (count != 1 && count != 3) {
 		dev_err(dev, "%s: Invalid number of elements in opp-microvolt property (%d)\n",
@@ -1063,7 +1070,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_add);
  * share a common logic which is isolated here.
  *
  * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
- * copy operation, returns 0 if no modifcation was done OR modification was
+ * copy operation, returns 0 if no modification was done OR modification was
  * successful.
  *
  * Locking: The internal device_opp and opp structures are RCU protected.
@@ -1151,7 +1158,7 @@ unlock:
  * mutex locking or synchronize_rcu() blocking calls cannot be used.
  *
  * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
- * copy operation, returns 0 if no modifcation was done OR modification was
+ * copy operation, returns 0 if no modification was done OR modification was
  * successful.
  */
 int dev_pm_opp_enable(struct device *dev, unsigned long freq)
@@ -1177,7 +1184,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
  * mutex locking or synchronize_rcu() blocking calls cannot be used.
  *
  * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
- * copy operation, returns 0 if no modifcation was done OR modification was
+ * copy operation, returns 0 if no modification was done OR modification was
  * successful.
  */
 int dev_pm_opp_disable(struct device *dev, unsigned long freq)
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index f9889b6bc02c..674f800a3b57 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1486,17 +1486,16 @@ static void loop_handle_cmd(struct loop_cmd *cmd)
 {
 	const bool write = cmd->rq->cmd_flags & REQ_WRITE;
 	struct loop_device *lo = cmd->rq->q->queuedata;
-	int ret = -EIO;
+	int ret = 0;
 
-	if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY))
+	if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
+		ret = -EIO;
 		goto failed;
+	}
 
 	ret = do_req_filebacked(lo, cmd->rq);
-
 failed:
-	if (ret)
-		cmd->rq->errors = -EIO;
-	blk_mq_complete_request(cmd->rq);
+	blk_mq_complete_request(cmd->rq, ret ? -EIO : 0);
 }
 
 static void loop_queue_write_work(struct work_struct *work)
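For orientation: this and the following block-driver hunks (null_blk, nvme, virtio_blk, xen-blkfront) are all the same conversion, moving the error code out of direct writes to req->errors and into the blk_mq_complete_request() call itself, so the core performs a single authoritative update. A minimal sketch of the resulting driver-side pattern (illustrative only, against the blk-mq API of this kernel generation; my_complete_rq is a made-up name):

	static void my_complete_rq(struct request *rq, int error)
	{
		/*
		 * Before: the driver wrote rq->errors itself and then
		 * called blk_mq_complete_request(rq), racing with other
		 * updaters of rq->errors.
		 *
		 * After: the error is handed to the core in one call.
		 */
		blk_mq_complete_request(rq, error);
	}
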
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index a295b98c6bae..1c9e4fe5aa44 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -289,7 +289,7 @@ static inline void null_handle_cmd(struct nullb_cmd *cmd)
 	case NULL_IRQ_SOFTIRQ:
 		switch (queue_mode) {
 		case NULL_Q_MQ:
-			blk_mq_complete_request(cmd->rq);
+			blk_mq_complete_request(cmd->rq, cmd->rq->errors);
 			break;
 		case NULL_Q_RQ:
 			blk_complete_request(cmd->rq);
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index b97fc3fe0916..6f04771f1019 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -618,16 +618,15 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
 			spin_unlock_irqrestore(req->q->queue_lock, flags);
 			return;
 		}
+
 		if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
 			if (cmd_rq->ctx == CMD_CTX_CANCELLED)
-				req->errors = -EINTR;
-			else
-				req->errors = status;
+				status = -EINTR;
 		} else {
-			req->errors = nvme_error_status(status);
+			status = nvme_error_status(status);
 		}
-	} else
-		req->errors = 0;
+	}
+
 	if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
 		u32 result = le32_to_cpup(&cqe->result);
 		req->special = (void *)(uintptr_t)result;
@@ -650,7 +649,7 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
 	}
 	nvme_free_iod(nvmeq->dev, iod);
 
-	blk_mq_complete_request(req);
+	blk_mq_complete_request(req, status);
 }
 
 /* length is in bytes. gfp flags indicates whether we may sleep. */
@@ -863,8 +862,7 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 	if (ns && ns->ms && !blk_integrity_rq(req)) {
 		if (!(ns->pi_type && ns->ms == 8) &&
 		    req->cmd_type != REQ_TYPE_DRV_PRIV) {
-			req->errors = -EFAULT;
-			blk_mq_complete_request(req);
+			blk_mq_complete_request(req, -EFAULT);
 			return BLK_MQ_RQ_QUEUE_OK;
 		}
 	}
@@ -2439,6 +2437,22 @@ static void nvme_scan_namespaces(struct nvme_dev *dev, unsigned nn)
 	list_sort(NULL, &dev->namespaces, ns_cmp);
 }
 
+static void nvme_set_irq_hints(struct nvme_dev *dev)
+{
+	struct nvme_queue *nvmeq;
+	int i;
+
+	for (i = 0; i < dev->online_queues; i++) {
+		nvmeq = dev->queues[i];
+
+		if (!nvmeq->tags || !(*nvmeq->tags))
+			continue;
+
+		irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector,
+				      blk_mq_tags_cpumask(*nvmeq->tags));
+	}
+}
+
 static void nvme_dev_scan(struct work_struct *work)
 {
 	struct nvme_dev *dev = container_of(work, struct nvme_dev, scan_work);
@@ -2450,6 +2464,7 @@ static void nvme_dev_scan(struct work_struct *work)
 		return;
 	nvme_scan_namespaces(dev, le32_to_cpup(&ctrl->nn));
 	kfree(ctrl);
+	nvme_set_irq_hints(dev);
 }
 
 /*
@@ -2953,22 +2968,6 @@ static const struct file_operations nvme_dev_fops = {
 	.compat_ioctl	= nvme_dev_ioctl,
 };
 
-static void nvme_set_irq_hints(struct nvme_dev *dev)
-{
-	struct nvme_queue *nvmeq;
-	int i;
-
-	for (i = 0; i < dev->online_queues; i++) {
-		nvmeq = dev->queues[i];
-
-		if (!nvmeq->tags || !(*nvmeq->tags))
-			continue;
-
-		irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector,
-				      blk_mq_tags_cpumask(*nvmeq->tags));
-	}
-}
-
 static int nvme_dev_start(struct nvme_dev *dev)
 {
 	int result;
@@ -3010,8 +3009,6 @@ static int nvme_dev_start(struct nvme_dev *dev)
 	if (result)
 		goto free_tags;
 
-	nvme_set_irq_hints(dev);
-
 	dev->event_limit = 1;
 	return result;
 
@@ -3062,7 +3059,6 @@ static int nvme_dev_resume(struct nvme_dev *dev)
 	} else {
 		nvme_unfreeze_queues(dev);
 		nvme_dev_add(dev);
-		nvme_set_irq_hints(dev);
 	}
 	return 0;
 }
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index e93899cc6f60..6ca35495a5be 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -144,7 +144,7 @@ static void virtblk_done(struct virtqueue *vq)
 	do {
 		virtqueue_disable_cb(vq);
 		while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
-			blk_mq_complete_request(vbr->req);
+			blk_mq_complete_request(vbr->req, vbr->req->errors);
 			req_done = true;
 		}
 		if (unlikely(virtqueue_is_broken(vq)))
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index deb3f001791f..767657565de6 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -212,6 +212,9 @@ static int xen_blkif_map(struct xen_blkif *blkif, grant_ref_t *gref,
 
 static int xen_blkif_disconnect(struct xen_blkif *blkif)
 {
+	struct pending_req *req, *n;
+	int i = 0, j;
+
 	if (blkif->xenblkd) {
 		kthread_stop(blkif->xenblkd);
 		wake_up(&blkif->shutdown_wq);
@@ -238,13 +241,28 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
 	/* Remove all persistent grants and the cache of ballooned pages. */
 	xen_blkbk_free_caches(blkif);
 
+	/* Check that there is no request in use */
+	list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) {
+		list_del(&req->free_list);
+
+		for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++)
+			kfree(req->segments[j]);
+
+		for (j = 0; j < MAX_INDIRECT_PAGES; j++)
+			kfree(req->indirect_pages[j]);
+
+		kfree(req);
+		i++;
+	}
+
+	WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
+	blkif->nr_ring_pages = 0;
+
 	return 0;
 }
 
 static void xen_blkif_free(struct xen_blkif *blkif)
 {
-	struct pending_req *req, *n;
-	int i = 0, j;
 
 	xen_blkif_disconnect(blkif);
 	xen_vbd_free(&blkif->vbd);
@@ -257,22 +275,6 @@ static void xen_blkif_free(struct xen_blkif *blkif)
 	BUG_ON(!list_empty(&blkif->free_pages));
 	BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
 
-	/* Check that there is no request in use */
-	list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) {
-		list_del(&req->free_list);
-
-		for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++)
-			kfree(req->segments[j]);
-
-		for (j = 0; j < MAX_INDIRECT_PAGES; j++)
-			kfree(req->indirect_pages[j]);
-
-		kfree(req);
-		i++;
-	}
-
-	WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
-
 	kmem_cache_free(xen_blkif_cachep, blkif);
 }
 
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 0823a96902f8..611170896b8c 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1142,6 +1142,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 	RING_IDX i, rp;
 	unsigned long flags;
 	struct blkfront_info *info = (struct blkfront_info *)dev_id;
+	int error;
 
 	spin_lock_irqsave(&info->io_lock, flags);
 
@@ -1182,37 +1183,37 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 			continue;
 		}
 
-		req->errors = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
+		error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
 		switch (bret->operation) {
 		case BLKIF_OP_DISCARD:
 			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
 				struct request_queue *rq = info->rq;
 				printk(KERN_WARNING "blkfront: %s: %s op failed\n",
 				       info->gd->disk_name, op_name(bret->operation));
-				req->errors = -EOPNOTSUPP;
+				error = -EOPNOTSUPP;
 				info->feature_discard = 0;
 				info->feature_secdiscard = 0;
 				queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
 				queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq);
 			}
-			blk_mq_complete_request(req);
+			blk_mq_complete_request(req, error);
 			break;
 		case BLKIF_OP_FLUSH_DISKCACHE:
 		case BLKIF_OP_WRITE_BARRIER:
 			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
 				printk(KERN_WARNING "blkfront: %s: %s op failed\n",
 				       info->gd->disk_name, op_name(bret->operation));
-				req->errors = -EOPNOTSUPP;
+				error = -EOPNOTSUPP;
 			}
 			if (unlikely(bret->status == BLKIF_RSP_ERROR &&
 				     info->shadow[id].req.u.rw.nr_segments == 0)) {
 				printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
 				       info->gd->disk_name, op_name(bret->operation));
-				req->errors = -EOPNOTSUPP;
+				error = -EOPNOTSUPP;
 			}
-			if (unlikely(req->errors)) {
-				if (req->errors == -EOPNOTSUPP)
-					req->errors = 0;
+			if (unlikely(error)) {
+				if (error == -EOPNOTSUPP)
+					error = 0;
 				info->feature_flush = 0;
 				xlvbd_flush(info);
 			}
@@ -1223,7 +1224,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 			dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
 				"request: %x\n", bret->status);
 
-			blk_mq_complete_request(req);
+			blk_mq_complete_request(req, error);
 			break;
 		default:
 			BUG();
diff --git a/drivers/char/hw_random/xgene-rng.c b/drivers/char/hw_random/xgene-rng.c
index c37cf754a985..3c77645405e5 100644
--- a/drivers/char/hw_random/xgene-rng.c
+++ b/drivers/char/hw_random/xgene-rng.c
@@ -344,11 +344,12 @@ static int xgene_rng_probe(struct platform_device *pdev)
 	if (IS_ERR(ctx->csr_base))
 		return PTR_ERR(ctx->csr_base);
 
-	ctx->irq = platform_get_irq(pdev, 0);
-	if (ctx->irq < 0) {
+	rc = platform_get_irq(pdev, 0);
+	if (rc < 0) {
 		dev_err(&pdev->dev, "No IRQ resource\n");
-		return ctx->irq;
+		return rc;
 	}
+	ctx->irq = rc;
 
 	dev_dbg(&pdev->dev, "APM X-Gene RNG BASE %p ALARM IRQ %d",
 		ctx->csr_base, ctx->irq);
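The pattern behind this fix recurs throughout the kernel: platform_get_irq() returns a negative errno on failure, so its result must be checked in a signed local before it is stored in a device field that may be unsigned (where a "< 0" test can never fire). A generic sketch of the safe shape (illustrative only; my_dev and my_probe are hypothetical names):

	struct my_dev {
		unsigned int irq;	/* unsigned field, hypothetical */
	};

	static int my_probe(struct platform_device *pdev, struct my_dev *d)
	{
		int rc = platform_get_irq(pdev, 0);	/* may be -ENXIO etc. */

		if (rc < 0)		/* test the signed local, not d->irq */
			return rc;

		d->irq = rc;		/* store only a validated value */
		return 0;
	}
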
diff --git a/drivers/clocksource/rockchip_timer.c b/drivers/clocksource/rockchip_timer.c
index bb2c2b050964..d3c1742ded1a 100644
--- a/drivers/clocksource/rockchip_timer.c
+++ b/drivers/clocksource/rockchip_timer.c
@@ -148,7 +148,7 @@ static void __init rk_timer_init(struct device_node *np)
 	bc_timer.freq = clk_get_rate(timer_clk);
 
 	irq = irq_of_parse_and_map(np, 0);
-	if (irq == NO_IRQ) {
+	if (!irq) {
 		pr_err("Failed to map interrupts for '%s'\n", TIMER_NAME);
 		return;
 	}
diff --git a/drivers/clocksource/timer-keystone.c b/drivers/clocksource/timer-keystone.c
index edacf3902e10..1cea08cf603e 100644
--- a/drivers/clocksource/timer-keystone.c
+++ b/drivers/clocksource/timer-keystone.c
@@ -152,7 +152,7 @@ static void __init keystone_timer_init(struct device_node *np)
 	int irq, error;
 
 	irq = irq_of_parse_and_map(np, 0);
-	if (irq == NO_IRQ) {
+	if (!irq) {
 		pr_err("%s: failed to map interrupts\n", __func__);
 		return;
 	}
diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h
index b60698b30d30..bc2a55bc35e4 100644
--- a/drivers/crypto/marvell/cesa.h
+++ b/drivers/crypto/marvell/cesa.h
@@ -687,6 +687,33 @@ static inline u32 mv_cesa_get_int_mask(struct mv_cesa_engine *engine)
 
 int mv_cesa_queue_req(struct crypto_async_request *req);
 
+/*
+ * Helper function that indicates whether a crypto request needs to be
+ * cleaned up or not after being enqueued using mv_cesa_queue_req().
+ */
+static inline int mv_cesa_req_needs_cleanup(struct crypto_async_request *req,
+					    int ret)
+{
+	/*
+	 * The queue still had some space, the request was queued
+	 * normally, so there's no need to clean it up.
+	 */
+	if (ret == -EINPROGRESS)
+		return false;
+
+	/*
+	 * The queue had not space left, but since the request is
+	 * flagged with CRYPTO_TFM_REQ_MAY_BACKLOG, it was added to
+	 * the backlog and will be processed later. There's no need to
+	 * clean it up.
+	 */
+	if (ret == -EBUSY && req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
+		return false;
+
+	/* Request wasn't queued, we need to clean it up */
+	return true;
+}
+
 /* TDMA functions */
 
 static inline void mv_cesa_req_dma_iter_init(struct mv_cesa_dma_iter *iter,
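The cipher.c and hash.c hunks that follow all call this helper the same way; a condensed sketch of the call-site pattern (illustrative, modeled directly on the mv_cesa_*_op() functions changed below):

	static int example_cesa_op(struct ablkcipher_request *req)
	{
		int ret = mv_cesa_queue_req(&req->base);

		/* -EINPROGRESS, or -EBUSY with MAY_BACKLOG set, means the
		 * request is still owned by the engine or its backlog and
		 * must not be cleaned up here. */
		if (mv_cesa_req_needs_cleanup(&req->base, ret))
			mv_cesa_ablkcipher_cleanup(req);

		return ret;
	}
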
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c
index 0745cf3b9c0e..3df2f4e7adb2 100644
--- a/drivers/crypto/marvell/cipher.c
+++ b/drivers/crypto/marvell/cipher.c
@@ -189,7 +189,6 @@ static inline void mv_cesa_ablkcipher_prepare(struct crypto_async_request *req,
 {
 	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
 	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
-
 	creq->req.base.engine = engine;
 
 	if (creq->req.base.type == CESA_DMA_REQ)
@@ -431,7 +430,7 @@ static int mv_cesa_des_op(struct ablkcipher_request *req,
 		return ret;
 
 	ret = mv_cesa_queue_req(&req->base);
-	if (ret && ret != -EINPROGRESS)
+	if (mv_cesa_req_needs_cleanup(&req->base, ret))
 		mv_cesa_ablkcipher_cleanup(req);
 
 	return ret;
@@ -551,7 +550,7 @@ static int mv_cesa_des3_op(struct ablkcipher_request *req,
 		return ret;
 
 	ret = mv_cesa_queue_req(&req->base);
-	if (ret && ret != -EINPROGRESS)
+	if (mv_cesa_req_needs_cleanup(&req->base, ret))
 		mv_cesa_ablkcipher_cleanup(req);
 
 	return ret;
@@ -693,7 +692,7 @@ static int mv_cesa_aes_op(struct ablkcipher_request *req,
 		return ret;
 
 	ret = mv_cesa_queue_req(&req->base);
-	if (ret && ret != -EINPROGRESS)
+	if (mv_cesa_req_needs_cleanup(&req->base, ret))
 		mv_cesa_ablkcipher_cleanup(req);
 
 	return ret;
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index ae9272eb9c1a..e8d0d7128137 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -739,10 +739,8 @@ static int mv_cesa_ahash_update(struct ahash_request *req)
 		return 0;
 
 	ret = mv_cesa_queue_req(&req->base);
-	if (ret && ret != -EINPROGRESS) {
+	if (mv_cesa_req_needs_cleanup(&req->base, ret))
 		mv_cesa_ahash_cleanup(req);
-		return ret;
-	}
 
 	return ret;
 }
@@ -766,7 +764,7 @@ static int mv_cesa_ahash_final(struct ahash_request *req)
 		return 0;
 
 	ret = mv_cesa_queue_req(&req->base);
-	if (ret && ret != -EINPROGRESS)
+	if (mv_cesa_req_needs_cleanup(&req->base, ret))
 		mv_cesa_ahash_cleanup(req);
 
 	return ret;
@@ -791,7 +789,7 @@ static int mv_cesa_ahash_finup(struct ahash_request *req)
 		return 0;
 
 	ret = mv_cesa_queue_req(&req->base);
-	if (ret && ret != -EINPROGRESS)
+	if (mv_cesa_req_needs_cleanup(&req->base, ret))
 		mv_cesa_ahash_cleanup(req);
 
 	return ret;
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
index a57b4194de28..0a5ca0ba5d64 100644
--- a/drivers/crypto/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/qat/qat_common/adf_aer.c
@@ -88,6 +88,9 @@ static void adf_dev_restore(struct adf_accel_dev *accel_dev)
 	struct pci_dev *parent = pdev->bus->self;
 	uint16_t bridge_ctl = 0;
 
+	if (accel_dev->is_vf)
+		return;
+
 	dev_info(&GET_DEV(accel_dev), "Resetting device qat_dev%d\n",
 		 accel_dev->accel_id);
 
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index a165b4bfd330..dd24375b76dd 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -455,6 +455,15 @@ static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan,
 	return desc;
 }
 
+void at_xdmac_init_used_desc(struct at_xdmac_desc *desc)
+{
+	memset(&desc->lld, 0, sizeof(desc->lld));
+	INIT_LIST_HEAD(&desc->descs_list);
+	desc->direction = DMA_TRANS_NONE;
+	desc->xfer_size = 0;
+	desc->active_xfer = false;
+}
+
 /* Call must be protected by lock. */
 static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan)
 {
@@ -466,7 +475,7 @@ static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan)
 		desc = list_first_entry(&atchan->free_descs_list,
 					struct at_xdmac_desc, desc_node);
 		list_del(&desc->desc_node);
-		desc->active_xfer = false;
+		at_xdmac_init_used_desc(desc);
 	}
 
 	return desc;
@@ -875,14 +884,14 @@ at_xdmac_interleaved_queue_desc(struct dma_chan *chan,
 
 	if (xt->src_inc) {
 		if (xt->src_sgl)
-			chan_cc |= AT_XDMAC_CC_SAM_UBS_DS_AM;
+			chan_cc |= AT_XDMAC_CC_SAM_UBS_AM;
 		else
 			chan_cc |= AT_XDMAC_CC_SAM_INCREMENTED_AM;
 	}
 
 	if (xt->dst_inc) {
 		if (xt->dst_sgl)
-			chan_cc |= AT_XDMAC_CC_DAM_UBS_DS_AM;
+			chan_cc |= AT_XDMAC_CC_DAM_UBS_AM;
 		else
 			chan_cc |= AT_XDMAC_CC_DAM_INCREMENTED_AM;
 	}
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 3ff284c8e3d5..09479d4be4db 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -554,10 +554,18 @@ struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
 	mutex_lock(&dma_list_mutex);
 
 	if (chan->client_count == 0) {
+		struct dma_device *device = chan->device;
+
+		dma_cap_set(DMA_PRIVATE, device->cap_mask);
+		device->privatecnt++;
 		err = dma_chan_get(chan);
-		if (err)
+		if (err) {
 			pr_debug("%s: failed to get %s: (%d)\n",
 				 __func__, dma_chan_name(chan), err);
+			chan = NULL;
+			if (--device->privatecnt == 0)
+				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
+		}
 	} else
 		chan = NULL;
 
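The fix keeps DMA_PRIVATE and device->privatecnt balanced with the release path. A rough sketch of the client side this protects (illustrative only; my_chan is assumed to come from an OF or filter lookup):

	struct dma_chan *chan = dma_get_slave_channel(my_chan);
	if (!chan)
		return -EBUSY;	/* already claimed, or dma_chan_get() failed */

	/* ... prep and submit descriptors ... */

	dma_release_channel(chan);	/* drops privatecnt; clears DMA_PRIVATE at zero */
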
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index cf1c87fa1edd..bedce038c6e2 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -1591,7 +1591,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
 	INIT_LIST_HEAD(&dw->dma.channels);
 	for (i = 0; i < nr_channels; i++) {
 		struct dw_dma_chan *dwc = &dw->chan[i];
-		int r = nr_channels - i - 1;
 
 		dwc->chan.device = &dw->dma;
 		dma_cookie_init(&dwc->chan);
@@ -1603,7 +1602,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
 
 		/* 7 is highest priority & 0 is lowest. */
 		if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
-			dwc->priority = r;
+			dwc->priority = nr_channels - i - 1;
 		else
 			dwc->priority = i;
 
@@ -1622,6 +1621,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
 		/* Hardware configuration */
 		if (autocfg) {
 			unsigned int dwc_params;
+			unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1;
 			void __iomem *addr = chip->regs + r * sizeof(u32);
 
 			dwc_params = dma_read_byaddr(addr, DWC_PARAMS);
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index 18c14e1f1414..48d6d9e94f67 100644
--- a/drivers/dma/idma64.c
+++ b/drivers/dma/idma64.c
@@ -355,23 +355,23 @@ static size_t idma64_active_desc_size(struct idma64_chan *idma64c)
 	struct idma64_desc *desc = idma64c->desc;
 	struct idma64_hw_desc *hw;
 	size_t bytes = desc->length;
-	u64 llp;
-	u32 ctlhi;
+	u64 llp = channel_readq(idma64c, LLP);
+	u32 ctlhi = channel_readl(idma64c, CTL_HI);
 	unsigned int i = 0;
 
-	llp = channel_readq(idma64c, LLP);
 	do {
 		hw = &desc->hw[i];
-	} while ((hw->llp != llp) && (++i < desc->ndesc));
+		if (hw->llp == llp)
+			break;
+		bytes -= hw->len;
+	} while (++i < desc->ndesc);
 
 	if (!i)
 		return bytes;
 
-	do {
-		bytes -= desc->hw[--i].len;
-	} while (i);
+	/* The current chunk is not fully transfered yet */
+	bytes += desc->hw[--i].len;
 
-	ctlhi = channel_readl(idma64c, CTL_HI);
 	return bytes - IDMA64C_CTLH_BLOCK_TS(ctlhi);
 }
 
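To see how the reworked single-pass loop above behaves, trace it on a hypothetical descriptor of three 100-byte chunks (desc->length = 300), with the read-back LLP matching hw[2].llp and BLOCK_TS reporting 40 bytes:

	bytes = 300
	i = 0: hw[0].llp != llp  ->  bytes -= 100   (200)
	i = 1: hw[1].llp != llp  ->  bytes -= 100   (100)
	i = 2: hw[2].llp == llp  ->  break
	bytes += hw[1].len      /* "current chunk not fully transfered" */   (200)
	return 200 - 40 = 160

The numbers are illustrative only; which chunk counts as "current" follows the code's own comment, not additional hardware documentation.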
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index 5cb61ce01036..fc4156afa070 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -473,8 +473,10 @@ static void pxad_free_phy(struct pxad_chan *chan)
 		return;
 
 	/* clear the channel mapping in DRCMR */
-	reg = pxad_drcmr(chan->drcmr);
-	writel_relaxed(0, chan->phy->base + reg);
+	if (chan->drcmr <= DRCMR_CHLNUM) {
+		reg = pxad_drcmr(chan->drcmr);
+		writel_relaxed(0, chan->phy->base + reg);
+	}
 
 	spin_lock_irqsave(&pdev->phy_lock, flags);
 	for (i = 0; i < 32; i++)
@@ -516,8 +518,10 @@ static void phy_enable(struct pxad_phy *phy, bool misaligned)
516 "%s(); phy=%p(%d) misaligned=%d\n", __func__, 518 "%s(); phy=%p(%d) misaligned=%d\n", __func__,
517 phy, phy->idx, misaligned); 519 phy, phy->idx, misaligned);
518 520
519 reg = pxad_drcmr(phy->vchan->drcmr); 521 if (phy->vchan->drcmr <= DRCMR_CHLNUM) {
520 writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg); 522 reg = pxad_drcmr(phy->vchan->drcmr);
523 writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
524 }
521 525
522 dalgn = phy_readl_relaxed(phy, DALGN); 526 dalgn = phy_readl_relaxed(phy, DALGN);
523 if (misaligned) 527 if (misaligned)
@@ -887,6 +891,7 @@ pxad_tx_prep(struct virt_dma_chan *vc, struct virt_dma_desc *vd,
 	struct dma_async_tx_descriptor *tx;
 	struct pxad_chan *chan = container_of(vc, struct pxad_chan, vc);
 
+	INIT_LIST_HEAD(&vd->node);
 	tx = vchan_tx_prep(vc, vd, tx_flags);
 	tx->tx_submit = pxad_tx_submit;
 	dev_dbg(&chan->vc.chan.dev->device,
@@ -910,14 +915,18 @@ static void pxad_get_config(struct pxad_chan *chan,
 		width = chan->cfg.src_addr_width;
 		dev_addr = chan->cfg.src_addr;
 		*dev_src = dev_addr;
-		*dcmd |= PXA_DCMD_INCTRGADDR | PXA_DCMD_FLOWSRC;
+		*dcmd |= PXA_DCMD_INCTRGADDR;
+		if (chan->drcmr <= DRCMR_CHLNUM)
+			*dcmd |= PXA_DCMD_FLOWSRC;
 	}
 	if (dir == DMA_MEM_TO_DEV) {
 		maxburst = chan->cfg.dst_maxburst;
 		width = chan->cfg.dst_addr_width;
 		dev_addr = chan->cfg.dst_addr;
 		*dev_dst = dev_addr;
-		*dcmd |= PXA_DCMD_INCSRCADDR | PXA_DCMD_FLOWTRG;
+		*dcmd |= PXA_DCMD_INCSRCADDR;
+		if (chan->drcmr <= DRCMR_CHLNUM)
+			*dcmd |= PXA_DCMD_FLOWTRG;
 	}
 	if (dir == DMA_MEM_TO_MEM)
 		*dcmd |= PXA_DCMD_BURST32 | PXA_DCMD_INCTRGADDR |
@@ -1177,6 +1186,16 @@ static unsigned int pxad_residue(struct pxad_chan *chan,
 	else
 		curr = phy_readl_relaxed(chan->phy, DTADR);
 
+	/*
+	 * curr has to be actually read before checking descriptor
+	 * completion, so that a curr inside a status updater
+	 * descriptor implies the following test returns true, and
+	 * preventing reordering of curr load and the test.
+	 */
+	rmb();
+	if (is_desc_completed(vd))
+		goto out;
+
 	for (i = 0; i < sw_desc->nb_desc - 1; i++) {
 		hw_desc = sw_desc->hw_desc[i];
 		if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR)
diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c
index a1a500d96ff2..1661d518224a 100644
--- a/drivers/dma/sun4i-dma.c
+++ b/drivers/dma/sun4i-dma.c
@@ -599,13 +599,13 @@ get_next_cyclic_promise(struct sun4i_dma_contract *contract)
599static void sun4i_dma_free_contract(struct virt_dma_desc *vd) 599static void sun4i_dma_free_contract(struct virt_dma_desc *vd)
600{ 600{
601 struct sun4i_dma_contract *contract = to_sun4i_dma_contract(vd); 601 struct sun4i_dma_contract *contract = to_sun4i_dma_contract(vd);
602 struct sun4i_dma_promise *promise; 602 struct sun4i_dma_promise *promise, *tmp;
603 603
604 /* Free all the demands and completed demands */ 604 /* Free all the demands and completed demands */
605 list_for_each_entry(promise, &contract->demands, list) 605 list_for_each_entry_safe(promise, tmp, &contract->demands, list)
606 kfree(promise); 606 kfree(promise);
607 607
608 list_for_each_entry(promise, &contract->completed_demands, list) 608 list_for_each_entry_safe(promise, tmp, &contract->completed_demands, list)
609 kfree(promise); 609 kfree(promise);
610 610
611 kfree(contract); 611 kfree(contract);
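
The switch to list_for_each_entry_safe() matters because the loop body frees the node the iterator is standing on; the plain variant would read promise->list.next after the kfree(). The rule, in a self-contained userspace form (hypothetical struct promise, plain malloc/free in place of the kernel list helpers):

#include <stdio.h>
#include <stdlib.h>

struct promise {
	int id;
	struct promise *next;
};

/* Save the successor before freeing the current node, which is what
 * the extra "tmp" cursor of list_for_each_entry_safe() does. */
static void free_all(struct promise *head)
{
	struct promise *p = head, *tmp;

	while (p) {
		tmp = p->next;	/* grab next first */
		free(p);	/* now the current node may die */
		p = tmp;
	}
}

int main(void)
{
	struct promise *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct promise *p = malloc(sizeof(*p));

		p->id = i;
		p->next = head;
		head = p;
	}
	free_all(head);
	puts("list freed without touching freed memory");
	return 0;
}
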
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index b23e8d52d126..8d57b1b12e41 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -59,7 +59,6 @@
59#define XGENE_DMA_RING_MEM_RAM_SHUTDOWN 0xD070 59#define XGENE_DMA_RING_MEM_RAM_SHUTDOWN 0xD070
60#define XGENE_DMA_RING_BLK_MEM_RDY 0xD074 60#define XGENE_DMA_RING_BLK_MEM_RDY 0xD074
61#define XGENE_DMA_RING_BLK_MEM_RDY_VAL 0xFFFFFFFF 61#define XGENE_DMA_RING_BLK_MEM_RDY_VAL 0xFFFFFFFF
62#define XGENE_DMA_RING_DESC_CNT(v) (((v) & 0x0001FFFE) >> 1)
63#define XGENE_DMA_RING_ID_GET(owner, num) (((owner) << 6) | (num)) 62#define XGENE_DMA_RING_ID_GET(owner, num) (((owner) << 6) | (num))
64#define XGENE_DMA_RING_DST_ID(v) ((1 << 10) | (v)) 63#define XGENE_DMA_RING_DST_ID(v) ((1 << 10) | (v))
65#define XGENE_DMA_RING_CMD_OFFSET 0x2C 64#define XGENE_DMA_RING_CMD_OFFSET 0x2C
@@ -379,14 +378,6 @@ static u8 xgene_dma_encode_xor_flyby(u32 src_cnt)
379 return flyby_type[src_cnt]; 378 return flyby_type[src_cnt];
380} 379}
381 380
382static u32 xgene_dma_ring_desc_cnt(struct xgene_dma_ring *ring)
383{
384 u32 __iomem *cmd_base = ring->cmd_base;
385 u32 ring_state = ioread32(&cmd_base[1]);
386
387 return XGENE_DMA_RING_DESC_CNT(ring_state);
388}
389
390static void xgene_dma_set_src_buffer(__le64 *ext8, size_t *len, 381static void xgene_dma_set_src_buffer(__le64 *ext8, size_t *len,
391 dma_addr_t *paddr) 382 dma_addr_t *paddr)
392{ 383{
@@ -659,15 +650,12 @@ static void xgene_dma_clean_running_descriptor(struct xgene_dma_chan *chan,
659 dma_pool_free(chan->desc_pool, desc, desc->tx.phys); 650 dma_pool_free(chan->desc_pool, desc, desc->tx.phys);
660} 651}
661 652
662static int xgene_chan_xfer_request(struct xgene_dma_ring *ring, 653static void xgene_chan_xfer_request(struct xgene_dma_chan *chan,
663 struct xgene_dma_desc_sw *desc_sw) 654 struct xgene_dma_desc_sw *desc_sw)
664{ 655{
656 struct xgene_dma_ring *ring = &chan->tx_ring;
665 struct xgene_dma_desc_hw *desc_hw; 657 struct xgene_dma_desc_hw *desc_hw;
666 658
667 /* Check if can push more descriptor to hw for execution */
668 if (xgene_dma_ring_desc_cnt(ring) > (ring->slots - 2))
669 return -EBUSY;
670
671 /* Get hw descriptor from DMA tx ring */ 659 /* Get hw descriptor from DMA tx ring */
672 desc_hw = &ring->desc_hw[ring->head]; 660 desc_hw = &ring->desc_hw[ring->head];
673 661
@@ -694,11 +682,13 @@ static int xgene_chan_xfer_request(struct xgene_dma_ring *ring,
694 memcpy(desc_hw, &desc_sw->desc2, sizeof(*desc_hw)); 682 memcpy(desc_hw, &desc_sw->desc2, sizeof(*desc_hw));
695 } 683 }
696 684
685 /* Increment the pending transaction count */
686 chan->pending += ((desc_sw->flags &
687 XGENE_DMA_FLAG_64B_DESC) ? 2 : 1);
688
 697 /* Notify the hw that we have descriptors ready for execution */ 689
698 iowrite32((desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) ? 690 iowrite32((desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) ?
699 2 : 1, ring->cmd); 691 2 : 1, ring->cmd);
700
701 return 0;
702} 692}
703 693
704/** 694/**
@@ -710,7 +700,6 @@ static int xgene_chan_xfer_request(struct xgene_dma_ring *ring,
710static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan) 700static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan)
711{ 701{
712 struct xgene_dma_desc_sw *desc_sw, *_desc_sw; 702 struct xgene_dma_desc_sw *desc_sw, *_desc_sw;
713 int ret;
714 703
715 /* 704 /*
716 * If the list of pending descriptors is empty, then we 705 * If the list of pending descriptors is empty, then we
@@ -735,18 +724,13 @@ static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan)
735 if (chan->pending >= chan->max_outstanding) 724 if (chan->pending >= chan->max_outstanding)
736 return; 725 return;
737 726
738 ret = xgene_chan_xfer_request(&chan->tx_ring, desc_sw); 727 xgene_chan_xfer_request(chan, desc_sw);
739 if (ret)
740 return;
741 728
742 /* 729 /*
743 * Delete this element from ld pending queue and append it to 730 * Delete this element from ld pending queue and append it to
744 * ld running queue 731 * ld running queue
745 */ 732 */
746 list_move_tail(&desc_sw->node, &chan->ld_running); 733 list_move_tail(&desc_sw->node, &chan->ld_running);
747
748 /* Increment the pending transaction count */
749 chan->pending++;
750 } 734 }
751} 735}
752 736
@@ -821,7 +805,8 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
821 * Decrement the pending transaction count 805 * Decrement the pending transaction count
822 * as we have processed one 806 * as we have processed one
823 */ 807 */
824 chan->pending--; 808 chan->pending -= ((desc_sw->flags &
809 XGENE_DMA_FLAG_64B_DESC) ? 2 : 1);
825 810
826 /* 811 /*
827 * Delete this node from ld running queue and append it to 812 * Delete this node from ld running queue and append it to
@@ -1421,15 +1406,18 @@ static int xgene_dma_create_ring_one(struct xgene_dma_chan *chan,
1421 struct xgene_dma_ring *ring, 1406 struct xgene_dma_ring *ring,
1422 enum xgene_dma_ring_cfgsize cfgsize) 1407 enum xgene_dma_ring_cfgsize cfgsize)
1423{ 1408{
1409 int ret;
1410
1424 /* Setup DMA ring descriptor variables */ 1411 /* Setup DMA ring descriptor variables */
1425 ring->pdma = chan->pdma; 1412 ring->pdma = chan->pdma;
1426 ring->cfgsize = cfgsize; 1413 ring->cfgsize = cfgsize;
1427 ring->num = chan->pdma->ring_num++; 1414 ring->num = chan->pdma->ring_num++;
1428 ring->id = XGENE_DMA_RING_ID_GET(ring->owner, ring->buf_num); 1415 ring->id = XGENE_DMA_RING_ID_GET(ring->owner, ring->buf_num);
1429 1416
1430 ring->size = xgene_dma_get_ring_size(chan, cfgsize); 1417 ret = xgene_dma_get_ring_size(chan, cfgsize);
1431 if (ring->size <= 0) 1418 if (ret <= 0)
1432 return ring->size; 1419 return ret;
1420 ring->size = ret;
1433 1421
1434 /* Allocate memory for DMA ring descriptor */ 1422 /* Allocate memory for DMA ring descriptor */
1435 ring->desc_vaddr = dma_zalloc_coherent(chan->dev, ring->size, 1423 ring->desc_vaddr = dma_zalloc_coherent(chan->dev, ring->size,
@@ -1482,7 +1470,7 @@ static int xgene_dma_create_chan_rings(struct xgene_dma_chan *chan)
1482 tx_ring->id, tx_ring->num, tx_ring->desc_vaddr); 1470 tx_ring->id, tx_ring->num, tx_ring->desc_vaddr);
1483 1471
 1484 /* Set the max outstanding requests possible on this channel */ 1472
1485 chan->max_outstanding = rx_ring->slots; 1473 chan->max_outstanding = tx_ring->slots;
1486 1474
1487 return ret; 1475 return ret;
1488} 1476}
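
Three related fixes land in this file: the ring-state poll is dropped in favor of pure credit accounting, a 64-byte descriptor is charged two ring slots at submit and refunded two at completion, and the credit limit comes from the tx ring (whose slots are actually consumed) instead of the rx ring. A compact sketch of that accounting; the ring depth and the two-slot rule are stated assumptions, not values read from hardware:

#include <stdbool.h>
#include <stdio.h>

#define RING_SLOTS 8			/* assumed tx-ring depth */

struct chan {
	int pending;			/* slots currently in flight */
	int max_outstanding;		/* bounded by the tx ring */
};

/* A 64-byte descriptor occupies two ring slots, a 32-byte one slot. */
static int slots_for(bool is_64b)
{
	return is_64b ? 2 : 1;
}

static bool submit(struct chan *c, bool is_64b)
{
	if (c->pending + slots_for(is_64b) > c->max_outstanding)
		return false;		 /* ring full: stay on pending list */
	c->pending += slots_for(is_64b); /* charge the real slot count */
	return true;
}

static void complete(struct chan *c, bool is_64b)
{
	c->pending -= slots_for(is_64b); /* refund the same count */
}

int main(void)
{
	struct chan c = { 0, RING_SLOTS };

	while (submit(&c, true))	/* 64B descriptors: 2 slots each */
		;
	printf("in flight: %d of %d slots\n", c.pending, c.max_outstanding);
	complete(&c, true);
	printf("after one completion: %d slots\n", c.pending);
	return 0;
}
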
diff --git a/drivers/dma/zx296702_dma.c b/drivers/dma/zx296702_dma.c
index 39915a6b7986..c017fcd8e07c 100644
--- a/drivers/dma/zx296702_dma.c
+++ b/drivers/dma/zx296702_dma.c
@@ -739,7 +739,7 @@ static struct dma_chan *zx_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
739 struct dma_chan *chan; 739 struct dma_chan *chan;
740 struct zx_dma_chan *c; 740 struct zx_dma_chan *c;
741 741
742 if (request > d->dma_requests) 742 if (request >= d->dma_requests)
743 return NULL; 743 return NULL;
744 744
745 chan = dma_get_any_slave_channel(&d->slave); 745 chan = dma_get_any_slave_channel(&d->slave);
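
The zx296702 fix is the classic off-by-one on a 0-based index: with N request lines, valid values are 0..N-1, so the reject test must be ">=", not ">". A standalone illustration with an assumed table size:

#include <stdio.h>

#define DMA_REQUESTS 4	/* assumed request-line count for the sketch */

static const char *lines[DMA_REQUESTS] = { "rx0", "tx0", "rx1", "tx1" };

static const char *lookup(unsigned int request)
{
	if (request >= DMA_REQUESTS)	/* ">" would let index 4 through */
		return NULL;
	return lines[request];
}

int main(void)
{
	const char *a = lookup(3), *b = lookup(4);

	printf("request 3: %s\n", a ? a : "(out of range)");
	printf("request 4: %s\n", b ? b : "(out of range)");
	return 0;
}
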
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index a07addde297b..8dd0af1d50bc 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -159,7 +159,7 @@ static int find_cable_index_by_name(struct extcon_dev *edev, const char *name)
159static bool is_extcon_changed(u32 prev, u32 new, int idx, bool *attached) 159static bool is_extcon_changed(u32 prev, u32 new, int idx, bool *attached)
160{ 160{
161 if (((prev >> idx) & 0x1) != ((new >> idx) & 0x1)) { 161 if (((prev >> idx) & 0x1) != ((new >> idx) & 0x1)) {
162 *attached = new ? true : false; 162 *attached = ((new >> idx) & 0x1) ? true : false;
163 return true; 163 return true;
164 } 164 }
165 165
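
The extcon fix narrows the attachment test from the whole state word to the one bit being reported: "new ? true : false" claimed a cable was still attached whenever any cable bit remained set. A self-contained version of the corrected predicate (userspace types, same shift-and-mask logic):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Did cable @idx change between @prev and @next, and is it now attached? */
static bool cable_changed(uint32_t prev, uint32_t next, int idx,
			  bool *attached)
{
	if (((prev >> idx) & 0x1) != ((next >> idx) & 0x1)) {
		/* Test this cable's bit, not the whole word. */
		*attached = (next >> idx) & 0x1;
		return true;
	}
	return false;
}

int main(void)
{
	bool attached;

	/* Cable 0 detaches while cable 1 stays attached. */
	if (cable_changed(0x3, 0x2, 0, &attached))
		printf("cable 0 changed, attached=%d\n", attached);
	return 0;
}
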
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index d8de6a8dd4de..665efca59487 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -139,6 +139,14 @@ config QCOM_SCM
139 bool 139 bool
140 depends on ARM || ARM64 140 depends on ARM || ARM64
141 141
142config QCOM_SCM_32
143 def_bool y
144 depends on QCOM_SCM && ARM
145
146config QCOM_SCM_64
147 def_bool y
148 depends on QCOM_SCM && ARM64
149
142source "drivers/firmware/broadcom/Kconfig" 150source "drivers/firmware/broadcom/Kconfig"
143source "drivers/firmware/google/Kconfig" 151source "drivers/firmware/google/Kconfig"
144source "drivers/firmware/efi/Kconfig" 152source "drivers/firmware/efi/Kconfig"
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 000830fc6707..2ee83474a3c1 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -13,7 +13,8 @@ obj-$(CONFIG_ISCSI_IBFT_FIND) += iscsi_ibft_find.o
13obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o 13obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o
14obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o 14obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o
15obj-$(CONFIG_QCOM_SCM) += qcom_scm.o 15obj-$(CONFIG_QCOM_SCM) += qcom_scm.o
16obj-$(CONFIG_QCOM_SCM) += qcom_scm-32.o 16obj-$(CONFIG_QCOM_SCM_64) += qcom_scm-64.o
17obj-$(CONFIG_QCOM_SCM_32) += qcom_scm-32.o
17CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch_extension sec,-DREQUIRES_SEC=1) 18CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch_extension sec,-DREQUIRES_SEC=1)
18 19
19obj-y += broadcom/ 20obj-y += broadcom/
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index e29560e6b40b..950c87f5d279 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -13,6 +13,7 @@
13 */ 13 */
14 14
15#include <linux/efi.h> 15#include <linux/efi.h>
16#include <linux/sort.h>
16#include <asm/efi.h> 17#include <asm/efi.h>
17 18
18#include "efistub.h" 19#include "efistub.h"
@@ -305,6 +306,44 @@ fail:
305 */ 306 */
306#define EFI_RT_VIRTUAL_BASE 0x40000000 307#define EFI_RT_VIRTUAL_BASE 0x40000000
307 308
309static int cmp_mem_desc(const void *l, const void *r)
310{
311 const efi_memory_desc_t *left = l, *right = r;
312
313 return (left->phys_addr > right->phys_addr) ? 1 : -1;
314}
315
316/*
317 * Returns whether region @left ends exactly where region @right starts,
318 * or false if either argument is NULL.
319 */
320static bool regions_are_adjacent(efi_memory_desc_t *left,
321 efi_memory_desc_t *right)
322{
323 u64 left_end;
324
325 if (left == NULL || right == NULL)
326 return false;
327
328 left_end = left->phys_addr + left->num_pages * EFI_PAGE_SIZE;
329
330 return left_end == right->phys_addr;
331}
332
333/*
334 * Returns whether region @left and region @right have compatible memory type
335 * mapping attributes, and are both EFI_MEMORY_RUNTIME regions.
336 */
337static bool regions_have_compatible_memory_type_attrs(efi_memory_desc_t *left,
338 efi_memory_desc_t *right)
339{
340 static const u64 mem_type_mask = EFI_MEMORY_WB | EFI_MEMORY_WT |
341 EFI_MEMORY_WC | EFI_MEMORY_UC |
342 EFI_MEMORY_RUNTIME;
343
344 return ((left->attribute ^ right->attribute) & mem_type_mask) == 0;
345}
346
308/* 347/*
309 * efi_get_virtmap() - create a virtual mapping for the EFI memory map 348 * efi_get_virtmap() - create a virtual mapping for the EFI memory map
310 * 349 *
@@ -317,33 +356,52 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
317 int *count) 356 int *count)
318{ 357{
319 u64 efi_virt_base = EFI_RT_VIRTUAL_BASE; 358 u64 efi_virt_base = EFI_RT_VIRTUAL_BASE;
320 efi_memory_desc_t *out = runtime_map; 359 efi_memory_desc_t *in, *prev = NULL, *out = runtime_map;
321 int l; 360 int l;
322 361
323 for (l = 0; l < map_size; l += desc_size) { 362 /*
324 efi_memory_desc_t *in = (void *)memory_map + l; 363 * To work around potential issues with the Properties Table feature
364 * introduced in UEFI 2.5, which may split PE/COFF executable images
365 * in memory into several RuntimeServicesCode and RuntimeServicesData
366 * regions, we need to preserve the relative offsets between adjacent
367 * EFI_MEMORY_RUNTIME regions with the same memory type attributes.
368 * The easiest way to find adjacent regions is to sort the memory map
369 * before traversing it.
370 */
371 sort(memory_map, map_size / desc_size, desc_size, cmp_mem_desc, NULL);
372
373 for (l = 0; l < map_size; l += desc_size, prev = in) {
325 u64 paddr, size; 374 u64 paddr, size;
326 375
376 in = (void *)memory_map + l;
327 if (!(in->attribute & EFI_MEMORY_RUNTIME)) 377 if (!(in->attribute & EFI_MEMORY_RUNTIME))
328 continue; 378 continue;
329 379
380 paddr = in->phys_addr;
381 size = in->num_pages * EFI_PAGE_SIZE;
382
330 /* 383 /*
331 * Make the mapping compatible with 64k pages: this allows 384 * Make the mapping compatible with 64k pages: this allows
332 * a 4k page size kernel to kexec a 64k page size kernel and 385 * a 4k page size kernel to kexec a 64k page size kernel and
333 * vice versa. 386 * vice versa.
334 */ 387 */
335 paddr = round_down(in->phys_addr, SZ_64K); 388 if (!regions_are_adjacent(prev, in) ||
336 size = round_up(in->num_pages * EFI_PAGE_SIZE + 389 !regions_have_compatible_memory_type_attrs(prev, in)) {
337 in->phys_addr - paddr, SZ_64K); 390
338 391 paddr = round_down(in->phys_addr, SZ_64K);
339 /* 392 size += in->phys_addr - paddr;
340 * Avoid wasting memory on PTEs by choosing a virtual base that 393
341 * is compatible with section mappings if this region has the 394 /*
342 * appropriate size and physical alignment. (Sections are 2 MB 395 * Avoid wasting memory on PTEs by choosing a virtual
343 * on 4k granule kernels) 396 * base that is compatible with section mappings if this
344 */ 397 * region has the appropriate size and physical
345 if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M) 398 * alignment. (Sections are 2 MB on 4k granule kernels)
346 efi_virt_base = round_up(efi_virt_base, SZ_2M); 399 */
400 if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M)
401 efi_virt_base = round_up(efi_virt_base, SZ_2M);
402 else
403 efi_virt_base = round_up(efi_virt_base, SZ_64K);
404 }
347 405
348 in->virt_addr = efi_virt_base + in->phys_addr - paddr; 406 in->virt_addr = efi_virt_base + in->phys_addr - paddr;
349 efi_virt_base += size; 407 efi_virt_base += size;
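
The stub change works in two steps: sort the memory map by physical address so fragments that the UEFI 2.5 Properties Table feature split out of one image become neighbors again, then restart the 64K rounding of the virtual cursor only at a break in adjacency, which preserves the relative offsets inside each run of compatible regions. A reduced userspace model of the placement loop; the addresses are invented and the attribute-compatibility check is omitted for brevity:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SZ 0x1000ULL
#define SZ64K   0x10000ULL

struct desc {
	uint64_t phys;
	uint64_t pages;
};

static int cmp_desc(const void *l, const void *r)
{
	const struct desc *a = l, *b = r;

	return (a->phys > b->phys) ? 1 : -1;
}

static bool adjacent(const struct desc *l, const struct desc *r)
{
	return l && l->phys + l->pages * PAGE_SZ == r->phys;
}

int main(void)
{
	/* Two fragments of one image plus an unrelated region. */
	struct desc map[] = {
		{ 0x40010000ULL, 4 },
		{ 0x40000000ULL, 16 },
		{ 0x80000000ULL, 8 },
	};
	int n = sizeof(map) / sizeof(map[0]);
	uint64_t virt = 0x40000000ULL;
	struct desc *prev = NULL;

	qsort(map, n, sizeof(map[0]), cmp_desc);

	for (int i = 0; i < n; prev = &map[i], i++) {
		uint64_t paddr = map[i].phys;
		uint64_t size = map[i].pages * PAGE_SZ;

		/* Restart 64K rounding only at a break in adjacency. */
		if (!adjacent(prev, &map[i])) {
			paddr = map[i].phys & ~(SZ64K - 1);
			size += map[i].phys - paddr;
			virt = (virt + SZ64K - 1) & ~(SZ64K - 1);
		}
		printf("phys %#llx -> virt %#llx\n",
		       (unsigned long long)map[i].phys,
		       (unsigned long long)(virt + map[i].phys - paddr));
		virt += size;
	}
	return 0;
}
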
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index e334a01cf92f..6b6548fda089 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -5,10 +5,6 @@
5/* error code which can't be mistaken for valid address */ 5/* error code which can't be mistaken for valid address */
6#define EFI_ERROR (~0UL) 6#define EFI_ERROR (~0UL)
7 7
8#undef memcpy
9#undef memset
10#undef memmove
11
12void efi_char16_printk(efi_system_table_t *, efi_char16_t *); 8void efi_char16_printk(efi_system_table_t *, efi_char16_t *);
13 9
14efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg, void *__image, 10efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg, void *__image,
diff --git a/drivers/firmware/qcom_scm-64.c b/drivers/firmware/qcom_scm-64.c
new file mode 100644
index 000000000000..bb6555f6d63b
--- /dev/null
+++ b/drivers/firmware/qcom_scm-64.c
@@ -0,0 +1,63 @@
1/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/io.h>
14#include <linux/errno.h>
15#include <linux/qcom_scm.h>
16
17/**
 18 * __qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
19 * @entry: Entry point function for the cpus
20 * @cpus: The cpumask of cpus that will use the entry point
21 *
22 * Set the cold boot address of the cpus. Any cpu outside the supported
 23 * range will be removed from the cpu present mask.
24 */
25int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
26{
27 return -ENOTSUPP;
28}
29
30/**
 31 * __qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
32 * @entry: Entry point function for the cpus
33 * @cpus: The cpumask of cpus that will use the entry point
34 *
35 * Set the Linux entry point for the SCM to transfer control to when coming
 36 * out of a power down. CPU power down may be executed during cpuidle or hotplug.
37 */
38int __qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
39{
40 return -ENOTSUPP;
41}
42
43/**
 44 * __qcom_scm_cpu_power_down() - Power down the cpu
 45 * @flags: Flags to flush cache
46 *
 47 * This is an end point to power down the cpu. If there was a pending interrupt,
 48 * control returns from this function; otherwise, the cpu jumps to the
 49 * warm boot entry point set for this cpu upon reset.
50 */
51void __qcom_scm_cpu_power_down(u32 flags)
52{
53}
54
55int __qcom_scm_is_call_available(u32 svc_id, u32 cmd_id)
56{
57 return -ENOTSUPP;
58}
59
60int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
61{
62 return -ENOTSUPP;
63}
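
The new file is a placeholder backend: the shared qcom_scm front-end links against exactly one of qcom_scm-32.o or qcom_scm-64.o, and every 64-bit entry point currently reports "not supported" so callers degrade gracefully. The shape of that dispatch, as a userspace sketch (hypothetical names; the kernel returns its internal ENOTSUPP value, while userspace only has ENOTSUP):

#include <errno.h>
#include <stdio.h>

/* Backend stub: the arm64 calling convention is not wired up yet. */
static int backend_is_call_available(unsigned int svc, unsigned int cmd)
{
	(void)svc;
	(void)cmd;
	return -ENOTSUP;
}

/* Common front-end: same signature regardless of which backend linked. */
int scm_is_call_available(unsigned int svc, unsigned int cmd)
{
	return backend_is_call_available(svc, cmd);
}

int main(void)
{
	printf("available: %d\n", scm_is_call_available(1, 1));
	return 0;
}
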
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 668939a14206..6647fb26ef25 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -82,6 +82,7 @@ extern int amdgpu_vm_block_size;
82extern int amdgpu_enable_scheduler; 82extern int amdgpu_enable_scheduler;
83extern int amdgpu_sched_jobs; 83extern int amdgpu_sched_jobs;
84extern int amdgpu_sched_hw_submission; 84extern int amdgpu_sched_hw_submission;
85extern int amdgpu_enable_semaphores;
85 86
86#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000 87#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
87#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */ 88#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
@@ -432,7 +433,7 @@ int amdgpu_fence_driver_init(struct amdgpu_device *adev);
432void amdgpu_fence_driver_fini(struct amdgpu_device *adev); 433void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
433void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev); 434void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);
434 435
435void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring); 436int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
436int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, 437int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
437 struct amdgpu_irq_src *irq_src, 438 struct amdgpu_irq_src *irq_src,
438 unsigned irq_type); 439 unsigned irq_type);
@@ -890,7 +891,7 @@ struct amdgpu_ring {
890 struct amdgpu_device *adev; 891 struct amdgpu_device *adev;
891 const struct amdgpu_ring_funcs *funcs; 892 const struct amdgpu_ring_funcs *funcs;
892 struct amdgpu_fence_driver fence_drv; 893 struct amdgpu_fence_driver fence_drv;
893 struct amd_gpu_scheduler *scheduler; 894 struct amd_gpu_scheduler sched;
894 895
895 spinlock_t fence_lock; 896 spinlock_t fence_lock;
896 struct mutex *ring_lock; 897 struct mutex *ring_lock;
@@ -1201,8 +1202,6 @@ struct amdgpu_gfx {
1201 struct amdgpu_irq_src priv_inst_irq; 1202 struct amdgpu_irq_src priv_inst_irq;
1202 /* gfx status */ 1203 /* gfx status */
1203 uint32_t gfx_current_status; 1204 uint32_t gfx_current_status;
1204 /* sync signal for const engine */
1205 unsigned ce_sync_offs;
1206 /* ce ram size*/ 1205 /* ce ram size*/
1207 unsigned ce_ram_size; 1206 unsigned ce_ram_size;
1208}; 1207};
@@ -1274,8 +1273,10 @@ struct amdgpu_job {
1274 uint32_t num_ibs; 1273 uint32_t num_ibs;
1275 struct mutex job_lock; 1274 struct mutex job_lock;
1276 struct amdgpu_user_fence uf; 1275 struct amdgpu_user_fence uf;
1277 int (*free_job)(struct amdgpu_job *sched_job); 1276 int (*free_job)(struct amdgpu_job *job);
1278}; 1277};
1278#define to_amdgpu_job(sched_job) \
1279 container_of((sched_job), struct amdgpu_job, base)
1279 1280
1280static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx) 1281static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx)
1281{ 1282{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 496ed2192eba..84d68d658f8a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -183,7 +183,7 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
183 return -ENOMEM; 183 return -ENOMEM;
184 184
185 r = amdgpu_bo_create(rdev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT, 185 r = amdgpu_bo_create(rdev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT,
186 AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, &(*mem)->bo); 186 AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, NULL, &(*mem)->bo);
187 if (r) { 187 if (r) {
188 dev_err(rdev->dev, 188 dev_err(rdev->dev,
189 "failed to allocate BO for amdkfd (%d)\n", r); 189 "failed to allocate BO for amdkfd (%d)\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 98d59ee640ce..cd639c362df3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -79,7 +79,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
79 int time; 79 int time;
80 80
81 n = AMDGPU_BENCHMARK_ITERATIONS; 81 n = AMDGPU_BENCHMARK_ITERATIONS;
82 r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, sdomain, 0, NULL, &sobj); 82 r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, sdomain, 0, NULL,
83 NULL, &sobj);
83 if (r) { 84 if (r) {
84 goto out_cleanup; 85 goto out_cleanup;
85 } 86 }
@@ -91,7 +92,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
91 if (r) { 92 if (r) {
92 goto out_cleanup; 93 goto out_cleanup;
93 } 94 }
94 r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, ddomain, 0, NULL, &dobj); 95 r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, ddomain, 0, NULL,
96 NULL, &dobj);
95 if (r) { 97 if (r) {
96 goto out_cleanup; 98 goto out_cleanup;
97 } 99 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 6b1243f9f86d..8e995148f56e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -86,7 +86,7 @@ static int amdgpu_cgs_gmap_kmem(void *cgs_device, void *kmem,
86 86
87 struct sg_table *sg = drm_prime_pages_to_sg(&kmem_page, npages); 87 struct sg_table *sg = drm_prime_pages_to_sg(&kmem_page, npages);
88 ret = amdgpu_bo_create(adev, size, PAGE_SIZE, false, 88 ret = amdgpu_bo_create(adev, size, PAGE_SIZE, false,
89 AMDGPU_GEM_DOMAIN_GTT, 0, sg, &bo); 89 AMDGPU_GEM_DOMAIN_GTT, 0, sg, NULL, &bo);
90 if (ret) 90 if (ret)
91 return ret; 91 return ret;
92 ret = amdgpu_bo_reserve(bo, false); 92 ret = amdgpu_bo_reserve(bo, false);
@@ -197,7 +197,8 @@ static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device,
197 197
198 ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE, 198 ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE,
199 true, domain, flags, 199 true, domain, flags,
200 NULL, &placement, &obj); 200 NULL, &placement, NULL,
201 &obj);
201 if (ret) { 202 if (ret) {
202 DRM_ERROR("(%d) bo create failed\n", ret); 203 DRM_ERROR("(%d) bo create failed\n", ret);
203 return ret; 204 return ret;
@@ -207,44 +208,6 @@ static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device,
207 return ret; 208 return ret;
208} 209}
209 210
210static int amdgpu_cgs_import_gpu_mem(void *cgs_device, int dmabuf_fd,
211 cgs_handle_t *handle)
212{
213 CGS_FUNC_ADEV;
214 int r;
215 uint32_t dma_handle;
216 struct drm_gem_object *obj;
217 struct amdgpu_bo *bo;
218 struct drm_device *dev = adev->ddev;
219 struct drm_file *file_priv = NULL, *priv;
220
221 mutex_lock(&dev->struct_mutex);
222 list_for_each_entry(priv, &dev->filelist, lhead) {
223 rcu_read_lock();
224 if (priv->pid == get_pid(task_pid(current)))
225 file_priv = priv;
226 rcu_read_unlock();
227 if (file_priv)
228 break;
229 }
230 mutex_unlock(&dev->struct_mutex);
231 r = dev->driver->prime_fd_to_handle(dev,
232 file_priv, dmabuf_fd,
233 &dma_handle);
234 spin_lock(&file_priv->table_lock);
235
236 /* Check if we currently have a reference on the object */
237 obj = idr_find(&file_priv->object_idr, dma_handle);
238 if (obj == NULL) {
239 spin_unlock(&file_priv->table_lock);
240 return -EINVAL;
241 }
242 spin_unlock(&file_priv->table_lock);
243 bo = gem_to_amdgpu_bo(obj);
244 *handle = (cgs_handle_t)bo;
245 return 0;
246}
247
248static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle) 211static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle)
249{ 212{
250 struct amdgpu_bo *obj = (struct amdgpu_bo *)handle; 213 struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
@@ -809,7 +772,6 @@ static const struct cgs_ops amdgpu_cgs_ops = {
809}; 772};
810 773
811static const struct cgs_os_ops amdgpu_cgs_os_ops = { 774static const struct cgs_os_ops amdgpu_cgs_os_ops = {
812 amdgpu_cgs_import_gpu_mem,
813 amdgpu_cgs_add_irq_source, 775 amdgpu_cgs_add_irq_source,
814 amdgpu_cgs_irq_get, 776 amdgpu_cgs_irq_get,
815 amdgpu_cgs_irq_put 777 amdgpu_cgs_irq_put
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 3b355aeb62fd..cb3c274edb0a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -154,42 +154,42 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
154{ 154{
155 union drm_amdgpu_cs *cs = data; 155 union drm_amdgpu_cs *cs = data;
156 uint64_t *chunk_array_user; 156 uint64_t *chunk_array_user;
157 uint64_t *chunk_array = NULL; 157 uint64_t *chunk_array;
158 struct amdgpu_fpriv *fpriv = p->filp->driver_priv; 158 struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
159 unsigned size, i; 159 unsigned size;
160 int r = 0; 160 int i;
161 int ret;
161 162
162 if (!cs->in.num_chunks) 163 if (cs->in.num_chunks == 0)
163 goto out; 164 return 0;
165
166 chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
167 if (!chunk_array)
168 return -ENOMEM;
164 169
165 p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id); 170 p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
166 if (!p->ctx) { 171 if (!p->ctx) {
167 r = -EINVAL; 172 ret = -EINVAL;
168 goto out; 173 goto free_chunk;
169 } 174 }
175
170 p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle); 176 p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
171 177
172 /* get chunks */ 178 /* get chunks */
173 INIT_LIST_HEAD(&p->validated); 179 INIT_LIST_HEAD(&p->validated);
174 chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
175 if (chunk_array == NULL) {
176 r = -ENOMEM;
177 goto out;
178 }
179
180 chunk_array_user = (uint64_t __user *)(cs->in.chunks); 180 chunk_array_user = (uint64_t __user *)(cs->in.chunks);
181 if (copy_from_user(chunk_array, chunk_array_user, 181 if (copy_from_user(chunk_array, chunk_array_user,
182 sizeof(uint64_t)*cs->in.num_chunks)) { 182 sizeof(uint64_t)*cs->in.num_chunks)) {
183 r = -EFAULT; 183 ret = -EFAULT;
184 goto out; 184 goto put_bo_list;
185 } 185 }
186 186
187 p->nchunks = cs->in.num_chunks; 187 p->nchunks = cs->in.num_chunks;
188 p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk), 188 p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
189 GFP_KERNEL); 189 GFP_KERNEL);
190 if (p->chunks == NULL) { 190 if (!p->chunks) {
191 r = -ENOMEM; 191 ret = -ENOMEM;
192 goto out; 192 goto put_bo_list;
193 } 193 }
194 194
195 for (i = 0; i < p->nchunks; i++) { 195 for (i = 0; i < p->nchunks; i++) {
@@ -200,8 +200,9 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
200 chunk_ptr = (void __user *)chunk_array[i]; 200 chunk_ptr = (void __user *)chunk_array[i];
201 if (copy_from_user(&user_chunk, chunk_ptr, 201 if (copy_from_user(&user_chunk, chunk_ptr,
202 sizeof(struct drm_amdgpu_cs_chunk))) { 202 sizeof(struct drm_amdgpu_cs_chunk))) {
203 r = -EFAULT; 203 ret = -EFAULT;
204 goto out; 204 i--;
205 goto free_partial_kdata;
205 } 206 }
206 p->chunks[i].chunk_id = user_chunk.chunk_id; 207 p->chunks[i].chunk_id = user_chunk.chunk_id;
207 p->chunks[i].length_dw = user_chunk.length_dw; 208 p->chunks[i].length_dw = user_chunk.length_dw;
@@ -212,13 +213,14 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
212 213
213 p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t)); 214 p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
214 if (p->chunks[i].kdata == NULL) { 215 if (p->chunks[i].kdata == NULL) {
215 r = -ENOMEM; 216 ret = -ENOMEM;
216 goto out; 217 i--;
218 goto free_partial_kdata;
217 } 219 }
218 size *= sizeof(uint32_t); 220 size *= sizeof(uint32_t);
219 if (copy_from_user(p->chunks[i].kdata, cdata, size)) { 221 if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
220 r = -EFAULT; 222 ret = -EFAULT;
221 goto out; 223 goto free_partial_kdata;
222 } 224 }
223 225
224 switch (p->chunks[i].chunk_id) { 226 switch (p->chunks[i].chunk_id) {
@@ -238,15 +240,15 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
238 gobj = drm_gem_object_lookup(p->adev->ddev, 240 gobj = drm_gem_object_lookup(p->adev->ddev,
239 p->filp, handle); 241 p->filp, handle);
240 if (gobj == NULL) { 242 if (gobj == NULL) {
241 r = -EINVAL; 243 ret = -EINVAL;
242 goto out; 244 goto free_partial_kdata;
243 } 245 }
244 246
245 p->uf.bo = gem_to_amdgpu_bo(gobj); 247 p->uf.bo = gem_to_amdgpu_bo(gobj);
246 p->uf.offset = fence_data->offset; 248 p->uf.offset = fence_data->offset;
247 } else { 249 } else {
248 r = -EINVAL; 250 ret = -EINVAL;
249 goto out; 251 goto free_partial_kdata;
250 } 252 }
251 break; 253 break;
252 254
@@ -254,19 +256,35 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
254 break; 256 break;
255 257
256 default: 258 default:
257 r = -EINVAL; 259 ret = -EINVAL;
258 goto out; 260 goto free_partial_kdata;
259 } 261 }
260 } 262 }
261 263
262 264
263 p->ibs = kcalloc(p->num_ibs, sizeof(struct amdgpu_ib), GFP_KERNEL); 265 p->ibs = kcalloc(p->num_ibs, sizeof(struct amdgpu_ib), GFP_KERNEL);
264 if (!p->ibs) 266 if (!p->ibs) {
265 r = -ENOMEM; 267 ret = -ENOMEM;
268 goto free_all_kdata;
269 }
266 270
267out:
268 kfree(chunk_array); 271 kfree(chunk_array);
269 return r; 272 return 0;
273
274free_all_kdata:
275 i = p->nchunks - 1;
276free_partial_kdata:
277 for (; i >= 0; i--)
278 drm_free_large(p->chunks[i].kdata);
279 kfree(p->chunks);
280put_bo_list:
281 if (p->bo_list)
282 amdgpu_bo_list_put(p->bo_list);
283 amdgpu_ctx_put(p->ctx);
284free_chunk:
285 kfree(chunk_array);
286
287 return ret;
270} 288}
271 289
272/* Returns how many bytes TTM can move per IB. 290/* Returns how many bytes TTM can move per IB.
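
The reworked parser_init() above replaces a single catch-all "out:" with a ladder of labels, one per acquired resource, unwound in reverse acquisition order; note the "i--" before jumping to free_partial_kdata, which excludes the slot whose allocation just failed, and free_all_kdata seeding i before falling through. A self-contained miniature of that shape:

#include <stdio.h>
#include <stdlib.h>

static int init_chunks(int n)
{
	void **kdata;
	int i, ret = 0;

	kdata = calloc(n, sizeof(*kdata));
	if (!kdata)
		return -1;

	for (i = 0; i < n; i++) {
		kdata[i] = malloc(64);
		if (!kdata[i]) {
			ret = -1;
			i--;		/* slot i holds nothing yet */
			goto free_partial;
		}
	}

	i = n - 1;	/* success: demo releases everything the same way */
free_partial:
	for (; i >= 0; i--)
		free(kdata[i]);
	free(kdata);
	return ret;
}

int main(void)
{
	printf("init_chunks: %d\n", init_chunks(4));
	return 0;
}
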
@@ -321,25 +339,17 @@ static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
321 return max(bytes_moved_threshold, 1024*1024ull); 339 return max(bytes_moved_threshold, 1024*1024ull);
322} 340}
323 341
324int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p) 342int amdgpu_cs_list_validate(struct amdgpu_device *adev,
343 struct amdgpu_vm *vm,
344 struct list_head *validated)
325{ 345{
326 struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
327 struct amdgpu_vm *vm = &fpriv->vm;
328 struct amdgpu_device *adev = p->adev;
329 struct amdgpu_bo_list_entry *lobj; 346 struct amdgpu_bo_list_entry *lobj;
330 struct list_head duplicates;
331 struct amdgpu_bo *bo; 347 struct amdgpu_bo *bo;
332 u64 bytes_moved = 0, initial_bytes_moved; 348 u64 bytes_moved = 0, initial_bytes_moved;
333 u64 bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(adev); 349 u64 bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(adev);
334 int r; 350 int r;
335 351
336 INIT_LIST_HEAD(&duplicates); 352 list_for_each_entry(lobj, validated, tv.head) {
337 r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates);
338 if (unlikely(r != 0)) {
339 return r;
340 }
341
342 list_for_each_entry(lobj, &p->validated, tv.head) {
343 bo = lobj->robj; 353 bo = lobj->robj;
344 if (!bo->pin_count) { 354 if (!bo->pin_count) {
345 u32 domain = lobj->prefered_domains; 355 u32 domain = lobj->prefered_domains;
@@ -373,7 +383,6 @@ int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p)
373 domain = lobj->allowed_domains; 383 domain = lobj->allowed_domains;
374 goto retry; 384 goto retry;
375 } 385 }
376 ttm_eu_backoff_reservation(&p->ticket, &p->validated);
377 return r; 386 return r;
378 } 387 }
379 } 388 }
@@ -386,6 +395,7 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p)
386{ 395{
387 struct amdgpu_fpriv *fpriv = p->filp->driver_priv; 396 struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
388 struct amdgpu_cs_buckets buckets; 397 struct amdgpu_cs_buckets buckets;
398 struct list_head duplicates;
389 bool need_mmap_lock = false; 399 bool need_mmap_lock = false;
390 int i, r; 400 int i, r;
391 401
@@ -405,8 +415,22 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p)
405 if (need_mmap_lock) 415 if (need_mmap_lock)
406 down_read(&current->mm->mmap_sem); 416 down_read(&current->mm->mmap_sem);
407 417
408 r = amdgpu_cs_list_validate(p); 418 INIT_LIST_HEAD(&duplicates);
419 r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates);
420 if (unlikely(r != 0))
421 goto error_reserve;
422
423 r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &p->validated);
424 if (r)
425 goto error_validate;
426
427 r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &duplicates);
428
429error_validate:
430 if (r)
431 ttm_eu_backoff_reservation(&p->ticket, &p->validated);
409 432
433error_reserve:
410 if (need_mmap_lock) 434 if (need_mmap_lock)
411 up_read(&current->mm->mmap_sem); 435 up_read(&current->mm->mmap_sem);
412 436
@@ -772,15 +796,15 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
772 return 0; 796 return 0;
773} 797}
774 798
775static int amdgpu_cs_free_job(struct amdgpu_job *sched_job) 799static int amdgpu_cs_free_job(struct amdgpu_job *job)
776{ 800{
777 int i; 801 int i;
778 if (sched_job->ibs) 802 if (job->ibs)
779 for (i = 0; i < sched_job->num_ibs; i++) 803 for (i = 0; i < job->num_ibs; i++)
780 amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]); 804 amdgpu_ib_free(job->adev, &job->ibs[i]);
781 kfree(sched_job->ibs); 805 kfree(job->ibs);
782 if (sched_job->uf.bo) 806 if (job->uf.bo)
783 drm_gem_object_unreference_unlocked(&sched_job->uf.bo->gem_base); 807 drm_gem_object_unreference_unlocked(&job->uf.bo->gem_base);
784 return 0; 808 return 0;
785} 809}
786 810
@@ -804,7 +828,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
804 r = amdgpu_cs_parser_init(parser, data); 828 r = amdgpu_cs_parser_init(parser, data);
805 if (r) { 829 if (r) {
806 DRM_ERROR("Failed to initialize parser !\n"); 830 DRM_ERROR("Failed to initialize parser !\n");
807 amdgpu_cs_parser_fini(parser, r, false); 831 kfree(parser);
808 up_read(&adev->exclusive_lock); 832 up_read(&adev->exclusive_lock);
809 r = amdgpu_cs_handle_lockup(adev, r); 833 r = amdgpu_cs_handle_lockup(adev, r);
810 return r; 834 return r;
@@ -842,7 +866,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
842 job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL); 866 job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
843 if (!job) 867 if (!job)
844 return -ENOMEM; 868 return -ENOMEM;
845 job->base.sched = ring->scheduler; 869 job->base.sched = &ring->sched;
846 job->base.s_entity = &parser->ctx->rings[ring->idx].entity; 870 job->base.s_entity = &parser->ctx->rings[ring->idx].entity;
847 job->adev = parser->adev; 871 job->adev = parser->adev;
848 job->ibs = parser->ibs; 872 job->ibs = parser->ibs;
@@ -857,7 +881,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
857 881
858 job->free_job = amdgpu_cs_free_job; 882 job->free_job = amdgpu_cs_free_job;
859 mutex_lock(&job->job_lock); 883 mutex_lock(&job->job_lock);
860 r = amd_sched_entity_push_job((struct amd_sched_job *)job); 884 r = amd_sched_entity_push_job(&job->base);
861 if (r) { 885 if (r) {
862 mutex_unlock(&job->job_lock); 886 mutex_unlock(&job->job_lock);
863 amdgpu_cs_free_job(job); 887 amdgpu_cs_free_job(job);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 20cbc4eb5a6f..e0b80ccdfe8a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -43,10 +43,10 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
43 for (i = 0; i < adev->num_rings; i++) { 43 for (i = 0; i < adev->num_rings; i++) {
44 struct amd_sched_rq *rq; 44 struct amd_sched_rq *rq;
45 if (kernel) 45 if (kernel)
46 rq = &adev->rings[i]->scheduler->kernel_rq; 46 rq = &adev->rings[i]->sched.kernel_rq;
47 else 47 else
48 rq = &adev->rings[i]->scheduler->sched_rq; 48 rq = &adev->rings[i]->sched.sched_rq;
49 r = amd_sched_entity_init(adev->rings[i]->scheduler, 49 r = amd_sched_entity_init(&adev->rings[i]->sched,
50 &ctx->rings[i].entity, 50 &ctx->rings[i].entity,
51 rq, amdgpu_sched_jobs); 51 rq, amdgpu_sched_jobs);
52 if (r) 52 if (r)
@@ -55,7 +55,7 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
55 55
56 if (i < adev->num_rings) { 56 if (i < adev->num_rings) {
57 for (j = 0; j < i; j++) 57 for (j = 0; j < i; j++)
58 amd_sched_entity_fini(adev->rings[j]->scheduler, 58 amd_sched_entity_fini(&adev->rings[j]->sched,
59 &ctx->rings[j].entity); 59 &ctx->rings[j].entity);
60 kfree(ctx); 60 kfree(ctx);
61 return r; 61 return r;
@@ -75,7 +75,7 @@ void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
75 75
76 if (amdgpu_enable_scheduler) { 76 if (amdgpu_enable_scheduler) {
77 for (i = 0; i < adev->num_rings; i++) 77 for (i = 0; i < adev->num_rings; i++)
78 amd_sched_entity_fini(adev->rings[i]->scheduler, 78 amd_sched_entity_fini(&adev->rings[i]->sched,
79 &ctx->rings[i].entity); 79 &ctx->rings[i].entity);
80 } 80 }
81} 81}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 6ff6ae945794..6068d8207d10 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -246,7 +246,7 @@ static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
246 r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE, 246 r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
247 PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 247 PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
248 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, 248 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
249 NULL, &adev->vram_scratch.robj); 249 NULL, NULL, &adev->vram_scratch.robj);
250 if (r) { 250 if (r) {
251 return r; 251 return r;
252 } 252 }
@@ -449,7 +449,8 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
449 449
450 if (adev->wb.wb_obj == NULL) { 450 if (adev->wb.wb_obj == NULL) {
451 r = amdgpu_bo_create(adev, AMDGPU_MAX_WB * 4, PAGE_SIZE, true, 451 r = amdgpu_bo_create(adev, AMDGPU_MAX_WB * 4, PAGE_SIZE, true,
452 AMDGPU_GEM_DOMAIN_GTT, 0, NULL, &adev->wb.wb_obj); 452 AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
453 &adev->wb.wb_obj);
453 if (r) { 454 if (r) {
454 dev_warn(adev->dev, "(%d) create WB bo failed\n", r); 455 dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
455 return r; 456 return r;
@@ -1650,9 +1651,11 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
1650 drm_kms_helper_poll_disable(dev); 1651 drm_kms_helper_poll_disable(dev);
1651 1652
1652 /* turn off display hw */ 1653 /* turn off display hw */
1654 drm_modeset_lock_all(dev);
1653 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 1655 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1654 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); 1656 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
1655 } 1657 }
1658 drm_modeset_unlock_all(dev);
1656 1659
1657 /* unpin the front buffers */ 1660 /* unpin the front buffers */
1658 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 1661 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -1747,9 +1750,11 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
1747 if (fbcon) { 1750 if (fbcon) {
1748 drm_helper_resume_force_mode(dev); 1751 drm_helper_resume_force_mode(dev);
1749 /* turn on display hw */ 1752 /* turn on display hw */
1753 drm_modeset_lock_all(dev);
1750 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 1754 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1751 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); 1755 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
1752 } 1756 }
1757 drm_modeset_unlock_all(dev);
1753 } 1758 }
1754 1759
1755 drm_kms_helper_poll_enable(dev); 1760 drm_kms_helper_poll_enable(dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 0fcc0bd1622c..adb48353f2e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -79,6 +79,7 @@ int amdgpu_exp_hw_support = 0;
79int amdgpu_enable_scheduler = 0; 79int amdgpu_enable_scheduler = 0;
80int amdgpu_sched_jobs = 16; 80int amdgpu_sched_jobs = 16;
81int amdgpu_sched_hw_submission = 2; 81int amdgpu_sched_hw_submission = 2;
82int amdgpu_enable_semaphores = 1;
82 83
83MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes"); 84MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
84module_param_named(vramlimit, amdgpu_vram_limit, int, 0600); 85module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -152,6 +153,9 @@ module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);
152MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)"); 153MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)");
153module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444); 154module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
154 155
156MODULE_PARM_DESC(enable_semaphores, "Enable semaphores (1 = enable (default), 0 = disable)");
157module_param_named(enable_semaphores, amdgpu_enable_semaphores, int, 0644);
158
155static struct pci_device_id pciidlist[] = { 159static struct pci_device_id pciidlist[] = {
156#ifdef CONFIG_DRM_AMDGPU_CIK 160#ifdef CONFIG_DRM_AMDGPU_CIK
157 /* Kaveri */ 161 /* Kaveri */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 1be2bd6d07ea..b3fc26c59787 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -609,9 +609,9 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
609 * Init the fence driver for the requested ring (all asics). 609 * Init the fence driver for the requested ring (all asics).
610 * Helper function for amdgpu_fence_driver_init(). 610 * Helper function for amdgpu_fence_driver_init().
611 */ 611 */
612void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring) 612int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
613{ 613{
614 int i; 614 int i, r;
615 615
616 ring->fence_drv.cpu_addr = NULL; 616 ring->fence_drv.cpu_addr = NULL;
617 ring->fence_drv.gpu_addr = 0; 617 ring->fence_drv.gpu_addr = 0;
@@ -625,15 +625,19 @@ void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
625 amdgpu_fence_check_lockup); 625 amdgpu_fence_check_lockup);
626 ring->fence_drv.ring = ring; 626 ring->fence_drv.ring = ring;
627 627
628 init_waitqueue_head(&ring->fence_drv.fence_queue);
629
628 if (amdgpu_enable_scheduler) { 630 if (amdgpu_enable_scheduler) {
629 ring->scheduler = amd_sched_create(&amdgpu_sched_ops, 631 r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
630 ring->idx, 632 amdgpu_sched_hw_submission, ring->name);
631 amdgpu_sched_hw_submission, 633 if (r) {
632 (void *)ring->adev); 634 DRM_ERROR("Failed to create scheduler on ring %s.\n",
633 if (!ring->scheduler) 635 ring->name);
634 DRM_ERROR("Failed to create scheduler on ring %d.\n", 636 return r;
635 ring->idx); 637 }
636 } 638 }
639
640 return 0;
637} 641}
638 642
639/** 643/**
@@ -681,8 +685,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
681 wake_up_all(&ring->fence_drv.fence_queue); 685 wake_up_all(&ring->fence_drv.fence_queue);
682 amdgpu_irq_put(adev, ring->fence_drv.irq_src, 686 amdgpu_irq_put(adev, ring->fence_drv.irq_src,
683 ring->fence_drv.irq_type); 687 ring->fence_drv.irq_type);
684 if (ring->scheduler) 688 amd_sched_fini(&ring->sched);
685 amd_sched_destroy(ring->scheduler);
686 ring->fence_drv.initialized = false; 689 ring->fence_drv.initialized = false;
687 } 690 }
688 mutex_unlock(&adev->ring_lock); 691 mutex_unlock(&adev->ring_lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index cbd3a486c5c2..7312d729d300 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -127,7 +127,7 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
127 r = amdgpu_bo_create(adev, adev->gart.table_size, 127 r = amdgpu_bo_create(adev, adev->gart.table_size,
128 PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 128 PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
129 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, 129 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
130 NULL, &adev->gart.robj); 130 NULL, NULL, &adev->gart.robj);
131 if (r) { 131 if (r) {
132 return r; 132 return r;
133 } 133 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 5839fab374bf..7297ca3a0ba7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -69,7 +69,8 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
69 } 69 }
70 } 70 }
71retry: 71retry:
72 r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain, flags, NULL, &robj); 72 r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
73 flags, NULL, NULL, &robj);
73 if (r) { 74 if (r) {
74 if (r != -ERESTARTSYS) { 75 if (r != -ERESTARTSYS) {
75 if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) { 76 if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
@@ -426,6 +427,10 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
426 &args->data.data_size_bytes, 427 &args->data.data_size_bytes,
427 &args->data.flags); 428 &args->data.flags);
428 } else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) { 429 } else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
430 if (args->data.data_size_bytes > sizeof(args->data.data)) {
431 r = -EINVAL;
432 goto unreserve;
433 }
429 r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info); 434 r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
430 if (!r) 435 if (!r)
431 r = amdgpu_bo_set_metadata(robj, args->data.data, 436 r = amdgpu_bo_set_metadata(robj, args->data.data,
@@ -433,6 +438,7 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
433 args->data.flags); 438 args->data.flags);
434 } 439 }
435 440
441unreserve:
436 amdgpu_bo_unreserve(robj); 442 amdgpu_bo_unreserve(robj);
437out: 443out:
438 drm_gem_object_unreference_unlocked(gobj); 444 drm_gem_object_unreference_unlocked(gobj);
@@ -454,11 +460,12 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
454 struct ttm_validate_buffer tv, *entry; 460 struct ttm_validate_buffer tv, *entry;
455 struct amdgpu_bo_list_entry *vm_bos; 461 struct amdgpu_bo_list_entry *vm_bos;
456 struct ww_acquire_ctx ticket; 462 struct ww_acquire_ctx ticket;
457 struct list_head list; 463 struct list_head list, duplicates;
458 unsigned domain; 464 unsigned domain;
459 int r; 465 int r;
460 466
461 INIT_LIST_HEAD(&list); 467 INIT_LIST_HEAD(&list);
468 INIT_LIST_HEAD(&duplicates);
462 469
463 tv.bo = &bo_va->bo->tbo; 470 tv.bo = &bo_va->bo->tbo;
464 tv.shared = true; 471 tv.shared = true;
@@ -468,7 +475,8 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
468 if (!vm_bos) 475 if (!vm_bos)
469 return; 476 return;
470 477
471 r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL); 478 /* Provide duplicates to avoid -EALREADY */
479 r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
472 if (r) 480 if (r)
473 goto error_free; 481 goto error_free;
474 482
@@ -651,7 +659,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
651 int r; 659 int r;
652 660
653 args->pitch = amdgpu_align_pitch(adev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8); 661 args->pitch = amdgpu_align_pitch(adev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
654 args->size = args->pitch * args->height; 662 args->size = (u64)args->pitch * args->height;
655 args->size = ALIGN(args->size, PAGE_SIZE); 663 args->size = ALIGN(args->size, PAGE_SIZE);
656 664
657 r = amdgpu_gem_object_create(adev, args->size, 0, 665 r = amdgpu_gem_object_create(adev, args->size, 0,
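
The dumb_create fix casts one operand to u64 before the multiply: pitch and height are 32-bit, so the product was computed in 32 bits and could wrap before being widened by the assignment. Demonstrated in isolation (values chosen to overflow 32-bit arithmetic on common platforms):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pitch = 32768 * 4;	/* bytes per scanline */
	uint32_t height = 65536;

	uint64_t wrapped = pitch * height;		/* 32-bit product wraps */
	uint64_t full = (uint64_t)pitch * height;	/* widened first */

	printf("wrapped: %" PRIu64 "\nfull:    %" PRIu64 "\n",
	       wrapped, full);
	return 0;
}
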
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index 5c8a803acedc..534fc04e80fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -43,7 +43,7 @@ static int amdgpu_ih_ring_alloc(struct amdgpu_device *adev)
43 r = amdgpu_bo_create(adev, adev->irq.ih.ring_size, 43 r = amdgpu_bo_create(adev, adev->irq.ih.ring_size,
44 PAGE_SIZE, true, 44 PAGE_SIZE, true,
45 AMDGPU_GEM_DOMAIN_GTT, 0, 45 AMDGPU_GEM_DOMAIN_GTT, 0,
46 NULL, &adev->irq.ih.ring_obj); 46 NULL, NULL, &adev->irq.ih.ring_obj);
47 if (r) { 47 if (r) {
48 DRM_ERROR("amdgpu: failed to create ih ring buffer (%d).\n", r); 48 DRM_ERROR("amdgpu: failed to create ih ring buffer (%d).\n", r);
49 return r; 49 return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 0aba8e9bc8a0..7c42ff670080 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -140,7 +140,7 @@ void amdgpu_irq_preinstall(struct drm_device *dev)
140 */ 140 */
141int amdgpu_irq_postinstall(struct drm_device *dev) 141int amdgpu_irq_postinstall(struct drm_device *dev)
142{ 142{
143 dev->max_vblank_count = 0x001fffff; 143 dev->max_vblank_count = 0x00ffffff;
144 return 0; 144 return 0;
145} 145}
146 146
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 22367939ebf1..8c735f544b66 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -390,7 +390,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
390 min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0; 390 min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
391 } 391 }
392 case AMDGPU_INFO_READ_MMR_REG: { 392 case AMDGPU_INFO_READ_MMR_REG: {
393 unsigned n, alloc_size = info->read_mmr_reg.count * 4; 393 unsigned n, alloc_size;
394 uint32_t *regs; 394 uint32_t *regs;
395 unsigned se_num = (info->read_mmr_reg.instance >> 395 unsigned se_num = (info->read_mmr_reg.instance >>
396 AMDGPU_INFO_MMR_SE_INDEX_SHIFT) & 396 AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
@@ -406,9 +406,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
406 if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK) 406 if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
407 sh_num = 0xffffffff; 407 sh_num = 0xffffffff;
408 408
409 regs = kmalloc(alloc_size, GFP_KERNEL); 409 regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
410 if (!regs) 410 if (!regs)
411 return -ENOMEM; 411 return -ENOMEM;
412 alloc_size = info->read_mmr_reg.count * sizeof(*regs);
412 413
413 for (i = 0; i < info->read_mmr_reg.count; i++) 414 for (i = 0; i < info->read_mmr_reg.count; i++)
414 if (amdgpu_asic_read_register(adev, se_num, sh_num, 415 if (amdgpu_asic_read_register(adev, se_num, sh_num,
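
Here the byte count was computed as "count * 4" before count was validated, so a crafted ioctl argument could wrap the 32-bit multiplication and under-allocate; kmalloc_array() folds the overflow check into the allocation. The guard it implements, sketched for userspace:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Like kmalloc_array(): refuse a request whose byte count would wrap. */
static void *alloc_array(size_t n, size_t size)
{
	if (size && n > SIZE_MAX / size)
		return NULL;
	return malloc(n * size);
}

int main(void)
{
	size_t huge = SIZE_MAX / 2 + 1;	/* "huge * 4" would wrap */
	void *ok = alloc_array(16, sizeof(uint32_t));
	void *bad = alloc_array(huge, sizeof(uint32_t));

	printf("sane count: %s\n", ok ? "allocated" : "refused");
	printf("huge count: %s\n", bad ? "allocated" : "refused");
	free(ok);
	free(bad);
	return 0;
}
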
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 08b09d55b96f..1a7708f365f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -215,6 +215,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
215 bool kernel, u32 domain, u64 flags, 215 bool kernel, u32 domain, u64 flags,
216 struct sg_table *sg, 216 struct sg_table *sg,
217 struct ttm_placement *placement, 217 struct ttm_placement *placement,
218 struct reservation_object *resv,
218 struct amdgpu_bo **bo_ptr) 219 struct amdgpu_bo **bo_ptr)
219{ 220{
220 struct amdgpu_bo *bo; 221 struct amdgpu_bo *bo;
@@ -261,7 +262,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 261 /* Kernel allocations are uninterruptible */ 262
262 r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type, 263 r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
263 &bo->placement, page_align, !kernel, NULL, 264 &bo->placement, page_align, !kernel, NULL,
264 acc_size, sg, NULL, &amdgpu_ttm_bo_destroy); 265 acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
265 if (unlikely(r != 0)) { 266 if (unlikely(r != 0)) {
266 return r; 267 return r;
267 } 268 }
@@ -275,7 +276,9 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
275int amdgpu_bo_create(struct amdgpu_device *adev, 276int amdgpu_bo_create(struct amdgpu_device *adev,
276 unsigned long size, int byte_align, 277 unsigned long size, int byte_align,
277 bool kernel, u32 domain, u64 flags, 278 bool kernel, u32 domain, u64 flags,
278 struct sg_table *sg, struct amdgpu_bo **bo_ptr) 279 struct sg_table *sg,
280 struct reservation_object *resv,
281 struct amdgpu_bo **bo_ptr)
279{ 282{
280 struct ttm_placement placement = {0}; 283 struct ttm_placement placement = {0};
281 struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1]; 284 struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
@@ -286,11 +289,9 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
286 amdgpu_ttm_placement_init(adev, &placement, 289 amdgpu_ttm_placement_init(adev, &placement,
287 placements, domain, flags); 290 placements, domain, flags);
288 291
289 return amdgpu_bo_create_restricted(adev, size, byte_align, 292 return amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
290 kernel, domain, flags, 293 domain, flags, sg, &placement,
291 sg, 294 resv, bo_ptr);
292 &placement,
293 bo_ptr);
294} 295}
295 296
296int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr) 297int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
@@ -535,12 +536,10 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
535 if (metadata == NULL) 536 if (metadata == NULL)
536 return -EINVAL; 537 return -EINVAL;
537 538
538 buffer = kzalloc(metadata_size, GFP_KERNEL); 539 buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
539 if (buffer == NULL) 540 if (buffer == NULL)
540 return -ENOMEM; 541 return -ENOMEM;
541 542
542 memcpy(buffer, metadata, metadata_size);
543
544 kfree(bo->metadata); 543 kfree(bo->metadata);
545 bo->metadata_flags = flags; 544 bo->metadata_flags = flags;
546 bo->metadata = buffer; 545 bo->metadata = buffer;
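
kmemdup() collapses the kzalloc()-then-memcpy() pair into one call (and drops the pointless zeroing of memory that is fully overwritten). Userspace has no kmemdup, so this sketch defines a hypothetical memdup() helper with the same behavior:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Allocate-and-copy in one step, like the kernel's kmemdup(). */
static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}

int main(void)
{
	const char meta[] = "tiling=1";
	char *copy = memdup(meta, sizeof(meta));

	if (!copy)
		return 1;
	puts(copy);
	free(copy);
	return 0;
}
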
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 6ea18dcec561..3c2ff4567798 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -129,12 +129,14 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
129 unsigned long size, int byte_align, 129 unsigned long size, int byte_align,
130 bool kernel, u32 domain, u64 flags, 130 bool kernel, u32 domain, u64 flags,
131 struct sg_table *sg, 131 struct sg_table *sg,
132 struct reservation_object *resv,
132 struct amdgpu_bo **bo_ptr); 133 struct amdgpu_bo **bo_ptr);
133int amdgpu_bo_create_restricted(struct amdgpu_device *adev, 134int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
134 unsigned long size, int byte_align, 135 unsigned long size, int byte_align,
135 bool kernel, u32 domain, u64 flags, 136 bool kernel, u32 domain, u64 flags,
136 struct sg_table *sg, 137 struct sg_table *sg,
137 struct ttm_placement *placement, 138 struct ttm_placement *placement,
139 struct reservation_object *resv,
138 struct amdgpu_bo **bo_ptr); 140 struct amdgpu_bo **bo_ptr);
139int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr); 141int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
140void amdgpu_bo_kunmap(struct amdgpu_bo *bo); 142void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index d9652fe32d6a..59f735a933a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -61,12 +61,15 @@ struct drm_gem_object *amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
61 struct dma_buf_attachment *attach, 61 struct dma_buf_attachment *attach,
62 struct sg_table *sg) 62 struct sg_table *sg)
63{ 63{
64 struct reservation_object *resv = attach->dmabuf->resv;
64 struct amdgpu_device *adev = dev->dev_private; 65 struct amdgpu_device *adev = dev->dev_private;
65 struct amdgpu_bo *bo; 66 struct amdgpu_bo *bo;
66 int ret; 67 int ret;
67 68
69 ww_mutex_lock(&resv->lock, NULL);
68 ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false, 70 ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false,
69 AMDGPU_GEM_DOMAIN_GTT, 0, sg, &bo); 71 AMDGPU_GEM_DOMAIN_GTT, 0, sg, resv, &bo);
72 ww_mutex_unlock(&resv->lock);
70 if (ret) 73 if (ret)
71 return ERR_PTR(ret); 74 return ERR_PTR(ret);
72 75
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 9bec91484c24..30dce235ddeb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -357,11 +357,11 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
357 ring->adev = adev; 357 ring->adev = adev;
358 ring->idx = adev->num_rings++; 358 ring->idx = adev->num_rings++;
359 adev->rings[ring->idx] = ring; 359 adev->rings[ring->idx] = ring;
360 amdgpu_fence_driver_init_ring(ring); 360 r = amdgpu_fence_driver_init_ring(ring);
361 if (r)
362 return r;
361 } 363 }
362 364
363 init_waitqueue_head(&ring->fence_drv.fence_queue);
364
365 r = amdgpu_wb_get(adev, &ring->rptr_offs); 365 r = amdgpu_wb_get(adev, &ring->rptr_offs);
366 if (r) { 366 if (r) {
367 dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r); 367 dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
@@ -407,7 +407,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
407 if (ring->ring_obj == NULL) { 407 if (ring->ring_obj == NULL) {
408 r = amdgpu_bo_create(adev, ring->ring_size, PAGE_SIZE, true, 408 r = amdgpu_bo_create(adev, ring->ring_size, PAGE_SIZE, true,
409 AMDGPU_GEM_DOMAIN_GTT, 0, 409 AMDGPU_GEM_DOMAIN_GTT, 0,
410 NULL, &ring->ring_obj); 410 NULL, NULL, &ring->ring_obj);
411 if (r) { 411 if (r) {
412 dev_err(adev->dev, "(%d) ring create failed\n", r); 412 dev_err(adev->dev, "(%d) ring create failed\n", r);
413 return r; 413 return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index 74dad270362c..e90712443fe9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -64,8 +64,8 @@ int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
64 INIT_LIST_HEAD(&sa_manager->flist[i]); 64 INIT_LIST_HEAD(&sa_manager->flist[i]);
65 } 65 }
66 66
67 r = amdgpu_bo_create(adev, size, align, true, 67 r = amdgpu_bo_create(adev, size, align, true, domain,
68 domain, 0, NULL, &sa_manager->bo); 68 0, NULL, NULL, &sa_manager->bo);
69 if (r) { 69 if (r) {
70 dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r); 70 dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r);
71 return r; 71 return r;
@@ -145,8 +145,13 @@ static uint32_t amdgpu_sa_get_ring_from_fence(struct fence *f)
145 struct amd_sched_fence *s_fence; 145 struct amd_sched_fence *s_fence;
146 146
147 s_fence = to_amd_sched_fence(f); 147 s_fence = to_amd_sched_fence(f);
148 if (s_fence) 148 if (s_fence) {
149 return s_fence->scheduler->ring_id; 149 struct amdgpu_ring *ring;
150
151 ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
152 return ring->idx;
153 }
154
150 a_fence = to_amdgpu_fence(f); 155 a_fence = to_amdgpu_fence(f);
151 if (a_fence) 156 if (a_fence)
152 return a_fence->ring->idx; 157 return a_fence->ring->idx;
@@ -412,6 +417,26 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
412} 417}
413 418
414#if defined(CONFIG_DEBUG_FS) 419#if defined(CONFIG_DEBUG_FS)
420
421static void amdgpu_sa_bo_dump_fence(struct fence *fence, struct seq_file *m)
422{
423 struct amdgpu_fence *a_fence = to_amdgpu_fence(fence);
424 struct amd_sched_fence *s_fence = to_amd_sched_fence(fence);
425
426 if (a_fence)
427 seq_printf(m, " protected by 0x%016llx on ring %d",
428 a_fence->seq, a_fence->ring->idx);
429
430 if (s_fence) {
431 struct amdgpu_ring *ring;
432
433
434 ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
435 seq_printf(m, " protected by 0x%016x on ring %d",
436 s_fence->base.seqno, ring->idx);
437 }
438}
439
415void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager, 440void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
416 struct seq_file *m) 441 struct seq_file *m)
417{ 442{
@@ -428,18 +453,8 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
428 } 453 }
429 seq_printf(m, "[0x%010llx 0x%010llx] size %8lld", 454 seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
430 soffset, eoffset, eoffset - soffset); 455 soffset, eoffset, eoffset - soffset);
431 if (i->fence) { 456 if (i->fence)
432 struct amdgpu_fence *a_fence = to_amdgpu_fence(i->fence); 457 amdgpu_sa_bo_dump_fence(i->fence, m);
433 struct amd_sched_fence *s_fence = to_amd_sched_fence(i->fence);
434 if (a_fence)
435 seq_printf(m, " protected by 0x%016llx on ring %d",
436 a_fence->seq, a_fence->ring->idx);
437 if (s_fence)
438 seq_printf(m, " protected by 0x%016x on ring %d",
439 s_fence->base.seqno,
440 s_fence->scheduler->ring_id);
441
442 }
443 seq_printf(m, "\n"); 458 seq_printf(m, "\n");
444 } 459 }
445 spin_unlock(&sa_manager->wq.lock); 460 spin_unlock(&sa_manager->wq.lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index de98fbd2971e..2e946b2cad88 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -27,63 +27,48 @@
27#include <drm/drmP.h> 27#include <drm/drmP.h>
28#include "amdgpu.h" 28#include "amdgpu.h"
29 29
30static struct fence *amdgpu_sched_dependency(struct amd_sched_job *job) 30static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job)
31{ 31{
32 struct amdgpu_job *sched_job = (struct amdgpu_job *)job; 32 struct amdgpu_job *job = to_amdgpu_job(sched_job);
33 return amdgpu_sync_get_fence(&sched_job->ibs->sync); 33 return amdgpu_sync_get_fence(&job->ibs->sync);
34} 34}
35 35
36static struct fence *amdgpu_sched_run_job(struct amd_sched_job *job) 36static struct fence *amdgpu_sched_run_job(struct amd_sched_job *sched_job)
37{ 37{
38 struct amdgpu_job *sched_job; 38 struct amdgpu_fence *fence = NULL;
39 struct amdgpu_fence *fence; 39 struct amdgpu_job *job;
40 int r; 40 int r;
41 41
42 if (!job) { 42 if (!sched_job) {
43 DRM_ERROR("job is null\n"); 43 DRM_ERROR("job is null\n");
44 return NULL; 44 return NULL;
45 } 45 }
46 sched_job = (struct amdgpu_job *)job; 46 job = to_amdgpu_job(sched_job);
47 mutex_lock(&sched_job->job_lock); 47 mutex_lock(&job->job_lock);
48 r = amdgpu_ib_schedule(sched_job->adev, 48 r = amdgpu_ib_schedule(job->adev,
49 sched_job->num_ibs, 49 job->num_ibs,
50 sched_job->ibs, 50 job->ibs,
51 sched_job->base.owner); 51 job->base.owner);
52 if (r) 52 if (r) {
53 DRM_ERROR("Error scheduling IBs (%d)\n", r);
53 goto err; 54 goto err;
54 fence = amdgpu_fence_ref(sched_job->ibs[sched_job->num_ibs - 1].fence); 55 }
55
56 if (sched_job->free_job)
57 sched_job->free_job(sched_job);
58 56
59 mutex_unlock(&sched_job->job_lock); 57 fence = amdgpu_fence_ref(job->ibs[job->num_ibs - 1].fence);
60 return &fence->base;
61 58
62err: 59err:
63 DRM_ERROR("Run job error\n"); 60 if (job->free_job)
64 mutex_unlock(&sched_job->job_lock); 61 job->free_job(job);
65 job->sched->ops->process_job(job);
66 return NULL;
67}
68 62
69static void amdgpu_sched_process_job(struct amd_sched_job *job) 63 mutex_unlock(&job->job_lock);
70{ 64 fence_put(&job->base.s_fence->base);
71 struct amdgpu_job *sched_job; 65 kfree(job);
72 66 return fence ? &fence->base : NULL;
73 if (!job) {
74 DRM_ERROR("job is null\n");
75 return;
76 }
77 sched_job = (struct amdgpu_job *)job;
78 /* after processing job, free memory */
79 fence_put(&sched_job->base.s_fence->base);
80 kfree(sched_job);
81} 67}
82 68
83struct amd_sched_backend_ops amdgpu_sched_ops = { 69struct amd_sched_backend_ops amdgpu_sched_ops = {
84 .dependency = amdgpu_sched_dependency, 70 .dependency = amdgpu_sched_dependency,
85 .run_job = amdgpu_sched_run_job, 71 .run_job = amdgpu_sched_run_job,
86 .process_job = amdgpu_sched_process_job
87}; 72};
88 73
89int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev, 74int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
@@ -100,7 +85,7 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
100 kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL); 85 kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
101 if (!job) 86 if (!job)
102 return -ENOMEM; 87 return -ENOMEM;
103 job->base.sched = ring->scheduler; 88 job->base.sched = &ring->sched;
104 job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity; 89 job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
105 job->adev = adev; 90 job->adev = adev;
106 job->ibs = ibs; 91 job->ibs = ibs;
@@ -109,7 +94,7 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
109 mutex_init(&job->job_lock); 94 mutex_init(&job->job_lock);
110 job->free_job = free_job; 95 job->free_job = free_job;
111 mutex_lock(&job->job_lock); 96 mutex_lock(&job->job_lock);
112 r = amd_sched_entity_push_job((struct amd_sched_job *)job); 97 r = amd_sched_entity_push_job(&job->base);
113 if (r) { 98 if (r) {
114 mutex_unlock(&job->job_lock); 99 mutex_unlock(&job->job_lock);
115 kfree(job); 100 kfree(job);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 068aeaff7183..4921de15b451 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -65,8 +65,14 @@ static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
65 65
66 if (a_fence) 66 if (a_fence)
67 return a_fence->ring->adev == adev; 67 return a_fence->ring->adev == adev;
68 if (s_fence) 68
69 return (struct amdgpu_device *)s_fence->scheduler->priv == adev; 69 if (s_fence) {
70 struct amdgpu_ring *ring;
71
72 ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
73 return ring->adev == adev;
74 }
75
70 return false; 76 return false;
71} 77}
72 78
@@ -251,6 +257,20 @@ int amdgpu_sync_wait(struct amdgpu_sync *sync)
251 fence_put(e->fence); 257 fence_put(e->fence);
252 kfree(e); 258 kfree(e);
253 } 259 }
260
261 if (amdgpu_enable_semaphores)
262 return 0;
263
264 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
265 struct amdgpu_fence *fence = sync->sync_to[i];
266 if (!fence)
267 continue;
268
269 r = fence_wait(&fence->base, false);
270 if (r)
271 return r;
272 }
273
254 return 0; 274 return 0;
255} 275}
256 276
@@ -285,7 +305,8 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
285 return -EINVAL; 305 return -EINVAL;
286 } 306 }
287 307
288 if (amdgpu_enable_scheduler || (count >= AMDGPU_NUM_SYNCS)) { 308 if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores ||
309 (count >= AMDGPU_NUM_SYNCS)) {
289 /* not enough room, wait manually */ 310 /* not enough room, wait manually */
290 r = fence_wait(&fence->base, false); 311 r = fence_wait(&fence->base, false);
291 if (r) 312 if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index f80b1a43be8a..4865615e9c06 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -59,8 +59,9 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
59 goto out_cleanup; 59 goto out_cleanup;
60 } 60 }
61 61
62 r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 0, 62 r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
63 NULL, &vram_obj); 63 AMDGPU_GEM_DOMAIN_VRAM, 0,
64 NULL, NULL, &vram_obj);
64 if (r) { 65 if (r) {
65 DRM_ERROR("Failed to create VRAM object\n"); 66 DRM_ERROR("Failed to create VRAM object\n");
66 goto out_cleanup; 67 goto out_cleanup;
@@ -80,7 +81,8 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
80 struct fence *fence = NULL; 81 struct fence *fence = NULL;
81 82
82 r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, 83 r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
83 AMDGPU_GEM_DOMAIN_GTT, 0, NULL, gtt_obj + i); 84 AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
85 NULL, gtt_obj + i);
84 if (r) { 86 if (r) {
85 DRM_ERROR("Failed to create GTT object %d\n", i); 87 DRM_ERROR("Failed to create GTT object %d\n", i);
86 goto out_lclean; 88 goto out_lclean;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index b5abd5cde413..364cbe975332 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -861,7 +861,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
861 r = amdgpu_bo_create(adev, 256 * 1024, PAGE_SIZE, true, 861 r = amdgpu_bo_create(adev, 256 * 1024, PAGE_SIZE, true,
862 AMDGPU_GEM_DOMAIN_VRAM, 862 AMDGPU_GEM_DOMAIN_VRAM,
863 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, 863 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
864 NULL, &adev->stollen_vga_memory); 864 NULL, NULL, &adev->stollen_vga_memory);
865 if (r) { 865 if (r) {
866 return r; 866 return r;
867 } 867 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 482e66797ae6..5cc95f1a7dab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -247,7 +247,7 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
247 const struct common_firmware_header *header = NULL; 247 const struct common_firmware_header *header = NULL;
248 248
249 err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true, 249 err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
250 AMDGPU_GEM_DOMAIN_GTT, 0, NULL, bo); 250 AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, bo);
251 if (err) { 251 if (err) {
252 dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err); 252 dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err);
253 err = -ENOMEM; 253 err = -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 2cf6c6b06e3b..d0312364d950 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -156,7 +156,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
156 r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true, 156 r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
157 AMDGPU_GEM_DOMAIN_VRAM, 157 AMDGPU_GEM_DOMAIN_VRAM,
158 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, 158 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
159 NULL, &adev->uvd.vcpu_bo); 159 NULL, NULL, &adev->uvd.vcpu_bo);
160 if (r) { 160 if (r) {
161 dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r); 161 dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
162 return r; 162 return r;
@@ -543,46 +543,60 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
543 return -EINVAL; 543 return -EINVAL;
544 } 544 }
545 545
546 if (msg_type == 1) { 546 switch (msg_type) {
547 case 0:
548 /* it's a create msg, calc image size (width * height) */
549 amdgpu_bo_kunmap(bo);
550
551 /* try to alloc a new handle */
552 for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
553 if (atomic_read(&adev->uvd.handles[i]) == handle) {
554 DRM_ERROR("Handle 0x%x already in use!\n", handle);
555 return -EINVAL;
556 }
557
558 if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
559 adev->uvd.filp[i] = ctx->parser->filp;
560 return 0;
561 }
562 }
563
564 DRM_ERROR("No more free UVD handles!\n");
565 return -EINVAL;
566
567 case 1:
547 /* it's a decode msg, calc buffer sizes */ 568 /* it's a decode msg, calc buffer sizes */
548 r = amdgpu_uvd_cs_msg_decode(msg, ctx->buf_sizes); 569 r = amdgpu_uvd_cs_msg_decode(msg, ctx->buf_sizes);
549 amdgpu_bo_kunmap(bo); 570 amdgpu_bo_kunmap(bo);
550 if (r) 571 if (r)
551 return r; 572 return r;
552 573
553 } else if (msg_type == 2) { 574 /* validate the handle */
575 for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
576 if (atomic_read(&adev->uvd.handles[i]) == handle) {
577 if (adev->uvd.filp[i] != ctx->parser->filp) {
578 DRM_ERROR("UVD handle collision detected!\n");
579 return -EINVAL;
580 }
581 return 0;
582 }
583 }
584
585 DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
586 return -ENOENT;
587
588 case 2:
554 /* it's a destroy msg, free the handle */ 589 /* it's a destroy msg, free the handle */
555 for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) 590 for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
556 atomic_cmpxchg(&adev->uvd.handles[i], handle, 0); 591 atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
557 amdgpu_bo_kunmap(bo); 592 amdgpu_bo_kunmap(bo);
558 return 0; 593 return 0;
559 } else {
560 /* it's a create msg */
561 amdgpu_bo_kunmap(bo);
562
563 if (msg_type != 0) {
564 DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
565 return -EINVAL;
566 }
567
568 /* it's a create msg, no special handling needed */
569 }
570
571 /* create or decode, validate the handle */
572 for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
573 if (atomic_read(&adev->uvd.handles[i]) == handle)
574 return 0;
575 }
576 594
577 /* handle not found try to alloc a new one */ 595 default:
578 for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) { 596 DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
579 if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) { 597 return -EINVAL;
580 adev->uvd.filp[i] = ctx->parser->filp;
581 return 0;
582 }
583 } 598 }
584 599 BUG();
585 DRM_ERROR("No more free UVD handles!\n");
586 return -EINVAL; 600 return -EINVAL;
587} 601}
588 602
@@ -805,10 +819,10 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
805} 819}
806 820
807static int amdgpu_uvd_free_job( 821static int amdgpu_uvd_free_job(
808 struct amdgpu_job *sched_job) 822 struct amdgpu_job *job)
809{ 823{
810 amdgpu_ib_free(sched_job->adev, sched_job->ibs); 824 amdgpu_ib_free(job->adev, job->ibs);
811 kfree(sched_job->ibs); 825 kfree(job->ibs);
812 return 0; 826 return 0;
813} 827}
814 828
@@ -905,7 +919,7 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
905 r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true, 919 r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
906 AMDGPU_GEM_DOMAIN_VRAM, 920 AMDGPU_GEM_DOMAIN_VRAM,
907 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, 921 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
908 NULL, &bo); 922 NULL, NULL, &bo);
909 if (r) 923 if (r)
910 return r; 924 return r;
911 925
@@ -954,7 +968,7 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
954 r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true, 968 r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
955 AMDGPU_GEM_DOMAIN_VRAM, 969 AMDGPU_GEM_DOMAIN_VRAM,
956 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, 970 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
957 NULL, &bo); 971 NULL, NULL, &bo);
958 if (r) 972 if (r)
959 return r; 973 return r;
960 974
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 3cab96c42aa8..74f2038ac747 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -143,7 +143,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
143 r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, 143 r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
144 AMDGPU_GEM_DOMAIN_VRAM, 144 AMDGPU_GEM_DOMAIN_VRAM,
145 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, 145 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
146 NULL, &adev->vce.vcpu_bo); 146 NULL, NULL, &adev->vce.vcpu_bo);
147 if (r) { 147 if (r) {
148 dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r); 148 dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
149 return r; 149 return r;
@@ -342,10 +342,10 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
342} 342}
343 343
344static int amdgpu_vce_free_job( 344static int amdgpu_vce_free_job(
345 struct amdgpu_job *sched_job) 345 struct amdgpu_job *job)
346{ 346{
347 amdgpu_ib_free(sched_job->adev, sched_job->ibs); 347 amdgpu_ib_free(job->adev, job->ibs);
348 kfree(sched_job->ibs); 348 kfree(job->ibs);
349 return 0; 349 return 0;
350} 350}
351 351
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index f68b7cdc370a..1e14531353e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -316,12 +316,12 @@ static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
316 } 316 }
317} 317}
318 318
319int amdgpu_vm_free_job(struct amdgpu_job *sched_job) 319int amdgpu_vm_free_job(struct amdgpu_job *job)
320{ 320{
321 int i; 321 int i;
322 for (i = 0; i < sched_job->num_ibs; i++) 322 for (i = 0; i < job->num_ibs; i++)
323 amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]); 323 amdgpu_ib_free(job->adev, &job->ibs[i]);
324 kfree(sched_job->ibs); 324 kfree(job->ibs);
325 return 0; 325 return 0;
326} 326}
327 327
@@ -686,31 +686,6 @@ static int amdgpu_vm_update_ptes(struct amdgpu_device *adev,
686} 686}
687 687
688/** 688/**
689 * amdgpu_vm_fence_pts - fence page tables after an update
690 *
691 * @vm: requested vm
692 * @start: start of GPU address range
693 * @end: end of GPU address range
694 * @fence: fence to use
695 *
696 * Fence the page tables in the range @start - @end (cayman+).
697 *
698 * Global and local mutex must be locked!
699 */
700static void amdgpu_vm_fence_pts(struct amdgpu_vm *vm,
701 uint64_t start, uint64_t end,
702 struct fence *fence)
703{
704 unsigned i;
705
706 start >>= amdgpu_vm_block_size;
707 end >>= amdgpu_vm_block_size;
708
709 for (i = start; i <= end; ++i)
710 amdgpu_bo_fence(vm->page_tables[i].bo, fence, true);
711}
712
713/**
714 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table 689 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
715 * 690 *
716 * @adev: amdgpu_device pointer 691 * @adev: amdgpu_device pointer
@@ -813,8 +788,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
813 if (r) 788 if (r)
814 goto error_free; 789 goto error_free;
815 790
816 amdgpu_vm_fence_pts(vm, mapping->it.start, 791 amdgpu_bo_fence(vm->page_directory, f, true);
817 mapping->it.last + 1, f);
818 if (fence) { 792 if (fence) {
819 fence_put(*fence); 793 fence_put(*fence);
820 *fence = fence_get(f); 794 *fence = fence_get(f);
@@ -855,7 +829,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
855 int r; 829 int r;
856 830
857 if (mem) { 831 if (mem) {
858 addr = mem->start << PAGE_SHIFT; 832 addr = (u64)mem->start << PAGE_SHIFT;
859 if (mem->mem_type != TTM_PL_TT) 833 if (mem->mem_type != TTM_PL_TT)
860 addr += adev->vm_manager.vram_base_offset; 834 addr += adev->vm_manager.vram_base_offset;
861 } else { 835 } else {
@@ -1089,6 +1063,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1089 1063
1090 /* walk over the address space and allocate the page tables */ 1064 /* walk over the address space and allocate the page tables */
1091 for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) { 1065 for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
1066 struct reservation_object *resv = vm->page_directory->tbo.resv;
1092 struct amdgpu_bo *pt; 1067 struct amdgpu_bo *pt;
1093 1068
1094 if (vm->page_tables[pt_idx].bo) 1069 if (vm->page_tables[pt_idx].bo)
@@ -1097,11 +1072,13 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1097 /* drop mutex to allocate and clear page table */ 1072 /* drop mutex to allocate and clear page table */
1098 mutex_unlock(&vm->mutex); 1073 mutex_unlock(&vm->mutex);
1099 1074
1075 ww_mutex_lock(&resv->lock, NULL);
1100 r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8, 1076 r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
1101 AMDGPU_GPU_PAGE_SIZE, true, 1077 AMDGPU_GPU_PAGE_SIZE, true,
1102 AMDGPU_GEM_DOMAIN_VRAM, 1078 AMDGPU_GEM_DOMAIN_VRAM,
1103 AMDGPU_GEM_CREATE_NO_CPU_ACCESS, 1079 AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
1104 NULL, &pt); 1080 NULL, resv, &pt);
1081 ww_mutex_unlock(&resv->lock);
1105 if (r) 1082 if (r)
1106 goto error_free; 1083 goto error_free;
1107 1084
@@ -1303,7 +1280,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1303 r = amdgpu_bo_create(adev, pd_size, align, true, 1280 r = amdgpu_bo_create(adev, pd_size, align, true,
1304 AMDGPU_GEM_DOMAIN_VRAM, 1281 AMDGPU_GEM_DOMAIN_VRAM,
1305 AMDGPU_GEM_CREATE_NO_CPU_ACCESS, 1282 AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
1306 NULL, &vm->page_directory); 1283 NULL, NULL, &vm->page_directory);
1307 if (r) 1284 if (r)
1308 return r; 1285 return r;
1309 1286
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index cd6edc40c9cd..1e0bba29e167 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -1279,8 +1279,7 @@ amdgpu_atombios_encoder_setup_dig(struct drm_encoder *encoder, int action)
1279 amdgpu_atombios_encoder_setup_dig_encoder(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0); 1279 amdgpu_atombios_encoder_setup_dig_encoder(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
1280 } 1280 }
1281 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) 1281 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
1282 amdgpu_atombios_encoder_setup_dig_transmitter(encoder, 1282 amdgpu_atombios_encoder_set_backlight_level(amdgpu_encoder, dig->backlight_level);
1283 ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
1284 if (ext_encoder) 1283 if (ext_encoder)
1285 amdgpu_atombios_encoder_setup_external_encoder(encoder, ext_encoder, ATOM_ENABLE); 1284 amdgpu_atombios_encoder_setup_external_encoder(encoder, ext_encoder, ATOM_ENABLE);
1286 } else { 1285 } else {
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_smc.c b/drivers/gpu/drm/amd/amdgpu/cz_smc.c
index a72ffc7d6c26..e33180d3314a 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_smc.c
@@ -814,7 +814,8 @@ int cz_smu_init(struct amdgpu_device *adev)
814 * 3. map kernel virtual address 814 * 3. map kernel virtual address
815 */ 815 */
816 ret = amdgpu_bo_create(adev, priv->toc_buffer.data_size, PAGE_SIZE, 816 ret = amdgpu_bo_create(adev, priv->toc_buffer.data_size, PAGE_SIZE,
817 true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, toc_buf); 817 true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
818 toc_buf);
818 819
819 if (ret) { 820 if (ret) {
820 dev_err(adev->dev, "(%d) SMC TOC buffer allocation failed\n", ret); 821 dev_err(adev->dev, "(%d) SMC TOC buffer allocation failed\n", ret);
@@ -822,7 +823,8 @@ int cz_smu_init(struct amdgpu_device *adev)
822 } 823 }
823 824
824 ret = amdgpu_bo_create(adev, priv->smu_buffer.data_size, PAGE_SIZE, 825 ret = amdgpu_bo_create(adev, priv->smu_buffer.data_size, PAGE_SIZE,
825 true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, smu_buf); 826 true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
827 smu_buf);
826 828
827 if (ret) { 829 if (ret) {
828 dev_err(adev->dev, "(%d) SMC Internal buffer allocation failed\n", ret); 830 dev_err(adev->dev, "(%d) SMC Internal buffer allocation failed\n", ret);
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
index 322edea65857..bda1249eb871 100644
--- a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
@@ -764,7 +764,7 @@ int fiji_smu_init(struct amdgpu_device *adev)
764 ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE, 764 ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
765 true, AMDGPU_GEM_DOMAIN_VRAM, 765 true, AMDGPU_GEM_DOMAIN_VRAM,
766 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, 766 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
767 NULL, toc_buf); 767 NULL, NULL, toc_buf);
768 if (ret) { 768 if (ret) {
769 DRM_ERROR("Failed to allocate memory for TOC buffer\n"); 769 DRM_ERROR("Failed to allocate memory for TOC buffer\n");
770 return -ENOMEM; 770 return -ENOMEM;
@@ -774,7 +774,7 @@ int fiji_smu_init(struct amdgpu_device *adev)
774 ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE, 774 ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE,
775 true, AMDGPU_GEM_DOMAIN_VRAM, 775 true, AMDGPU_GEM_DOMAIN_VRAM,
776 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, 776 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
777 NULL, smu_buf); 777 NULL, NULL, smu_buf);
778 if (ret) { 778 if (ret) {
779 DRM_ERROR("Failed to allocate memory for SMU internal buffer\n"); 779 DRM_ERROR("Failed to allocate memory for SMU internal buffer\n");
780 return -ENOMEM; 780 return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 4bd1e5cf65ca..e992bf2ff66c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -3206,7 +3206,7 @@ static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
3206 r = amdgpu_bo_create(adev, 3206 r = amdgpu_bo_create(adev,
3207 adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2, 3207 adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2,
3208 PAGE_SIZE, true, 3208 PAGE_SIZE, true,
3209 AMDGPU_GEM_DOMAIN_GTT, 0, NULL, 3209 AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
3210 &adev->gfx.mec.hpd_eop_obj); 3210 &adev->gfx.mec.hpd_eop_obj);
3211 if (r) { 3211 if (r) {
3212 dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r); 3212 dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
@@ -3373,7 +3373,7 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
3373 r = amdgpu_bo_create(adev, 3373 r = amdgpu_bo_create(adev,
3374 sizeof(struct bonaire_mqd), 3374 sizeof(struct bonaire_mqd),
3375 PAGE_SIZE, true, 3375 PAGE_SIZE, true,
3376 AMDGPU_GEM_DOMAIN_GTT, 0, NULL, 3376 AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
3377 &ring->mqd_obj); 3377 &ring->mqd_obj);
3378 if (r) { 3378 if (r) {
3379 dev_warn(adev->dev, "(%d) create MQD bo failed\n", r); 3379 dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
@@ -3610,41 +3610,6 @@ static int gfx_v7_0_cp_resume(struct amdgpu_device *adev)
3610 return 0; 3610 return 0;
3611} 3611}
3612 3612
3613static void gfx_v7_0_ce_sync_me(struct amdgpu_ring *ring)
3614{
3615 struct amdgpu_device *adev = ring->adev;
3616 u64 gpu_addr = adev->wb.gpu_addr + adev->gfx.ce_sync_offs * 4;
3617
3618 /* instruct DE to set a magic number */
3619 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3620 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
3621 WRITE_DATA_DST_SEL(5)));
3622 amdgpu_ring_write(ring, gpu_addr & 0xfffffffc);
3623 amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
3624 amdgpu_ring_write(ring, 1);
3625
3626 /* let CE wait till condition satisfied */
3627 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
3628 amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
3629 WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
3630 WAIT_REG_MEM_FUNCTION(3) | /* == */
3631 WAIT_REG_MEM_ENGINE(2))); /* ce */
3632 amdgpu_ring_write(ring, gpu_addr & 0xfffffffc);
3633 amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
3634 amdgpu_ring_write(ring, 1);
3635 amdgpu_ring_write(ring, 0xffffffff);
3636 amdgpu_ring_write(ring, 4); /* poll interval */
3637
3638 /* instruct CE to reset wb of ce_sync to zero */
3639 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3640 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
3641 WRITE_DATA_DST_SEL(5) |
3642 WR_CONFIRM));
3643 amdgpu_ring_write(ring, gpu_addr & 0xfffffffc);
3644 amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
3645 amdgpu_ring_write(ring, 0);
3646}
3647
3648/* 3613/*
3649 * vm 3614 * vm
3650 * VMID 0 is the physical GPU addresses as used by the kernel. 3615 * VMID 0 is the physical GPU addresses as used by the kernel.
@@ -3663,6 +3628,13 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
3663 unsigned vm_id, uint64_t pd_addr) 3628 unsigned vm_id, uint64_t pd_addr)
3664{ 3629{
3665 int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX); 3630 int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
3631 if (usepfp) {
3632 /* sync CE with ME to prevent CE fetching CEIB before the context switch is done */
3633 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3634 amdgpu_ring_write(ring, 0);
3635 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3636 amdgpu_ring_write(ring, 0);
3637 }
3666 3638
3667 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 3639 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3668 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | 3640 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
@@ -3703,7 +3675,10 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
3703 amdgpu_ring_write(ring, 0x0); 3675 amdgpu_ring_write(ring, 0x0);
3704 3676
3705 /* sync CE with ME to prevent CE fetching CEIB before the context switch is done */ 3677 /* sync CE with ME to prevent CE fetching CEIB before the context switch is done */
3706 gfx_v7_0_ce_sync_me(ring); 3678 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3679 amdgpu_ring_write(ring, 0);
3680 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3681 amdgpu_ring_write(ring, 0);
3707 } 3682 }
3708} 3683}
3709 3684
@@ -3788,7 +3763,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
3788 r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, 3763 r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
3789 AMDGPU_GEM_DOMAIN_VRAM, 3764 AMDGPU_GEM_DOMAIN_VRAM,
3790 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, 3765 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
3791 NULL, &adev->gfx.rlc.save_restore_obj); 3766 NULL, NULL,
3767 &adev->gfx.rlc.save_restore_obj);
3792 if (r) { 3768 if (r) {
3793 dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r); 3769 dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
3794 return r; 3770 return r;
@@ -3831,7 +3807,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
3831 r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, 3807 r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
3832 AMDGPU_GEM_DOMAIN_VRAM, 3808 AMDGPU_GEM_DOMAIN_VRAM,
3833 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, 3809 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
3834 NULL, &adev->gfx.rlc.clear_state_obj); 3810 NULL, NULL,
3811 &adev->gfx.rlc.clear_state_obj);
3835 if (r) { 3812 if (r) {
3836 dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r); 3813 dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
3837 gfx_v7_0_rlc_fini(adev); 3814 gfx_v7_0_rlc_fini(adev);
@@ -3870,7 +3847,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
3870 r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true, 3847 r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
3871 AMDGPU_GEM_DOMAIN_VRAM, 3848 AMDGPU_GEM_DOMAIN_VRAM,
3872 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, 3849 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
3873 NULL, &adev->gfx.rlc.cp_table_obj); 3850 NULL, NULL,
3851 &adev->gfx.rlc.cp_table_obj);
3874 if (r) { 3852 if (r) {
3875 dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r); 3853 dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
3876 gfx_v7_0_rlc_fini(adev); 3854 gfx_v7_0_rlc_fini(adev);
@@ -4802,12 +4780,6 @@ static int gfx_v7_0_sw_init(void *handle)
4802 return r; 4780 return r;
4803 } 4781 }
4804 4782
4805 r = amdgpu_wb_get(adev, &adev->gfx.ce_sync_offs);
4806 if (r) {
4807 DRM_ERROR("(%d) gfx.ce_sync_offs wb alloc failed\n", r);
4808 return r;
4809 }
4810
4811 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 4783 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
4812 ring = &adev->gfx.gfx_ring[i]; 4784 ring = &adev->gfx.gfx_ring[i];
4813 ring->ring_obj = NULL; 4785 ring->ring_obj = NULL;
@@ -4851,21 +4823,21 @@ static int gfx_v7_0_sw_init(void *handle)
4851 r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size, 4823 r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size,
4852 PAGE_SIZE, true, 4824 PAGE_SIZE, true,
4853 AMDGPU_GEM_DOMAIN_GDS, 0, 4825 AMDGPU_GEM_DOMAIN_GDS, 0,
4854 NULL, &adev->gds.gds_gfx_bo); 4826 NULL, NULL, &adev->gds.gds_gfx_bo);
4855 if (r) 4827 if (r)
4856 return r; 4828 return r;
4857 4829
4858 r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size, 4830 r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size,
4859 PAGE_SIZE, true, 4831 PAGE_SIZE, true,
4860 AMDGPU_GEM_DOMAIN_GWS, 0, 4832 AMDGPU_GEM_DOMAIN_GWS, 0,
4861 NULL, &adev->gds.gws_gfx_bo); 4833 NULL, NULL, &adev->gds.gws_gfx_bo);
4862 if (r) 4834 if (r)
4863 return r; 4835 return r;
4864 4836
4865 r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size, 4837 r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size,
4866 PAGE_SIZE, true, 4838 PAGE_SIZE, true,
4867 AMDGPU_GEM_DOMAIN_OA, 0, 4839 AMDGPU_GEM_DOMAIN_OA, 0,
4868 NULL, &adev->gds.oa_gfx_bo); 4840 NULL, NULL, &adev->gds.oa_gfx_bo);
4869 if (r) 4841 if (r)
4870 return r; 4842 return r;
4871 4843
@@ -4886,8 +4858,6 @@ static int gfx_v7_0_sw_fini(void *handle)
4886 for (i = 0; i < adev->gfx.num_compute_rings; i++) 4858 for (i = 0; i < adev->gfx.num_compute_rings; i++)
4887 amdgpu_ring_fini(&adev->gfx.compute_ring[i]); 4859 amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
4888 4860
4889 amdgpu_wb_free(adev, adev->gfx.ce_sync_offs);
4890
4891 gfx_v7_0_cp_compute_fini(adev); 4861 gfx_v7_0_cp_compute_fini(adev);
4892 gfx_v7_0_rlc_fini(adev); 4862 gfx_v7_0_rlc_fini(adev);
4893 gfx_v7_0_mec_fini(adev); 4863 gfx_v7_0_mec_fini(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 53f07439a512..cb4f68f53f24 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -868,7 +868,7 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
868 r = amdgpu_bo_create(adev, 868 r = amdgpu_bo_create(adev,
869 adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2, 869 adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2,
870 PAGE_SIZE, true, 870 PAGE_SIZE, true,
871 AMDGPU_GEM_DOMAIN_GTT, 0, NULL, 871 AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
872 &adev->gfx.mec.hpd_eop_obj); 872 &adev->gfx.mec.hpd_eop_obj);
873 if (r) { 873 if (r) {
874 dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r); 874 dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
@@ -940,12 +940,6 @@ static int gfx_v8_0_sw_init(void *handle)
940 return r; 940 return r;
941 } 941 }
942 942
943 r = amdgpu_wb_get(adev, &adev->gfx.ce_sync_offs);
944 if (r) {
945 DRM_ERROR("(%d) gfx.ce_sync_offs wb alloc failed\n", r);
946 return r;
947 }
948
949 /* set up the gfx ring */ 943 /* set up the gfx ring */
950 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 944 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
951 ring = &adev->gfx.gfx_ring[i]; 945 ring = &adev->gfx.gfx_ring[i];
@@ -995,21 +989,21 @@ static int gfx_v8_0_sw_init(void *handle)
995 /* reserve GDS, GWS and OA resource for gfx */ 989 /* reserve GDS, GWS and OA resource for gfx */
996 r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size, 990 r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size,
997 PAGE_SIZE, true, 991 PAGE_SIZE, true,
998 AMDGPU_GEM_DOMAIN_GDS, 0, 992 AMDGPU_GEM_DOMAIN_GDS, 0, NULL,
999 NULL, &adev->gds.gds_gfx_bo); 993 NULL, &adev->gds.gds_gfx_bo);
1000 if (r) 994 if (r)
1001 return r; 995 return r;
1002 996
1003 r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size, 997 r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size,
1004 PAGE_SIZE, true, 998 PAGE_SIZE, true,
1005 AMDGPU_GEM_DOMAIN_GWS, 0, 999 AMDGPU_GEM_DOMAIN_GWS, 0, NULL,
1006 NULL, &adev->gds.gws_gfx_bo); 1000 NULL, &adev->gds.gws_gfx_bo);
1007 if (r) 1001 if (r)
1008 return r; 1002 return r;
1009 1003
1010 r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size, 1004 r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size,
1011 PAGE_SIZE, true, 1005 PAGE_SIZE, true,
1012 AMDGPU_GEM_DOMAIN_OA, 0, 1006 AMDGPU_GEM_DOMAIN_OA, 0, NULL,
1013 NULL, &adev->gds.oa_gfx_bo); 1007 NULL, &adev->gds.oa_gfx_bo);
1014 if (r) 1008 if (r)
1015 return r; 1009 return r;
@@ -1033,8 +1027,6 @@ static int gfx_v8_0_sw_fini(void *handle)
1033 for (i = 0; i < adev->gfx.num_compute_rings; i++) 1027 for (i = 0; i < adev->gfx.num_compute_rings; i++)
1034 amdgpu_ring_fini(&adev->gfx.compute_ring[i]); 1028 amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
1035 1029
1036 amdgpu_wb_free(adev, adev->gfx.ce_sync_offs);
1037
1038 gfx_v8_0_mec_fini(adev); 1030 gfx_v8_0_mec_fini(adev);
1039 1031
1040 return 0; 1032 return 0;
@@ -3106,7 +3098,7 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
3106 sizeof(struct vi_mqd), 3098 sizeof(struct vi_mqd),
3107 PAGE_SIZE, true, 3099 PAGE_SIZE, true,
3108 AMDGPU_GEM_DOMAIN_GTT, 0, NULL, 3100 AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
3109 &ring->mqd_obj); 3101 NULL, &ring->mqd_obj);
3110 if (r) { 3102 if (r) {
3111 dev_warn(adev->dev, "(%d) create MQD bo failed\n", r); 3103 dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
3112 return r; 3104 return r;
@@ -3965,6 +3957,7 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
3965 DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0)); 3957 DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
3966 amdgpu_ring_write(ring, lower_32_bits(seq)); 3958 amdgpu_ring_write(ring, lower_32_bits(seq));
3967 amdgpu_ring_write(ring, upper_32_bits(seq)); 3959 amdgpu_ring_write(ring, upper_32_bits(seq));
3960
3968} 3961}
3969 3962
3970/** 3963/**
@@ -4005,49 +3998,34 @@ static bool gfx_v8_0_ring_emit_semaphore(struct amdgpu_ring *ring,
4005 return true; 3998 return true;
4006} 3999}
4007 4000
4008static void gfx_v8_0_ce_sync_me(struct amdgpu_ring *ring) 4001static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
4002 unsigned vm_id, uint64_t pd_addr)
4009{ 4003{
4010 struct amdgpu_device *adev = ring->adev; 4004 int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
4011 u64 gpu_addr = adev->wb.gpu_addr + adev->gfx.ce_sync_offs * 4; 4005 uint32_t seq = ring->fence_drv.sync_seq[ring->idx];
4012 4006 uint64_t addr = ring->fence_drv.gpu_addr;
4013 /* instruct DE to set a magic number */
4014 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4015 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4016 WRITE_DATA_DST_SEL(5)));
4017 amdgpu_ring_write(ring, gpu_addr & 0xfffffffc);
4018 amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
4019 amdgpu_ring_write(ring, 1);
4020 4007
4021 /* let CE wait till condition satisfied */
4022 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); 4008 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
4023 amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */ 4009 amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
4024 WAIT_REG_MEM_MEM_SPACE(1) | /* memory */ 4010 WAIT_REG_MEM_FUNCTION(3))); /* equal */
4025 WAIT_REG_MEM_FUNCTION(3) | /* == */ 4011 amdgpu_ring_write(ring, addr & 0xfffffffc);
4026 WAIT_REG_MEM_ENGINE(2))); /* ce */ 4012 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
4027 amdgpu_ring_write(ring, gpu_addr & 0xfffffffc); 4013 amdgpu_ring_write(ring, seq);
4028 amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
4029 amdgpu_ring_write(ring, 1);
4030 amdgpu_ring_write(ring, 0xffffffff); 4014 amdgpu_ring_write(ring, 0xffffffff);
4031 amdgpu_ring_write(ring, 4); /* poll interval */ 4015 amdgpu_ring_write(ring, 4); /* poll interval */
4032 4016
4033 /* instruct CE to reset wb of ce_sync to zero */ 4017 if (usepfp) {
4034 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 4018 /* sync CE with ME to prevent CE fetching CEIB before the context switch is done */
4035 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) | 4019 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
4036 WRITE_DATA_DST_SEL(5) | 4020 amdgpu_ring_write(ring, 0);
4037 WR_CONFIRM)); 4021 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
4038 amdgpu_ring_write(ring, gpu_addr & 0xfffffffc); 4022 amdgpu_ring_write(ring, 0);
4039 amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff); 4023 }
4040 amdgpu_ring_write(ring, 0);
4041}
4042
4043static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
4044 unsigned vm_id, uint64_t pd_addr)
4045{
4046 int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
4047 4024
4048 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 4025 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4049 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | 4026 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
4050 WRITE_DATA_DST_SEL(0))); 4027 WRITE_DATA_DST_SEL(0)) |
4028 WR_CONFIRM);
4051 if (vm_id < 8) { 4029 if (vm_id < 8) {
4052 amdgpu_ring_write(ring, 4030 amdgpu_ring_write(ring,
4053 (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id)); 4031 (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
@@ -4083,9 +4061,10 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
4083 /* sync PFP to ME, otherwise we might get invalid PFP reads */ 4061 /* sync PFP to ME, otherwise we might get invalid PFP reads */
4084 amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); 4062 amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
4085 amdgpu_ring_write(ring, 0x0); 4063 amdgpu_ring_write(ring, 0x0);
4086 4064 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
4087 /* sync CE with ME to prevent CE fetching CEIB before the context switch is done */ 4065 amdgpu_ring_write(ring, 0);
4088 gfx_v8_0_ce_sync_me(ring); 4066 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
4067 amdgpu_ring_write(ring, 0);
4089 } 4068 }
4090} 4069}
4091 4070
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 774528ab8704..fab5471d25d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -1262,6 +1262,12 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
1262 addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR); 1262 addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
1263 status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS); 1263 status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
1264 mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT); 1264 mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
1265 /* reset addr and status */
1266 WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
1267
1268 if (!addr && !status)
1269 return 0;
1270
1265 dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n", 1271 dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
1266 entry->src_id, entry->src_data); 1272 entry->src_id, entry->src_data);
1267 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", 1273 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
@@ -1269,8 +1275,6 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
1269 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", 1275 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
1270 status); 1276 status);
1271 gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client); 1277 gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client);
1272 /* reset addr and status */
1273 WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
1274 1278
1275 return 0; 1279 return 0;
1276} 1280}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 9a07742620d0..7bc9e9fcf3d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1262,6 +1262,12 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
1262 addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR); 1262 addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
1263 status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS); 1263 status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
1264 mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT); 1264 mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
1265 /* reset addr and status */
1266 WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
1267
1268 if (!addr && !status)
1269 return 0;
1270
1265 dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n", 1271 dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
1266 entry->src_id, entry->src_data); 1272 entry->src_id, entry->src_data);
1267 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", 1273 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
@@ -1269,8 +1275,6 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
1269 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", 1275 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
1270 status); 1276 status);
1271 gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client); 1277 gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client);
1272 /* reset addr and status */
1273 WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
1274 1278
1275 return 0; 1279 return 0;
1276} 1280}
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
index c900aa942ade..966d4b2ed9da 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
@@ -625,7 +625,7 @@ int iceland_smu_init(struct amdgpu_device *adev)
625 ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE, 625 ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
626 true, AMDGPU_GEM_DOMAIN_VRAM, 626 true, AMDGPU_GEM_DOMAIN_VRAM,
627 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, 627 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
628 NULL, toc_buf); 628 NULL, NULL, toc_buf);
629 if (ret) { 629 if (ret) {
630 DRM_ERROR("Failed to allocate memory for TOC buffer\n"); 630 DRM_ERROR("Failed to allocate memory for TOC buffer\n");
631 return -ENOMEM; 631 return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
index 1f5ac941a610..5421309c1862 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
@@ -763,7 +763,7 @@ int tonga_smu_init(struct amdgpu_device *adev)
763 ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE, 763 ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
764 true, AMDGPU_GEM_DOMAIN_VRAM, 764 true, AMDGPU_GEM_DOMAIN_VRAM,
765 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, 765 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
766 NULL, toc_buf); 766 NULL, NULL, toc_buf);
767 if (ret) { 767 if (ret) {
768 DRM_ERROR("Failed to allocate memory for TOC buffer\n"); 768 DRM_ERROR("Failed to allocate memory for TOC buffer\n");
769 return -ENOMEM; 769 return -ENOMEM;
@@ -773,7 +773,7 @@ int tonga_smu_init(struct amdgpu_device *adev)
773 ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE, 773 ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE,
774 true, AMDGPU_GEM_DOMAIN_VRAM, 774 true, AMDGPU_GEM_DOMAIN_VRAM,
775 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, 775 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
776 NULL, smu_buf); 776 NULL, NULL, smu_buf);
777 if (ret) { 777 if (ret) {
778 DRM_ERROR("Failed to allocate memory for SMU internal buffer\n"); 778 DRM_ERROR("Failed to allocate memory for SMU internal buffer\n");
779 return -ENOMEM; 779 return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 5fac5da694f0..ed50dd725788 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -224,11 +224,11 @@ static int uvd_v4_2_suspend(void *handle)
224 int r; 224 int r;
225 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 225 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
226 226
227 r = uvd_v4_2_hw_fini(adev); 227 r = amdgpu_uvd_suspend(adev);
228 if (r) 228 if (r)
229 return r; 229 return r;
230 230
231 r = amdgpu_uvd_suspend(adev); 231 r = uvd_v4_2_hw_fini(adev);
232 if (r) 232 if (r)
233 return r; 233 return r;
234 234
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 2d5c59c318af..9ad8b9906c0b 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -220,11 +220,11 @@ static int uvd_v5_0_suspend(void *handle)
220 int r; 220 int r;
221 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 221 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
222 222
223 r = uvd_v5_0_hw_fini(adev); 223 r = amdgpu_uvd_suspend(adev);
224 if (r) 224 if (r)
225 return r; 225 return r;
226 226
227 r = amdgpu_uvd_suspend(adev); 227 r = uvd_v5_0_hw_fini(adev);
228 if (r) 228 if (r)
229 return r; 229 return r;
230 230
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index d9f553fce531..7e9934fa4193 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -214,14 +214,16 @@ static int uvd_v6_0_suspend(void *handle)
214 int r; 214 int r;
215 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 215 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
216 216
217 /* Skip this for APU for now */
218 if (!(adev->flags & AMD_IS_APU)) {
219 r = amdgpu_uvd_suspend(adev);
220 if (r)
221 return r;
222 }
217 r = uvd_v6_0_hw_fini(adev); 223 r = uvd_v6_0_hw_fini(adev);
218 if (r) 224 if (r)
219 return r; 225 return r;
220 226
221 r = amdgpu_uvd_suspend(adev);
222 if (r)
223 return r;
224
225 return r; 227 return r;
226} 228}
227 229
@@ -230,10 +232,12 @@ static int uvd_v6_0_resume(void *handle)
230 int r; 232 int r;
231 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 233 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
232 234
233 r = amdgpu_uvd_resume(adev); 235 /* Skip this for APU for now */
234 if (r) 236 if (!(adev->flags & AMD_IS_APU)) {
235 return r; 237 r = amdgpu_uvd_resume(adev);
236 238 if (r)
239 return r;
240 }
237 r = uvd_v6_0_hw_init(adev); 241 r = uvd_v6_0_hw_init(adev);
238 if (r) 242 if (r)
239 return r; 243 return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 552d9e75ad1b..b55ceb14fdcd 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1400,7 +1400,8 @@ static int vi_common_early_init(void *handle)
1400 case CHIP_CARRIZO: 1400 case CHIP_CARRIZO:
1401 adev->has_uvd = true; 1401 adev->has_uvd = true;
1402 adev->cg_flags = 0; 1402 adev->cg_flags = 0;
1403 adev->pg_flags = AMDGPU_PG_SUPPORT_UVD | AMDGPU_PG_SUPPORT_VCE; 1403 /* Disable UVD pg */
1404 adev->pg_flags = /* AMDGPU_PG_SUPPORT_UVD | */AMDGPU_PG_SUPPORT_VCE;
1404 adev->external_rev_id = adev->rev_id + 0x1; 1405 adev->external_rev_id = adev->rev_id + 0x1;
1405 if (amdgpu_smc_load_fw && smc_enabled) 1406 if (amdgpu_smc_load_fw && smc_enabled)
1406 adev->firmware.smu_load = true; 1407 adev->firmware.smu_load = true;
diff --git a/drivers/gpu/drm/amd/include/cgs_linux.h b/drivers/gpu/drm/amd/include/cgs_linux.h
index 488642f08267..3b47ae313e36 100644
--- a/drivers/gpu/drm/amd/include/cgs_linux.h
+++ b/drivers/gpu/drm/amd/include/cgs_linux.h
@@ -27,19 +27,6 @@
27#include "cgs_common.h" 27#include "cgs_common.h"
28 28
29/** 29/**
30 * cgs_import_gpu_mem() - Import dmabuf handle
31 * @cgs_device: opaque device handle
32 * @dmabuf_fd: DMABuf file descriptor
33 * @handle: memory handle (output)
34 *
35 * Must be called in the process context that dmabuf_fd belongs to.
36 *
37 * Return: 0 on success, -errno otherwise
38 */
39typedef int (*cgs_import_gpu_mem_t)(void *cgs_device, int dmabuf_fd,
40 cgs_handle_t *handle);
41
42/**
43 * cgs_irq_source_set_func() - Callback for enabling/disabling interrupt sources 30 * cgs_irq_source_set_func() - Callback for enabling/disabling interrupt sources
44 * @private_data: private data provided to cgs_add_irq_source 31 * @private_data: private data provided to cgs_add_irq_source
45 * @src_id: interrupt source ID 32 * @src_id: interrupt source ID
@@ -114,16 +101,12 @@ typedef int (*cgs_irq_get_t)(void *cgs_device, unsigned src_id, unsigned type);
114typedef int (*cgs_irq_put_t)(void *cgs_device, unsigned src_id, unsigned type); 101typedef int (*cgs_irq_put_t)(void *cgs_device, unsigned src_id, unsigned type);
115 102
116struct cgs_os_ops { 103struct cgs_os_ops {
117 cgs_import_gpu_mem_t import_gpu_mem;
118
119 /* IRQ handling */ 104 /* IRQ handling */
120 cgs_add_irq_source_t add_irq_source; 105 cgs_add_irq_source_t add_irq_source;
121 cgs_irq_get_t irq_get; 106 cgs_irq_get_t irq_get;
122 cgs_irq_put_t irq_put; 107 cgs_irq_put_t irq_put;
123}; 108};
124 109
125#define cgs_import_gpu_mem(dev,dmabuf_fd,handle) \
126 CGS_OS_CALL(import_gpu_mem,dev,dmabuf_fd,handle)
127#define cgs_add_irq_source(dev,src_id,num_types,set,handler,private_data) \ 110#define cgs_add_irq_source(dev,src_id,num_types,set,handler,private_data) \
128 CGS_OS_CALL(add_irq_source,dev,src_id,num_types,set,handler, \ 111 CGS_OS_CALL(add_irq_source,dev,src_id,num_types,set,handler, \
129 private_data) 112 private_data)
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
new file mode 100644
index 000000000000..144f50acc971
--- /dev/null
+++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
@@ -0,0 +1,41 @@
1#if !defined(_GPU_SCHED_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
2#define _GPU_SCHED_TRACE_H
3
4#include <linux/stringify.h>
5#include <linux/types.h>
6#include <linux/tracepoint.h>
7
8#include <drm/drmP.h>
9
10#undef TRACE_SYSTEM
11#define TRACE_SYSTEM gpu_sched
12#define TRACE_INCLUDE_FILE gpu_sched_trace
13
14TRACE_EVENT(amd_sched_job,
15 TP_PROTO(struct amd_sched_job *sched_job),
16 TP_ARGS(sched_job),
17 TP_STRUCT__entry(
18 __field(struct amd_sched_entity *, entity)
19 __field(const char *, name)
20 __field(u32, job_count)
21 __field(int, hw_job_count)
22 ),
23
24 TP_fast_assign(
25 __entry->entity = sched_job->s_entity;
26 __entry->name = sched_job->sched->name;
27 __entry->job_count = kfifo_len(
28 &sched_job->s_entity->job_queue) / sizeof(sched_job);
29 __entry->hw_job_count = atomic_read(
30 &sched_job->sched->hw_rq_count);
31 ),
32 TP_printk("entity=%p, ring=%s, job count:%u, hw job count:%d",
33 __entry->entity, __entry->name, __entry->job_count,
34 __entry->hw_job_count)
35);
36#endif
37
38/* This part must be outside protection */
39#undef TRACE_INCLUDE_PATH
40#define TRACE_INCLUDE_PATH .
41#include <trace/define_trace.h>
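
The TRACE_EVENT() block above only declares the event; its body is compiled into the kernel when exactly one compilation unit defines CREATE_TRACE_POINTS before including the header, which is precisely what the gpu_scheduler.c hunk below adds. A minimal sketch of the resulting usage — example_emit() is a hypothetical helper, everything else follows from the hunks:

	/* In exactly one .c file (gpu_scheduler.c in the next hunk): */
	#define CREATE_TRACE_POINTS
	#include "gpu_sched_trace.h"

	/* Emitting the event; compiles to a cheap no-op branch unless enabled: */
	static void example_emit(struct amd_sched_job *sched_job)
	{
		trace_amd_sched_job(sched_job);
	}

At runtime the event would typically be switched on through the tracing filesystem, e.g. via events/gpu_sched/amd_sched_job/enable under the tracefs mount.
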
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 9259f1b6664c..3697eeeecf82 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -27,6 +27,9 @@
27#include <drm/drmP.h> 27#include <drm/drmP.h>
28#include "gpu_scheduler.h" 28#include "gpu_scheduler.h"
29 29
30#define CREATE_TRACE_POINTS
31#include "gpu_sched_trace.h"
32
30static struct amd_sched_job * 33static struct amd_sched_job *
31amd_sched_entity_pop_job(struct amd_sched_entity *entity); 34amd_sched_entity_pop_job(struct amd_sched_entity *entity);
32static void amd_sched_wakeup(struct amd_gpu_scheduler *sched); 35static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
@@ -65,29 +68,29 @@ static struct amd_sched_job *
65amd_sched_rq_select_job(struct amd_sched_rq *rq) 68amd_sched_rq_select_job(struct amd_sched_rq *rq)
66{ 69{
67 struct amd_sched_entity *entity; 70 struct amd_sched_entity *entity;
68 struct amd_sched_job *job; 71 struct amd_sched_job *sched_job;
69 72
70 spin_lock(&rq->lock); 73 spin_lock(&rq->lock);
71 74
72 entity = rq->current_entity; 75 entity = rq->current_entity;
73 if (entity) { 76 if (entity) {
74 list_for_each_entry_continue(entity, &rq->entities, list) { 77 list_for_each_entry_continue(entity, &rq->entities, list) {
75 job = amd_sched_entity_pop_job(entity); 78 sched_job = amd_sched_entity_pop_job(entity);
76 if (job) { 79 if (sched_job) {
77 rq->current_entity = entity; 80 rq->current_entity = entity;
78 spin_unlock(&rq->lock); 81 spin_unlock(&rq->lock);
79 return job; 82 return sched_job;
80 } 83 }
81 } 84 }
82 } 85 }
83 86
84 list_for_each_entry(entity, &rq->entities, list) { 87 list_for_each_entry(entity, &rq->entities, list) {
85 88
86 job = amd_sched_entity_pop_job(entity); 89 sched_job = amd_sched_entity_pop_job(entity);
87 if (job) { 90 if (sched_job) {
88 rq->current_entity = entity; 91 rq->current_entity = entity;
89 spin_unlock(&rq->lock); 92 spin_unlock(&rq->lock);
90 return job; 93 return sched_job;
91 } 94 }
92 95
93 if (entity == rq->current_entity) 96 if (entity == rq->current_entity)
@@ -115,23 +118,27 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
115 struct amd_sched_rq *rq, 118 struct amd_sched_rq *rq,
116 uint32_t jobs) 119 uint32_t jobs)
117{ 120{
121 int r;
122
118 if (!(sched && entity && rq)) 123 if (!(sched && entity && rq))
119 return -EINVAL; 124 return -EINVAL;
120 125
121 memset(entity, 0, sizeof(struct amd_sched_entity)); 126 memset(entity, 0, sizeof(struct amd_sched_entity));
122 entity->belongto_rq = rq; 127 INIT_LIST_HEAD(&entity->list);
123 entity->scheduler = sched; 128 entity->rq = rq;
124 entity->fence_context = fence_context_alloc(1); 129 entity->sched = sched;
125 if(kfifo_alloc(&entity->job_queue,
126 jobs * sizeof(void *),
127 GFP_KERNEL))
128 return -EINVAL;
129 130
130 spin_lock_init(&entity->queue_lock); 131 spin_lock_init(&entity->queue_lock);
132 r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
133 if (r)
134 return r;
135
131 atomic_set(&entity->fence_seq, 0); 136 atomic_set(&entity->fence_seq, 0);
137 entity->fence_context = fence_context_alloc(1);
132 138
133 /* Add the entity to the run queue */ 139 /* Add the entity to the run queue */
134 amd_sched_rq_add_entity(rq, entity); 140 amd_sched_rq_add_entity(rq, entity);
141
135 return 0; 142 return 0;
136} 143}
137 144
@@ -146,8 +153,8 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
146static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched, 153static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
147 struct amd_sched_entity *entity) 154 struct amd_sched_entity *entity)
148{ 155{
149 return entity->scheduler == sched && 156 return entity->sched == sched &&
150 entity->belongto_rq != NULL; 157 entity->rq != NULL;
151} 158}
152 159
153/** 160/**
@@ -177,7 +184,7 @@ static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
177void amd_sched_entity_fini(struct amd_gpu_scheduler *sched, 184void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
178 struct amd_sched_entity *entity) 185 struct amd_sched_entity *entity)
179{ 186{
180 struct amd_sched_rq *rq = entity->belongto_rq; 187 struct amd_sched_rq *rq = entity->rq;
181 188
182 if (!amd_sched_entity_is_initialized(sched, entity)) 189 if (!amd_sched_entity_is_initialized(sched, entity))
183 return; 190 return;
@@ -198,22 +205,22 @@ static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
198 container_of(cb, struct amd_sched_entity, cb); 205 container_of(cb, struct amd_sched_entity, cb);
199 entity->dependency = NULL; 206 entity->dependency = NULL;
200 fence_put(f); 207 fence_put(f);
201 amd_sched_wakeup(entity->scheduler); 208 amd_sched_wakeup(entity->sched);
202} 209}
203 210
204static struct amd_sched_job * 211static struct amd_sched_job *
205amd_sched_entity_pop_job(struct amd_sched_entity *entity) 212amd_sched_entity_pop_job(struct amd_sched_entity *entity)
206{ 213{
207 struct amd_gpu_scheduler *sched = entity->scheduler; 214 struct amd_gpu_scheduler *sched = entity->sched;
208 struct amd_sched_job *job; 215 struct amd_sched_job *sched_job;
209 216
210 if (ACCESS_ONCE(entity->dependency)) 217 if (ACCESS_ONCE(entity->dependency))
211 return NULL; 218 return NULL;
212 219
213 if (!kfifo_out_peek(&entity->job_queue, &job, sizeof(job))) 220 if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
214 return NULL; 221 return NULL;
215 222
216 while ((entity->dependency = sched->ops->dependency(job))) { 223 while ((entity->dependency = sched->ops->dependency(sched_job))) {
217 224
218 if (fence_add_callback(entity->dependency, &entity->cb, 225 if (fence_add_callback(entity->dependency, &entity->cb,
219 amd_sched_entity_wakeup)) 226 amd_sched_entity_wakeup))
@@ -222,32 +229,33 @@ amd_sched_entity_pop_job(struct amd_sched_entity *entity)
222 return NULL; 229 return NULL;
223 } 230 }
224 231
225 return job; 232 return sched_job;
226} 233}
227 234
228/** 235/**
229 * Helper to submit a job to the job queue 236 * Helper to submit a job to the job queue
230 * 237 *
231 * @job The pointer to job required to submit 238 * @sched_job The pointer to job required to submit
232 * 239 *
233 * Returns true if we could submit the job. 240 * Returns true if we could submit the job.
234 */ 241 */
235static bool amd_sched_entity_in(struct amd_sched_job *job) 242static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
236{ 243{
237 struct amd_sched_entity *entity = job->s_entity; 244 struct amd_sched_entity *entity = sched_job->s_entity;
238 bool added, first = false; 245 bool added, first = false;
239 246
240 spin_lock(&entity->queue_lock); 247 spin_lock(&entity->queue_lock);
241 added = kfifo_in(&entity->job_queue, &job, sizeof(job)) == sizeof(job); 248 added = kfifo_in(&entity->job_queue, &sched_job,
249 sizeof(sched_job)) == sizeof(sched_job);
242 250
243 if (added && kfifo_len(&entity->job_queue) == sizeof(job)) 251 if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
244 first = true; 252 first = true;
245 253
246 spin_unlock(&entity->queue_lock); 254 spin_unlock(&entity->queue_lock);
247 255
248 /* first job wakes up scheduler */ 256 /* first job wakes up scheduler */
249 if (first) 257 if (first)
250 amd_sched_wakeup(job->sched); 258 amd_sched_wakeup(sched_job->sched);
251 259
252 return added; 260 return added;
253} 261}
@@ -255,7 +263,7 @@ static bool amd_sched_entity_in(struct amd_sched_job *job)
255/** 263/**
256 * Submit a job to the job queue 264 * Submit a job to the job queue
257 * 265 *
258 * @job The pointer to job required to submit 266 * @sched_job The pointer to job required to submit
259 * 267 *
260 * Returns 0 for success, negative error code otherwise. 268 * Returns 0 for success, negative error code otherwise.
261 */ 269 */
@@ -271,9 +279,9 @@ int amd_sched_entity_push_job(struct amd_sched_job *sched_job)
271 fence_get(&fence->base); 279 fence_get(&fence->base);
272 sched_job->s_fence = fence; 280 sched_job->s_fence = fence;
273 281
274 wait_event(entity->scheduler->job_scheduled, 282 wait_event(entity->sched->job_scheduled,
275 amd_sched_entity_in(sched_job)); 283 amd_sched_entity_in(sched_job));
276 284 trace_amd_sched_job(sched_job);
277 return 0; 285 return 0;
278} 286}
279 287
@@ -301,30 +309,28 @@ static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
301static struct amd_sched_job * 309static struct amd_sched_job *
302amd_sched_select_job(struct amd_gpu_scheduler *sched) 310amd_sched_select_job(struct amd_gpu_scheduler *sched)
303{ 311{
304 struct amd_sched_job *job; 312 struct amd_sched_job *sched_job;
305 313
306 if (!amd_sched_ready(sched)) 314 if (!amd_sched_ready(sched))
307 return NULL; 315 return NULL;
308 316
 309 /* Kernel run queue has higher priority than normal run queue */ 317 /* Kernel run queue has higher priority than normal run queue */
310 job = amd_sched_rq_select_job(&sched->kernel_rq); 318 sched_job = amd_sched_rq_select_job(&sched->kernel_rq);
311 if (job == NULL) 319 if (sched_job == NULL)
312 job = amd_sched_rq_select_job(&sched->sched_rq); 320 sched_job = amd_sched_rq_select_job(&sched->sched_rq);
313 321
314 return job; 322 return sched_job;
315} 323}
316 324
317static void amd_sched_process_job(struct fence *f, struct fence_cb *cb) 325static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
318{ 326{
319 struct amd_sched_job *sched_job = 327 struct amd_sched_fence *s_fence =
320 container_of(cb, struct amd_sched_job, cb); 328 container_of(cb, struct amd_sched_fence, cb);
321 struct amd_gpu_scheduler *sched; 329 struct amd_gpu_scheduler *sched = s_fence->sched;
322 330
323 sched = sched_job->sched;
324 amd_sched_fence_signal(sched_job->s_fence);
325 atomic_dec(&sched->hw_rq_count); 331 atomic_dec(&sched->hw_rq_count);
326 fence_put(&sched_job->s_fence->base); 332 amd_sched_fence_signal(s_fence);
327 sched->ops->process_job(sched_job); 333 fence_put(&s_fence->base);
328 wake_up_interruptible(&sched->wake_up_worker); 334 wake_up_interruptible(&sched->wake_up_worker);
329} 335}
330 336
@@ -338,87 +344,82 @@ static int amd_sched_main(void *param)
338 344
339 while (!kthread_should_stop()) { 345 while (!kthread_should_stop()) {
340 struct amd_sched_entity *entity; 346 struct amd_sched_entity *entity;
341 struct amd_sched_job *job; 347 struct amd_sched_fence *s_fence;
348 struct amd_sched_job *sched_job;
342 struct fence *fence; 349 struct fence *fence;
343 350
344 wait_event_interruptible(sched->wake_up_worker, 351 wait_event_interruptible(sched->wake_up_worker,
345 kthread_should_stop() || 352 kthread_should_stop() ||
346 (job = amd_sched_select_job(sched))); 353 (sched_job = amd_sched_select_job(sched)));
347 354
348 if (!job) 355 if (!sched_job)
349 continue; 356 continue;
350 357
351 entity = job->s_entity; 358 entity = sched_job->s_entity;
359 s_fence = sched_job->s_fence;
352 atomic_inc(&sched->hw_rq_count); 360 atomic_inc(&sched->hw_rq_count);
353 fence = sched->ops->run_job(job); 361 fence = sched->ops->run_job(sched_job);
354 if (fence) { 362 if (fence) {
355 r = fence_add_callback(fence, &job->cb, 363 r = fence_add_callback(fence, &s_fence->cb,
356 amd_sched_process_job); 364 amd_sched_process_job);
357 if (r == -ENOENT) 365 if (r == -ENOENT)
358 amd_sched_process_job(fence, &job->cb); 366 amd_sched_process_job(fence, &s_fence->cb);
359 else if (r) 367 else if (r)
360 DRM_ERROR("fence add callback failed (%d)\n", r); 368 DRM_ERROR("fence add callback failed (%d)\n", r);
361 fence_put(fence); 369 fence_put(fence);
370 } else {
371 DRM_ERROR("Failed to run job!\n");
372 amd_sched_process_job(NULL, &s_fence->cb);
362 } 373 }
363 374
364 count = kfifo_out(&entity->job_queue, &job, sizeof(job)); 375 count = kfifo_out(&entity->job_queue, &sched_job,
365 WARN_ON(count != sizeof(job)); 376 sizeof(sched_job));
377 WARN_ON(count != sizeof(sched_job));
366 wake_up(&sched->job_scheduled); 378 wake_up(&sched->job_scheduled);
367 } 379 }
368 return 0; 380 return 0;
369} 381}
370 382
371/** 383/**
372 * Create a gpu scheduler 384 * Init a gpu scheduler instance
373 * 385 *
386 * @sched The pointer to the scheduler
374 * @ops The backend operations for this scheduler. 387 * @ops The backend operations for this scheduler.
375 * @ring The the ring id for the scheduler.
 376 * @hw_submission Number of hw submissions to do. 388 * @hw_submission Number of hw submissions to do.
389 * @name Name used for debugging
377 * 390 *
378 * Return the pointer to scheduler for success, otherwise return NULL 391 * Return 0 on success, otherwise error code.
379*/ 392*/
380struct amd_gpu_scheduler *amd_sched_create(struct amd_sched_backend_ops *ops, 393int amd_sched_init(struct amd_gpu_scheduler *sched,
381 unsigned ring, unsigned hw_submission, 394 struct amd_sched_backend_ops *ops,
382 void *priv) 395 unsigned hw_submission, const char *name)
383{ 396{
384 struct amd_gpu_scheduler *sched;
385
386 sched = kzalloc(sizeof(struct amd_gpu_scheduler), GFP_KERNEL);
387 if (!sched)
388 return NULL;
389
390 sched->ops = ops; 397 sched->ops = ops;
391 sched->ring_id = ring;
392 sched->hw_submission_limit = hw_submission; 398 sched->hw_submission_limit = hw_submission;
393 sched->priv = priv; 399 sched->name = name;
394 snprintf(sched->name, sizeof(sched->name), "amdgpu[%d]", ring);
395 amd_sched_rq_init(&sched->sched_rq); 400 amd_sched_rq_init(&sched->sched_rq);
396 amd_sched_rq_init(&sched->kernel_rq); 401 amd_sched_rq_init(&sched->kernel_rq);
397 402
398 init_waitqueue_head(&sched->wake_up_worker); 403 init_waitqueue_head(&sched->wake_up_worker);
399 init_waitqueue_head(&sched->job_scheduled); 404 init_waitqueue_head(&sched->job_scheduled);
400 atomic_set(&sched->hw_rq_count, 0); 405 atomic_set(&sched->hw_rq_count, 0);
406
 401 /* Each scheduler will run on a separate kernel thread */ 407 /* Each scheduler will run on a separate kernel thread */
402 sched->thread = kthread_run(amd_sched_main, sched, sched->name); 408 sched->thread = kthread_run(amd_sched_main, sched, sched->name);
403 if (IS_ERR(sched->thread)) { 409 if (IS_ERR(sched->thread)) {
404 DRM_ERROR("Failed to create scheduler for id %d.\n", ring); 410 DRM_ERROR("Failed to create scheduler for %s.\n", name);
405 kfree(sched); 411 return PTR_ERR(sched->thread);
406 return NULL;
407 } 412 }
408 413
409 return sched; 414 return 0;
410} 415}
411 416
412/** 417/**
413 * Destroy a gpu scheduler 418 * Destroy a gpu scheduler
414 * 419 *
415 * @sched The pointer to the scheduler 420 * @sched The pointer to the scheduler
416 *
417 * return 0 if succeed. -1 if failed.
418 */ 421 */
419int amd_sched_destroy(struct amd_gpu_scheduler *sched) 422void amd_sched_fini(struct amd_gpu_scheduler *sched)
420{ 423{
421 kthread_stop(sched->thread); 424 kthread_stop(sched->thread);
422 kfree(sched);
423 return 0;
424} 425}
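
The amd_sched_create()/amd_sched_destroy() pair is replaced by amd_sched_init()/amd_sched_fini(): the core no longer allocates the scheduler, so callers embed it in their own structure and supply a debug name instead of a ring id. A minimal before/after sketch of a caller, assuming a hypothetical per-ring structure — example_ring, the submission limit of 16 and the name string are illustrative only:

	struct example_ring {
		struct amd_gpu_scheduler sched;		/* embedded, not a pointer */
	};

	static int example_ring_init(struct example_ring *ring,
				     struct amd_sched_backend_ops *ops)
	{
		/* old: ring->sched = amd_sched_create(ops, ring_id, 16, priv);
		 *      if (!ring->sched) return -ENOMEM; */
		return amd_sched_init(&ring->sched, ops, 16, "example-ring");
	}

	static void example_ring_fini(struct example_ring *ring)
	{
		/* old: amd_sched_destroy(ring->sched); */
		amd_sched_fini(&ring->sched);
	}
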
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index 2af0e4d4d817..80b64dc22214 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -38,13 +38,15 @@ struct amd_sched_rq;
38*/ 38*/
39struct amd_sched_entity { 39struct amd_sched_entity {
40 struct list_head list; 40 struct list_head list;
41 struct amd_sched_rq *belongto_rq; 41 struct amd_sched_rq *rq;
42 atomic_t fence_seq; 42 struct amd_gpu_scheduler *sched;
43 /* the job_queue maintains the jobs submitted by clients */ 43
44 struct kfifo job_queue;
45 spinlock_t queue_lock; 44 spinlock_t queue_lock;
46 struct amd_gpu_scheduler *scheduler; 45 struct kfifo job_queue;
46
47 atomic_t fence_seq;
47 uint64_t fence_context; 48 uint64_t fence_context;
49
48 struct fence *dependency; 50 struct fence *dependency;
49 struct fence_cb cb; 51 struct fence_cb cb;
50}; 52};
@@ -62,13 +64,13 @@ struct amd_sched_rq {
62 64
63struct amd_sched_fence { 65struct amd_sched_fence {
64 struct fence base; 66 struct fence base;
65 struct amd_gpu_scheduler *scheduler; 67 struct fence_cb cb;
68 struct amd_gpu_scheduler *sched;
66 spinlock_t lock; 69 spinlock_t lock;
67 void *owner; 70 void *owner;
68}; 71};
69 72
70struct amd_sched_job { 73struct amd_sched_job {
71 struct fence_cb cb;
72 struct amd_gpu_scheduler *sched; 74 struct amd_gpu_scheduler *sched;
73 struct amd_sched_entity *s_entity; 75 struct amd_sched_entity *s_entity;
74 struct amd_sched_fence *s_fence; 76 struct amd_sched_fence *s_fence;
@@ -91,32 +93,29 @@ static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
91 * these functions should be implemented in driver side 93 * these functions should be implemented in driver side
92*/ 94*/
93struct amd_sched_backend_ops { 95struct amd_sched_backend_ops {
94 struct fence *(*dependency)(struct amd_sched_job *job); 96 struct fence *(*dependency)(struct amd_sched_job *sched_job);
95 struct fence *(*run_job)(struct amd_sched_job *job); 97 struct fence *(*run_job)(struct amd_sched_job *sched_job);
96 void (*process_job)(struct amd_sched_job *job);
97}; 98};
98 99
99/** 100/**
100 * One scheduler is implemented for each hardware ring 101 * One scheduler is implemented for each hardware ring
101*/ 102*/
102struct amd_gpu_scheduler { 103struct amd_gpu_scheduler {
103 struct task_struct *thread; 104 struct amd_sched_backend_ops *ops;
105 uint32_t hw_submission_limit;
106 const char *name;
104 struct amd_sched_rq sched_rq; 107 struct amd_sched_rq sched_rq;
105 struct amd_sched_rq kernel_rq; 108 struct amd_sched_rq kernel_rq;
106 atomic_t hw_rq_count;
107 struct amd_sched_backend_ops *ops;
108 uint32_t ring_id;
109 wait_queue_head_t wake_up_worker; 109 wait_queue_head_t wake_up_worker;
110 wait_queue_head_t job_scheduled; 110 wait_queue_head_t job_scheduled;
111 uint32_t hw_submission_limit; 111 atomic_t hw_rq_count;
112 char name[20]; 112 struct task_struct *thread;
113 void *priv;
114}; 113};
115 114
116struct amd_gpu_scheduler * 115int amd_sched_init(struct amd_gpu_scheduler *sched,
117amd_sched_create(struct amd_sched_backend_ops *ops, 116 struct amd_sched_backend_ops *ops,
118 uint32_t ring, uint32_t hw_submission, void *priv); 117 uint32_t hw_submission, const char *name);
119int amd_sched_destroy(struct amd_gpu_scheduler *sched); 118void amd_sched_fini(struct amd_gpu_scheduler *sched);
120 119
121int amd_sched_entity_init(struct amd_gpu_scheduler *sched, 120int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
122 struct amd_sched_entity *entity, 121 struct amd_sched_entity *entity,
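
With process_job gone and the completion callback moved from amd_sched_job into amd_sched_fence, a backend now implements only two hooks: dependency() and run_job(). Completion handling lives entirely in the scheduler, which attaches amd_sched_process_job() to whatever fence run_job() returns. A minimal sketch of a conforming backend, assuming a hypothetical example_hw_submit() that queues the job on hardware and returns its fence:

	static struct fence *example_dependency(struct amd_sched_job *sched_job)
	{
		return NULL;	/* nothing to wait for: job is always runnable */
	}

	static struct fence *example_run_job(struct amd_sched_job *sched_job)
	{
		/* Hand the job to the hardware; the scheduler adds its own
		 * callback to this fence and signals s_fence on completion. */
		return example_hw_submit(sched_job);
	}

	static struct amd_sched_backend_ops example_ops = {
		.dependency	= example_dependency,
		.run_job	= example_run_job,
	};
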
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
index e62c37920e11..d802638094f4 100644
--- a/drivers/gpu/drm/amd/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
@@ -36,7 +36,7 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity
36 if (fence == NULL) 36 if (fence == NULL)
37 return NULL; 37 return NULL;
38 fence->owner = owner; 38 fence->owner = owner;
39 fence->scheduler = s_entity->scheduler; 39 fence->sched = s_entity->sched;
40 spin_lock_init(&fence->lock); 40 spin_lock_init(&fence->lock);
41 41
42 seq = atomic_inc_return(&s_entity->fence_seq); 42 seq = atomic_inc_return(&s_entity->fence_seq);
@@ -63,7 +63,7 @@ static const char *amd_sched_fence_get_driver_name(struct fence *fence)
63static const char *amd_sched_fence_get_timeline_name(struct fence *f) 63static const char *amd_sched_fence_get_timeline_name(struct fence *f)
64{ 64{
65 struct amd_sched_fence *fence = to_amd_sched_fence(f); 65 struct amd_sched_fence *fence = to_amd_sched_fence(f);
66 return (const char *)fence->scheduler->name; 66 return (const char *)fence->sched->name;
67} 67}
68 68
69static bool amd_sched_fence_enable_signaling(struct fence *f) 69static bool amd_sched_fence_enable_signaling(struct fence *f)
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index e23df5fd3836..bf27a07dbce3 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -53,8 +53,8 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
53 struct drm_dp_mst_port *port, 53 struct drm_dp_mst_port *port,
54 int offset, int size, u8 *bytes); 54 int offset, int size, u8 *bytes);
55 55
56static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, 56static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
57 struct drm_dp_mst_branch *mstb); 57 struct drm_dp_mst_branch *mstb);
58static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, 58static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
59 struct drm_dp_mst_branch *mstb, 59 struct drm_dp_mst_branch *mstb,
60 struct drm_dp_mst_port *port); 60 struct drm_dp_mst_port *port);
@@ -804,8 +804,6 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
804 struct drm_dp_mst_port *port, *tmp; 804 struct drm_dp_mst_port *port, *tmp;
805 bool wake_tx = false; 805 bool wake_tx = false;
806 806
807 cancel_work_sync(&mstb->mgr->work);
808
809 /* 807 /*
810 * destroy all ports - don't need lock 808 * destroy all ports - don't need lock
811 * as there are no more references to the mst branch 809 * as there are no more references to the mst branch
@@ -863,29 +861,33 @@ static void drm_dp_destroy_port(struct kref *kref)
863{ 861{
864 struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref); 862 struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
865 struct drm_dp_mst_topology_mgr *mgr = port->mgr; 863 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
864
866 if (!port->input) { 865 if (!port->input) {
867 port->vcpi.num_slots = 0; 866 port->vcpi.num_slots = 0;
868 867
869 kfree(port->cached_edid); 868 kfree(port->cached_edid);
870 869
871 /* we can't destroy the connector here, as 870 /*
872 we might be holding the mode_config.mutex 871 * The only time we don't have a connector
873 from an EDID retrieval */ 872 * on an output port is if the connector init
873 * fails.
874 */
874 if (port->connector) { 875 if (port->connector) {
876 /* we can't destroy the connector here, as
877 * we might be holding the mode_config.mutex
878 * from an EDID retrieval */
879
875 mutex_lock(&mgr->destroy_connector_lock); 880 mutex_lock(&mgr->destroy_connector_lock);
876 list_add(&port->next, &mgr->destroy_connector_list); 881 list_add(&port->next, &mgr->destroy_connector_list);
877 mutex_unlock(&mgr->destroy_connector_lock); 882 mutex_unlock(&mgr->destroy_connector_lock);
878 schedule_work(&mgr->destroy_connector_work); 883 schedule_work(&mgr->destroy_connector_work);
879 return; 884 return;
880 } 885 }
 886 /* no need to clean up the vcpi,
 887 * as with no connector we never set up a vcpi */
881 drm_dp_port_teardown_pdt(port, port->pdt); 888 drm_dp_port_teardown_pdt(port, port->pdt);
882
883 if (!port->input && port->vcpi.vcpi > 0)
884 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
885 } 889 }
886 kfree(port); 890 kfree(port);
887
888 (*mgr->cbs->hotplug)(mgr);
889} 891}
890 892
891static void drm_dp_put_port(struct drm_dp_mst_port *port) 893static void drm_dp_put_port(struct drm_dp_mst_port *port)
@@ -1027,8 +1029,8 @@ static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb,
1027 } 1029 }
1028} 1030}
1029 1031
1030static void build_mst_prop_path(struct drm_dp_mst_port *port, 1032static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
1031 struct drm_dp_mst_branch *mstb, 1033 int pnum,
1032 char *proppath, 1034 char *proppath,
1033 size_t proppath_size) 1035 size_t proppath_size)
1034{ 1036{
@@ -1041,7 +1043,7 @@ static void build_mst_prop_path(struct drm_dp_mst_port *port,
1041 snprintf(temp, sizeof(temp), "-%d", port_num); 1043 snprintf(temp, sizeof(temp), "-%d", port_num);
1042 strlcat(proppath, temp, proppath_size); 1044 strlcat(proppath, temp, proppath_size);
1043 } 1045 }
1044 snprintf(temp, sizeof(temp), "-%d", port->port_num); 1046 snprintf(temp, sizeof(temp), "-%d", pnum);
1045 strlcat(proppath, temp, proppath_size); 1047 strlcat(proppath, temp, proppath_size);
1046} 1048}
1047 1049
@@ -1105,22 +1107,32 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
1105 drm_dp_port_teardown_pdt(port, old_pdt); 1107 drm_dp_port_teardown_pdt(port, old_pdt);
1106 1108
1107 ret = drm_dp_port_setup_pdt(port); 1109 ret = drm_dp_port_setup_pdt(port);
1108 if (ret == true) { 1110 if (ret == true)
1109 drm_dp_send_link_address(mstb->mgr, port->mstb); 1111 drm_dp_send_link_address(mstb->mgr, port->mstb);
1110 port->mstb->link_address_sent = true;
1111 }
1112 } 1112 }
1113 1113
1114 if (created && !port->input) { 1114 if (created && !port->input) {
1115 char proppath[255]; 1115 char proppath[255];
1116 build_mst_prop_path(port, mstb, proppath, sizeof(proppath));
1117 port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
1118 1116
1119 if (port->port_num >= 8) { 1117 build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
1118 port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
1119 if (!port->connector) {
1120 /* remove it from the port list */
1121 mutex_lock(&mstb->mgr->lock);
1122 list_del(&port->next);
1123 mutex_unlock(&mstb->mgr->lock);
1124 /* drop port list reference */
1125 drm_dp_put_port(port);
1126 goto out;
1127 }
1128 if (port->port_num >= DP_MST_LOGICAL_PORT_0) {
1120 port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc); 1129 port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
1130 drm_mode_connector_set_tile_property(port->connector);
1121 } 1131 }
1132 (*mstb->mgr->cbs->register_connector)(port->connector);
1122 } 1133 }
1123 1134
1135out:
1124 /* put reference to this port */ 1136 /* put reference to this port */
1125 drm_dp_put_port(port); 1137 drm_dp_put_port(port);
1126} 1138}
@@ -1202,10 +1214,9 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m
1202{ 1214{
1203 struct drm_dp_mst_port *port; 1215 struct drm_dp_mst_port *port;
1204 struct drm_dp_mst_branch *mstb_child; 1216 struct drm_dp_mst_branch *mstb_child;
1205 if (!mstb->link_address_sent) { 1217 if (!mstb->link_address_sent)
1206 drm_dp_send_link_address(mgr, mstb); 1218 drm_dp_send_link_address(mgr, mstb);
1207 mstb->link_address_sent = true; 1219
1208 }
1209 list_for_each_entry(port, &mstb->ports, next) { 1220 list_for_each_entry(port, &mstb->ports, next) {
1210 if (port->input) 1221 if (port->input)
1211 continue; 1222 continue;
@@ -1458,8 +1469,8 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
1458 mutex_unlock(&mgr->qlock); 1469 mutex_unlock(&mgr->qlock);
1459} 1470}
1460 1471
1461static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, 1472static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
1462 struct drm_dp_mst_branch *mstb) 1473 struct drm_dp_mst_branch *mstb)
1463{ 1474{
1464 int len; 1475 int len;
1465 struct drm_dp_sideband_msg_tx *txmsg; 1476 struct drm_dp_sideband_msg_tx *txmsg;
@@ -1467,11 +1478,12 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
1467 1478
1468 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 1479 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
1469 if (!txmsg) 1480 if (!txmsg)
1470 return -ENOMEM; 1481 return;
1471 1482
1472 txmsg->dst = mstb; 1483 txmsg->dst = mstb;
1473 len = build_link_address(txmsg); 1484 len = build_link_address(txmsg);
1474 1485
1486 mstb->link_address_sent = true;
1475 drm_dp_queue_down_tx(mgr, txmsg); 1487 drm_dp_queue_down_tx(mgr, txmsg);
1476 1488
1477 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); 1489 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
@@ -1499,11 +1511,12 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
1499 } 1511 }
1500 (*mgr->cbs->hotplug)(mgr); 1512 (*mgr->cbs->hotplug)(mgr);
1501 } 1513 }
1502 } else 1514 } else {
1515 mstb->link_address_sent = false;
1503 DRM_DEBUG_KMS("link address failed %d\n", ret); 1516 DRM_DEBUG_KMS("link address failed %d\n", ret);
1517 }
1504 1518
1505 kfree(txmsg); 1519 kfree(txmsg);
1506 return 0;
1507} 1520}
1508 1521
1509static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, 1522static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
@@ -1978,6 +1991,8 @@ void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
1978 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 1991 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
1979 DP_MST_EN | DP_UPSTREAM_IS_SRC); 1992 DP_MST_EN | DP_UPSTREAM_IS_SRC);
1980 mutex_unlock(&mgr->lock); 1993 mutex_unlock(&mgr->lock);
1994 flush_work(&mgr->work);
1995 flush_work(&mgr->destroy_connector_work);
1981} 1996}
1982EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend); 1997EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
1983 1998
@@ -2263,10 +2278,10 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_
2263 2278
2264 if (port->cached_edid) 2279 if (port->cached_edid)
2265 edid = drm_edid_duplicate(port->cached_edid); 2280 edid = drm_edid_duplicate(port->cached_edid);
2266 else 2281 else {
2267 edid = drm_get_edid(connector, &port->aux.ddc); 2282 edid = drm_get_edid(connector, &port->aux.ddc);
2268 2283 drm_mode_connector_set_tile_property(connector);
2269 drm_mode_connector_set_tile_property(connector); 2284 }
2270 drm_dp_put_port(port); 2285 drm_dp_put_port(port);
2271 return edid; 2286 return edid;
2272} 2287}
@@ -2671,7 +2686,7 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
2671{ 2686{
2672 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work); 2687 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
2673 struct drm_dp_mst_port *port; 2688 struct drm_dp_mst_port *port;
2674 2689 bool send_hotplug = false;
2675 /* 2690 /*
2676 * Not a regular list traverse as we have to drop the destroy 2691 * Not a regular list traverse as we have to drop the destroy
2677 * connector lock before destroying the connector, to avoid AB->BA 2692 * connector lock before destroying the connector, to avoid AB->BA
@@ -2694,7 +2709,10 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
2694 if (!port->input && port->vcpi.vcpi > 0) 2709 if (!port->input && port->vcpi.vcpi > 0)
2695 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); 2710 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
2696 kfree(port); 2711 kfree(port);
2712 send_hotplug = true;
2697 } 2713 }
2714 if (send_hotplug)
2715 (*mgr->cbs->hotplug)(mgr);
2698} 2716}
2699 2717
2700/** 2718/**
@@ -2747,6 +2765,7 @@ EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
2747 */ 2765 */
2748void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr) 2766void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
2749{ 2767{
2768 flush_work(&mgr->work);
2750 flush_work(&mgr->destroy_connector_work); 2769 flush_work(&mgr->destroy_connector_work);
2751 mutex_lock(&mgr->payload_lock); 2770 mutex_lock(&mgr->payload_lock);
2752 kfree(mgr->payloads); 2771 kfree(mgr->payloads);
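
drm_dp_destroy_port() no longer destroys connectors or fires a hotplug event itself; output ports are queued on destroy_connector_list, and drm_dp_destroy_connector_work() now sends a single hotplug notification once the whole batch is gone. The shape of that deferred-destroy pattern as a standalone sketch — example_item, example_destroy_* and example_send_hotplug() are hypothetical stand-ins:

	struct example_item {
		struct list_head next;
	};

	static LIST_HEAD(example_destroy_list);
	static DEFINE_MUTEX(example_destroy_lock);

	static void example_destroy_worker(struct work_struct *work)
	{
		struct example_item *item;
		bool send_hotplug = false;

		for (;;) {
			mutex_lock(&example_destroy_lock);
			item = list_first_entry_or_null(&example_destroy_list,
							struct example_item, next);
			if (item)
				list_del(&item->next);
			mutex_unlock(&example_destroy_lock);
			if (!item)
				break;

			kfree(item);		/* destroy with the lock dropped */
			send_hotplug = true;
		}
		if (send_hotplug)
			example_send_hotplug();	/* one event for the whole batch */
	}
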
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 418d299f3b12..ca08c472311b 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -345,7 +345,11 @@ static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper)
345 struct drm_crtc *crtc = mode_set->crtc; 345 struct drm_crtc *crtc = mode_set->crtc;
346 int ret; 346 int ret;
347 347
348 if (crtc->funcs->cursor_set) { 348 if (crtc->funcs->cursor_set2) {
349 ret = crtc->funcs->cursor_set2(crtc, NULL, 0, 0, 0, 0, 0);
350 if (ret)
351 error = true;
352 } else if (crtc->funcs->cursor_set) {
349 ret = crtc->funcs->cursor_set(crtc, NULL, 0, 0, 0); 353 ret = crtc->funcs->cursor_set(crtc, NULL, 0, 0, 0);
350 if (ret) 354 if (ret)
351 error = true; 355 error = true;
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 9a860ca1e9d7..d93e7378c077 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -520,7 +520,8 @@ EXPORT_SYMBOL(drm_ioctl_permit);
520 520
521/** Ioctl table */ 521/** Ioctl table */
522static const struct drm_ioctl_desc drm_ioctls[] = { 522static const struct drm_ioctl_desc drm_ioctls[] = {
523 DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED|DRM_RENDER_ALLOW), 523 DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version,
524 DRM_UNLOCKED|DRM_RENDER_ALLOW|DRM_CONTROL_ALLOW),
524 DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0), 525 DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
525 DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0), 526 DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
526 DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY), 527 DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index d734780b31c0..a18164f2f6d2 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -94,7 +94,18 @@ static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector)
94} 94}
95 95
96#define DRM_OUTPUT_POLL_PERIOD (10*HZ) 96#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
97static void __drm_kms_helper_poll_enable(struct drm_device *dev) 97/**
98 * drm_kms_helper_poll_enable_locked - re-enable output polling.
99 * @dev: drm_device
100 *
101 * This function re-enables the output polling work without
102 * locking the mode_config mutex.
103 *
 104 * This is like drm_kms_helper_poll_enable(), but it is to be
 105 * called from a context where the mode_config mutex is already
 106 * held.
107 */
108void drm_kms_helper_poll_enable_locked(struct drm_device *dev)
98{ 109{
99 bool poll = false; 110 bool poll = false;
100 struct drm_connector *connector; 111 struct drm_connector *connector;
@@ -113,6 +124,8 @@ static void __drm_kms_helper_poll_enable(struct drm_device *dev)
113 if (poll) 124 if (poll)
114 schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD); 125 schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
115} 126}
127EXPORT_SYMBOL(drm_kms_helper_poll_enable_locked);
128
116 129
117static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connector *connector, 130static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connector *connector,
118 uint32_t maxX, uint32_t maxY, bool merge_type_bits) 131 uint32_t maxX, uint32_t maxY, bool merge_type_bits)
@@ -174,7 +187,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
174 187
175 /* Re-enable polling in case the global poll config changed. */ 188 /* Re-enable polling in case the global poll config changed. */
176 if (drm_kms_helper_poll != dev->mode_config.poll_running) 189 if (drm_kms_helper_poll != dev->mode_config.poll_running)
177 __drm_kms_helper_poll_enable(dev); 190 drm_kms_helper_poll_enable_locked(dev);
178 191
179 dev->mode_config.poll_running = drm_kms_helper_poll; 192 dev->mode_config.poll_running = drm_kms_helper_poll;
180 193
@@ -428,7 +441,7 @@ EXPORT_SYMBOL(drm_kms_helper_poll_disable);
428void drm_kms_helper_poll_enable(struct drm_device *dev) 441void drm_kms_helper_poll_enable(struct drm_device *dev)
429{ 442{
430 mutex_lock(&dev->mode_config.mutex); 443 mutex_lock(&dev->mode_config.mutex);
431 __drm_kms_helper_poll_enable(dev); 444 drm_kms_helper_poll_enable_locked(dev);
432 mutex_unlock(&dev->mode_config.mutex); 445 mutex_unlock(&dev->mode_config.mutex);
433} 446}
434EXPORT_SYMBOL(drm_kms_helper_poll_enable); 447EXPORT_SYMBOL(drm_kms_helper_poll_enable);
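
Renaming the static __drm_kms_helper_poll_enable() to the exported drm_kms_helper_poll_enable_locked() lets drivers re-arm output polling while they already hold the mode_config mutex, with drm_kms_helper_poll_enable() remaining the wrapper for unlocked contexts (as the hunk above shows). A hypothetical locked caller — example_reprobe_locked() is illustrative only:

	static void example_reprobe_locked(struct drm_device *dev)
	{
		/* Caller already holds the mode_config mutex, so calling
		 * the unlocked drm_kms_helper_poll_enable() would deadlock. */
		WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
		drm_kms_helper_poll_enable_locked(dev);
	}
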
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index cbdb78ef3bac..e6cbaca821a4 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -37,7 +37,6 @@
37 * DECON stands for Display and Enhancement controller. 37 * DECON stands for Display and Enhancement controller.
38 */ 38 */
39 39
40#define DECON_DEFAULT_FRAMERATE 60
41#define MIN_FB_WIDTH_FOR_16WORD_BURST 128 40#define MIN_FB_WIDTH_FOR_16WORD_BURST 128
42 41
43#define WINDOWS_NR 2 42#define WINDOWS_NR 2
@@ -165,16 +164,6 @@ static u32 decon_calc_clkdiv(struct decon_context *ctx,
165 return (clkdiv < 0x100) ? clkdiv : 0xff; 164 return (clkdiv < 0x100) ? clkdiv : 0xff;
166} 165}
167 166
168static bool decon_mode_fixup(struct exynos_drm_crtc *crtc,
169 const struct drm_display_mode *mode,
170 struct drm_display_mode *adjusted_mode)
171{
172 if (adjusted_mode->vrefresh == 0)
173 adjusted_mode->vrefresh = DECON_DEFAULT_FRAMERATE;
174
175 return true;
176}
177
178static void decon_commit(struct exynos_drm_crtc *crtc) 167static void decon_commit(struct exynos_drm_crtc *crtc)
179{ 168{
180 struct decon_context *ctx = crtc->ctx; 169 struct decon_context *ctx = crtc->ctx;
@@ -637,7 +626,6 @@ static void decon_disable(struct exynos_drm_crtc *crtc)
637static const struct exynos_drm_crtc_ops decon_crtc_ops = { 626static const struct exynos_drm_crtc_ops decon_crtc_ops = {
638 .enable = decon_enable, 627 .enable = decon_enable,
639 .disable = decon_disable, 628 .disable = decon_disable,
640 .mode_fixup = decon_mode_fixup,
641 .commit = decon_commit, 629 .commit = decon_commit,
642 .enable_vblank = decon_enable_vblank, 630 .enable_vblank = decon_enable_vblank,
643 .disable_vblank = decon_disable_vblank, 631 .disable_vblank = decon_disable_vblank,
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
index d66ade0efac8..124fb9a56f02 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.c
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.c
@@ -1383,28 +1383,6 @@ static int exynos_dp_remove(struct platform_device *pdev)
1383 return 0; 1383 return 0;
1384} 1384}
1385 1385
1386#ifdef CONFIG_PM_SLEEP
1387static int exynos_dp_suspend(struct device *dev)
1388{
1389 struct exynos_dp_device *dp = dev_get_drvdata(dev);
1390
1391 exynos_dp_disable(&dp->encoder);
1392 return 0;
1393}
1394
1395static int exynos_dp_resume(struct device *dev)
1396{
1397 struct exynos_dp_device *dp = dev_get_drvdata(dev);
1398
1399 exynos_dp_enable(&dp->encoder);
1400 return 0;
1401}
1402#endif
1403
1404static const struct dev_pm_ops exynos_dp_pm_ops = {
1405 SET_SYSTEM_SLEEP_PM_OPS(exynos_dp_suspend, exynos_dp_resume)
1406};
1407
1408static const struct of_device_id exynos_dp_match[] = { 1386static const struct of_device_id exynos_dp_match[] = {
1409 { .compatible = "samsung,exynos5-dp" }, 1387 { .compatible = "samsung,exynos5-dp" },
1410 {}, 1388 {},
@@ -1417,7 +1395,6 @@ struct platform_driver dp_driver = {
1417 .driver = { 1395 .driver = {
1418 .name = "exynos-dp", 1396 .name = "exynos-dp",
1419 .owner = THIS_MODULE, 1397 .owner = THIS_MODULE,
1420 .pm = &exynos_dp_pm_ops,
1421 .of_match_table = exynos_dp_match, 1398 .of_match_table = exynos_dp_match,
1422 }, 1399 },
1423}; 1400};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
index c68a6a2a9b57..7f55ba6771c6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
@@ -28,7 +28,6 @@ int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv)
28 28
29 return 0; 29 return 0;
30} 30}
31EXPORT_SYMBOL_GPL(exynos_drm_subdrv_register);
32 31
33int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv) 32int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv)
34{ 33{
@@ -39,7 +38,6 @@ int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv)
39 38
40 return 0; 39 return 0;
41} 40}
42EXPORT_SYMBOL_GPL(exynos_drm_subdrv_unregister);
43 41
44int exynos_drm_device_subdrv_probe(struct drm_device *dev) 42int exynos_drm_device_subdrv_probe(struct drm_device *dev)
45{ 43{
@@ -69,7 +67,6 @@ int exynos_drm_device_subdrv_probe(struct drm_device *dev)
69 67
70 return 0; 68 return 0;
71} 69}
72EXPORT_SYMBOL_GPL(exynos_drm_device_subdrv_probe);
73 70
74int exynos_drm_device_subdrv_remove(struct drm_device *dev) 71int exynos_drm_device_subdrv_remove(struct drm_device *dev)
75{ 72{
@@ -87,7 +84,6 @@ int exynos_drm_device_subdrv_remove(struct drm_device *dev)
87 84
88 return 0; 85 return 0;
89} 86}
90EXPORT_SYMBOL_GPL(exynos_drm_device_subdrv_remove);
91 87
92int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file) 88int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file)
93{ 89{
@@ -111,7 +107,6 @@ err:
111 } 107 }
112 return ret; 108 return ret;
113} 109}
114EXPORT_SYMBOL_GPL(exynos_drm_subdrv_open);
115 110
116void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file) 111void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file)
117{ 112{
@@ -122,4 +117,3 @@ void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file)
122 subdrv->close(dev, subdrv->dev, file); 117 subdrv->close(dev, subdrv->dev, file);
123 } 118 }
124} 119}
125EXPORT_SYMBOL_GPL(exynos_drm_subdrv_close);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 0872aa2f450f..ed28823d3b35 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -41,20 +41,6 @@ static void exynos_drm_crtc_disable(struct drm_crtc *crtc)
41 exynos_crtc->ops->disable(exynos_crtc); 41 exynos_crtc->ops->disable(exynos_crtc);
42} 42}
43 43
44static bool
45exynos_drm_crtc_mode_fixup(struct drm_crtc *crtc,
46 const struct drm_display_mode *mode,
47 struct drm_display_mode *adjusted_mode)
48{
49 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
50
51 if (exynos_crtc->ops->mode_fixup)
52 return exynos_crtc->ops->mode_fixup(exynos_crtc, mode,
53 adjusted_mode);
54
55 return true;
56}
57
58static void 44static void
59exynos_drm_crtc_mode_set_nofb(struct drm_crtc *crtc) 45exynos_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
60{ 46{
@@ -99,7 +85,6 @@ static void exynos_crtc_atomic_flush(struct drm_crtc *crtc,
99static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = { 85static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
100 .enable = exynos_drm_crtc_enable, 86 .enable = exynos_drm_crtc_enable,
101 .disable = exynos_drm_crtc_disable, 87 .disable = exynos_drm_crtc_disable,
102 .mode_fixup = exynos_drm_crtc_mode_fixup,
103 .mode_set_nofb = exynos_drm_crtc_mode_set_nofb, 88 .mode_set_nofb = exynos_drm_crtc_mode_set_nofb,
104 .atomic_begin = exynos_crtc_atomic_begin, 89 .atomic_begin = exynos_crtc_atomic_begin,
105 .atomic_flush = exynos_crtc_atomic_flush, 90 .atomic_flush = exynos_crtc_atomic_flush,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 831d2e4cacf9..ae9e6b2d3758 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -304,6 +304,7 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
304 return 0; 304 return 0;
305} 305}
306 306
307#ifdef CONFIG_PM_SLEEP
307static int exynos_drm_suspend(struct drm_device *dev, pm_message_t state) 308static int exynos_drm_suspend(struct drm_device *dev, pm_message_t state)
308{ 309{
309 struct drm_connector *connector; 310 struct drm_connector *connector;
@@ -340,6 +341,7 @@ static int exynos_drm_resume(struct drm_device *dev)
340 341
341 return 0; 342 return 0;
342} 343}
344#endif
343 345
344static int exynos_drm_open(struct drm_device *dev, struct drm_file *file) 346static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
345{ 347{
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index b7ba21dfb696..6c717ba672db 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -82,7 +82,6 @@ struct exynos_drm_plane {
82 * 82 *
83 * @enable: enable the device 83 * @enable: enable the device
84 * @disable: disable the device 84 * @disable: disable the device
85 * @mode_fixup: fix mode data before applying it
86 * @commit: set current hw specific display mode to hw. 85 * @commit: set current hw specific display mode to hw.
87 * @enable_vblank: specific driver callback for enabling vblank interrupt. 86 * @enable_vblank: specific driver callback for enabling vblank interrupt.
88 * @disable_vblank: specific driver callback for disabling vblank interrupt. 87 * @disable_vblank: specific driver callback for disabling vblank interrupt.
@@ -103,9 +102,6 @@ struct exynos_drm_crtc;
103struct exynos_drm_crtc_ops { 102struct exynos_drm_crtc_ops {
104 void (*enable)(struct exynos_drm_crtc *crtc); 103 void (*enable)(struct exynos_drm_crtc *crtc);
105 void (*disable)(struct exynos_drm_crtc *crtc); 104 void (*disable)(struct exynos_drm_crtc *crtc);
106 bool (*mode_fixup)(struct exynos_drm_crtc *crtc,
107 const struct drm_display_mode *mode,
108 struct drm_display_mode *adjusted_mode);
109 void (*commit)(struct exynos_drm_crtc *crtc); 105 void (*commit)(struct exynos_drm_crtc *crtc);
110 int (*enable_vblank)(struct exynos_drm_crtc *crtc); 106 int (*enable_vblank)(struct exynos_drm_crtc *crtc);
111 void (*disable_vblank)(struct exynos_drm_crtc *crtc); 107 void (*disable_vblank)(struct exynos_drm_crtc *crtc);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 2a652359af64..dd3a5e6d58c8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1206,23 +1206,6 @@ static struct exynos_drm_ipp_ops fimc_dst_ops = {
1206 .set_addr = fimc_dst_set_addr, 1206 .set_addr = fimc_dst_set_addr,
1207}; 1207};
1208 1208
1209static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable)
1210{
1211 DRM_DEBUG_KMS("enable[%d]\n", enable);
1212
1213 if (enable) {
1214 clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]);
1215 clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]);
1216 ctx->suspended = false;
1217 } else {
1218 clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]);
1219 clk_disable_unprepare(ctx->clocks[FIMC_CLK_WB_A]);
1220 ctx->suspended = true;
1221 }
1222
1223 return 0;
1224}
1225
1226static irqreturn_t fimc_irq_handler(int irq, void *dev_id) 1209static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
1227{ 1210{
1228 struct fimc_context *ctx = dev_id; 1211 struct fimc_context *ctx = dev_id;
@@ -1780,6 +1763,24 @@ static int fimc_remove(struct platform_device *pdev)
1780 return 0; 1763 return 0;
1781} 1764}
1782 1765
1766#ifdef CONFIG_PM
1767static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable)
1768{
1769 DRM_DEBUG_KMS("enable[%d]\n", enable);
1770
1771 if (enable) {
1772 clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]);
1773 clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]);
1774 ctx->suspended = false;
1775 } else {
1776 clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]);
1777 clk_disable_unprepare(ctx->clocks[FIMC_CLK_WB_A]);
1778 ctx->suspended = true;
1779 }
1780
1781 return 0;
1782}
1783
1783#ifdef CONFIG_PM_SLEEP 1784#ifdef CONFIG_PM_SLEEP
1784static int fimc_suspend(struct device *dev) 1785static int fimc_suspend(struct device *dev)
1785{ 1786{
@@ -1806,7 +1807,6 @@ static int fimc_resume(struct device *dev)
1806} 1807}
1807#endif 1808#endif
1808 1809
1809#ifdef CONFIG_PM
1810static int fimc_runtime_suspend(struct device *dev) 1810static int fimc_runtime_suspend(struct device *dev)
1811{ 1811{
1812 struct fimc_context *ctx = get_fimc_context(dev); 1812 struct fimc_context *ctx = get_fimc_context(dev);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 750a9e6b9e8d..3d1aba67758b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -41,7 +41,6 @@
41 * CPU Interface. 41 * CPU Interface.
42 */ 42 */
43 43
44#define FIMD_DEFAULT_FRAMERATE 60
45#define MIN_FB_WIDTH_FOR_16WORD_BURST 128 44#define MIN_FB_WIDTH_FOR_16WORD_BURST 128
46 45
47/* position control register for hardware window 0, 2 ~ 4.*/ 46/* position control register for hardware window 0, 2 ~ 4.*/
@@ -377,16 +376,6 @@ static u32 fimd_calc_clkdiv(struct fimd_context *ctx,
377 return (clkdiv < 0x100) ? clkdiv : 0xff; 376 return (clkdiv < 0x100) ? clkdiv : 0xff;
378} 377}
379 378
380static bool fimd_mode_fixup(struct exynos_drm_crtc *crtc,
381 const struct drm_display_mode *mode,
382 struct drm_display_mode *adjusted_mode)
383{
384 if (adjusted_mode->vrefresh == 0)
385 adjusted_mode->vrefresh = FIMD_DEFAULT_FRAMERATE;
386
387 return true;
388}
389
390static void fimd_commit(struct exynos_drm_crtc *crtc) 379static void fimd_commit(struct exynos_drm_crtc *crtc)
391{ 380{
392 struct fimd_context *ctx = crtc->ctx; 381 struct fimd_context *ctx = crtc->ctx;
@@ -882,13 +871,12 @@ static void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable)
882 return; 871 return;
883 872
884 val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE; 873 val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE;
885 writel(DP_MIE_CLK_DP_ENABLE, ctx->regs + DP_MIE_CLKCON); 874 writel(val, ctx->regs + DP_MIE_CLKCON);
886} 875}
887 876
888static const struct exynos_drm_crtc_ops fimd_crtc_ops = { 877static const struct exynos_drm_crtc_ops fimd_crtc_ops = {
889 .enable = fimd_enable, 878 .enable = fimd_enable,
890 .disable = fimd_disable, 879 .disable = fimd_disable,
891 .mode_fixup = fimd_mode_fixup,
892 .commit = fimd_commit, 880 .commit = fimd_commit,
893 .enable_vblank = fimd_enable_vblank, 881 .enable_vblank = fimd_enable_vblank,
894 .disable_vblank = fimd_disable_vblank, 882 .disable_vblank = fimd_disable_vblank,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 3734c34aed16..c17efdb238a6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -1059,7 +1059,6 @@ int exynos_g2d_get_ver_ioctl(struct drm_device *drm_dev, void *data,
1059 1059
1060 return 0; 1060 return 0;
1061} 1061}
1062EXPORT_SYMBOL_GPL(exynos_g2d_get_ver_ioctl);
1063 1062
1064int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data, 1063int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
1065 struct drm_file *file) 1064 struct drm_file *file)
@@ -1230,7 +1229,6 @@ err:
1230 g2d_put_cmdlist(g2d, node); 1229 g2d_put_cmdlist(g2d, node);
1231 return ret; 1230 return ret;
1232} 1231}
1233EXPORT_SYMBOL_GPL(exynos_g2d_set_cmdlist_ioctl);
1234 1232
1235int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data, 1233int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
1236 struct drm_file *file) 1234 struct drm_file *file)
@@ -1293,7 +1291,6 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
1293out: 1291out:
1294 return 0; 1292 return 0;
1295} 1293}
1296EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl);
1297 1294
1298static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev) 1295static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
1299{ 1296{
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index f12fbc36b120..407afedb6003 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -56,39 +56,35 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem_obj *obj)
56 nr_pages = obj->size >> PAGE_SHIFT; 56 nr_pages = obj->size >> PAGE_SHIFT;
57 57
58 if (!is_drm_iommu_supported(dev)) { 58 if (!is_drm_iommu_supported(dev)) {
59 dma_addr_t start_addr;
60 unsigned int i = 0;
61
62 obj->pages = drm_calloc_large(nr_pages, sizeof(struct page *)); 59 obj->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
63 if (!obj->pages) { 60 if (!obj->pages) {
64 DRM_ERROR("failed to allocate pages.\n"); 61 DRM_ERROR("failed to allocate pages.\n");
65 return -ENOMEM; 62 return -ENOMEM;
66 } 63 }
64 }
67 65
68 obj->cookie = dma_alloc_attrs(dev->dev, 66 obj->cookie = dma_alloc_attrs(dev->dev, obj->size, &obj->dma_addr,
69 obj->size, 67 GFP_KERNEL, &obj->dma_attrs);
70 &obj->dma_addr, GFP_KERNEL, 68 if (!obj->cookie) {
71 &obj->dma_attrs); 69 DRM_ERROR("failed to allocate buffer.\n");
72 if (!obj->cookie) { 70 if (obj->pages)
73 DRM_ERROR("failed to allocate buffer.\n");
74 drm_free_large(obj->pages); 71 drm_free_large(obj->pages);
75 return -ENOMEM; 72 return -ENOMEM;
76 } 73 }
74
75 if (obj->pages) {
76 dma_addr_t start_addr;
77 unsigned int i = 0;
77 78
78 start_addr = obj->dma_addr; 79 start_addr = obj->dma_addr;
79 while (i < nr_pages) { 80 while (i < nr_pages) {
80 obj->pages[i] = phys_to_page(start_addr); 81 obj->pages[i] = pfn_to_page(dma_to_pfn(dev->dev,
82 start_addr));
81 start_addr += PAGE_SIZE; 83 start_addr += PAGE_SIZE;
82 i++; 84 i++;
83 } 85 }
84 } else { 86 } else {
85 obj->pages = dma_alloc_attrs(dev->dev, obj->size, 87 obj->pages = obj->cookie;
86 &obj->dma_addr, GFP_KERNEL,
87 &obj->dma_attrs);
88 if (!obj->pages) {
89 DRM_ERROR("failed to allocate buffer.\n");
90 return -ENOMEM;
91 }
92 } 88 }
93 89
94 DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n", 90 DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
@@ -110,15 +106,11 @@ static void exynos_drm_free_buf(struct exynos_drm_gem_obj *obj)
110 DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n", 106 DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
111 (unsigned long)obj->dma_addr, obj->size); 107 (unsigned long)obj->dma_addr, obj->size);
112 108
113 if (!is_drm_iommu_supported(dev)) { 109 dma_free_attrs(dev->dev, obj->size, obj->cookie,
114 dma_free_attrs(dev->dev, obj->size, obj->cookie, 110 (dma_addr_t)obj->dma_addr, &obj->dma_attrs);
115 (dma_addr_t)obj->dma_addr, &obj->dma_attrs);
116 drm_free_large(obj->pages);
117 } else
118 dma_free_attrs(dev->dev, obj->size, obj->pages,
119 (dma_addr_t)obj->dma_addr, &obj->dma_attrs);
120 111
121 obj->dma_addr = (dma_addr_t)NULL; 112 if (!is_drm_iommu_supported(dev))
113 drm_free_large(obj->pages);
122} 114}
123 115
124static int exynos_drm_gem_handle_create(struct drm_gem_object *obj, 116static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
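
The allocation rework above funnels both the IOMMU and the contiguous case through one dma_alloc_attrs() call, keeping the returned cookie as the value later passed to dma_free_attrs(); only the contiguous case still needs a separately allocated struct page array, which it now derives from the DMA address with dma_to_pfn() (an ARM dma-mapping helper) instead of phys_to_page(). A condensed sketch of that path, with error handling elided and following the hunk above:

	obj->cookie = dma_alloc_attrs(dev->dev, obj->size, &obj->dma_addr,
				      GFP_KERNEL, &obj->dma_attrs);

	if (obj->pages) {		/* !IOMMU: physically contiguous buffer */
		dma_addr_t addr = obj->dma_addr;
		unsigned int i;

		for (i = 0; i < nr_pages; i++, addr += PAGE_SIZE)
			obj->pages[i] = pfn_to_page(dma_to_pfn(dev->dev, addr));
	} else {			/* IOMMU: the cookie already is the page array */
		obj->pages = obj->cookie;
	}
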
@@ -156,18 +148,14 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
156 * once dmabuf's refcount becomes 0. 148 * once dmabuf's refcount becomes 0.
157 */ 149 */
158 if (obj->import_attach) 150 if (obj->import_attach)
159 goto out; 151 drm_prime_gem_destroy(obj, exynos_gem_obj->sgt);
160 152 else
161 exynos_drm_free_buf(exynos_gem_obj); 153 exynos_drm_free_buf(exynos_gem_obj);
162
163out:
164 drm_gem_free_mmap_offset(obj);
165 154
166 /* release file pointer to gem object. */ 155 /* release file pointer to gem object. */
167 drm_gem_object_release(obj); 156 drm_gem_object_release(obj);
168 157
169 kfree(exynos_gem_obj); 158 kfree(exynos_gem_obj);
170 exynos_gem_obj = NULL;
171} 159}
172 160
173unsigned long exynos_drm_gem_get_size(struct drm_device *dev, 161unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
@@ -190,8 +178,7 @@ unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
190 return exynos_gem_obj->size; 178 return exynos_gem_obj->size;
191} 179}
192 180
193 181static struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
194struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
195 unsigned long size) 182 unsigned long size)
196{ 183{
197 struct exynos_drm_gem_obj *exynos_gem_obj; 184 struct exynos_drm_gem_obj *exynos_gem_obj;
@@ -212,6 +199,13 @@ struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
212 return ERR_PTR(ret); 199 return ERR_PTR(ret);
213 } 200 }
214 201
202 ret = drm_gem_create_mmap_offset(obj);
203 if (ret < 0) {
204 drm_gem_object_release(obj);
205 kfree(exynos_gem_obj);
206 return ERR_PTR(ret);
207 }
208
215 DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp); 209 DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
216 210
217 return exynos_gem_obj; 211 return exynos_gem_obj;
@@ -313,7 +307,7 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
313 drm_gem_object_unreference_unlocked(obj); 307 drm_gem_object_unreference_unlocked(obj);
314} 308}
315 309
316int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj, 310static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
317 struct vm_area_struct *vma) 311 struct vm_area_struct *vma)
318{ 312{
319 struct drm_device *drm_dev = exynos_gem_obj->base.dev; 313 struct drm_device *drm_dev = exynos_gem_obj->base.dev;
@@ -342,7 +336,8 @@ int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
342 336
343int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data, 337int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
344 struct drm_file *file_priv) 338 struct drm_file *file_priv)
345{ struct exynos_drm_gem_obj *exynos_gem_obj; 339{
340 struct exynos_drm_gem_obj *exynos_gem_obj;
346 struct drm_exynos_gem_info *args = data; 341 struct drm_exynos_gem_info *args = data;
347 struct drm_gem_object *obj; 342 struct drm_gem_object *obj;
348 343
@@ -402,6 +397,7 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
402 struct drm_mode_create_dumb *args) 397 struct drm_mode_create_dumb *args)
403{ 398{
404 struct exynos_drm_gem_obj *exynos_gem_obj; 399 struct exynos_drm_gem_obj *exynos_gem_obj;
400 unsigned int flags;
405 int ret; 401 int ret;
406 402
407 /* 403 /*
@@ -413,16 +409,12 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
413 args->pitch = args->width * ((args->bpp + 7) / 8); 409 args->pitch = args->width * ((args->bpp + 7) / 8);
414 args->size = args->pitch * args->height; 410 args->size = args->pitch * args->height;
415 411
416 if (is_drm_iommu_supported(dev)) { 412 if (is_drm_iommu_supported(dev))
417 exynos_gem_obj = exynos_drm_gem_create(dev, 413 flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;
418 EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC, 414 else
419 args->size); 415 flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;
420 } else {
421 exynos_gem_obj = exynos_drm_gem_create(dev,
422 EXYNOS_BO_CONTIG | EXYNOS_BO_WC,
423 args->size);
424 }
425 416
417 exynos_gem_obj = exynos_drm_gem_create(dev, flags, args->size);
426 if (IS_ERR(exynos_gem_obj)) { 418 if (IS_ERR(exynos_gem_obj)) {
427 dev_warn(dev->dev, "FB allocation failed.\n"); 419 dev_warn(dev->dev, "FB allocation failed.\n");
428 return PTR_ERR(exynos_gem_obj); 420 return PTR_ERR(exynos_gem_obj);
@@ -460,14 +452,9 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
460 goto unlock; 452 goto unlock;
461 } 453 }
462 454
463 ret = drm_gem_create_mmap_offset(obj);
464 if (ret)
465 goto out;
466
467 *offset = drm_vma_node_offset_addr(&obj->vma_node); 455 *offset = drm_vma_node_offset_addr(&obj->vma_node);
468 DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset); 456 DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
469 457
470out:
471 drm_gem_object_unreference(obj); 458 drm_gem_object_unreference(obj);
472unlock: 459unlock:
473 mutex_unlock(&dev->struct_mutex); 460 mutex_unlock(&dev->struct_mutex);
@@ -543,7 +530,6 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
543 530
544err_close_vm: 531err_close_vm:
545 drm_gem_vm_close(vma); 532 drm_gem_vm_close(vma);
546 drm_gem_free_mmap_offset(obj);
547 533
548 return ret; 534 return ret;
549} 535}
@@ -588,6 +574,8 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
588 if (ret < 0) 574 if (ret < 0)
589 goto err_free_large; 575 goto err_free_large;
590 576
577 exynos_gem_obj->sgt = sgt;
578
591 if (sgt->nents == 1) { 579 if (sgt->nents == 1) {
592 /* always physically continuous memory if sgt->nents is 1. */ 580 /* always physically continuous memory if sgt->nents is 1. */
593 exynos_gem_obj->flags |= EXYNOS_BO_CONTIG; 581 exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
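Note: the non-IOMMU path above now derives its page array from the single contiguous allocation that dma_alloc_attrs() returned, instead of issuing a second allocation. A minimal sketch of that derivation, outside the patch context (fill_page_array() is a hypothetical helper; dma_to_pfn() is the ARM dma-mapping accessor the hunk itself uses):

/* Sketch only: build a struct page array for one physically
 * contiguous DMA buffer, page by page. Assumes the buffer really is
 * contiguous, as the EXYNOS_BO_CONTIG allocation guarantees. */
static void fill_page_array(struct device *dev, struct page **pages,
			    dma_addr_t dma_addr, unsigned int nr_pages)
{
	unsigned int i;

	for (i = 0; i < nr_pages; i++)
		pages[i] = pfn_to_page(dma_to_pfn(dev,
						  dma_addr + i * PAGE_SIZE));
}

Replacing phys_to_page() with pfn_to_page(dma_to_pfn(...)) also matters on its own: a dma_addr_t is a bus address, and translating it back through the device is the portable route to a struct page.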
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index cd62f8410d1e..b62d1007c0e0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -39,6 +39,7 @@
  * - this address could be physical address without IOMMU and
  *	device address with IOMMU.
  * @pages: Array of backing pages.
+ * @sgt: Imported sg_table.
  *
  * P.S. this object would be transferred to user as kms_bo.handle so
  *	user can access the buffer through kms_bo.handle.
@@ -52,6 +53,7 @@ struct exynos_drm_gem_obj {
 	dma_addr_t		dma_addr;
 	struct dma_attrs	dma_attrs;
 	struct page		**pages;
+	struct sg_table		*sgt;
 };
 
 struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
@@ -59,10 +61,6 @@ struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
 /* destroy a buffer with gem object */
 void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj);
 
-/* create a private gem object and initialize it. */
-struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
-						      unsigned long size);
-
 /* create a new buffer with gem object */
 struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
 						unsigned int flags,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 425e70625388..2f5c118f4c8e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -786,6 +786,7 @@ static int rotator_remove(struct platform_device *pdev)
 	return 0;
 }
 
+#ifdef CONFIG_PM
 static int rotator_clk_crtl(struct rot_context *rot, bool enable)
 {
 	if (enable) {
@@ -822,7 +823,6 @@ static int rotator_resume(struct device *dev)
 }
 #endif
 
-#ifdef CONFIG_PM
 static int rotator_runtime_suspend(struct device *dev)
 {
 	struct rot_context *rot = dev_get_drvdata(dev);
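Note: hoisting #ifdef CONFIG_PM above rotator_clk_crtl() keeps that helper compiled only when its sole callers (the PM callbacks) exist; otherwise a CONFIG_PM=n build warns about a defined-but-unused static function. The guard shape, reduced to a sketch (names shortened, bodies elided; foo_ctx is a placeholder type):

struct foo_ctx;

#ifdef CONFIG_PM
/* Helpers referenced only by PM callbacks live inside the guard, so a
 * !CONFIG_PM build never sees an unused static function. */
static int foo_clk_ctrl(struct foo_ctx *ctx, bool enable)
{
	return 0;
}

static int foo_runtime_suspend(struct device *dev)
{
	return foo_clk_ctrl(dev_get_drvdata(dev), false);
}
#endif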
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
index 82be6b86a168..d1e300dcd544 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
@@ -58,7 +58,8 @@ static void fsl_dcu_drm_plane_atomic_disable(struct drm_plane *plane,
 					     struct drm_plane_state *old_state)
 {
 	struct fsl_dcu_drm_device *fsl_dev = plane->dev->dev_private;
-	unsigned int index, value, ret;
+	unsigned int value;
+	int index, ret;
 
 	index = fsl_dcu_drm_plane_index(plane);
 	if (index < 0)
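Note: the split declaration fixes a sign bug. fsl_dcu_drm_plane_index() can return a negative errno, and with index declared unsigned the `if (index < 0)` check was a tautology the compiler could silently drop. In isolation:

/* Illustrative only: why the unsigned declaration broke the check. */
static int index_check_demo(void)
{
	unsigned int uidx = (unsigned int)-22;	/* -EINVAL wraps to a huge value */
	int idx = -22;

	if (uidx < 0)		/* always false: unsigned is never negative */
		return -22;
	if (idx < 0)		/* signed: the error is actually caught */
		return idx;
	return 0;
}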
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 5a244ab9395b..39d73dbc1c47 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -640,6 +640,32 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
 		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
 
 	/*
+	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
+	 * read it just before the start of vblank. So try it again
+	 * so we don't accidentally end up spanning a vblank frame
+	 * increment, causing the pipe_update_end() code to squak at us.
+	 *
+	 * The nature of this problem means we can't simply check the ISR
+	 * bit and return the vblank start value; nor can we use the scanline
+	 * debug register in the transcoder as it appears to have the same
+	 * problem. We may need to extend this to include other platforms,
+	 * but so far testing only shows the problem on HSW.
+	 */
+	if (IS_HASWELL(dev) && !position) {
+		int i, temp;
+
+		for (i = 0; i < 100; i++) {
+			udelay(1);
+			temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
+				DSL_LINEMASK_GEN3;
+			if (temp != position) {
+				position = temp;
+				break;
+			}
+		}
+	}
+
+	/*
 	 * See update_scanline_offset() for the details on the
 	 * scanline_offset adjustment.
 	 */
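Note: the retry loop above is a workaround shape worth naming: when a status register can transiently read as a bogus value, re-read it on a small fixed budget and accept the first reading that differs. A generalized sketch (the 100 x 1 us budget mirrors the hunk; the callback indirection is illustrative, not the i915 code):

/* Poll via rd() until the value moves off 'suspect', or give up
 * after budget_us microseconds and return whatever was last read. */
static u32 read_past_glitch(u32 (*rd)(void *ctx), void *ctx,
			    u32 suspect, unsigned int budget_us)
{
	u32 val = rd(ctx);
	unsigned int i;

	for (i = 0; val == suspect && i < budget_us; i++) {
		udelay(1);
		val = rd(ctx);
	}
	return val;
}

As the hunk's comment explains, a single zero reading cannot be trusted here, so the loop accepts the first non-matching sample and otherwise falls back to the suspect value.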
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 89c1a8ce1f98..2a5c76faf9f8 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -430,7 +430,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
 
 /**
  * intel_audio_codec_disable - Disable the audio codec for HD audio
- * @encoder: encoder on which to disable audio
+ * @intel_encoder: encoder on which to disable audio
  *
  * The disable sequences must be performed before disabling the transcoder or
  * port.
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index b3e437b3bb54..c19e669ffe50 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -42,7 +42,7 @@ find_section(const void *_bdb, int section_id)
 	const struct bdb_header *bdb = _bdb;
 	const u8 *base = _bdb;
 	int index = 0;
-	u16 total, current_size;
+	u32 total, current_size;
 	u8 current_id;
 
 	/* skip to first section */
@@ -57,6 +57,10 @@ find_section(const void *_bdb, int section_id)
 		current_size = *((const u16 *)(base + index));
 		index += 2;
 
+		/* The MIPI Sequence Block v3+ has a separate size field. */
+		if (current_id == BDB_MIPI_SEQUENCE && *(base + index) >= 3)
+			current_size = *((const u32 *)(base + index + 1));
+
 		if (index + current_size > total)
 			return NULL;
 
@@ -799,6 +803,12 @@ parse_mipi(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
 		return;
 	}
 
+	/* Fail gracefully for forward incompatible sequence block. */
+	if (sequence->version >= 3) {
+		DRM_ERROR("Unable to parse MIPI Sequence Block v3+\n");
+		return;
+	}
+
 	DRM_DEBUG_DRIVER("Found MIPI sequence block\n");
 
 	block_size = get_blocksize(sequence);
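Note: both intel_bios.c hunks defend the same invariant from two sides: find_section() must read the v3+ MIPI sequence block's real 32-bit length (the u16 in the common header is a stub), and parse_mipi() must refuse v3+ payloads it does not yet understand. A condensed sketch of the size selection (offsets as in the hunk; BDB_MIPI_SEQUENCE is the i915 block ID):

/* Sketch: pick the effective size of a BDB block. For a MIPI
 * sequence block whose first payload byte (the version) is >= 3,
 * the real size is the u32 stored right after that version byte. */
static u32 bdb_block_size(const u8 *base, int index, u8 current_id)
{
	u32 current_size = *((const u16 *)(base + index));

	index += 2;
	if (current_id == BDB_MIPI_SEQUENCE && *(base + index) >= 3)
		current_size = *((const u32 *)(base + index + 1));
	return current_size;
}

Widening total and current_size to u32 is what keeps the subsequent `index + current_size > total` bounds check meaningful once block sizes can exceed 16 bits.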
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 8cc9264f7809..cf418be7d30a 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -15087,9 +15087,12 @@ static void readout_plane_state(struct intel_crtc *crtc,
 
 		plane_state = to_intel_plane_state(p->base.state);
 
-		if (p->base.type == DRM_PLANE_TYPE_PRIMARY)
+		if (p->base.type == DRM_PLANE_TYPE_PRIMARY) {
 			plane_state->visible = primary_get_hw_state(crtc);
-		else {
+			if (plane_state->visible)
+				crtc->base.state->plane_mask |=
+					1 << drm_plane_index(&p->base);
+		} else {
 			if (active)
 				p->disable_plane(&p->base, &crtc->base);
 
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 3e4be5a3becd..6ade06888432 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -462,11 +462,17 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
 	drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
 
 	drm_mode_connector_set_path_property(connector, pathprop);
+	return connector;
+}
+
+static void intel_dp_register_mst_connector(struct drm_connector *connector)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct drm_device *dev = connector->dev;
 	drm_modeset_lock_all(dev);
 	intel_connector_add_to_fbdev(intel_connector);
 	drm_modeset_unlock_all(dev);
 	drm_connector_register(&intel_connector->base);
-	return connector;
 }
 
 static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
@@ -512,6 +518,7 @@ static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
 
 static struct drm_dp_mst_topology_cbs mst_cbs = {
 	.add_connector = intel_dp_add_mst_connector,
+	.register_connector = intel_dp_register_mst_connector,
 	.destroy_connector = intel_dp_destroy_mst_connector,
 	.hotplug = intel_dp_mst_hotplug,
 };
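Note: the split follows a two-phase contract for MST connectors: .add_connector only allocates and configures the object, and the new .register_connector callback publishes it (fbdev attach plus drm_connector_register()) once the DP MST core says it is safe. The resulting callback table, in outline:

static struct drm_dp_mst_topology_cbs mst_cbs = {
	/* phase 1: create and set properties, nothing user-visible yet */
	.add_connector = intel_dp_add_mst_connector,
	/* phase 2: expose to userspace and fbdev */
	.register_connector = intel_dp_register_mst_connector,
	.destroy_connector = intel_dp_destroy_mst_connector,
	.hotplug = intel_dp_mst_hotplug,
};

The radeon hunk further down makes the identical split, which is the tell that the drm_dp_mst_topology_cbs interface itself grew the hook.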
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index 53c0173a39fe..b17785719598 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -180,7 +180,7 @@ static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv)
 
 	/* Enable polling and queue hotplug re-enabling. */
 	if (hpd_disabled) {
-		drm_kms_helper_poll_enable(dev);
+		drm_kms_helper_poll_enable_locked(dev);
 		mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work,
 				 msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
 	}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 72e0edd7bbde..7412caedcf7f 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -484,18 +484,18 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
 	status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
 
 	read_pointer = ring->next_context_status_buffer;
-	write_pointer = status_pointer & 0x07;
+	write_pointer = status_pointer & GEN8_CSB_PTR_MASK;
 	if (read_pointer > write_pointer)
-		write_pointer += 6;
+		write_pointer += GEN8_CSB_ENTRIES;
 
 	spin_lock(&ring->execlist_lock);
 
 	while (read_pointer < write_pointer) {
 		read_pointer++;
 		status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
-				(read_pointer % 6) * 8);
+				(read_pointer % GEN8_CSB_ENTRIES) * 8);
 		status_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
-				(read_pointer % 6) * 8 + 4);
+				(read_pointer % GEN8_CSB_ENTRIES) * 8 + 4);
 
 		if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
 			continue;
@@ -521,10 +521,12 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
 	spin_unlock(&ring->execlist_lock);
 
 	WARN(submit_contexts > 2, "More than two context complete events?\n");
-	ring->next_context_status_buffer = write_pointer % 6;
+	ring->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;
 
 	I915_WRITE(RING_CONTEXT_STATUS_PTR(ring),
-		   _MASKED_FIELD(0x07 << 8, ((u32)ring->next_context_status_buffer & 0x07) << 8));
+		   _MASKED_FIELD(GEN8_CSB_PTR_MASK << 8,
+				 ((u32)ring->next_context_status_buffer &
+				  GEN8_CSB_PTR_MASK) << 8));
 }
 
 static int execlists_context_queue(struct drm_i915_gem_request *request)
@@ -1422,6 +1424,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	u8 next_context_status_buffer_hw;
 
 	I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
 	I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
@@ -1436,7 +1439,29 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
 		   _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
 		   _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
 	POSTING_READ(RING_MODE_GEN7(ring));
-	ring->next_context_status_buffer = 0;
+
+	/*
+	 * Instead of resetting the Context Status Buffer (CSB) read pointer to
+	 * zero, we need to read the write pointer from hardware and use its
+	 * value because "this register is power context save restored".
+	 * Effectively, these states have been observed:
+	 *
+	 *      | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) |
+	 * BDW  | CSB regs not reset       | CSB regs reset       |
+	 * CHT  | CSB regs not reset       | CSB regs not reset   |
+	 */
+	next_context_status_buffer_hw = (I915_READ(RING_CONTEXT_STATUS_PTR(ring))
+					 & GEN8_CSB_PTR_MASK);
+
+	/*
+	 * When the CSB registers are reset (also after power-up / gpu reset),
+	 * CSB write pointer is set to all 1's, which is not valid, use '5' in
+	 * this special case, so the first element read is CSB[0].
+	 */
+	if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK)
+		next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1);
+
+	ring->next_context_status_buffer = next_context_status_buffer_hw;
 	DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name);
 
 	memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
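Note: with the constants named, the pointer arithmetic reads as what it is: a six-entry ring addressed through a three-bit hardware field. A standalone sketch of the pending-entry computation (mirrors the handler; csb_pending() is an illustrative name):

#define GEN8_CSB_ENTRIES	6
#define GEN8_CSB_PTR_MASK	0x07

/* Entries outstanding between the cached read pointer and the
 * hardware write pointer, with wraparound over the 6-entry ring. */
static unsigned int csb_pending(unsigned int read_ptr, u32 status_ptr)
{
	unsigned int write_ptr = status_ptr & GEN8_CSB_PTR_MASK;

	if (read_ptr > write_ptr)
		write_ptr += GEN8_CSB_ENTRIES;
	return write_ptr - read_ptr;
}

The gen8_init_common_ring() hunk leans on the same mask: a freshly reset pointer field reads as all ones (GEN8_CSB_PTR_MASK), which is why that value is special-cased to 5 so the first element consumed is CSB[0].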
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 64f89f9982a2..3c63bb32ad81 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -25,6 +25,8 @@
 #define _INTEL_LRC_H_
 
 #define GEN8_LR_CONTEXT_ALIGN 4096
+#define GEN8_CSB_ENTRIES 6
+#define GEN8_CSB_PTR_MASK 0x07
 
 /* Execlists regs */
 #define RING_ELSP(ring)			((ring)->mmio_base+0x230)
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index af7fdb3bd663..7401cf90b0db 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -246,7 +246,8 @@ static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
 	}
 
 	if (power_well->data == SKL_DISP_PW_1) {
-		intel_prepare_ddi(dev);
+		if (!dev_priv->power_domains.initializing)
+			intel_prepare_ddi(dev);
 		gen8_irq_power_well_post_enable(dev_priv, 1 << PIPE_A);
 	}
 }
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index 87de15ea1f93..b35b5b2db4ec 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -186,17 +186,19 @@ static int mgag200fb_create(struct drm_fb_helper *helper,
 
 	sysram = vmalloc(size);
 	if (!sysram)
-		return -ENOMEM;
+		goto err_sysram;
 
 	info = drm_fb_helper_alloc_fbi(helper);
-	if (IS_ERR(info))
-		return PTR_ERR(info);
+	if (IS_ERR(info)) {
+		ret = PTR_ERR(info);
+		goto err_alloc_fbi;
+	}
 
 	info->par = mfbdev;
 
 	ret = mgag200_framebuffer_init(dev, &mfbdev->mfb, &mode_cmd, gobj);
 	if (ret)
-		return ret;
+		goto err_framebuffer_init;
 
 	mfbdev->sysram = sysram;
 	mfbdev->size = size;
@@ -225,7 +227,17 @@ static int mgag200fb_create(struct drm_fb_helper *helper,
 
 	DRM_DEBUG_KMS("allocated %dx%d\n",
 		      fb->width, fb->height);
+
 	return 0;
+
+err_framebuffer_init:
+	drm_fb_helper_release_fbi(helper);
+err_alloc_fbi:
+	vfree(sysram);
+err_sysram:
+	drm_gem_object_unreference_unlocked(gobj);
+
+	return ret;
 }
 
 static int mga_fbdev_destroy(struct drm_device *dev,
@@ -276,23 +288,26 @@ int mgag200_fbdev_init(struct mga_device *mdev)
 	ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper,
 				 mdev->num_crtc, MGAG200FB_CONN_LIMIT);
 	if (ret)
-		return ret;
+		goto err_fb_helper;
 
 	ret = drm_fb_helper_single_add_all_connectors(&mfbdev->helper);
 	if (ret)
-		goto fini;
+		goto err_fb_setup;
 
 	/* disable all the possible outputs/crtcs before entering KMS mode */
 	drm_helper_disable_unused_functions(mdev->dev);
 
 	ret = drm_fb_helper_initial_config(&mfbdev->helper, bpp_sel);
 	if (ret)
-		goto fini;
+		goto err_fb_setup;
 
 	return 0;
 
-fini:
+err_fb_setup:
 	drm_fb_helper_fini(&mfbdev->helper);
+err_fb_helper:
+	mdev->mfbdev = NULL;
+
 	return ret;
 }
 
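Note: both mgag200 hunks rewrite early returns into the kernel's usual unwind ladder, where each label undoes exactly the steps that had succeeded before the failure, in reverse order. The skeleton (step names are placeholders):

static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static void undo_a(void) { }

static int setup_demo(void)
{
	int ret;

	ret = step_a();
	if (ret)
		goto err_a;

	ret = step_b();
	if (ret)
		goto err_b;	/* step_a succeeded, so unwind it below */

	return 0;

err_b:
	undo_a();
err_a:
	return ret;
}

The fbdev-init hunk adds one more detail: on failure it also clears mdev->mfbdev, so a later unload path cannot mistake the half-built helper for a live one.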
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index de06388069e7..b1a0f5656175 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -220,7 +220,7 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
 	}
 	r = mgag200_mm_init(mdev);
 	if (r)
-		goto out;
+		goto err_mm;
 
 	drm_mode_config_init(dev);
 	dev->mode_config.funcs = (void *)&mga_mode_funcs;
@@ -233,7 +233,7 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
 	r = mgag200_modeset_init(mdev);
 	if (r) {
 		dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r);
-		goto out;
+		goto err_modeset;
 	}
 
 	/* Make small buffers to store a hardware cursor (double buffered icon updates) */
@@ -241,20 +241,24 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
 			  &mdev->cursor.pixels_1);
 	mgag200_bo_create(dev, roundup(48*64, PAGE_SIZE), 0, 0,
 			  &mdev->cursor.pixels_2);
-	if (!mdev->cursor.pixels_2 || !mdev->cursor.pixels_1)
-		goto cursor_nospace;
-	mdev->cursor.pixels_current = mdev->cursor.pixels_1;
-	mdev->cursor.pixels_prev = mdev->cursor.pixels_2;
-	goto cursor_done;
-cursor_nospace:
-	mdev->cursor.pixels_1 = NULL;
-	mdev->cursor.pixels_2 = NULL;
-	dev_warn(&dev->pdev->dev, "Could not allocate space for cursors. Not doing hardware cursors.\n");
-cursor_done:
-
-out:
-	if (r)
-		mgag200_driver_unload(dev);
+	if (!mdev->cursor.pixels_2 || !mdev->cursor.pixels_1) {
+		mdev->cursor.pixels_1 = NULL;
+		mdev->cursor.pixels_2 = NULL;
+		dev_warn(&dev->pdev->dev,
+			"Could not allocate space for cursors. Not doing hardware cursors.\n");
+	} else {
+		mdev->cursor.pixels_current = mdev->cursor.pixels_1;
+		mdev->cursor.pixels_prev = mdev->cursor.pixels_2;
+	}
+
+	return 0;
+
+err_modeset:
+	drm_mode_config_cleanup(dev);
+	mgag200_mm_fini(mdev);
+err_mm:
+	dev->dev_private = NULL;
+
 	return r;
 }
 
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 7c6225c84ba6..4649bd2ed340 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -618,7 +618,7 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
 		  adjusted_mode->hdisplay,
 		  adjusted_mode->vdisplay);
 
-	if (qcrtc->index == 0)
+	if (bo->is_primary == false)
 		recreate_primary = true;
 
 	if (bo->surf.stride * bo->surf.height > qdev->vram_size) {
@@ -886,13 +886,15 @@ static enum drm_connector_status qxl_conn_detect(
 		drm_connector_to_qxl_output(connector);
 	struct drm_device *ddev = connector->dev;
 	struct qxl_device *qdev = ddev->dev_private;
-	int connected;
+	bool connected = false;
 
 	/* The first monitor is always connected */
-	connected = (output->index == 0) ||
-		    (qdev->client_monitors_config &&
-		     qdev->client_monitors_config->count > output->index &&
-		     qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]));
+	if (!qdev->client_monitors_config) {
+		if (output->index == 0)
+			connected = true;
+	} else
+		connected = qdev->client_monitors_config->count > output->index &&
+		     qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]);
 
 	DRM_DEBUG("#%d connected: %d\n", output->index, connected);
 	if (!connected)
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index c3872598b85a..65adb9c72377 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -1624,8 +1624,9 @@ radeon_atom_encoder_dpms_avivo(struct drm_encoder *encoder, int mode)
 		} else
 			atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
-			args.ucAction = ATOM_LCD_BLON;
-			atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+			struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+
+			atombios_set_backlight_level(radeon_encoder, dig->backlight_level);
 		}
 		break;
 	case DRM_MODE_DPMS_STANDBY:
@@ -1706,8 +1707,7 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
 			atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
 		}
 		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
-			atombios_dig_transmitter_setup(encoder,
-						       ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
+			atombios_set_backlight_level(radeon_encoder, dig->backlight_level);
 		if (ext_encoder)
 			atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
 		break;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index d8319dae8358..f3f562f6d848 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1573,10 +1573,12 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
 
 	drm_kms_helper_poll_disable(dev);
 
+	drm_modeset_lock_all(dev);
 	/* turn off display hw */
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
 	}
+	drm_modeset_unlock_all(dev);
 
 	/* unpin the front buffers and cursors */
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -1734,9 +1736,11 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
 	if (fbcon) {
 		drm_helper_resume_force_mode(dev);
 		/* turn on display hw */
+		drm_modeset_lock_all(dev);
 		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
 		}
+		drm_modeset_unlock_all(dev);
 	}
 
 	drm_kms_helper_poll_enable(dev);
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index 5e09c061847f..6cddae44fa6e 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -265,7 +265,6 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol
 {
 	struct radeon_connector *master = container_of(mgr, struct radeon_connector, mst_mgr);
 	struct drm_device *dev = master->base.dev;
-	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_connector *radeon_connector;
 	struct drm_connector *connector;
 
@@ -286,12 +285,19 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol
 	drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
 	drm_mode_connector_set_path_property(connector, pathprop);
 
+	return connector;
+}
+
+static void radeon_dp_register_mst_connector(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
 	drm_modeset_lock_all(dev);
 	radeon_fb_add_connector(rdev, connector);
 	drm_modeset_unlock_all(dev);
 
 	drm_connector_register(connector);
-	return connector;
 }
 
 static void radeon_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
@@ -324,6 +330,7 @@ static void radeon_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
 
 struct drm_dp_mst_topology_cbs mst_cbs = {
 	.add_connector = radeon_dp_add_mst_connector,
+	.register_connector = radeon_dp_register_mst_connector,
 	.destroy_connector = radeon_dp_destroy_mst_connector,
 	.hotplug = radeon_dp_mst_hotplug,
 };
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 7214858ffcea..1aa657fe31cb 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -48,40 +48,10 @@ struct radeon_fbdev {
 	struct radeon_device *rdev;
 };
 
-/**
- * radeon_fb_helper_set_par - Hide cursor on CRTCs used by fbdev.
- *
- * @info: fbdev info
- *
- * This function hides the cursor on all CRTCs used by fbdev.
- */
-static int radeon_fb_helper_set_par(struct fb_info *info)
-{
-	int ret;
-
-	ret = drm_fb_helper_set_par(info);
-
-	/* XXX: with universal plane support fbdev will automatically disable
-	 * all non-primary planes (including the cursor)
-	 */
-	if (ret == 0) {
-		struct drm_fb_helper *fb_helper = info->par;
-		int i;
-
-		for (i = 0; i < fb_helper->crtc_count; i++) {
-			struct drm_crtc *crtc = fb_helper->crtc_info[i].mode_set.crtc;
-
-			radeon_crtc_cursor_set2(crtc, NULL, 0, 0, 0, 0, 0);
-		}
-	}
-
-	return ret;
-}
-
 static struct fb_ops radeonfb_ops = {
 	.owner = THIS_MODULE,
 	.fb_check_var = drm_fb_helper_check_var,
-	.fb_set_par = radeon_fb_helper_set_par,
+	.fb_set_par = drm_fb_helper_set_par,
 	.fb_fillrect = drm_fb_helper_cfb_fillrect,
 	.fb_copyarea = drm_fb_helper_cfb_copyarea,
 	.fb_imageblit = drm_fb_helper_cfb_imageblit,
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 787cd8fd897f..e9115d3f67b0 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2927,6 +2927,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
 	{ PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
 	{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
 	{ PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
+	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1762, 0x2015, 0, 120000 },
 	{ 0, 0, 0, 0 },
 };
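Note: the new entry works because of the zeroed sentinel that terminates si_dpm_quirk_list: consumers walk until chip_device is 0 and apply the first full PCI ID match. Roughly (a hedged sketch of the lookup, not the driver's exact code):

struct si_dpm_quirk {
	u32 chip_vendor, chip_device, subsys_vendor, subsys_device;
	u32 max_sclk, max_mclk;
};

/* First full (vendor, device, subsystem) match wins; the all-zero
 * terminator stops the walk. */
static const struct si_dpm_quirk *
si_find_quirk(const struct si_dpm_quirk *p,
	      u32 ven, u32 dev, u32 sven, u32 sdev)
{
	for (; p->chip_device != 0; ++p) {
		if (p->chip_vendor == ven && p->chip_device == dev &&
		    p->subsys_vendor == sven && p->subsys_device == sdev)
			return p;
	}
	return NULL;
}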
2932 2933
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 8d9b7de25613..745e996d2dbc 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -882,6 +882,8 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		if (ret)
 			return ret;
 		man = &bdev->man[mem_type];
+		if (!man->has_type || !man->use_type)
+			continue;
 
 		type_ok = ttm_bo_mt_compatible(man, mem_type, place,
 						&cur_flags);
@@ -889,6 +891,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		if (!type_ok)
 			continue;
 
+		type_found = true;
 		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
 						  cur_flags);
 		/*
@@ -901,12 +904,10 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		if (mem_type == TTM_PL_SYSTEM)
 			break;
 
-		if (man->has_type && man->use_type) {
-			type_found = true;
-			ret = (*man->func->get_node)(man, bo, place, mem);
-			if (unlikely(ret))
-				return ret;
-		}
+		ret = (*man->func->get_node)(man, bo, place, mem);
+		if (unlikely(ret))
+			return ret;
+
 		if (mem->mm_node)
 			break;
 	}
@@ -917,9 +918,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		return 0;
 	}
 
-	if (!type_found)
-		return -EINVAL;
-
 	for (i = 0; i < placement->num_busy_placement; ++i) {
 		const struct ttm_place *place = &placement->busy_placement[i];
 
@@ -927,11 +925,12 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		if (ret)
 			return ret;
 		man = &bdev->man[mem_type];
-		if (!man->has_type)
+		if (!man->has_type || !man->use_type)
 			continue;
 		if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
 			continue;
 
+		type_found = true;
 		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
 						  cur_flags);
 		/*
@@ -957,8 +956,13 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		if (ret == -ERESTARTSYS)
 			has_erestartsys = true;
 	}
-	ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
-	return ret;
+
+	if (!type_found) {
+		printk(KERN_ERR TTM_PFX "No compatible memory type found.\n");
+		return -EINVAL;
+	}
+
+	return (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
 }
 EXPORT_SYMBOL(ttm_bo_mem_space);
 
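Note: the ttm_bo_mem_space() rework changes what type_found means: it is now set as soon as any placement survives the has_type/use_type and compatibility filters, in either the normal or the busy pass. -EINVAL is therefore reserved for requests no manager could ever satisfy, while ordinary memory pressure still reports -ENOMEM (or -ERESTARTSYS). A self-contained model of the result logic (the manager struct is a stand-in, not the TTM API):

struct demo_mgr {
	bool has_type, use_type, compatible, has_space;
};

static int mem_space_demo(const struct demo_mgr *m, int n,
			  bool has_erestartsys)
{
	bool type_found = false;
	int i;

	for (i = 0; i < n; i++) {		/* both passes look like this */
		if (!m[i].has_type || !m[i].use_type)
			continue;		/* disabled manager: not "found" */
		if (!m[i].compatible)
			continue;
		type_found = true;		/* a usable type exists... */
		if (m[i].has_space)
			return 0;		/* ...and we actually got space */
	}

	if (!type_found)
		return -EINVAL;		/* nothing could ever satisfy this */
	return has_erestartsys ? -ERESTARTSYS : -ENOMEM;
}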
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
index 67720f70fe29..b49445df8a7e 100644
--- a/drivers/gpu/drm/vmwgfx/Kconfig
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -1,6 +1,6 @@
 config DRM_VMWGFX
 	tristate "DRM driver for VMware Virtual GPU"
-	depends on DRM && PCI
+	depends on DRM && PCI && X86
 	select FB_DEFERRED_IO
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 5ae8f921da2a..8a76821177a6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -681,6 +681,14 @@ static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
 					 0, 0,
 					 DRM_MM_SEARCH_DEFAULT,
 					 DRM_MM_CREATE_DEFAULT);
+	if (ret) {
+		(void) vmw_cmdbuf_man_process(man);
+		ret = drm_mm_insert_node_generic(&man->mm, info->node,
+						 info->page_size, 0, 0,
+						 DRM_MM_SEARCH_DEFAULT,
+						 DRM_MM_CREATE_DEFAULT);
+	}
+
 	spin_unlock_bh(&man->lock);
 	info->done = !ret;
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index ce659a125f2b..092ea81eeff7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -311,7 +311,6 @@ static int vmw_cotable_unbind(struct vmw_resource *res,
 	struct vmw_private *dev_priv = res->dev_priv;
 	struct ttm_buffer_object *bo = val_buf->bo;
 	struct vmw_fence_obj *fence;
-	int ret;
 
 	if (list_empty(&res->mob_head))
 		return 0;
@@ -328,7 +327,7 @@ static int vmw_cotable_unbind(struct vmw_resource *res,
 	if (likely(fence != NULL))
 		vmw_fence_obj_unreference(&fence);
 
-	return ret;
+	return 0;
 }
 
 /**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index e13b20bd9908..2c7a25c71af2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -752,12 +752,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
 	dev_priv->active_master = &dev_priv->fbdev_master;
 
-
-	dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
-					       dev_priv->mmio_size);
-
-	dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
-					 dev_priv->mmio_size);
+	dev_priv->mmio_virt = ioremap_cache(dev_priv->mmio_start,
+					    dev_priv->mmio_size);
 
 	if (unlikely(dev_priv->mmio_virt == NULL)) {
 		ret = -ENOMEM;
@@ -913,7 +909,6 @@ out_no_device:
 out_err4:
 	iounmap(dev_priv->mmio_virt);
 out_err3:
-	arch_phys_wc_del(dev_priv->mmio_mtrr);
 	vmw_ttm_global_release(dev_priv);
 out_err0:
 	for (i = vmw_res_context; i < vmw_res_max; ++i)
@@ -964,7 +959,6 @@ static int vmw_driver_unload(struct drm_device *dev)
 
 	ttm_object_device_release(&dev_priv->tdev);
 	iounmap(dev_priv->mmio_virt);
-	arch_phys_wc_del(dev_priv->mmio_mtrr);
 	if (dev_priv->ctx.staged_bindings)
 		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
 	vmw_ttm_global_release(dev_priv);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 6d02de6dc36c..f19fd39b43e1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -376,7 +376,6 @@ struct vmw_private {
 	uint32_t initial_width;
 	uint32_t initial_height;
 	u32 __iomem *mmio_virt;
-	int mmio_mtrr;
 	uint32_t capabilities;
 	uint32_t max_gmr_ids;
 	uint32_t max_gmr_pages;
@@ -631,7 +630,8 @@ extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
 				 uint32_t size,
 				 bool shareable,
 				 uint32_t *handle,
-				 struct vmw_dma_buffer **p_dma_buf);
+				 struct vmw_dma_buffer **p_dma_buf,
+				 struct ttm_base_object **p_base);
 extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
 				     struct vmw_dma_buffer *dma_buf,
 				     uint32_t *handle);
@@ -645,7 +645,8 @@ extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
 					 uint32_t cur_validate_node);
 extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
 extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
-				  uint32_t id, struct vmw_dma_buffer **out);
+				  uint32_t id, struct vmw_dma_buffer **out,
+				  struct ttm_base_object **base);
 extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
 				  struct drm_file *file_priv);
 extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index b56565457c96..5da5de0cb522 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1236,7 +1236,8 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
 	struct vmw_relocation *reloc;
 	int ret;
 
-	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
+	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
+				     NULL);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use MOB buffer.\n");
 		ret = -EINVAL;
@@ -1296,7 +1297,8 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 	struct vmw_relocation *reloc;
 	int ret;
 
-	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
+	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
+				     NULL);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use GMR region.\n");
 		ret = -EINVAL;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 61fb7f3de311..15a6c01cd016 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1685,7 +1685,6 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
 	struct drm_crtc *crtc;
 	u32 num_units = 0;
 	u32 i, k;
-	int ret;
 
 	dirty->dev_priv = dev_priv;
 
@@ -1711,7 +1710,7 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
 		if (!dirty->cmd) {
 			DRM_ERROR("Couldn't reserve fifo space "
 				  "for dirty blits.\n");
-			return ret;
+			return -ENOMEM;
 		}
 		memset(dirty->cmd, 0, dirty->fifo_reserve_size);
 	}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index 76069f093ccf..222c9c2123a1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -484,7 +484,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
 		goto out_unlock;
 	}
 
-	ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf);
+	ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf, NULL);
 	if (ret)
 		goto out_unlock;
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index c1912f852b42..e57667ca7557 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -354,7 +354,7 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
 	}
 
 	*out_surf = NULL;
-	ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
+	ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf, NULL);
 	return ret;
 }
 
@@ -481,7 +481,8 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
 			  uint32_t size,
 			  bool shareable,
 			  uint32_t *handle,
-			  struct vmw_dma_buffer **p_dma_buf)
+			  struct vmw_dma_buffer **p_dma_buf,
+			  struct ttm_base_object **p_base)
 {
 	struct vmw_user_dma_buffer *user_bo;
 	struct ttm_buffer_object *tmp;
@@ -515,6 +516,10 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
 	}
 
 	*p_dma_buf = &user_bo->dma;
+	if (p_base) {
+		*p_base = &user_bo->prime.base;
+		kref_get(&(*p_base)->refcount);
+	}
 	*handle = user_bo->prime.base.hash.key;
 
 out_no_base_object:
@@ -631,6 +636,7 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
 	struct vmw_dma_buffer *dma_buf;
 	struct vmw_user_dma_buffer *user_bo;
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	struct ttm_base_object *buffer_base;
 	int ret;
 
 	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
@@ -643,7 +649,8 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
 
 	switch (arg->op) {
 	case drm_vmw_synccpu_grab:
-		ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf);
+		ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf,
+					     &buffer_base);
 		if (unlikely(ret != 0))
 			return ret;
 
@@ -651,6 +658,7 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
 				       dma);
 		ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
 		vmw_dmabuf_unreference(&dma_buf);
+		ttm_base_object_unref(&buffer_base);
 		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
 			     ret != -EBUSY)) {
 			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
@@ -692,7 +700,8 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
 		return ret;
 
 	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
-				    req->size, false, &handle, &dma_buf);
+				    req->size, false, &handle, &dma_buf,
+				    NULL);
 	if (unlikely(ret != 0))
 		goto out_no_dmabuf;
 
@@ -721,7 +730,8 @@ int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
 }
 
 int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
-			   uint32_t handle, struct vmw_dma_buffer **out)
+			   uint32_t handle, struct vmw_dma_buffer **out,
+			   struct ttm_base_object **p_base)
 {
 	struct vmw_user_dma_buffer *vmw_user_bo;
 	struct ttm_base_object *base;
@@ -743,7 +753,10 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
 	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
 				   prime.base);
 	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
-	ttm_base_object_unref(&base);
+	if (p_base)
+		*p_base = base;
+	else
+		ttm_base_object_unref(&base);
 	*out = &vmw_user_bo->dma;
 
 	return 0;
@@ -1004,7 +1017,7 @@ int vmw_dumb_create(struct drm_file *file_priv,
 
 	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
 				    args->size, false, &args->handle,
-				    &dma_buf);
+				    &dma_buf, NULL);
 	if (unlikely(ret != 0))
 		goto out_no_dmabuf;
 
@@ -1032,7 +1045,7 @@ int vmw_dumb_map_offset(struct drm_file *file_priv,
 	struct vmw_dma_buffer *out_buf;
 	int ret;
 
-	ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
+	ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf, NULL);
 	if (ret != 0)
 		return -EINVAL;
 
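Note: the new p_base out-parameter threads a TTM base-object reference back to callers that need the user handle to stay alive while they use the looked-up buffer; passing NULL keeps the old drop-immediately behavior. The caller-side contract, as the synccpu ioctl above now exercises it (condensed from the hunks, not a full function):

struct ttm_base_object *buffer_base;
struct vmw_dma_buffer *dma_buf;
int ret;

ret = vmw_user_dmabuf_lookup(tfile, handle, &dma_buf, &buffer_base);
if (ret != 0)
	return ret;

/* ...dma_buf is safe to use: buffer_base pins the user handle... */

vmw_dmabuf_unreference(&dma_buf);
ttm_base_object_unref(&buffer_base);	/* pairs with the lookup */

The surface path stores the reference instead (user_srf->backup_base) and drops it in vmw_user_surface_base_release(), which is the same ownership rule stretched over the object's lifetime.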
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index bba1ee395478..fd47547b0234 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -855,7 +855,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
 
 	if (buffer_handle != SVGA3D_INVALID_ID) {
 		ret = vmw_user_dmabuf_lookup(tfile, buffer_handle,
-					     &buffer);
+					     &buffer, NULL);
 		if (unlikely(ret != 0)) {
 			DRM_ERROR("Could not find buffer for shader "
 				  "creation.\n");
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 3361769842f4..64b50409fa07 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -46,6 +46,7 @@ struct vmw_user_surface {
 	struct vmw_surface srf;
 	uint32_t size;
 	struct drm_master *master;
+	struct ttm_base_object *backup_base;
 };
 
 /**
@@ -656,6 +657,7 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
 	struct vmw_resource *res = &user_srf->srf.res;
 
 	*p_base = NULL;
+	ttm_base_object_unref(&user_srf->backup_base);
 	vmw_resource_unreference(&res);
 }
 
@@ -851,7 +853,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 					    res->backup_size,
 					    true,
 					    &backup_handle,
-					    &res->backup);
+					    &res->backup,
+					    &user_srf->backup_base);
 		if (unlikely(ret != 0)) {
 			vmw_resource_unreference(&res);
 			goto out_unlock;
@@ -1321,7 +1324,8 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
 
 	if (req->buffer_handle != SVGA3D_INVALID_ID) {
 		ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
-					     &res->backup,
+					     &res->backup,
+					     &user_srf->backup_base);
 		if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE <
 		    res->backup_size) {
 			DRM_ERROR("Surface backup buffer is too small.\n");
@@ -1335,7 +1339,8 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
 					     req->drm_surface_flags &
 					     drm_vmw_surface_flag_shareable,
 					     &backup_handle,
-					     &res->backup);
+					     &res->backup,
+					     &user_srf->backup_base);
 
 		if (unlikely(ret != 0)) {
 			vmw_resource_unreference(&res);
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 2f9aead4ecfc..652afd11a9ef 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -204,6 +204,8 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
204 spin_lock_irqsave(&vmbus_connection.channel_lock, flags); 204 spin_lock_irqsave(&vmbus_connection.channel_lock, flags);
205 list_del(&channel->listentry); 205 list_del(&channel->listentry);
206 spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags); 206 spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);
207
208 primary_channel = channel;
207 } else { 209 } else {
208 primary_channel = channel->primary_channel; 210 primary_channel = channel->primary_channel;
209 spin_lock_irqsave(&primary_channel->lock, flags); 211 spin_lock_irqsave(&primary_channel->lock, flags);
@@ -211,6 +213,14 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
211 primary_channel->num_sc--; 213 primary_channel->num_sc--;
212 spin_unlock_irqrestore(&primary_channel->lock, flags); 214 spin_unlock_irqrestore(&primary_channel->lock, flags);
213 } 215 }
216
217 /*
218 * We need to free the bit for init_vp_index() to work in the case
 219 * of a sub-channel, when we reload drivers like hv_netvsc.
220 */
221 cpumask_clear_cpu(channel->target_cpu,
222 &primary_channel->alloced_cpus_in_node);
223
214 free_channel(channel); 224 free_channel(channel);
215} 225}
216 226
@@ -458,6 +468,13 @@ static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_gui
458 continue; 468 continue;
459 } 469 }
460 470
471 /*
472 * NOTE: in the case of sub-channel, we clear the sub-channel
473 * related bit(s) in primary->alloced_cpus_in_node in
474 * hv_process_channel_removal(), so when we reload drivers
 475 * like hv_netvsc in an SMP guest, here we're able to re-allocate
 476 * the bit from primary->alloced_cpus_in_node.
477 */
461 if (!cpumask_test_cpu(cur_cpu, 478 if (!cpumask_test_cpu(cur_cpu,
462 &primary->alloced_cpus_in_node)) { 479 &primary->alloced_cpus_in_node)) {
463 cpumask_set_cpu(cur_cpu, 480 cpumask_set_cpu(cur_cpu,
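The two channel_mgmt.c hunks are halves of one invariant: init_vp_index() sets a bit in primary->alloced_cpus_in_node when it binds a channel to a CPU, and hv_process_channel_removal() must clear that bit again, or reloading a driver like hv_netvsc leaks the CPU and the mask eventually fills up. A runnable user-space model of the allocate/clear cycle over a 64-bit mask (names hypothetical, 64 CPUs assumed):

#include <stdint.h>
#include <stdio.h>

#define MAX_CPUS 64

static uint64_t alloced_cpus;

/* Pick the first CPU whose bit is clear and mark it allocated,
 * loosely mirroring the cpumask walk in init_vp_index(). */
static int alloc_cpu(void)
{
	int cpu;

	for (cpu = 0; cpu < MAX_CPUS; cpu++) {
		if (!(alloced_cpus & (1ULL << cpu))) {
			alloced_cpus |= 1ULL << cpu;
			return cpu;
		}
	}
	return -1;
}

/* Mirror of cpumask_clear_cpu() on channel removal: without this,
 * a re-created sub-channel could never reuse its old CPU. */
static void free_cpu(int cpu)
{
	alloced_cpus &= ~(1ULL << cpu);
}

int main(void)
{
	int a = alloc_cpu();	/* 0 */
	int b = alloc_cpu();	/* 1 */

	free_cpu(a);		/* channel torn down, bit released */
	printf("reallocated cpu %d (was %d), other still %d\n",
	       alloc_cpu(), a, b);
	return 0;
}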
diff --git a/drivers/hwmon/abx500.c b/drivers/hwmon/abx500.c
index 6cb89c0ebab6..1fd46859ed29 100644
--- a/drivers/hwmon/abx500.c
+++ b/drivers/hwmon/abx500.c
@@ -470,6 +470,7 @@ static const struct of_device_id abx500_temp_match[] = {
470 { .compatible = "stericsson,abx500-temp" }, 470 { .compatible = "stericsson,abx500-temp" },
471 {}, 471 {},
472}; 472};
473MODULE_DEVICE_TABLE(of, abx500_temp_match);
473#endif 474#endif
474 475
475static struct platform_driver abx500_temp_driver = { 476static struct platform_driver abx500_temp_driver = {
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index a3dae6d0082a..82de3deeb18a 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -539,6 +539,7 @@ static const struct of_device_id of_gpio_fan_match[] = {
539 { .compatible = "gpio-fan", }, 539 { .compatible = "gpio-fan", },
540 {}, 540 {},
541}; 541};
542MODULE_DEVICE_TABLE(of, of_gpio_fan_match);
542#endif /* CONFIG_OF_GPIO */ 543#endif /* CONFIG_OF_GPIO */
543 544
544static int gpio_fan_probe(struct platform_device *pdev) 545static int gpio_fan_probe(struct platform_device *pdev)
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index 2d9a712699ff..3e23003f78b0 100644
--- a/drivers/hwmon/pwm-fan.c
+++ b/drivers/hwmon/pwm-fan.c
@@ -323,6 +323,7 @@ static const struct of_device_id of_pwm_fan_match[] = {
323 { .compatible = "pwm-fan", }, 323 { .compatible = "pwm-fan", },
324 {}, 324 {},
325}; 325};
326MODULE_DEVICE_TABLE(of, of_pwm_fan_match);
326 327
327static struct platform_driver pwm_fan_driver = { 328static struct platform_driver pwm_fan_driver = {
328 .probe = pwm_fan_probe, 329 .probe = pwm_fan_probe,
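All three hwmon fixes add the same missing line. MODULE_DEVICE_TABLE(of, tbl) embeds the OF match table in the module's alias section; that is what lets udev autoload the module when a matching device-tree node appears, and without it the driver only binds if someone modprobes it by hand. A minimal skeleton of the pattern, with a hypothetical driver name and compatible string:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static const struct of_device_id example_fan_match[] = {
	{ .compatible = "example,fan" },
	{ /* sentinel */ },
};
/* Without this line the driver still matches once loaded, but no
 * modalias is exported and userspace never autoloads the module. */
MODULE_DEVICE_TABLE(of, example_fan_match);

static int example_fan_probe(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver example_fan_driver = {
	.probe = example_fan_probe,
	.driver = {
		.name = "example-fan",
		.of_match_table = of_match_ptr(example_fan_match),
	},
};
module_platform_driver(example_fan_driver);

MODULE_LICENSE("GPL");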
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 3a3738fe016b..cd4510a63375 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -620,7 +620,7 @@ static struct cpuidle_state skl_cstates[] = {
620 .name = "C6-SKL", 620 .name = "C6-SKL",
621 .desc = "MWAIT 0x20", 621 .desc = "MWAIT 0x20",
622 .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, 622 .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
623 .exit_latency = 75, 623 .exit_latency = 85,
624 .target_residency = 200, 624 .target_residency = 200,
625 .enter = &intel_idle, 625 .enter = &intel_idle,
626 .enter_freeze = intel_idle_freeze, }, 626 .enter_freeze = intel_idle_freeze, },
@@ -636,11 +636,19 @@ static struct cpuidle_state skl_cstates[] = {
636 .name = "C8-SKL", 636 .name = "C8-SKL",
637 .desc = "MWAIT 0x40", 637 .desc = "MWAIT 0x40",
638 .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, 638 .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
639 .exit_latency = 174, 639 .exit_latency = 200,
640 .target_residency = 800, 640 .target_residency = 800,
641 .enter = &intel_idle, 641 .enter = &intel_idle,
642 .enter_freeze = intel_idle_freeze, }, 642 .enter_freeze = intel_idle_freeze, },
643 { 643 {
644 .name = "C9-SKL",
645 .desc = "MWAIT 0x50",
646 .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
647 .exit_latency = 480,
648 .target_residency = 5000,
649 .enter = &intel_idle,
650 .enter_freeze = intel_idle_freeze, },
651 {
644 .name = "C10-SKL", 652 .name = "C10-SKL",
645 .desc = "MWAIT 0x60", 653 .desc = "MWAIT 0x60",
646 .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, 654 .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
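exit_latency and target_residency are the two knobs the intel_idle patch retunes (and the new C9-SKL entry adds): the cpuidle governor wants the deepest state whose wake-up cost fits the current latency constraint and whose residency pays back the entry/exit overhead. A stripped-down user-space model of that selection rule using the Skylake values above; the real menu governor also folds in idle-duration prediction, which this omits:

#include <stdio.h>

struct cstate {
	const char *name;
	int exit_latency;	/* us */
	int target_residency;	/* us */
};

/* Subset of the skl_cstates[] values added or corrected above. */
static const struct cstate states[] = {
	{ "C6-SKL",  85,  200 },
	{ "C8-SKL", 200,  800 },
	{ "C9-SKL", 480, 5000 },
};

/* Deepest state that satisfies both hard constraints. */
static const struct cstate *pick(int predicted_idle_us, int latency_req_us)
{
	const struct cstate *best = NULL;
	unsigned int i;

	for (i = 0; i < sizeof(states) / sizeof(states[0]); i++) {
		if (states[i].target_residency <= predicted_idle_us &&
		    states[i].exit_latency <= latency_req_us)
			best = &states[i];
	}
	return best;
}

int main(void)
{
	const struct cstate *s = pick(6000, 500);

	printf("picked %s\n", s ? s->name : "poll");	/* C9-SKL */
	return 0;
}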
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 41d6911e244e..f1ccd40beae9 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -245,7 +245,6 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
245 props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR; 245 props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
246 if (MLX5_CAP_GEN(mdev, apm)) 246 if (MLX5_CAP_GEN(mdev, apm))
247 props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG; 247 props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
248 props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
249 if (MLX5_CAP_GEN(mdev, xrc)) 248 if (MLX5_CAP_GEN(mdev, xrc))
250 props->device_cap_flags |= IB_DEVICE_XRC; 249 props->device_cap_flags |= IB_DEVICE_XRC;
251 props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; 250 props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
@@ -795,53 +794,6 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
795 return 0; 794 return 0;
796} 795}
797 796
798static int alloc_pa_mkey(struct mlx5_ib_dev *dev, u32 *key, u32 pdn)
799{
800 struct mlx5_create_mkey_mbox_in *in;
801 struct mlx5_mkey_seg *seg;
802 struct mlx5_core_mr mr;
803 int err;
804
805 in = kzalloc(sizeof(*in), GFP_KERNEL);
806 if (!in)
807 return -ENOMEM;
808
809 seg = &in->seg;
810 seg->flags = MLX5_PERM_LOCAL_READ | MLX5_ACCESS_MODE_PA;
811 seg->flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
812 seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
813 seg->start_addr = 0;
814
815 err = mlx5_core_create_mkey(dev->mdev, &mr, in, sizeof(*in),
816 NULL, NULL, NULL);
817 if (err) {
818 mlx5_ib_warn(dev, "failed to create mkey, %d\n", err);
819 goto err_in;
820 }
821
822 kfree(in);
823 *key = mr.key;
824
825 return 0;
826
827err_in:
828 kfree(in);
829
830 return err;
831}
832
833static void free_pa_mkey(struct mlx5_ib_dev *dev, u32 key)
834{
835 struct mlx5_core_mr mr;
836 int err;
837
838 memset(&mr, 0, sizeof(mr));
839 mr.key = key;
840 err = mlx5_core_destroy_mkey(dev->mdev, &mr);
841 if (err)
842 mlx5_ib_warn(dev, "failed to destroy mkey 0x%x\n", key);
843}
844
845static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev, 797static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
846 struct ib_ucontext *context, 798 struct ib_ucontext *context,
847 struct ib_udata *udata) 799 struct ib_udata *udata)
@@ -867,13 +819,6 @@ static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
867 kfree(pd); 819 kfree(pd);
868 return ERR_PTR(-EFAULT); 820 return ERR_PTR(-EFAULT);
869 } 821 }
870 } else {
871 err = alloc_pa_mkey(to_mdev(ibdev), &pd->pa_lkey, pd->pdn);
872 if (err) {
873 mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn);
874 kfree(pd);
875 return ERR_PTR(err);
876 }
877 } 822 }
878 823
879 return &pd->ibpd; 824 return &pd->ibpd;
@@ -884,9 +829,6 @@ static int mlx5_ib_dealloc_pd(struct ib_pd *pd)
884 struct mlx5_ib_dev *mdev = to_mdev(pd->device); 829 struct mlx5_ib_dev *mdev = to_mdev(pd->device);
885 struct mlx5_ib_pd *mpd = to_mpd(pd); 830 struct mlx5_ib_pd *mpd = to_mpd(pd);
886 831
887 if (!pd->uobject)
888 free_pa_mkey(mdev, mpd->pa_lkey);
889
890 mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn); 832 mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn);
891 kfree(mpd); 833 kfree(mpd);
892 834
@@ -1245,18 +1187,10 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
1245 struct ib_srq_init_attr attr; 1187 struct ib_srq_init_attr attr;
1246 struct mlx5_ib_dev *dev; 1188 struct mlx5_ib_dev *dev;
1247 struct ib_cq_init_attr cq_attr = {.cqe = 1}; 1189 struct ib_cq_init_attr cq_attr = {.cqe = 1};
1248 u32 rsvd_lkey;
1249 int ret = 0; 1190 int ret = 0;
1250 1191
1251 dev = container_of(devr, struct mlx5_ib_dev, devr); 1192 dev = container_of(devr, struct mlx5_ib_dev, devr);
1252 1193
1253 ret = mlx5_core_query_special_context(dev->mdev, &rsvd_lkey);
1254 if (ret) {
1255 pr_err("Failed to query special context %d\n", ret);
1256 return ret;
1257 }
1258 dev->ib_dev.local_dma_lkey = rsvd_lkey;
1259
1260 devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL); 1194 devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL);
1261 if (IS_ERR(devr->p0)) { 1195 if (IS_ERR(devr->p0)) {
1262 ret = PTR_ERR(devr->p0); 1196 ret = PTR_ERR(devr->p0);
@@ -1418,6 +1352,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
1418 strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX); 1352 strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX);
1419 dev->ib_dev.owner = THIS_MODULE; 1353 dev->ib_dev.owner = THIS_MODULE;
1420 dev->ib_dev.node_type = RDMA_NODE_IB_CA; 1354 dev->ib_dev.node_type = RDMA_NODE_IB_CA;
1355 dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
1421 dev->num_ports = MLX5_CAP_GEN(mdev, num_ports); 1356 dev->num_ports = MLX5_CAP_GEN(mdev, num_ports);
1422 dev->ib_dev.phys_port_cnt = dev->num_ports; 1357 dev->ib_dev.phys_port_cnt = dev->num_ports;
1423 dev->ib_dev.num_comp_vectors = 1358 dev->ib_dev.num_comp_vectors =
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index bb8cda79e881..22123b79d550 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -103,7 +103,6 @@ static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibuconte
103struct mlx5_ib_pd { 103struct mlx5_ib_pd {
104 struct ib_pd ibpd; 104 struct ib_pd ibpd;
105 u32 pdn; 105 u32 pdn;
106 u32 pa_lkey;
107}; 106};
108 107
109/* Use macros here so that don't have to duplicate 108/* Use macros here so that don't have to duplicate
@@ -213,7 +212,6 @@ struct mlx5_ib_qp {
213 int uuarn; 212 int uuarn;
214 213
215 int create_type; 214 int create_type;
216 u32 pa_lkey;
217 215
218 /* Store signature errors */ 216 /* Store signature errors */
219 bool signature_en; 217 bool signature_en;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index c745c6c5e10d..6f521a3418e8 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -925,8 +925,6 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
925 err = create_kernel_qp(dev, init_attr, qp, &in, &inlen); 925 err = create_kernel_qp(dev, init_attr, qp, &in, &inlen);
926 if (err) 926 if (err)
927 mlx5_ib_dbg(dev, "err %d\n", err); 927 mlx5_ib_dbg(dev, "err %d\n", err);
928 else
929 qp->pa_lkey = to_mpd(pd)->pa_lkey;
930 } 928 }
931 929
932 if (err) 930 if (err)
@@ -2045,7 +2043,7 @@ static void set_frwr_pages(struct mlx5_wqe_data_seg *dseg,
2045 mfrpl->mapped_page_list[i] = cpu_to_be64(page_list[i] | perm); 2043 mfrpl->mapped_page_list[i] = cpu_to_be64(page_list[i] | perm);
2046 dseg->addr = cpu_to_be64(mfrpl->map); 2044 dseg->addr = cpu_to_be64(mfrpl->map);
2047 dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) * wr->wr.fast_reg.page_list_len, 64)); 2045 dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) * wr->wr.fast_reg.page_list_len, 64));
2048 dseg->lkey = cpu_to_be32(pd->pa_lkey); 2046 dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey);
2049} 2047}
2050 2048
2051static __be32 send_ieth(struct ib_send_wr *wr) 2049static __be32 send_ieth(struct ib_send_wr *wr)
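All of the mlx5 hunks retire one object: the driver-private PA mkey. Since the IB core now reserves a local_dma_lkey per PD, kernel consumers can describe DMA-mapped buffers without allocating a memory region of their own, which is why dseg->lkey above switches to pd->ibpd.local_dma_lkey. A hedged kernel-style fragment of the consumer side (helper name hypothetical):

#include <rdma/ib_verbs.h>

/* Sketch: describe a DMA-mapped buffer with the PD-wide reserved
 * lkey instead of a driver-allocated PA mkey. */
static inline void fill_local_sge(struct ib_sge *sge, struct ib_pd *pd,
				  u64 dma_addr, u32 len)
{
	sge->addr   = dma_addr;			/* from ib_dma_map_single() */
	sge->length = len;
	sge->lkey   = pd->local_dma_lkey;	/* no MR allocation needed */
}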
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index ca2873698d75..4cd5428a2399 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -80,7 +80,7 @@ enum {
80 IPOIB_NUM_WC = 4, 80 IPOIB_NUM_WC = 4,
81 81
82 IPOIB_MAX_PATH_REC_QUEUE = 3, 82 IPOIB_MAX_PATH_REC_QUEUE = 3,
83 IPOIB_MAX_MCAST_QUEUE = 3, 83 IPOIB_MAX_MCAST_QUEUE = 64,
84 84
85 IPOIB_FLAG_OPER_UP = 0, 85 IPOIB_FLAG_OPER_UP = 0,
86 IPOIB_FLAG_INITIALIZED = 1, 86 IPOIB_FLAG_INITIALIZED = 1,
@@ -548,6 +548,8 @@ void ipoib_path_iter_read(struct ipoib_path_iter *iter,
548 548
549int ipoib_mcast_attach(struct net_device *dev, u16 mlid, 549int ipoib_mcast_attach(struct net_device *dev, u16 mlid,
550 union ib_gid *mgid, int set_qkey); 550 union ib_gid *mgid, int set_qkey);
551int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast);
552struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid);
551 553
552int ipoib_init_qp(struct net_device *dev); 554int ipoib_init_qp(struct net_device *dev);
553int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca); 555int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 36536ce5a3e2..f74316e679d2 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1149,6 +1149,9 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
1149 unsigned long dt; 1149 unsigned long dt;
1150 unsigned long flags; 1150 unsigned long flags;
1151 int i; 1151 int i;
1152 LIST_HEAD(remove_list);
1153 struct ipoib_mcast *mcast, *tmcast;
1154 struct net_device *dev = priv->dev;
1152 1155
1153 if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags)) 1156 if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
1154 return; 1157 return;
@@ -1176,6 +1179,19 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
1176 lockdep_is_held(&priv->lock))) != NULL) { 1179 lockdep_is_held(&priv->lock))) != NULL) {
1177 /* was the neigh idle for two GC periods */ 1180 /* was the neigh idle for two GC periods */
1178 if (time_after(neigh_obsolete, neigh->alive)) { 1181 if (time_after(neigh_obsolete, neigh->alive)) {
1182 u8 *mgid = neigh->daddr + 4;
1183
1184 /* Is this multicast ? */
1185 if (*mgid == 0xff) {
1186 mcast = __ipoib_mcast_find(dev, mgid);
1187
1188 if (mcast && test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
1189 list_del(&mcast->list);
1190 rb_erase(&mcast->rb_node, &priv->multicast_tree);
1191 list_add_tail(&mcast->list, &remove_list);
1192 }
1193 }
1194
1179 rcu_assign_pointer(*np, 1195 rcu_assign_pointer(*np,
1180 rcu_dereference_protected(neigh->hnext, 1196 rcu_dereference_protected(neigh->hnext,
1181 lockdep_is_held(&priv->lock))); 1197 lockdep_is_held(&priv->lock)));
@@ -1191,6 +1207,8 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
1191 1207
1192out_unlock: 1208out_unlock:
1193 spin_unlock_irqrestore(&priv->lock, flags); 1209 spin_unlock_irqrestore(&priv->lock, flags);
1210 list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
1211 ipoib_mcast_leave(dev, mcast);
1194} 1212}
1195 1213
1196static void ipoib_reap_neigh(struct work_struct *work) 1214static void ipoib_reap_neigh(struct work_struct *work)
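__ipoib_reap_neigh() cannot call ipoib_mcast_leave() while holding priv->lock, since leaving a group can block, so the patch unhooks dead send-only groups under the lock, parks them on a local remove_list, and only walks that list after the unlock. The same collect-under-lock, act-outside-lock pattern in a runnable user-space sketch (pthread mutex standing in for the spinlock, names hypothetical):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct mcast {
	struct mcast *next;
	int id;
	int dead;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct mcast *groups;

static void slow_leave(struct mcast *m)	/* may block: never under lock */
{
	printf("leaving group %d\n", m->id);
	free(m);
}

static void reap(void)
{
	struct mcast *m, **pp, *remove_list = NULL;

	pthread_mutex_lock(&lock);
	for (pp = &groups; (m = *pp) != NULL; ) {
		if (m->dead) {
			*pp = m->next;		/* unhook under the lock */
			m->next = remove_list;	/* park on the private list */
			remove_list = m;
		} else {
			pp = &m->next;
		}
	}
	pthread_mutex_unlock(&lock);

	while ((m = remove_list) != NULL) {	/* act outside the lock */
		remove_list = m->next;
		slow_leave(m);
	}
}

int main(void)
{
	int i;

	for (i = 0; i < 3; i++) {
		struct mcast *m = malloc(sizeof(*m));

		m->id = i;
		m->dead = (i != 1);
		m->next = groups;
		groups = m;
	}
	reap();		/* leaves groups 0 and 2, keeps 1 */
	return 0;
}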
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 09a1748f9d13..136cbefe00f8 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -153,7 +153,7 @@ static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev,
153 return mcast; 153 return mcast;
154} 154}
155 155
156static struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid) 156struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid)
157{ 157{
158 struct ipoib_dev_priv *priv = netdev_priv(dev); 158 struct ipoib_dev_priv *priv = netdev_priv(dev);
159 struct rb_node *n = priv->multicast_tree.rb_node; 159 struct rb_node *n = priv->multicast_tree.rb_node;
@@ -508,17 +508,19 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
508 rec.hop_limit = priv->broadcast->mcmember.hop_limit; 508 rec.hop_limit = priv->broadcast->mcmember.hop_limit;
509 509
510 /* 510 /*
511 * Historically Linux IPoIB has never properly supported SEND 511 * Send-only IB Multicast joins do not work at the core
512 * ONLY join. It emulated it by not providing all the required 512 * IB layer yet, so we can't use them here. However,
513 * attributes, which is enough to prevent group creation and 513 * we are emulating an Ethernet multicast send, which
514 * detect if there are full members or not. A major problem 514 * does not require a multicast subscription and will
515 * with supporting SEND ONLY is detecting when the group is 515 * still send properly. The most appropriate thing to
516 * auto-destroyed as IPoIB will cache the MLID.. 516 * do is to create the group if it doesn't exist as that
517 * most closely emulates the behavior, from a user space
 518 application perspective, of Ethernet multicast
519 * operation. For now, we do a full join, maybe later
520 * when the core IB layers support send only joins we
521 * will use them.
517 */ 522 */
518#if 1 523#if 0
519 if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
520 comp_mask &= ~IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;
521#else
522 if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) 524 if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
523 rec.join_state = 4; 525 rec.join_state = 4;
524#endif 526#endif
@@ -675,7 +677,7 @@ int ipoib_mcast_stop_thread(struct net_device *dev)
675 return 0; 677 return 0;
676} 678}
677 679
678static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast) 680int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
679{ 681{
680 struct ipoib_dev_priv *priv = netdev_priv(dev); 682 struct ipoib_dev_priv *priv = netdev_priv(dev);
681 int ret = 0; 683 int ret = 0;
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 1ace5d83a4d7..f58ff96b6cbb 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -97,6 +97,11 @@ unsigned int iser_max_sectors = ISER_DEF_MAX_SECTORS;
97module_param_named(max_sectors, iser_max_sectors, uint, S_IRUGO | S_IWUSR); 97module_param_named(max_sectors, iser_max_sectors, uint, S_IRUGO | S_IWUSR);
 98MODULE_PARM_DESC(max_sectors, "Max number of sectors in a single scsi command (default:1024)"); 98MODULE_PARM_DESC(max_sectors, "Max number of sectors in a single scsi command (default:1024)");
99 99
100bool iser_always_reg = true;
101module_param_named(always_register, iser_always_reg, bool, S_IRUGO);
102MODULE_PARM_DESC(always_register,
103 "Always register memory, even for continuous memory regions (default:true)");
104
100bool iser_pi_enable = false; 105bool iser_pi_enable = false;
101module_param_named(pi_enable, iser_pi_enable, bool, S_IRUGO); 106module_param_named(pi_enable, iser_pi_enable, bool, S_IRUGO);
102MODULE_PARM_DESC(pi_enable, "Enable T10-PI offload support (default:disabled)"); 107MODULE_PARM_DESC(pi_enable, "Enable T10-PI offload support (default:disabled)");
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 86f6583485ef..a5edd6ede692 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -611,6 +611,7 @@ extern int iser_debug_level;
611extern bool iser_pi_enable; 611extern bool iser_pi_enable;
612extern int iser_pi_guard; 612extern int iser_pi_guard;
613extern unsigned int iser_max_sectors; 613extern unsigned int iser_max_sectors;
614extern bool iser_always_reg;
614 615
615int iser_assign_reg_ops(struct iser_device *device); 616int iser_assign_reg_ops(struct iser_device *device);
616 617
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 2493cc748db8..4c46d67d37a1 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -803,11 +803,12 @@ static int
803iser_reg_prot_sg(struct iscsi_iser_task *task, 803iser_reg_prot_sg(struct iscsi_iser_task *task,
804 struct iser_data_buf *mem, 804 struct iser_data_buf *mem,
805 struct iser_fr_desc *desc, 805 struct iser_fr_desc *desc,
806 bool use_dma_key,
806 struct iser_mem_reg *reg) 807 struct iser_mem_reg *reg)
807{ 808{
808 struct iser_device *device = task->iser_conn->ib_conn.device; 809 struct iser_device *device = task->iser_conn->ib_conn.device;
809 810
810 if (mem->dma_nents == 1) 811 if (use_dma_key)
811 return iser_reg_dma(device, mem, reg); 812 return iser_reg_dma(device, mem, reg);
812 813
813 return device->reg_ops->reg_mem(task, mem, &desc->pi_ctx->rsc, reg); 814 return device->reg_ops->reg_mem(task, mem, &desc->pi_ctx->rsc, reg);
@@ -817,11 +818,12 @@ static int
817iser_reg_data_sg(struct iscsi_iser_task *task, 818iser_reg_data_sg(struct iscsi_iser_task *task,
818 struct iser_data_buf *mem, 819 struct iser_data_buf *mem,
819 struct iser_fr_desc *desc, 820 struct iser_fr_desc *desc,
821 bool use_dma_key,
820 struct iser_mem_reg *reg) 822 struct iser_mem_reg *reg)
821{ 823{
822 struct iser_device *device = task->iser_conn->ib_conn.device; 824 struct iser_device *device = task->iser_conn->ib_conn.device;
823 825
824 if (mem->dma_nents == 1) 826 if (use_dma_key)
825 return iser_reg_dma(device, mem, reg); 827 return iser_reg_dma(device, mem, reg);
826 828
827 return device->reg_ops->reg_mem(task, mem, &desc->rsc, reg); 829 return device->reg_ops->reg_mem(task, mem, &desc->rsc, reg);
@@ -836,14 +838,17 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task,
836 struct iser_mem_reg *reg = &task->rdma_reg[dir]; 838 struct iser_mem_reg *reg = &task->rdma_reg[dir];
837 struct iser_mem_reg *data_reg; 839 struct iser_mem_reg *data_reg;
838 struct iser_fr_desc *desc = NULL; 840 struct iser_fr_desc *desc = NULL;
841 bool use_dma_key;
839 int err; 842 int err;
840 843
841 err = iser_handle_unaligned_buf(task, mem, dir); 844 err = iser_handle_unaligned_buf(task, mem, dir);
842 if (unlikely(err)) 845 if (unlikely(err))
843 return err; 846 return err;
844 847
845 if (mem->dma_nents != 1 || 848 use_dma_key = (mem->dma_nents == 1 && !iser_always_reg &&
846 scsi_get_prot_op(task->sc) != SCSI_PROT_NORMAL) { 849 scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL);
850
851 if (!use_dma_key) {
847 desc = device->reg_ops->reg_desc_get(ib_conn); 852 desc = device->reg_ops->reg_desc_get(ib_conn);
848 reg->mem_h = desc; 853 reg->mem_h = desc;
849 } 854 }
@@ -853,7 +858,7 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task,
853 else 858 else
854 data_reg = &task->desc.data_reg; 859 data_reg = &task->desc.data_reg;
855 860
856 err = iser_reg_data_sg(task, mem, desc, data_reg); 861 err = iser_reg_data_sg(task, mem, desc, use_dma_key, data_reg);
857 if (unlikely(err)) 862 if (unlikely(err))
858 goto err_reg; 863 goto err_reg;
859 864
@@ -866,7 +871,8 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task,
866 if (unlikely(err)) 871 if (unlikely(err))
867 goto err_reg; 872 goto err_reg;
868 873
869 err = iser_reg_prot_sg(task, mem, desc, prot_reg); 874 err = iser_reg_prot_sg(task, mem, desc,
875 use_dma_key, prot_reg);
870 if (unlikely(err)) 876 if (unlikely(err))
871 goto err_reg; 877 goto err_reg;
872 } 878 }
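The registration decision collapses into one predicate: the DMA-lkey shortcut is taken only for a single-entry SG list, only when the new always_register knob is off, and only when no T10-PI protection is in flight. Reduced to a tiny checkable sketch:

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the use_dma_key computation in iser_reg_rdma_mem(). */
static bool use_dma_key(int dma_nents, bool always_reg, bool prot_normal)
{
	return dma_nents == 1 && !always_reg && prot_normal;
}

int main(void)
{
	/* default module parameter always_register=true: never shortcut */
	printf("%d\n", use_dma_key(1, true,  true));	/* 0 */
	printf("%d\n", use_dma_key(1, false, true));	/* 1 */
	printf("%d\n", use_dma_key(4, false, true));	/* 0 */
	return 0;
}

Note the guarded ib_get_dma_mr() call in the next file: the catch-all MR carries remote read/write access, which is exactly what the always_register=true default avoids creating.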
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index ae70cc1463ac..85132d867bc8 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -133,11 +133,15 @@ static int iser_create_device_ib_res(struct iser_device *device)
133 (unsigned long)comp); 133 (unsigned long)comp);
134 } 134 }
135 135
136 device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE | 136 if (!iser_always_reg) {
137 IB_ACCESS_REMOTE_WRITE | 137 int access = IB_ACCESS_LOCAL_WRITE |
138 IB_ACCESS_REMOTE_READ); 138 IB_ACCESS_REMOTE_WRITE |
139 if (IS_ERR(device->mr)) 139 IB_ACCESS_REMOTE_READ;
140 goto dma_mr_err; 140
141 device->mr = ib_get_dma_mr(device->pd, access);
142 if (IS_ERR(device->mr))
143 goto dma_mr_err;
144 }
141 145
142 INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device, 146 INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device,
143 iser_event_handler); 147 iser_event_handler);
@@ -147,7 +151,8 @@ static int iser_create_device_ib_res(struct iser_device *device)
147 return 0; 151 return 0;
148 152
149handler_err: 153handler_err:
150 ib_dereg_mr(device->mr); 154 if (device->mr)
155 ib_dereg_mr(device->mr);
151dma_mr_err: 156dma_mr_err:
152 for (i = 0; i < device->comps_used; i++) 157 for (i = 0; i < device->comps_used; i++)
153 tasklet_kill(&device->comps[i].tasklet); 158 tasklet_kill(&device->comps[i].tasklet);
@@ -173,7 +178,6 @@ comps_err:
173static void iser_free_device_ib_res(struct iser_device *device) 178static void iser_free_device_ib_res(struct iser_device *device)
174{ 179{
175 int i; 180 int i;
176 BUG_ON(device->mr == NULL);
177 181
178 for (i = 0; i < device->comps_used; i++) { 182 for (i = 0; i < device->comps_used; i++) {
179 struct iser_comp *comp = &device->comps[i]; 183 struct iser_comp *comp = &device->comps[i];
@@ -184,7 +188,8 @@ static void iser_free_device_ib_res(struct iser_device *device)
184 } 188 }
185 189
186 (void)ib_unregister_event_handler(&device->event_handler); 190 (void)ib_unregister_event_handler(&device->event_handler);
187 (void)ib_dereg_mr(device->mr); 191 if (device->mr)
192 (void)ib_dereg_mr(device->mr);
188 ib_dealloc_pd(device->pd); 193 ib_dealloc_pd(device->pd);
189 194
190 kfree(device->comps); 195 kfree(device->comps);
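With always_register left at its default the catch-all DMA MR is never created, so each teardown and error path above grows an if (device->mr) guard and the old BUG_ON(device->mr == NULL) is dropped. The optional-resource shape, as a small runnable sketch:

#include <stdlib.h>

struct device_res {
	void *mr;	/* allocated only when the shortcut is enabled */
};

static int setup(struct device_res *d, int want_mr)
{
	d->mr = NULL;
	if (want_mr) {
		d->mr = malloc(64);
		if (!d->mr)
			return -1;
	}
	return 0;
}

static void teardown(struct device_res *d)
{
	/* NULL guard replaces the old BUG_ON(d->mr == NULL) */
	free(d->mr);	/* free(NULL) is a defined no-op */
	d->mr = NULL;
}

int main(void)
{
	struct device_res d;

	if (setup(&d, 0) == 0)
		teardown(&d);
	return 0;
}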
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 403bd29443b8..aa59037d7504 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -238,8 +238,6 @@ isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
238 rx_sg->lkey = device->pd->local_dma_lkey; 238 rx_sg->lkey = device->pd->local_dma_lkey;
239 } 239 }
240 240
241 isert_conn->rx_desc_head = 0;
242
243 return 0; 241 return 0;
244 242
245dma_map_fail: 243dma_map_fail:
@@ -634,7 +632,7 @@ static void
634isert_init_conn(struct isert_conn *isert_conn) 632isert_init_conn(struct isert_conn *isert_conn)
635{ 633{
636 isert_conn->state = ISER_CONN_INIT; 634 isert_conn->state = ISER_CONN_INIT;
637 INIT_LIST_HEAD(&isert_conn->accept_node); 635 INIT_LIST_HEAD(&isert_conn->node);
638 init_completion(&isert_conn->login_comp); 636 init_completion(&isert_conn->login_comp);
639 init_completion(&isert_conn->login_req_comp); 637 init_completion(&isert_conn->login_req_comp);
640 init_completion(&isert_conn->wait); 638 init_completion(&isert_conn->wait);
@@ -762,28 +760,15 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
762 ret = isert_rdma_post_recvl(isert_conn); 760 ret = isert_rdma_post_recvl(isert_conn);
763 if (ret) 761 if (ret)
764 goto out_conn_dev; 762 goto out_conn_dev;
765 /*
766 * Obtain the second reference now before isert_rdma_accept() to
767 * ensure that any initiator generated REJECT CM event that occurs
768 * asynchronously won't drop the last reference until the error path
769 * in iscsi_target_login_sess_out() does it's ->iscsit_free_conn() ->
770 * isert_free_conn() -> isert_put_conn() -> kref_put().
771 */
772 if (!kref_get_unless_zero(&isert_conn->kref)) {
773 isert_warn("conn %p connect_release is running\n", isert_conn);
774 goto out_conn_dev;
775 }
776 763
777 ret = isert_rdma_accept(isert_conn); 764 ret = isert_rdma_accept(isert_conn);
778 if (ret) 765 if (ret)
779 goto out_conn_dev; 766 goto out_conn_dev;
780 767
781 mutex_lock(&isert_np->np_accept_mutex); 768 mutex_lock(&isert_np->mutex);
782 list_add_tail(&isert_conn->accept_node, &isert_np->np_accept_list); 769 list_add_tail(&isert_conn->node, &isert_np->accepted);
783 mutex_unlock(&isert_np->np_accept_mutex); 770 mutex_unlock(&isert_np->mutex);
784 771
785 isert_info("np %p: Allow accept_np to continue\n", np);
786 up(&isert_np->np_sem);
787 return 0; 772 return 0;
788 773
789out_conn_dev: 774out_conn_dev:
@@ -831,13 +816,21 @@ static void
831isert_connected_handler(struct rdma_cm_id *cma_id) 816isert_connected_handler(struct rdma_cm_id *cma_id)
832{ 817{
833 struct isert_conn *isert_conn = cma_id->qp->qp_context; 818 struct isert_conn *isert_conn = cma_id->qp->qp_context;
819 struct isert_np *isert_np = cma_id->context;
834 820
835 isert_info("conn %p\n", isert_conn); 821 isert_info("conn %p\n", isert_conn);
836 822
837 mutex_lock(&isert_conn->mutex); 823 mutex_lock(&isert_conn->mutex);
838 if (isert_conn->state != ISER_CONN_FULL_FEATURE) 824 isert_conn->state = ISER_CONN_UP;
839 isert_conn->state = ISER_CONN_UP; 825 kref_get(&isert_conn->kref);
840 mutex_unlock(&isert_conn->mutex); 826 mutex_unlock(&isert_conn->mutex);
827
828 mutex_lock(&isert_np->mutex);
829 list_move_tail(&isert_conn->node, &isert_np->pending);
830 mutex_unlock(&isert_np->mutex);
831
832 isert_info("np %p: Allow accept_np to continue\n", isert_np);
833 up(&isert_np->sem);
841} 834}
842 835
843static void 836static void
@@ -903,14 +896,14 @@ isert_np_cma_handler(struct isert_np *isert_np,
903 896
904 switch (event) { 897 switch (event) {
905 case RDMA_CM_EVENT_DEVICE_REMOVAL: 898 case RDMA_CM_EVENT_DEVICE_REMOVAL:
906 isert_np->np_cm_id = NULL; 899 isert_np->cm_id = NULL;
907 break; 900 break;
908 case RDMA_CM_EVENT_ADDR_CHANGE: 901 case RDMA_CM_EVENT_ADDR_CHANGE:
909 isert_np->np_cm_id = isert_setup_id(isert_np); 902 isert_np->cm_id = isert_setup_id(isert_np);
910 if (IS_ERR(isert_np->np_cm_id)) { 903 if (IS_ERR(isert_np->cm_id)) {
911 isert_err("isert np %p setup id failed: %ld\n", 904 isert_err("isert np %p setup id failed: %ld\n",
912 isert_np, PTR_ERR(isert_np->np_cm_id)); 905 isert_np, PTR_ERR(isert_np->cm_id));
913 isert_np->np_cm_id = NULL; 906 isert_np->cm_id = NULL;
914 } 907 }
915 break; 908 break;
916 default: 909 default:
@@ -929,7 +922,7 @@ isert_disconnected_handler(struct rdma_cm_id *cma_id,
929 struct isert_conn *isert_conn; 922 struct isert_conn *isert_conn;
930 bool terminating = false; 923 bool terminating = false;
931 924
932 if (isert_np->np_cm_id == cma_id) 925 if (isert_np->cm_id == cma_id)
933 return isert_np_cma_handler(cma_id->context, event); 926 return isert_np_cma_handler(cma_id->context, event);
934 927
935 isert_conn = cma_id->qp->qp_context; 928 isert_conn = cma_id->qp->qp_context;
@@ -945,13 +938,13 @@ isert_disconnected_handler(struct rdma_cm_id *cma_id,
945 if (terminating) 938 if (terminating)
946 goto out; 939 goto out;
947 940
948 mutex_lock(&isert_np->np_accept_mutex); 941 mutex_lock(&isert_np->mutex);
949 if (!list_empty(&isert_conn->accept_node)) { 942 if (!list_empty(&isert_conn->node)) {
950 list_del_init(&isert_conn->accept_node); 943 list_del_init(&isert_conn->node);
951 isert_put_conn(isert_conn); 944 isert_put_conn(isert_conn);
952 queue_work(isert_release_wq, &isert_conn->release_work); 945 queue_work(isert_release_wq, &isert_conn->release_work);
953 } 946 }
954 mutex_unlock(&isert_np->np_accept_mutex); 947 mutex_unlock(&isert_np->mutex);
955 948
956out: 949out:
957 return 0; 950 return 0;
@@ -962,6 +955,7 @@ isert_connect_error(struct rdma_cm_id *cma_id)
962{ 955{
963 struct isert_conn *isert_conn = cma_id->qp->qp_context; 956 struct isert_conn *isert_conn = cma_id->qp->qp_context;
964 957
958 list_del_init(&isert_conn->node);
965 isert_conn->cm_id = NULL; 959 isert_conn->cm_id = NULL;
966 isert_put_conn(isert_conn); 960 isert_put_conn(isert_conn);
967 961
@@ -1006,35 +1000,51 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
1006} 1000}
1007 1001
1008static int 1002static int
1009isert_post_recv(struct isert_conn *isert_conn, u32 count) 1003isert_post_recvm(struct isert_conn *isert_conn, u32 count)
1010{ 1004{
1011 struct ib_recv_wr *rx_wr, *rx_wr_failed; 1005 struct ib_recv_wr *rx_wr, *rx_wr_failed;
1012 int i, ret; 1006 int i, ret;
1013 unsigned int rx_head = isert_conn->rx_desc_head;
1014 struct iser_rx_desc *rx_desc; 1007 struct iser_rx_desc *rx_desc;
1015 1008
1016 for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) { 1009 for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
1017 rx_desc = &isert_conn->rx_descs[rx_head]; 1010 rx_desc = &isert_conn->rx_descs[i];
1018 rx_wr->wr_id = (uintptr_t)rx_desc; 1011 rx_wr->wr_id = (uintptr_t)rx_desc;
1019 rx_wr->sg_list = &rx_desc->rx_sg; 1012 rx_wr->sg_list = &rx_desc->rx_sg;
1020 rx_wr->num_sge = 1; 1013 rx_wr->num_sge = 1;
1021 rx_wr->next = rx_wr + 1; 1014 rx_wr->next = rx_wr + 1;
1022 rx_head = (rx_head + 1) & (ISERT_QP_MAX_RECV_DTOS - 1);
1023 } 1015 }
1024
1025 rx_wr--; 1016 rx_wr--;
1026 rx_wr->next = NULL; /* mark end of work requests list */ 1017 rx_wr->next = NULL; /* mark end of work requests list */
1027 1018
1028 isert_conn->post_recv_buf_count += count; 1019 isert_conn->post_recv_buf_count += count;
1029 ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr, 1020 ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr,
1030 &rx_wr_failed); 1021 &rx_wr_failed);
1031 if (ret) { 1022 if (ret) {
1032 isert_err("ib_post_recv() failed with ret: %d\n", ret); 1023 isert_err("ib_post_recv() failed with ret: %d\n", ret);
1033 isert_conn->post_recv_buf_count -= count; 1024 isert_conn->post_recv_buf_count -= count;
1034 } else {
1035 isert_dbg("Posted %d RX buffers\n", count);
1036 isert_conn->rx_desc_head = rx_head;
1037 } 1025 }
1026
1027 return ret;
1028}
1029
1030static int
1031isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
1032{
1033 struct ib_recv_wr *rx_wr_failed, rx_wr;
1034 int ret;
1035
1036 rx_wr.wr_id = (uintptr_t)rx_desc;
1037 rx_wr.sg_list = &rx_desc->rx_sg;
1038 rx_wr.num_sge = 1;
1039 rx_wr.next = NULL;
1040
1041 isert_conn->post_recv_buf_count++;
1042 ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_failed);
1043 if (ret) {
1044 isert_err("ib_post_recv() failed with ret: %d\n", ret);
1045 isert_conn->post_recv_buf_count--;
1046 }
1047
1038 return ret; 1048 return ret;
1039} 1049}
1040 1050
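The pair of functions above splits receive posting in two: isert_post_recvm() fills the whole ring once at login time, and isert_post_recv() reposts exactly the descriptor a completed command consumed, replacing the old rolling rx_desc_head plus batched-refill bookkeeping. A user-space model of the invariant this preserves, namely that the number of outstanding receives never drifts (names hypothetical):

#include <assert.h>
#include <stdio.h>

#define RING_SIZE 8

static int outstanding;

static void post_all(void)	/* like isert_post_recvm() at login */
{
	outstanding = RING_SIZE;
}

static void complete_one(void)	/* an rx completion consumes a buffer */
{
	outstanding--;
}

static void repost_one(void)	/* like isert_post_recv() before a reply */
{
	outstanding++;
}

int main(void)
{
	int i;

	post_all();
	for (i = 0; i < 100; i++) {
		complete_one();
		repost_one();	/* paired 1:1 with the retiring command */
		assert(outstanding == RING_SIZE);
	}
	printf("outstanding held at %d\n", outstanding);
	return 0;
}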
@@ -1205,7 +1215,8 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
1205 if (ret) 1215 if (ret)
1206 return ret; 1216 return ret;
1207 1217
1208 ret = isert_post_recv(isert_conn, ISERT_MIN_POSTED_RX); 1218 ret = isert_post_recvm(isert_conn,
1219 ISERT_QP_MAX_RECV_DTOS);
1209 if (ret) 1220 if (ret)
1210 return ret; 1221 return ret;
1211 1222
@@ -1278,7 +1289,7 @@ isert_rx_login_req(struct isert_conn *isert_conn)
1278} 1289}
1279 1290
1280static struct iscsi_cmd 1291static struct iscsi_cmd
1281*isert_allocate_cmd(struct iscsi_conn *conn) 1292*isert_allocate_cmd(struct iscsi_conn *conn, struct iser_rx_desc *rx_desc)
1282{ 1293{
1283 struct isert_conn *isert_conn = conn->context; 1294 struct isert_conn *isert_conn = conn->context;
1284 struct isert_cmd *isert_cmd; 1295 struct isert_cmd *isert_cmd;
@@ -1292,6 +1303,7 @@ static struct iscsi_cmd
1292 isert_cmd = iscsit_priv_cmd(cmd); 1303 isert_cmd = iscsit_priv_cmd(cmd);
1293 isert_cmd->conn = isert_conn; 1304 isert_cmd->conn = isert_conn;
1294 isert_cmd->iscsi_cmd = cmd; 1305 isert_cmd->iscsi_cmd = cmd;
1306 isert_cmd->rx_desc = rx_desc;
1295 1307
1296 return cmd; 1308 return cmd;
1297} 1309}
@@ -1303,9 +1315,9 @@ isert_handle_scsi_cmd(struct isert_conn *isert_conn,
1303{ 1315{
1304 struct iscsi_conn *conn = isert_conn->conn; 1316 struct iscsi_conn *conn = isert_conn->conn;
1305 struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf; 1317 struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
1306 struct scatterlist *sg;
1307 int imm_data, imm_data_len, unsol_data, sg_nents, rc; 1318 int imm_data, imm_data_len, unsol_data, sg_nents, rc;
1308 bool dump_payload = false; 1319 bool dump_payload = false;
1320 unsigned int data_len;
1309 1321
1310 rc = iscsit_setup_scsi_cmd(conn, cmd, buf); 1322 rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
1311 if (rc < 0) 1323 if (rc < 0)
@@ -1314,7 +1326,10 @@ isert_handle_scsi_cmd(struct isert_conn *isert_conn,
1314 imm_data = cmd->immediate_data; 1326 imm_data = cmd->immediate_data;
1315 imm_data_len = cmd->first_burst_len; 1327 imm_data_len = cmd->first_burst_len;
1316 unsol_data = cmd->unsolicited_data; 1328 unsol_data = cmd->unsolicited_data;
1329 data_len = cmd->se_cmd.data_length;
1317 1330
1331 if (imm_data && imm_data_len == data_len)
1332 cmd->se_cmd.se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
1318 rc = iscsit_process_scsi_cmd(conn, cmd, hdr); 1333 rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
1319 if (rc < 0) { 1334 if (rc < 0) {
1320 return 0; 1335 return 0;
@@ -1326,13 +1341,20 @@ isert_handle_scsi_cmd(struct isert_conn *isert_conn,
1326 if (!imm_data) 1341 if (!imm_data)
1327 return 0; 1342 return 0;
1328 1343
1329 sg = &cmd->se_cmd.t_data_sg[0]; 1344 if (imm_data_len != data_len) {
1330 sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE)); 1345 sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));
1331 1346 sg_copy_from_buffer(cmd->se_cmd.t_data_sg, sg_nents,
1332 isert_dbg("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n", 1347 &rx_desc->data[0], imm_data_len);
1333 sg, sg_nents, &rx_desc->data[0], imm_data_len); 1348 isert_dbg("Copy Immediate sg_nents: %u imm_data_len: %d\n",
1334 1349 sg_nents, imm_data_len);
1335 sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len); 1350 } else {
1351 sg_init_table(&isert_cmd->sg, 1);
1352 cmd->se_cmd.t_data_sg = &isert_cmd->sg;
1353 cmd->se_cmd.t_data_nents = 1;
1354 sg_set_buf(&isert_cmd->sg, &rx_desc->data[0], imm_data_len);
1355 isert_dbg("Transfer Immediate imm_data_len: %d\n",
1356 imm_data_len);
1357 }
1336 1358
1337 cmd->write_data_done += imm_data_len; 1359 cmd->write_data_done += imm_data_len;
1338 1360
@@ -1407,6 +1429,15 @@ isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
1407 if (rc < 0) 1429 if (rc < 0)
1408 return rc; 1430 return rc;
1409 1431
1432 /*
1433 * multiple data-outs on the same command can arrive -
 1434 * so post the buffer beforehand
1435 */
1436 rc = isert_post_recv(isert_conn, rx_desc);
1437 if (rc) {
1438 isert_err("ib_post_recv failed with %d\n", rc);
1439 return rc;
1440 }
1410 return 0; 1441 return 0;
1411} 1442}
1412 1443
@@ -1479,7 +1510,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
1479 1510
1480 switch (opcode) { 1511 switch (opcode) {
1481 case ISCSI_OP_SCSI_CMD: 1512 case ISCSI_OP_SCSI_CMD:
1482 cmd = isert_allocate_cmd(conn); 1513 cmd = isert_allocate_cmd(conn, rx_desc);
1483 if (!cmd) 1514 if (!cmd)
1484 break; 1515 break;
1485 1516
@@ -1493,7 +1524,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
1493 rx_desc, (unsigned char *)hdr); 1524 rx_desc, (unsigned char *)hdr);
1494 break; 1525 break;
1495 case ISCSI_OP_NOOP_OUT: 1526 case ISCSI_OP_NOOP_OUT:
1496 cmd = isert_allocate_cmd(conn); 1527 cmd = isert_allocate_cmd(conn, rx_desc);
1497 if (!cmd) 1528 if (!cmd)
1498 break; 1529 break;
1499 1530
@@ -1506,7 +1537,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
1506 (unsigned char *)hdr); 1537 (unsigned char *)hdr);
1507 break; 1538 break;
1508 case ISCSI_OP_SCSI_TMFUNC: 1539 case ISCSI_OP_SCSI_TMFUNC:
1509 cmd = isert_allocate_cmd(conn); 1540 cmd = isert_allocate_cmd(conn, rx_desc);
1510 if (!cmd) 1541 if (!cmd)
1511 break; 1542 break;
1512 1543
@@ -1514,22 +1545,20 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
1514 (unsigned char *)hdr); 1545 (unsigned char *)hdr);
1515 break; 1546 break;
1516 case ISCSI_OP_LOGOUT: 1547 case ISCSI_OP_LOGOUT:
1517 cmd = isert_allocate_cmd(conn); 1548 cmd = isert_allocate_cmd(conn, rx_desc);
1518 if (!cmd) 1549 if (!cmd)
1519 break; 1550 break;
1520 1551
1521 ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr); 1552 ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
1522 break; 1553 break;
1523 case ISCSI_OP_TEXT: 1554 case ISCSI_OP_TEXT:
1524 if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF) { 1555 if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF)
1525 cmd = iscsit_find_cmd_from_itt(conn, hdr->itt); 1556 cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
1526 if (!cmd) 1557 else
1527 break; 1558 cmd = isert_allocate_cmd(conn, rx_desc);
1528 } else { 1559
1529 cmd = isert_allocate_cmd(conn); 1560 if (!cmd)
1530 if (!cmd) 1561 break;
1531 break;
1532 }
1533 1562
1534 isert_cmd = iscsit_priv_cmd(cmd); 1563 isert_cmd = iscsit_priv_cmd(cmd);
1535 ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd, 1564 ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
@@ -1589,7 +1618,7 @@ isert_rcv_completion(struct iser_rx_desc *desc,
1589 struct ib_device *ib_dev = isert_conn->cm_id->device; 1618 struct ib_device *ib_dev = isert_conn->cm_id->device;
1590 struct iscsi_hdr *hdr; 1619 struct iscsi_hdr *hdr;
1591 u64 rx_dma; 1620 u64 rx_dma;
1592 int rx_buflen, outstanding; 1621 int rx_buflen;
1593 1622
1594 if ((char *)desc == isert_conn->login_req_buf) { 1623 if ((char *)desc == isert_conn->login_req_buf) {
1595 rx_dma = isert_conn->login_req_dma; 1624 rx_dma = isert_conn->login_req_dma;
@@ -1629,22 +1658,6 @@ isert_rcv_completion(struct iser_rx_desc *desc,
1629 DMA_FROM_DEVICE); 1658 DMA_FROM_DEVICE);
1630 1659
1631 isert_conn->post_recv_buf_count--; 1660 isert_conn->post_recv_buf_count--;
1632 isert_dbg("Decremented post_recv_buf_count: %d\n",
1633 isert_conn->post_recv_buf_count);
1634
1635 if ((char *)desc == isert_conn->login_req_buf)
1636 return;
1637
1638 outstanding = isert_conn->post_recv_buf_count;
1639 if (outstanding + ISERT_MIN_POSTED_RX <= ISERT_QP_MAX_RECV_DTOS) {
1640 int err, count = min(ISERT_QP_MAX_RECV_DTOS - outstanding,
1641 ISERT_MIN_POSTED_RX);
1642 err = isert_post_recv(isert_conn, count);
1643 if (err) {
1644 isert_err("isert_post_recv() count: %d failed, %d\n",
1645 count, err);
1646 }
1647 }
1648} 1661}
1649 1662
1650static int 1663static int
@@ -2156,6 +2169,12 @@ isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
2156 struct ib_send_wr *wr_failed; 2169 struct ib_send_wr *wr_failed;
2157 int ret; 2170 int ret;
2158 2171
2172 ret = isert_post_recv(isert_conn, isert_cmd->rx_desc);
2173 if (ret) {
2174 isert_err("ib_post_recv failed with %d\n", ret);
2175 return ret;
2176 }
2177
2159 ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr, 2178 ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr,
2160 &wr_failed); 2179 &wr_failed);
2161 if (ret) { 2180 if (ret) {
@@ -2950,6 +2969,12 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2950 &isert_cmd->tx_desc.send_wr); 2969 &isert_cmd->tx_desc.send_wr);
2951 isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr; 2970 isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr;
2952 wr->send_wr_num += 1; 2971 wr->send_wr_num += 1;
2972
2973 rc = isert_post_recv(isert_conn, isert_cmd->rx_desc);
2974 if (rc) {
2975 isert_err("ib_post_recv failed with %d\n", rc);
2976 return rc;
2977 }
2953 } 2978 }
2954 2979
2955 rc = ib_post_send(isert_conn->qp, wr->send_wr, &wr_failed); 2980 rc = ib_post_send(isert_conn->qp, wr->send_wr, &wr_failed);
@@ -2999,9 +3024,16 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
2999static int 3024static int
3000isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) 3025isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
3001{ 3026{
3002 int ret; 3027 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
3028 int ret = 0;
3003 3029
3004 switch (state) { 3030 switch (state) {
3031 case ISTATE_REMOVE:
3032 spin_lock_bh(&conn->cmd_lock);
3033 list_del_init(&cmd->i_conn_node);
3034 spin_unlock_bh(&conn->cmd_lock);
3035 isert_put_cmd(isert_cmd, true);
3036 break;
3005 case ISTATE_SEND_NOPIN_WANT_RESPONSE: 3037 case ISTATE_SEND_NOPIN_WANT_RESPONSE:
3006 ret = isert_put_nopin(cmd, conn, false); 3038 ret = isert_put_nopin(cmd, conn, false);
3007 break; 3039 break;
@@ -3106,10 +3138,10 @@ isert_setup_np(struct iscsi_np *np,
3106 isert_err("Unable to allocate struct isert_np\n"); 3138 isert_err("Unable to allocate struct isert_np\n");
3107 return -ENOMEM; 3139 return -ENOMEM;
3108 } 3140 }
3109 sema_init(&isert_np->np_sem, 0); 3141 sema_init(&isert_np->sem, 0);
3110 mutex_init(&isert_np->np_accept_mutex); 3142 mutex_init(&isert_np->mutex);
3111 INIT_LIST_HEAD(&isert_np->np_accept_list); 3143 INIT_LIST_HEAD(&isert_np->accepted);
3112 init_completion(&isert_np->np_login_comp); 3144 INIT_LIST_HEAD(&isert_np->pending);
3113 isert_np->np = np; 3145 isert_np->np = np;
3114 3146
3115 /* 3147 /*
@@ -3125,7 +3157,7 @@ isert_setup_np(struct iscsi_np *np,
3125 goto out; 3157 goto out;
3126 } 3158 }
3127 3159
3128 isert_np->np_cm_id = isert_lid; 3160 isert_np->cm_id = isert_lid;
3129 np->np_context = isert_np; 3161 np->np_context = isert_np;
3130 3162
3131 return 0; 3163 return 0;
@@ -3214,7 +3246,7 @@ isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
3214 int ret; 3246 int ret;
3215 3247
3216accept_wait: 3248accept_wait:
3217 ret = down_interruptible(&isert_np->np_sem); 3249 ret = down_interruptible(&isert_np->sem);
3218 if (ret) 3250 if (ret)
3219 return -ENODEV; 3251 return -ENODEV;
3220 3252
@@ -3231,15 +3263,15 @@ accept_wait:
3231 } 3263 }
3232 spin_unlock_bh(&np->np_thread_lock); 3264 spin_unlock_bh(&np->np_thread_lock);
3233 3265
3234 mutex_lock(&isert_np->np_accept_mutex); 3266 mutex_lock(&isert_np->mutex);
3235 if (list_empty(&isert_np->np_accept_list)) { 3267 if (list_empty(&isert_np->pending)) {
3236 mutex_unlock(&isert_np->np_accept_mutex); 3268 mutex_unlock(&isert_np->mutex);
3237 goto accept_wait; 3269 goto accept_wait;
3238 } 3270 }
3239 isert_conn = list_first_entry(&isert_np->np_accept_list, 3271 isert_conn = list_first_entry(&isert_np->pending,
3240 struct isert_conn, accept_node); 3272 struct isert_conn, node);
3241 list_del_init(&isert_conn->accept_node); 3273 list_del_init(&isert_conn->node);
3242 mutex_unlock(&isert_np->np_accept_mutex); 3274 mutex_unlock(&isert_np->mutex);
3243 3275
3244 conn->context = isert_conn; 3276 conn->context = isert_conn;
3245 isert_conn->conn = conn; 3277 isert_conn->conn = conn;
@@ -3257,28 +3289,39 @@ isert_free_np(struct iscsi_np *np)
3257 struct isert_np *isert_np = np->np_context; 3289 struct isert_np *isert_np = np->np_context;
3258 struct isert_conn *isert_conn, *n; 3290 struct isert_conn *isert_conn, *n;
3259 3291
3260 if (isert_np->np_cm_id) 3292 if (isert_np->cm_id)
3261 rdma_destroy_id(isert_np->np_cm_id); 3293 rdma_destroy_id(isert_np->cm_id);
3262 3294
3263 /* 3295 /*
 3264 * FIXME: At this point we don't have a good way to ensure 3296 * FIXME: At this point we don't have a good way to ensure
 3265 * that we don't have hanging connections that 3297 * that we don't have hanging connections that
3266 * completed RDMA establishment but didn't start iscsi login 3298 * completed RDMA establishment but didn't start iscsi login
3267 * process. So work-around this by cleaning up what ever piled 3299 * process. So work-around this by cleaning up what ever piled
3268 * up in np_accept_list. 3300 * up in accepted and pending lists.
3269 */ 3301 */
3270 mutex_lock(&isert_np->np_accept_mutex); 3302 mutex_lock(&isert_np->mutex);
3271 if (!list_empty(&isert_np->np_accept_list)) { 3303 if (!list_empty(&isert_np->pending)) {
3272 isert_info("Still have isert connections, cleaning up...\n"); 3304 isert_info("Still have isert pending connections\n");
3305 list_for_each_entry_safe(isert_conn, n,
3306 &isert_np->pending,
3307 node) {
3308 isert_info("cleaning isert_conn %p state (%d)\n",
3309 isert_conn, isert_conn->state);
3310 isert_connect_release(isert_conn);
3311 }
3312 }
3313
3314 if (!list_empty(&isert_np->accepted)) {
3315 isert_info("Still have isert accepted connections\n");
3273 list_for_each_entry_safe(isert_conn, n, 3316 list_for_each_entry_safe(isert_conn, n,
3274 &isert_np->np_accept_list, 3317 &isert_np->accepted,
3275 accept_node) { 3318 node) {
3276 isert_info("cleaning isert_conn %p state (%d)\n", 3319 isert_info("cleaning isert_conn %p state (%d)\n",
3277 isert_conn, isert_conn->state); 3320 isert_conn, isert_conn->state);
3278 isert_connect_release(isert_conn); 3321 isert_connect_release(isert_conn);
3279 } 3322 }
3280 } 3323 }
3281 mutex_unlock(&isert_np->np_accept_mutex); 3324 mutex_unlock(&isert_np->mutex);
3282 3325
3283 np->np_context = NULL; 3326 np->np_context = NULL;
3284 kfree(isert_np); 3327 kfree(isert_np);
@@ -3345,6 +3388,41 @@ isert_wait4flush(struct isert_conn *isert_conn)
3345 wait_for_completion(&isert_conn->wait_comp_err); 3388 wait_for_completion(&isert_conn->wait_comp_err);
3346} 3389}
3347 3390
3391/**
3392 * isert_put_unsol_pending_cmds() - Drop commands waiting for
 3393 * unsolicited data-out
3394 * @conn: iscsi connection
3395 *
3396 * We might still have commands that are waiting for unsolicited
 3397 * data-out messages. We must put the extra reference on those
 3398 * before blocking in target_wait_for_session_cmds()
3399 */
3400static void
3401isert_put_unsol_pending_cmds(struct iscsi_conn *conn)
3402{
3403 struct iscsi_cmd *cmd, *tmp;
3404 static LIST_HEAD(drop_cmd_list);
3405
3406 spin_lock_bh(&conn->cmd_lock);
3407 list_for_each_entry_safe(cmd, tmp, &conn->conn_cmd_list, i_conn_node) {
3408 if ((cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA) &&
3409 (cmd->write_data_done < conn->sess->sess_ops->FirstBurstLength) &&
3410 (cmd->write_data_done < cmd->se_cmd.data_length))
3411 list_move_tail(&cmd->i_conn_node, &drop_cmd_list);
3412 }
3413 spin_unlock_bh(&conn->cmd_lock);
3414
3415 list_for_each_entry_safe(cmd, tmp, &drop_cmd_list, i_conn_node) {
3416 list_del_init(&cmd->i_conn_node);
3417 if (cmd->i_state != ISTATE_REMOVE) {
3418 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
3419
3420 isert_info("conn %p dropping cmd %p\n", conn, cmd);
3421 isert_put_cmd(isert_cmd, true);
3422 }
3423 }
3424}
3425
3348static void isert_wait_conn(struct iscsi_conn *conn) 3426static void isert_wait_conn(struct iscsi_conn *conn)
3349{ 3427{
3350 struct isert_conn *isert_conn = conn->context; 3428 struct isert_conn *isert_conn = conn->context;
@@ -3363,8 +3441,9 @@ static void isert_wait_conn(struct iscsi_conn *conn)
3363 isert_conn_terminate(isert_conn); 3441 isert_conn_terminate(isert_conn);
3364 mutex_unlock(&isert_conn->mutex); 3442 mutex_unlock(&isert_conn->mutex);
3365 3443
3366 isert_wait4cmds(conn);
3367 isert_wait4flush(isert_conn); 3444 isert_wait4flush(isert_conn);
3445 isert_put_unsol_pending_cmds(conn);
3446 isert_wait4cmds(conn);
3368 isert_wait4logout(isert_conn); 3447 isert_wait4logout(isert_conn);
3369 3448
3370 queue_work(isert_release_wq, &isert_conn->release_work); 3449 queue_work(isert_release_wq, &isert_conn->release_work);
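The reordered shutdown in isert_wait_conn() only converges because isert_put_unsol_pending_cmds() first drops the extra reference on commands still parked waiting for unsolicited data-out that will never arrive. Its filter, reduced to a checkable user-space predicate (the flag value here is hypothetical):

#include <stdbool.h>
#include <stdio.h>

#define ICF_NON_IMMEDIATE_UNSOLICITED_DATA 0x1	/* illustrative value */

/* Mirrors the condition in isert_put_unsol_pending_cmds(): a command
 * is stuck if it still expects unsolicited data-out. */
static bool stuck_waiting_unsol(unsigned int flags, unsigned int written,
				unsigned int first_burst, unsigned int data_len)
{
	return (flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA) &&
	       written < first_burst && written < data_len;
}

int main(void)
{
	printf("%d\n", stuck_waiting_unsol(0x1, 0, 65536, 4096));	/* 1 */
	printf("%d\n", stuck_waiting_unsol(0x1, 4096, 65536, 4096));	/* 0 */
	return 0;
}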
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 6a04ba3c0f72..c5b99bcecbcf 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -113,7 +113,6 @@ enum {
113}; 113};
114 114
115struct isert_rdma_wr { 115struct isert_rdma_wr {
116 struct list_head wr_list;
117 struct isert_cmd *isert_cmd; 116 struct isert_cmd *isert_cmd;
118 enum iser_ib_op_code iser_ib_op; 117 enum iser_ib_op_code iser_ib_op;
119 struct ib_sge *ib_sge; 118 struct ib_sge *ib_sge;
@@ -134,14 +133,13 @@ struct isert_cmd {
134 uint64_t write_va; 133 uint64_t write_va;
135 u64 pdu_buf_dma; 134 u64 pdu_buf_dma;
136 u32 pdu_buf_len; 135 u32 pdu_buf_len;
137 u32 read_va_off;
138 u32 write_va_off;
139 u32 rdma_wr_num;
140 struct isert_conn *conn; 136 struct isert_conn *conn;
141 struct iscsi_cmd *iscsi_cmd; 137 struct iscsi_cmd *iscsi_cmd;
142 struct iser_tx_desc tx_desc; 138 struct iser_tx_desc tx_desc;
139 struct iser_rx_desc *rx_desc;
143 struct isert_rdma_wr rdma_wr; 140 struct isert_rdma_wr rdma_wr;
144 struct work_struct comp_work; 141 struct work_struct comp_work;
142 struct scatterlist sg;
145}; 143};
146 144
147struct isert_device; 145struct isert_device;
@@ -159,11 +157,10 @@ struct isert_conn {
159 u64 login_req_dma; 157 u64 login_req_dma;
160 int login_req_len; 158 int login_req_len;
161 u64 login_rsp_dma; 159 u64 login_rsp_dma;
162 unsigned int rx_desc_head;
163 struct iser_rx_desc *rx_descs; 160 struct iser_rx_desc *rx_descs;
164 struct ib_recv_wr rx_wr[ISERT_MIN_POSTED_RX]; 161 struct ib_recv_wr rx_wr[ISERT_QP_MAX_RECV_DTOS];
165 struct iscsi_conn *conn; 162 struct iscsi_conn *conn;
166 struct list_head accept_node; 163 struct list_head node;
167 struct completion login_comp; 164 struct completion login_comp;
168 struct completion login_req_comp; 165 struct completion login_req_comp;
169 struct iser_tx_desc login_tx_desc; 166 struct iser_tx_desc login_tx_desc;
@@ -222,9 +219,9 @@ struct isert_device {
222 219
223struct isert_np { 220struct isert_np {
224 struct iscsi_np *np; 221 struct iscsi_np *np;
225 struct semaphore np_sem; 222 struct semaphore sem;
226 struct rdma_cm_id *np_cm_id; 223 struct rdma_cm_id *cm_id;
227 struct mutex np_accept_mutex; 224 struct mutex mutex;
228 struct list_head np_accept_list; 225 struct list_head accepted;
229 struct completion np_login_comp; 226 struct list_head pending;
230}; 227};
diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig
index 56eb471b5576..4215b5382092 100644
--- a/drivers/input/joystick/Kconfig
+++ b/drivers/input/joystick/Kconfig
@@ -196,6 +196,7 @@ config JOYSTICK_TWIDJOY
196config JOYSTICK_ZHENHUA 196config JOYSTICK_ZHENHUA
197 tristate "5-byte Zhenhua RC transmitter" 197 tristate "5-byte Zhenhua RC transmitter"
198 select SERIO 198 select SERIO
199 select BITREVERSE
199 help 200 help
200 Say Y here if you have a Zhen Hua PPM-4CH transmitter which is 201 Say Y here if you have a Zhen Hua PPM-4CH transmitter which is
 201 supplied with ready-to-fly micro electric indoor helicopters 202 supplied with ready-to-fly micro electric indoor helicopters
diff --git a/drivers/input/joystick/walkera0701.c b/drivers/input/joystick/walkera0701.c
index b76ac580703c..a8bc2fe170dd 100644
--- a/drivers/input/joystick/walkera0701.c
+++ b/drivers/input/joystick/walkera0701.c
@@ -150,7 +150,7 @@ static void walkera0701_irq_handler(void *handler_data)
150 if (w->counter == 24) { /* full frame */ 150 if (w->counter == 24) { /* full frame */
151 walkera0701_parse_frame(w); 151 walkera0701_parse_frame(w);
152 w->counter = NO_SYNC; 152 w->counter = NO_SYNC;
153 if (abs(pulse_time - SYNC_PULSE) < RESERVE) /* new frame sync */ 153 if (abs64(pulse_time - SYNC_PULSE) < RESERVE) /* new frame sync */
154 w->counter = 0; 154 w->counter = 0;
155 } else { 155 } else {
156 if ((pulse_time > (ANALOG_MIN_PULSE - RESERVE) 156 if ((pulse_time > (ANALOG_MIN_PULSE - RESERVE)
@@ -161,7 +161,7 @@ static void walkera0701_irq_handler(void *handler_data)
161 } else 161 } else
162 w->counter = NO_SYNC; 162 w->counter = NO_SYNC;
163 } 163 }
164 } else if (abs(pulse_time - SYNC_PULSE - BIN0_PULSE) < 164 } else if (abs64(pulse_time - SYNC_PULSE - BIN0_PULSE) <
165 RESERVE + BIN1_PULSE - BIN0_PULSE) /* frame sync .. */ 165 RESERVE + BIN1_PULSE - BIN0_PULSE) /* frame sync .. */
166 w->counter = 0; 166 w->counter = 0;
167 167
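The walkera0701 change matters because pulse_time is a 64-bit nanosecond delta: plain abs() operates on int-sized values, so a large difference is silently truncated before the threshold comparison, while abs64() keeps the full width. A userspace analogy, with llabs() standing in for abs64():

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

int main(void)
{
        int64_t pulse_time = 5000000000LL;      /* > 32 bits of ns */
        int64_t sync_pulse = 0;

        /* abs() takes an int: the 64-bit delta is truncated first */
        printf("abs:   %d\n", abs((int)(pulse_time - sync_pulse)));
        /* llabs() preserves the full 64-bit magnitude */
        printf("llabs: %lld\n", (long long)llabs(pulse_time - sync_pulse));
        return 0;
}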
diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
index b052afec9a11..6639b2b8528a 100644
--- a/drivers/input/keyboard/omap4-keypad.c
+++ b/drivers/input/keyboard/omap4-keypad.c
@@ -266,7 +266,7 @@ static int omap4_keypad_probe(struct platform_device *pdev)
266 266
267 error = omap4_keypad_parse_dt(&pdev->dev, keypad_data); 267 error = omap4_keypad_parse_dt(&pdev->dev, keypad_data);
268 if (error) 268 if (error)
269 return error; 269 goto err_free_keypad;
270 270
271 res = request_mem_region(res->start, resource_size(res), pdev->name); 271 res = request_mem_region(res->start, resource_size(res), pdev->name);
272 if (!res) { 272 if (!res) {
diff --git a/drivers/input/misc/pm8941-pwrkey.c b/drivers/input/misc/pm8941-pwrkey.c
index 867db8a91372..e317b75357a0 100644
--- a/drivers/input/misc/pm8941-pwrkey.c
+++ b/drivers/input/misc/pm8941-pwrkey.c
@@ -93,7 +93,7 @@ static int pm8941_reboot_notify(struct notifier_block *nb,
93 default: 93 default:
94 reset_type = PON_PS_HOLD_TYPE_HARD_RESET; 94 reset_type = PON_PS_HOLD_TYPE_HARD_RESET;
95 break; 95 break;
96 }; 96 }
97 97
98 error = regmap_update_bits(pwrkey->regmap, 98 error = regmap_update_bits(pwrkey->regmap,
99 pwrkey->baseaddr + PON_PS_HOLD_RST_CTL, 99 pwrkey->baseaddr + PON_PS_HOLD_RST_CTL,
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 345df9b03aed..5adbcedcb81c 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -414,7 +414,7 @@ static int uinput_setup_device(struct uinput_device *udev,
414 dev->id.product = user_dev->id.product; 414 dev->id.product = user_dev->id.product;
415 dev->id.version = user_dev->id.version; 415 dev->id.version = user_dev->id.version;
416 416
417 for_each_set_bit(i, dev->absbit, ABS_CNT) { 417 for (i = 0; i < ABS_CNT; i++) {
418 input_abs_set_max(dev, i, user_dev->absmax[i]); 418 input_abs_set_max(dev, i, user_dev->absmax[i]);
419 input_abs_set_min(dev, i, user_dev->absmin[i]); 419 input_abs_set_min(dev, i, user_dev->absmin[i]);
420 input_abs_set_fuzz(dev, i, user_dev->absfuzz[i]); 420 input_abs_set_fuzz(dev, i, user_dev->absfuzz[i]);
diff --git a/drivers/input/mouse/elan_i2c.h b/drivers/input/mouse/elan_i2c.h
index 73670f2aebfd..c0ec26118732 100644
--- a/drivers/input/mouse/elan_i2c.h
+++ b/drivers/input/mouse/elan_i2c.h
@@ -60,7 +60,7 @@ struct elan_transport_ops {
60 int (*get_sm_version)(struct i2c_client *client, 60 int (*get_sm_version)(struct i2c_client *client,
61 u8* ic_type, u8 *version); 61 u8* ic_type, u8 *version);
62 int (*get_checksum)(struct i2c_client *client, bool iap, u16 *csum); 62 int (*get_checksum)(struct i2c_client *client, bool iap, u16 *csum);
63 int (*get_product_id)(struct i2c_client *client, u8 *id); 63 int (*get_product_id)(struct i2c_client *client, u16 *id);
64 64
65 int (*get_max)(struct i2c_client *client, 65 int (*get_max)(struct i2c_client *client,
66 unsigned int *max_x, unsigned int *max_y); 66 unsigned int *max_x, unsigned int *max_y);
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index fa945304b9a5..5e1665bbaa0b 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -40,7 +40,7 @@
40#include "elan_i2c.h" 40#include "elan_i2c.h"
41 41
42#define DRIVER_NAME "elan_i2c" 42#define DRIVER_NAME "elan_i2c"
43#define ELAN_DRIVER_VERSION "1.6.0" 43#define ELAN_DRIVER_VERSION "1.6.1"
44#define ETP_MAX_PRESSURE 255 44#define ETP_MAX_PRESSURE 255
45#define ETP_FWIDTH_REDUCE 90 45#define ETP_FWIDTH_REDUCE 90
46#define ETP_FINGER_WIDTH 15 46#define ETP_FINGER_WIDTH 15
@@ -76,7 +76,7 @@ struct elan_tp_data {
76 unsigned int x_res; 76 unsigned int x_res;
77 unsigned int y_res; 77 unsigned int y_res;
78 78
79 u8 product_id; 79 u16 product_id;
80 u8 fw_version; 80 u8 fw_version;
81 u8 sm_version; 81 u8 sm_version;
82 u8 iap_version; 82 u8 iap_version;
@@ -98,15 +98,25 @@ static int elan_get_fwinfo(u8 iap_version, u16 *validpage_count,
98 u16 *signature_address) 98 u16 *signature_address)
99{ 99{
100 switch (iap_version) { 100 switch (iap_version) {
101 case 0x00:
102 case 0x06:
101 case 0x08: 103 case 0x08:
102 *validpage_count = 512; 104 *validpage_count = 512;
103 break; 105 break;
106 case 0x03:
107 case 0x07:
104 case 0x09: 108 case 0x09:
109 case 0x0A:
110 case 0x0B:
111 case 0x0C:
105 *validpage_count = 768; 112 *validpage_count = 768;
106 break; 113 break;
107 case 0x0D: 114 case 0x0D:
108 *validpage_count = 896; 115 *validpage_count = 896;
109 break; 116 break;
117 case 0x0E:
118 *validpage_count = 640;
119 break;
110 default: 120 default:
 111 /* unknown ic type, clear value */ 121 /* unknown ic type, clear value */
112 *validpage_count = 0; 122 *validpage_count = 0;
@@ -266,11 +276,10 @@ static int elan_query_device_info(struct elan_tp_data *data)
266 276
267 error = elan_get_fwinfo(data->iap_version, &data->fw_validpage_count, 277 error = elan_get_fwinfo(data->iap_version, &data->fw_validpage_count,
268 &data->fw_signature_address); 278 &data->fw_signature_address);
269 if (error) { 279 if (error)
270 dev_err(&data->client->dev, 280 dev_warn(&data->client->dev,
271 "unknown iap version %d\n", data->iap_version); 281 "unexpected iap version %#04x (ic type: %#04x), firmware update will not work\n",
272 return error; 282 data->iap_version, data->ic_type);
273 }
274 283
275 return 0; 284 return 0;
276} 285}
@@ -486,6 +495,9 @@ static ssize_t elan_sysfs_update_fw(struct device *dev,
486 const u8 *fw_signature; 495 const u8 *fw_signature;
487 static const u8 signature[] = {0xAA, 0x55, 0xCC, 0x33, 0xFF, 0xFF}; 496 static const u8 signature[] = {0xAA, 0x55, 0xCC, 0x33, 0xFF, 0xFF};
488 497
498 if (data->fw_validpage_count == 0)
499 return -EINVAL;
500
489 /* Look for a firmware with the product id appended. */ 501 /* Look for a firmware with the product id appended. */
490 fw_name = kasprintf(GFP_KERNEL, ETP_FW_NAME, data->product_id); 502 fw_name = kasprintf(GFP_KERNEL, ETP_FW_NAME, data->product_id);
491 if (!fw_name) { 503 if (!fw_name) {
diff --git a/drivers/input/mouse/elan_i2c_i2c.c b/drivers/input/mouse/elan_i2c_i2c.c
index 683c840c9dd7..a679e56c44cd 100644
--- a/drivers/input/mouse/elan_i2c_i2c.c
+++ b/drivers/input/mouse/elan_i2c_i2c.c
@@ -276,7 +276,7 @@ static int elan_i2c_get_sm_version(struct i2c_client *client,
276 return 0; 276 return 0;
277} 277}
278 278
279static int elan_i2c_get_product_id(struct i2c_client *client, u8 *id) 279static int elan_i2c_get_product_id(struct i2c_client *client, u16 *id)
280{ 280{
281 int error; 281 int error;
282 u8 val[3]; 282 u8 val[3];
@@ -287,7 +287,7 @@ static int elan_i2c_get_product_id(struct i2c_client *client, u8 *id)
287 return error; 287 return error;
288 } 288 }
289 289
290 *id = val[0]; 290 *id = le16_to_cpup((__le16 *)val);
291 return 0; 291 return 0;
292} 292}
293 293
diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c
index ff36a366b2aa..cb6aecbc1dc2 100644
--- a/drivers/input/mouse/elan_i2c_smbus.c
+++ b/drivers/input/mouse/elan_i2c_smbus.c
@@ -183,7 +183,7 @@ static int elan_smbus_get_sm_version(struct i2c_client *client,
183 return 0; 183 return 0;
184} 184}
185 185
186static int elan_smbus_get_product_id(struct i2c_client *client, u8 *id) 186static int elan_smbus_get_product_id(struct i2c_client *client, u16 *id)
187{ 187{
188 int error; 188 int error;
189 u8 val[3]; 189 u8 val[3];
@@ -195,7 +195,7 @@ static int elan_smbus_get_product_id(struct i2c_client *client, u8 *id)
195 return error; 195 return error;
196 } 196 }
197 197
198 *id = val[1]; 198 *id = be16_to_cpup((__be16 *)val);
199 return 0; 199 return 0;
200} 200}
201 201
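Taken together, the elan hunks widen the product id from u8 to u16, and the two transports return it with opposite byte order: the I2C register reads back little-endian (le16_to_cpup), the SMBus block big-endian (be16_to_cpup). The two conversions in standalone form (helper names are made up for this sketch):

#include <stdint.h>

/* I2C transport: product id arrives little-endian in val[0..1] */
static uint16_t elan_id_from_i2c(const uint8_t *val)
{
        return (uint16_t)val[0] | ((uint16_t)val[1] << 8);
}

/* SMBus transport: same id, but big-endian in val[0..1] */
static uint16_t elan_id_from_smbus(const uint8_t *val)
{
        return ((uint16_t)val[0] << 8) | (uint16_t)val[1];
}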
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 994ae7886156..6025eb430c0a 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -519,18 +519,14 @@ static int synaptics_set_mode(struct psmouse *psmouse)
519 struct synaptics_data *priv = psmouse->private; 519 struct synaptics_data *priv = psmouse->private;
520 520
521 priv->mode = 0; 521 priv->mode = 0;
522 522 if (priv->absolute_mode)
523 if (priv->absolute_mode) {
524 priv->mode |= SYN_BIT_ABSOLUTE_MODE; 523 priv->mode |= SYN_BIT_ABSOLUTE_MODE;
525 if (SYN_CAP_EXTENDED(priv->capabilities)) 524 if (priv->disable_gesture)
526 priv->mode |= SYN_BIT_W_MODE;
527 }
528
529 if (!SYN_MODE_WMODE(priv->mode) && priv->disable_gesture)
530 priv->mode |= SYN_BIT_DISABLE_GESTURE; 525 priv->mode |= SYN_BIT_DISABLE_GESTURE;
531
532 if (psmouse->rate >= 80) 526 if (psmouse->rate >= 80)
533 priv->mode |= SYN_BIT_HIGH_RATE; 527 priv->mode |= SYN_BIT_HIGH_RATE;
528 if (SYN_CAP_EXTENDED(priv->capabilities))
529 priv->mode |= SYN_BIT_W_MODE;
534 530
535 if (synaptics_mode_cmd(psmouse, priv->mode)) 531 if (synaptics_mode_cmd(psmouse, priv->mode))
536 return -1; 532 return -1;
diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c
index 75516996db20..316f2c897101 100644
--- a/drivers/input/serio/libps2.c
+++ b/drivers/input/serio/libps2.c
@@ -212,12 +212,17 @@ int __ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command)
212 * time before the ACK arrives. 212 * time before the ACK arrives.
213 */ 213 */
214 if (ps2_sendbyte(ps2dev, command & 0xff, 214 if (ps2_sendbyte(ps2dev, command & 0xff,
215 command == PS2_CMD_RESET_BAT ? 1000 : 200)) 215 command == PS2_CMD_RESET_BAT ? 1000 : 200)) {
216 goto out; 216 serio_pause_rx(ps2dev->serio);
217 goto out_reset_flags;
218 }
217 219
218 for (i = 0; i < send; i++) 220 for (i = 0; i < send; i++) {
219 if (ps2_sendbyte(ps2dev, param[i], 200)) 221 if (ps2_sendbyte(ps2dev, param[i], 200)) {
220 goto out; 222 serio_pause_rx(ps2dev->serio);
223 goto out_reset_flags;
224 }
225 }
221 226
222 /* 227 /*
223 * The reset command takes a long time to execute. 228 * The reset command takes a long time to execute.
@@ -234,17 +239,18 @@ int __ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command)
234 !(ps2dev->flags & PS2_FLAG_CMD), timeout); 239 !(ps2dev->flags & PS2_FLAG_CMD), timeout);
235 } 240 }
236 241
242 serio_pause_rx(ps2dev->serio);
243
237 if (param) 244 if (param)
238 for (i = 0; i < receive; i++) 245 for (i = 0; i < receive; i++)
239 param[i] = ps2dev->cmdbuf[(receive - 1) - i]; 246 param[i] = ps2dev->cmdbuf[(receive - 1) - i];
240 247
241 if (ps2dev->cmdcnt && (command != PS2_CMD_RESET_BAT || ps2dev->cmdcnt != 1)) 248 if (ps2dev->cmdcnt && (command != PS2_CMD_RESET_BAT || ps2dev->cmdcnt != 1))
242 goto out; 249 goto out_reset_flags;
243 250
244 rc = 0; 251 rc = 0;
245 252
246 out: 253 out_reset_flags:
247 serio_pause_rx(ps2dev->serio);
248 ps2dev->flags = 0; 254 ps2dev->flags = 0;
249 serio_continue_rx(ps2dev->serio); 255 serio_continue_rx(ps2dev->serio);
250 256
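The libps2 fix is about ordering: every exit path now takes serio_pause_rx() before the reply bytes are read and the command flags are cleared, so the RX interrupt handler can never observe flags == 0 while the buffer is still being filled. A toy pthread analogue of the resulting pattern (not the driver's actual types):

#include <pthread.h>

struct toy_ps2 {
        pthread_mutex_t rx_lock;        /* plays serio_pause_rx() */
        unsigned char cmdbuf[8];
        int cmdcnt;
        unsigned long flags;
};

static int toy_command(struct toy_ps2 *dev, unsigned char *param, int receive)
{
        int rc = -1;
        int i;

        /* ... send the command bytes, wait for the reply ... */

        pthread_mutex_lock(&dev->rx_lock);      /* pause RX first */
        if (param)
                for (i = 0; i < receive; i++)
                        param[i] = dev->cmdbuf[(receive - 1) - i];
        if (dev->cmdcnt == 0)
                rc = 0;
        dev->flags = 0;                         /* reset under the lock */
        pthread_mutex_unlock(&dev->rx_lock);    /* serio_continue_rx() */

        return rc;
}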
diff --git a/drivers/input/serio/parkbd.c b/drivers/input/serio/parkbd.c
index 26b45936f9fd..1e8cd6f1fe9e 100644
--- a/drivers/input/serio/parkbd.c
+++ b/drivers/input/serio/parkbd.c
@@ -194,6 +194,7 @@ static int __init parkbd_init(void)
194 parkbd_port = parkbd_allocate_serio(); 194 parkbd_port = parkbd_allocate_serio();
195 if (!parkbd_port) { 195 if (!parkbd_port) {
196 parport_release(parkbd_dev); 196 parport_release(parkbd_dev);
197 parport_unregister_device(parkbd_dev);
197 return -ENOMEM; 198 return -ENOMEM;
198 } 199 }
199 200
diff --git a/drivers/input/touchscreen/imx6ul_tsc.c b/drivers/input/touchscreen/imx6ul_tsc.c
index ff0b75813daa..8275267eac25 100644
--- a/drivers/input/touchscreen/imx6ul_tsc.c
+++ b/drivers/input/touchscreen/imx6ul_tsc.c
@@ -94,7 +94,7 @@ struct imx6ul_tsc {
 94 * The TSC module needs the ADC to get the measured value. So 94 * The TSC module needs the ADC to get the measured value. So
 95 * before configuring the TSC, we should initialize the ADC module. 95 * before configuring the TSC, we should initialize the ADC module.
96 */ 96 */
97static void imx6ul_adc_init(struct imx6ul_tsc *tsc) 97static int imx6ul_adc_init(struct imx6ul_tsc *tsc)
98{ 98{
99 int adc_hc = 0; 99 int adc_hc = 0;
100 int adc_gc; 100 int adc_gc;
@@ -122,17 +122,23 @@ static void imx6ul_adc_init(struct imx6ul_tsc *tsc)
122 122
123 timeout = wait_for_completion_timeout 123 timeout = wait_for_completion_timeout
124 (&tsc->completion, ADC_TIMEOUT); 124 (&tsc->completion, ADC_TIMEOUT);
125 if (timeout == 0) 125 if (timeout == 0) {
126 dev_err(tsc->dev, "Timeout for adc calibration\n"); 126 dev_err(tsc->dev, "Timeout for adc calibration\n");
127 return -ETIMEDOUT;
128 }
127 129
128 adc_gs = readl(tsc->adc_regs + REG_ADC_GS); 130 adc_gs = readl(tsc->adc_regs + REG_ADC_GS);
129 if (adc_gs & ADC_CALF) 131 if (adc_gs & ADC_CALF) {
130 dev_err(tsc->dev, "ADC calibration failed\n"); 132 dev_err(tsc->dev, "ADC calibration failed\n");
133 return -EINVAL;
134 }
131 135
 132 /* TSC needs the ADC to work in hardware-trigger mode */ 136 /* TSC needs the ADC to work in hardware-trigger mode */
133 adc_cfg = readl(tsc->adc_regs + REG_ADC_CFG); 137 adc_cfg = readl(tsc->adc_regs + REG_ADC_CFG);
134 adc_cfg |= ADC_HARDWARE_TRIGGER; 138 adc_cfg |= ADC_HARDWARE_TRIGGER;
135 writel(adc_cfg, tsc->adc_regs + REG_ADC_CFG); 139 writel(adc_cfg, tsc->adc_regs + REG_ADC_CFG);
140
141 return 0;
136} 142}
137 143
138/* 144/*
@@ -188,11 +194,17 @@ static void imx6ul_tsc_set(struct imx6ul_tsc *tsc)
188 writel(start, tsc->tsc_regs + REG_TSC_FLOW_CONTROL); 194 writel(start, tsc->tsc_regs + REG_TSC_FLOW_CONTROL);
189} 195}
190 196
191static void imx6ul_tsc_init(struct imx6ul_tsc *tsc) 197static int imx6ul_tsc_init(struct imx6ul_tsc *tsc)
192{ 198{
193 imx6ul_adc_init(tsc); 199 int err;
200
201 err = imx6ul_adc_init(tsc);
202 if (err)
203 return err;
194 imx6ul_tsc_channel_config(tsc); 204 imx6ul_tsc_channel_config(tsc);
195 imx6ul_tsc_set(tsc); 205 imx6ul_tsc_set(tsc);
206
207 return 0;
196} 208}
197 209
198static void imx6ul_tsc_disable(struct imx6ul_tsc *tsc) 210static void imx6ul_tsc_disable(struct imx6ul_tsc *tsc)
@@ -311,9 +323,7 @@ static int imx6ul_tsc_open(struct input_dev *input_dev)
311 return err; 323 return err;
312 } 324 }
313 325
314 imx6ul_tsc_init(tsc); 326 return imx6ul_tsc_init(tsc);
315
316 return 0;
317} 327}
318 328
319static void imx6ul_tsc_close(struct input_dev *input_dev) 329static void imx6ul_tsc_close(struct input_dev *input_dev)
@@ -337,7 +347,7 @@ static int imx6ul_tsc_probe(struct platform_device *pdev)
337 int tsc_irq; 347 int tsc_irq;
338 int adc_irq; 348 int adc_irq;
339 349
340 tsc = devm_kzalloc(&pdev->dev, sizeof(struct imx6ul_tsc), GFP_KERNEL); 350 tsc = devm_kzalloc(&pdev->dev, sizeof(*tsc), GFP_KERNEL);
341 if (!tsc) 351 if (!tsc)
342 return -ENOMEM; 352 return -ENOMEM;
343 353
@@ -345,7 +355,7 @@ static int imx6ul_tsc_probe(struct platform_device *pdev)
345 if (!input_dev) 355 if (!input_dev)
346 return -ENOMEM; 356 return -ENOMEM;
347 357
348 input_dev->name = "iMX6UL TouchScreen Controller"; 358 input_dev->name = "iMX6UL Touchscreen Controller";
349 input_dev->id.bustype = BUS_HOST; 359 input_dev->id.bustype = BUS_HOST;
350 360
351 input_dev->open = imx6ul_tsc_open; 361 input_dev->open = imx6ul_tsc_open;
@@ -406,7 +416,7 @@ static int imx6ul_tsc_probe(struct platform_device *pdev)
406 } 416 }
407 417
408 adc_irq = platform_get_irq(pdev, 1); 418 adc_irq = platform_get_irq(pdev, 1);
409 if (adc_irq <= 0) { 419 if (adc_irq < 0) {
410 dev_err(&pdev->dev, "no adc irq resource?\n"); 420 dev_err(&pdev->dev, "no adc irq resource?\n");
411 return adc_irq; 421 return adc_irq;
412 } 422 }
@@ -491,7 +501,7 @@ static int __maybe_unused imx6ul_tsc_resume(struct device *dev)
491 goto out; 501 goto out;
492 } 502 }
493 503
494 imx6ul_tsc_init(tsc); 504 retval = imx6ul_tsc_init(tsc);
495 } 505 }
496 506
497out: 507out:
diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c
index 7cce87650fc8..1fafc9f57af6 100644
--- a/drivers/input/touchscreen/mms114.c
+++ b/drivers/input/touchscreen/mms114.c
@@ -394,12 +394,12 @@ static struct mms114_platform_data *mms114_parse_dt(struct device *dev)
394 if (of_property_read_u32(np, "x-size", &pdata->x_size)) { 394 if (of_property_read_u32(np, "x-size", &pdata->x_size)) {
395 dev_err(dev, "failed to get x-size property\n"); 395 dev_err(dev, "failed to get x-size property\n");
396 return NULL; 396 return NULL;
397 }; 397 }
398 398
399 if (of_property_read_u32(np, "y-size", &pdata->y_size)) { 399 if (of_property_read_u32(np, "y-size", &pdata->y_size)) {
400 dev_err(dev, "failed to get y-size property\n"); 400 dev_err(dev, "failed to get y-size property\n");
401 return NULL; 401 return NULL;
402 }; 402 }
403 403
404 of_property_read_u32(np, "contact-threshold", 404 of_property_read_u32(np, "contact-threshold",
405 &pdata->contact_threshold); 405 &pdata->contact_threshold);
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 4664c2a96c67..d9da766719c8 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -43,7 +43,7 @@ config IOMMU_IO_PGTABLE_LPAE_SELFTEST
43endmenu 43endmenu
44 44
45config IOMMU_IOVA 45config IOMMU_IOVA
46 bool 46 tristate
47 47
48config OF_IOMMU 48config OF_IOMMU
49 def_bool y 49 def_bool y
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 2d7349a3ee14..041bc1810a86 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3215,6 +3215,8 @@ static struct iova *intel_alloc_iova(struct device *dev,
3215 3215
3216 /* Restrict dma_mask to the width that the iommu can handle */ 3216 /* Restrict dma_mask to the width that the iommu can handle */
3217 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask); 3217 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
3218 /* Ensure we reserve the whole size-aligned region */
3219 nrpages = __roundup_pow_of_two(nrpages);
3218 3220
3219 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) { 3221 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
3220 /* 3222 /*
@@ -3711,7 +3713,7 @@ static inline int iommu_devinfo_cache_init(void)
3711static int __init iommu_init_mempool(void) 3713static int __init iommu_init_mempool(void)
3712{ 3714{
3713 int ret; 3715 int ret;
3714 ret = iommu_iova_cache_init(); 3716 ret = iova_cache_get();
3715 if (ret) 3717 if (ret)
3716 return ret; 3718 return ret;
3717 3719
@@ -3725,7 +3727,7 @@ static int __init iommu_init_mempool(void)
3725 3727
3726 kmem_cache_destroy(iommu_domain_cache); 3728 kmem_cache_destroy(iommu_domain_cache);
3727domain_error: 3729domain_error:
3728 iommu_iova_cache_destroy(); 3730 iova_cache_put();
3729 3731
3730 return -ENOMEM; 3732 return -ENOMEM;
3731} 3733}
@@ -3734,7 +3736,7 @@ static void __init iommu_exit_mempool(void)
3734{ 3736{
3735 kmem_cache_destroy(iommu_devinfo_cache); 3737 kmem_cache_destroy(iommu_devinfo_cache);
3736 kmem_cache_destroy(iommu_domain_cache); 3738 kmem_cache_destroy(iommu_domain_cache);
3737 iommu_iova_cache_destroy(); 3739 iova_cache_put();
3738} 3740}
3739 3741
3740static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev) 3742static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
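The intel-iommu hunk pairs with the iova.c rework below: alloc_iova() no longer rounds the requested size up itself, so a caller asking for a size-aligned region must reserve the whole power-of-two span or the unaligned tail could be handed out twice. The rounding, spelled out (the helper mirrors what __roundup_pow_of_two() is assumed to do):

#include <stdio.h>

static unsigned long roundup_pow_of_two_ul(unsigned long n)
{
        unsigned long p = 1;

        while (p < n)
                p <<= 1;
        return p;
}

int main(void)
{
        /* a 5-page DMA mapping occupies an 8-page aligned region */
        printf("nrpages %lu -> reserved %lu\n",
               5UL, roundup_pow_of_two_ul(5UL));
        return 0;
}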
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index b7c3d923f3e1..fa0adef32bd6 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -18,42 +18,9 @@
18 */ 18 */
19 19
20#include <linux/iova.h> 20#include <linux/iova.h>
21#include <linux/module.h>
21#include <linux/slab.h> 22#include <linux/slab.h>
22 23
23static struct kmem_cache *iommu_iova_cache;
24
25int iommu_iova_cache_init(void)
26{
27 int ret = 0;
28
29 iommu_iova_cache = kmem_cache_create("iommu_iova",
30 sizeof(struct iova),
31 0,
32 SLAB_HWCACHE_ALIGN,
33 NULL);
34 if (!iommu_iova_cache) {
35 pr_err("Couldn't create iova cache\n");
36 ret = -ENOMEM;
37 }
38
39 return ret;
40}
41
42void iommu_iova_cache_destroy(void)
43{
44 kmem_cache_destroy(iommu_iova_cache);
45}
46
47struct iova *alloc_iova_mem(void)
48{
49 return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
50}
51
52void free_iova_mem(struct iova *iova)
53{
54 kmem_cache_free(iommu_iova_cache, iova);
55}
56
57void 24void
58init_iova_domain(struct iova_domain *iovad, unsigned long granule, 25init_iova_domain(struct iova_domain *iovad, unsigned long granule,
59 unsigned long start_pfn, unsigned long pfn_32bit) 26 unsigned long start_pfn, unsigned long pfn_32bit)
@@ -72,6 +39,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
72 iovad->start_pfn = start_pfn; 39 iovad->start_pfn = start_pfn;
73 iovad->dma_32bit_pfn = pfn_32bit; 40 iovad->dma_32bit_pfn = pfn_32bit;
74} 41}
42EXPORT_SYMBOL_GPL(init_iova_domain);
75 43
76static struct rb_node * 44static struct rb_node *
77__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn) 45__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
@@ -120,19 +88,14 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
120 } 88 }
121} 89}
122 90
123/* Computes the padding size required, to make the 91/*
124 * the start address naturally aligned on its size 92 * Computes the padding size required, to make the start address
93 * naturally aligned on the power-of-two order of its size
125 */ 94 */
126static int 95static unsigned int
127iova_get_pad_size(int size, unsigned int limit_pfn) 96iova_get_pad_size(unsigned int size, unsigned int limit_pfn)
128{ 97{
129 unsigned int pad_size = 0; 98 return (limit_pfn + 1 - size) & (__roundup_pow_of_two(size) - 1);
130 unsigned int order = ilog2(size);
131
132 if (order)
133 pad_size = (limit_pfn + 1) % (1 << order);
134
135 return pad_size;
136} 99}
137 100
138static int __alloc_and_insert_iova_range(struct iova_domain *iovad, 101static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
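The rewritten iova_get_pad_size() computes, in one mask operation, how far the allocation must back off from limit_pfn so that its start lands on a boundary of the size rounded up to a power of two. A worked check of the closed form (the helper reimplements __roundup_pow_of_two for the demo):

#include <stdio.h>

static unsigned int roundup_p2(unsigned int n)
{
        unsigned int p = 1;

        while (p < n)
                p <<= 1;
        return p;
}

static unsigned int pad_size(unsigned int size, unsigned int limit_pfn)
{
        return (limit_pfn + 1 - size) & (roundup_p2(size) - 1);
}

int main(void)
{
        unsigned int size = 5, limit_pfn = 1023;
        unsigned int pad = pad_size(size, limit_pfn);
        unsigned int start = limit_pfn + 1 - pad - size;

        /* pad = 3, start = 1016, and 1016 % 8 == 0 */
        printf("pad=%u start=%u aligned=%s\n", pad, start,
               start % roundup_p2(size) ? "no" : "yes");
        return 0;
}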
@@ -242,6 +205,57 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova)
242 rb_insert_color(&iova->node, root); 205 rb_insert_color(&iova->node, root);
243} 206}
244 207
208static struct kmem_cache *iova_cache;
209static unsigned int iova_cache_users;
210static DEFINE_MUTEX(iova_cache_mutex);
211
212struct iova *alloc_iova_mem(void)
213{
214 return kmem_cache_alloc(iova_cache, GFP_ATOMIC);
215}
216EXPORT_SYMBOL(alloc_iova_mem);
217
218void free_iova_mem(struct iova *iova)
219{
220 kmem_cache_free(iova_cache, iova);
221}
222EXPORT_SYMBOL(free_iova_mem);
223
224int iova_cache_get(void)
225{
226 mutex_lock(&iova_cache_mutex);
227 if (!iova_cache_users) {
228 iova_cache = kmem_cache_create(
229 "iommu_iova", sizeof(struct iova), 0,
230 SLAB_HWCACHE_ALIGN, NULL);
231 if (!iova_cache) {
232 mutex_unlock(&iova_cache_mutex);
233 printk(KERN_ERR "Couldn't create iova cache\n");
234 return -ENOMEM;
235 }
236 }
237
238 iova_cache_users++;
239 mutex_unlock(&iova_cache_mutex);
240
241 return 0;
242}
243EXPORT_SYMBOL_GPL(iova_cache_get);
244
245void iova_cache_put(void)
246{
247 mutex_lock(&iova_cache_mutex);
248 if (WARN_ON(!iova_cache_users)) {
249 mutex_unlock(&iova_cache_mutex);
250 return;
251 }
252 iova_cache_users--;
253 if (!iova_cache_users)
254 kmem_cache_destroy(iova_cache);
255 mutex_unlock(&iova_cache_mutex);
256}
257EXPORT_SYMBOL_GPL(iova_cache_put);
258
245/** 259/**
246 * alloc_iova - allocates an iova 260 * alloc_iova - allocates an iova
247 * @iovad: - iova domain in question 261 * @iovad: - iova domain in question
@@ -265,12 +279,6 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
265 if (!new_iova) 279 if (!new_iova)
266 return NULL; 280 return NULL;
267 281
268 /* If size aligned is set then round the size to
269 * to next power of two.
270 */
271 if (size_aligned)
272 size = __roundup_pow_of_two(size);
273
274 ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn, 282 ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
275 new_iova, size_aligned); 283 new_iova, size_aligned);
276 284
@@ -281,6 +289,7 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
281 289
282 return new_iova; 290 return new_iova;
283} 291}
292EXPORT_SYMBOL_GPL(alloc_iova);
284 293
285/** 294/**
 286 * find_iova - finds an iova for a given pfn 295 * find_iova - finds an iova for a given pfn
@@ -321,6 +330,7 @@ struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
321 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); 330 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
322 return NULL; 331 return NULL;
323} 332}
333EXPORT_SYMBOL_GPL(find_iova);
324 334
325/** 335/**
326 * __free_iova - frees the given iova 336 * __free_iova - frees the given iova
@@ -339,6 +349,7 @@ __free_iova(struct iova_domain *iovad, struct iova *iova)
339 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); 349 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
340 free_iova_mem(iova); 350 free_iova_mem(iova);
341} 351}
352EXPORT_SYMBOL_GPL(__free_iova);
342 353
343/** 354/**
344 * free_iova - finds and frees the iova for a given pfn 355 * free_iova - finds and frees the iova for a given pfn
@@ -356,6 +367,7 @@ free_iova(struct iova_domain *iovad, unsigned long pfn)
356 __free_iova(iovad, iova); 367 __free_iova(iovad, iova);
357 368
358} 369}
370EXPORT_SYMBOL_GPL(free_iova);
359 371
360/** 372/**
 361 * put_iova_domain - destroys the iova domain 373 * put_iova_domain - destroys the iova domain
@@ -378,6 +390,7 @@ void put_iova_domain(struct iova_domain *iovad)
378 } 390 }
379 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); 391 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
380} 392}
393EXPORT_SYMBOL_GPL(put_iova_domain);
381 394
382static int 395static int
383__is_range_overlap(struct rb_node *node, 396__is_range_overlap(struct rb_node *node,
@@ -467,6 +480,7 @@ finish:
467 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); 480 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
468 return iova; 481 return iova;
469} 482}
483EXPORT_SYMBOL_GPL(reserve_iova);
470 484
471/** 485/**
 472 * copy_reserved_iova - copies the reserved iovas between domains 486 * copy_reserved_iova - copies the reserved iovas between domains
@@ -493,6 +507,7 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
493 } 507 }
494 spin_unlock_irqrestore(&from->iova_rbtree_lock, flags); 508 spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
495} 509}
510EXPORT_SYMBOL_GPL(copy_reserved_iova);
496 511
497struct iova * 512struct iova *
498split_and_remove_iova(struct iova_domain *iovad, struct iova *iova, 513split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
@@ -534,3 +549,6 @@ error:
534 free_iova_mem(prev); 549 free_iova_mem(prev);
535 return NULL; 550 return NULL;
536} 551}
552
553MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
554MODULE_LICENSE("GPL");
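With IOMMU_IOVA now tristate, several modular users can share the iova kmem_cache, so iova_cache_get()/iova_cache_put() wrap it in a mutex-protected refcount: the first user creates it, the last destroys it. The shape of the pattern, reduced to pthreads and malloc:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t cache_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned int cache_users;
static void *cache;                     /* stands in for the kmem_cache */

int cache_get(void)
{
        int ret = 0;

        pthread_mutex_lock(&cache_mutex);
        if (!cache_users) {
                cache = malloc(4096);   /* kmem_cache_create() here */
                if (!cache)
                        ret = -1;
        }
        if (!ret)
                cache_users++;
        pthread_mutex_unlock(&cache_mutex);
        return ret;
}

void cache_put(void)
{
        pthread_mutex_lock(&cache_mutex);
        if (cache_users && !--cache_users) {
                free(cache);            /* kmem_cache_destroy() here */
                cache = NULL;
        }
        pthread_mutex_unlock(&cache_mutex);
}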
diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c
index 9da9942ac83c..f6d680485bee 100644
--- a/drivers/irqchip/irq-atmel-aic5.c
+++ b/drivers/irqchip/irq-atmel-aic5.c
@@ -88,28 +88,36 @@ static void aic5_mask(struct irq_data *d)
88{ 88{
89 struct irq_domain *domain = d->domain; 89 struct irq_domain *domain = d->domain;
90 struct irq_domain_chip_generic *dgc = domain->gc; 90 struct irq_domain_chip_generic *dgc = domain->gc;
91 struct irq_chip_generic *gc = dgc->gc[0]; 91 struct irq_chip_generic *bgc = dgc->gc[0];
92 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
92 93
93 /* Disable interrupt on AIC5 */ 94 /*
94 irq_gc_lock(gc); 95 * Disable interrupt on AIC5. We always take the lock of the
96 * first irq chip as all chips share the same registers.
97 */
98 irq_gc_lock(bgc);
95 irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR); 99 irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR);
96 irq_reg_writel(gc, 1, AT91_AIC5_IDCR); 100 irq_reg_writel(gc, 1, AT91_AIC5_IDCR);
97 gc->mask_cache &= ~d->mask; 101 gc->mask_cache &= ~d->mask;
98 irq_gc_unlock(gc); 102 irq_gc_unlock(bgc);
99} 103}
100 104
101static void aic5_unmask(struct irq_data *d) 105static void aic5_unmask(struct irq_data *d)
102{ 106{
103 struct irq_domain *domain = d->domain; 107 struct irq_domain *domain = d->domain;
104 struct irq_domain_chip_generic *dgc = domain->gc; 108 struct irq_domain_chip_generic *dgc = domain->gc;
105 struct irq_chip_generic *gc = dgc->gc[0]; 109 struct irq_chip_generic *bgc = dgc->gc[0];
110 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
106 111
107 /* Enable interrupt on AIC5 */ 112 /*
108 irq_gc_lock(gc); 113 * Enable interrupt on AIC5. We always take the lock of the
114 * first irq chip as all chips share the same registers.
115 */
116 irq_gc_lock(bgc);
109 irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR); 117 irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR);
110 irq_reg_writel(gc, 1, AT91_AIC5_IECR); 118 irq_reg_writel(gc, 1, AT91_AIC5_IECR);
111 gc->mask_cache |= d->mask; 119 gc->mask_cache |= d->mask;
112 irq_gc_unlock(gc); 120 irq_gc_unlock(bgc);
113} 121}
114 122
115static int aic5_retrigger(struct irq_data *d) 123static int aic5_retrigger(struct irq_data *d)
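The aic5 hunks fix a locking mismatch: every bank of the controller is programmed through the same select/data register pair, so the lock must always be the first chip's (bgc) even though the mask cache being updated belongs to the irq's own chip (gc). Condensed, with plain variables standing in for the registers:

#include <pthread.h>
#include <stdint.h>

struct bank { uint32_t mask_cache; };

static pthread_mutex_t bank0_lock = PTHREAD_MUTEX_INITIALIZER;
static volatile uint32_t reg_ssr, reg_idcr;     /* shared registers */

static void mask_irq(struct bank *b, uint32_t hwirq, uint32_t bit)
{
        pthread_mutex_lock(&bank0_lock);        /* first bank's lock */
        reg_ssr = hwirq;                        /* select the line... */
        reg_idcr = 1;                           /* ...then disable it */
        b->mask_cache &= ~bit;
        pthread_mutex_unlock(&bank0_lock);
}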
diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
index cf351c637464..a7c8c9ffbafd 100644
--- a/drivers/irqchip/irq-gic-v3-its-pci-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
@@ -62,7 +62,7 @@ static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data)
62 62
63 dev_alias->dev_id = alias; 63 dev_alias->dev_id = alias;
64 if (pdev != dev_alias->pdev) 64 if (pdev != dev_alias->pdev)
65 dev_alias->count += its_pci_msi_vec_count(dev_alias->pdev); 65 dev_alias->count += its_pci_msi_vec_count(pdev);
66 66
67 return 0; 67 return 0;
68} 68}
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index ac7ae2b3cb83..25ceae9f7348 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -719,6 +719,9 @@ static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids)
719out: 719out:
720 spin_unlock(&lpi_lock); 720 spin_unlock(&lpi_lock);
721 721
722 if (!bitmap)
723 *base = *nr_ids = 0;
724
722 return bitmap; 725 return bitmap;
723} 726}
724 727
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index af2f16bb8a94..aeaa061f0dbf 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -320,6 +320,14 @@ static void gic_handle_shared_int(bool chained)
320 intrmask[i] = gic_read(intrmask_reg); 320 intrmask[i] = gic_read(intrmask_reg);
321 pending_reg += gic_reg_step; 321 pending_reg += gic_reg_step;
322 intrmask_reg += gic_reg_step; 322 intrmask_reg += gic_reg_step;
323
324 if (!config_enabled(CONFIG_64BIT) || mips_cm_is64)
325 continue;
326
327 pending[i] |= (u64)gic_read(pending_reg) << 32;
328 intrmask[i] |= (u64)gic_read(intrmask_reg) << 32;
329 pending_reg += gic_reg_step;
330 intrmask_reg += gic_reg_step;
323 } 331 }
324 332
325 bitmap_and(pending, pending, intrmask, gic_shared_intrs); 333 bitmap_and(pending, pending, intrmask, gic_shared_intrs);
@@ -426,7 +434,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
426 spin_lock_irqsave(&gic_lock, flags); 434 spin_lock_irqsave(&gic_lock, flags);
427 435
428 /* Re-route this IRQ */ 436 /* Re-route this IRQ */
429 gic_map_to_vpe(irq, cpumask_first(&tmp)); 437 gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp)));
430 438
431 /* Update the pcpu_masks */ 439 /* Update the pcpu_masks */
432 for (i = 0; i < NR_CPUS; i++) 440 for (i = 0; i < NR_CPUS; i++)
@@ -599,7 +607,7 @@ static __init void gic_ipi_init_one(unsigned int intr, int cpu,
599 GIC_SHARED_TO_HWIRQ(intr)); 607 GIC_SHARED_TO_HWIRQ(intr));
600 int i; 608 int i;
601 609
602 gic_map_to_vpe(intr, cpu); 610 gic_map_to_vpe(intr, mips_cm_vp_id(cpu));
603 for (i = 0; i < NR_CPUS; i++) 611 for (i = 0; i < NR_CPUS; i++)
604 clear_bit(intr, pcpu_masks[i].pcpu_mask); 612 clear_bit(intr, pcpu_masks[i].pcpu_mask);
605 set_bit(intr, pcpu_masks[cpu].pcpu_mask); 613 set_bit(intr, pcpu_masks[cpu].pcpu_mask);
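In the mips-gic hunk, a 64-bit pending/mask word read through a 32-bit CM interface spans two consecutive registers, so after the low half the code steps to the next register for the high half. In isolation:

#include <stdint.h>

/* one 64-bit GIC word exposed as two consecutive 32-bit registers */
static uint64_t read_gic_word64(const volatile uint32_t *reg)
{
        uint64_t v = reg[0];            /* low half, as before */

        v |= (uint64_t)reg[1] << 32;    /* high half, added above */
        return v;
}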
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index e51de52eeb94..48b5890c28e3 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1997,7 +1997,8 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
1997 if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file) 1997 if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file)
1998 ret = bitmap_storage_alloc(&store, chunks, 1998 ret = bitmap_storage_alloc(&store, chunks,
1999 !bitmap->mddev->bitmap_info.external, 1999 !bitmap->mddev->bitmap_info.external,
2000 bitmap->cluster_slot); 2000 mddev_is_clustered(bitmap->mddev)
2001 ? bitmap->cluster_slot : 0);
2001 if (ret) 2002 if (ret)
2002 goto err; 2003 goto err;
2003 2004
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index d60c88df5234..4b3b6f8aff0c 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -968,7 +968,8 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
968 968
969/* 969/*
970 * Generate a new unfragmented bio with the given size 970 * Generate a new unfragmented bio with the given size
971 * This should never violate the device limitations 971 * This should never violate the device limitations (but only because
972 * max_segment_size is being constrained to PAGE_SIZE).
972 * 973 *
973 * This function may be called concurrently. If we allocate from the mempool 974 * This function may be called concurrently. If we allocate from the mempool
974 * concurrently, there is a possibility of deadlock. For example, if we have 975 * concurrently, there is a possibility of deadlock. For example, if we have
@@ -2045,9 +2046,20 @@ static int crypt_iterate_devices(struct dm_target *ti,
2045 return fn(ti, cc->dev, cc->start, ti->len, data); 2046 return fn(ti, cc->dev, cc->start, ti->len, data);
2046} 2047}
2047 2048
2049static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
2050{
2051 /*
2052 * Unfortunate constraint that is required to avoid the potential
2053 * for exceeding underlying device's max_segments limits -- due to
2054 * crypt_alloc_buffer() possibly allocating pages for the encryption
2055 * bio that are not as physically contiguous as the original bio.
2056 */
2057 limits->max_segment_size = PAGE_SIZE;
2058}
2059
2048static struct target_type crypt_target = { 2060static struct target_type crypt_target = {
2049 .name = "crypt", 2061 .name = "crypt",
2050 .version = {1, 14, 0}, 2062 .version = {1, 14, 1},
2051 .module = THIS_MODULE, 2063 .module = THIS_MODULE,
2052 .ctr = crypt_ctr, 2064 .ctr = crypt_ctr,
2053 .dtr = crypt_dtr, 2065 .dtr = crypt_dtr,
@@ -2058,6 +2070,7 @@ static struct target_type crypt_target = {
2058 .resume = crypt_resume, 2070 .resume = crypt_resume,
2059 .message = crypt_message, 2071 .message = crypt_message,
2060 .iterate_devices = crypt_iterate_devices, 2072 .iterate_devices = crypt_iterate_devices,
2073 .io_hints = crypt_io_hints,
2061}; 2074};
2062 2075
2063static int __init dm_crypt_init(void) 2076static int __init dm_crypt_init(void)
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 6578b7bc1fbb..6fcbfb063366 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -4249,6 +4249,10 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
4249{ 4249{
4250 struct thin_c *tc = ti->private; 4250 struct thin_c *tc = ti->private;
4251 struct pool *pool = tc->pool; 4251 struct pool *pool = tc->pool;
4252 struct queue_limits *pool_limits = dm_get_queue_limits(pool->pool_md);
4253
4254 if (!pool_limits->discard_granularity)
4255 return; /* pool's discard support is disabled */
4252 4256
4253 limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT; 4257 limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
4254 limits->max_discard_sectors = 2048 * 1024 * 16; /* 16G */ 4258 limits->max_discard_sectors = 2048 * 1024 * 16; /* 16G */
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 4f5ecbe94ccb..c702de18207a 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5409,9 +5409,13 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
5409 * which will now never happen */ 5409 * which will now never happen */
5410 wake_up_process(mddev->sync_thread->tsk); 5410 wake_up_process(mddev->sync_thread->tsk);
5411 5411
5412 if (mddev->external && test_bit(MD_CHANGE_PENDING, &mddev->flags))
5413 return -EBUSY;
5412 mddev_unlock(mddev); 5414 mddev_unlock(mddev);
5413 wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING, 5415 wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
5414 &mddev->recovery)); 5416 &mddev->recovery));
5417 wait_event(mddev->sb_wait,
5418 !test_bit(MD_CHANGE_PENDING, &mddev->flags));
5415 mddev_lock_nointr(mddev); 5419 mddev_lock_nointr(mddev);
5416 5420
5417 mutex_lock(&mddev->open_mutex); 5421 mutex_lock(&mddev->open_mutex);
@@ -8160,6 +8164,7 @@ void md_check_recovery(struct mddev *mddev)
8160 md_reap_sync_thread(mddev); 8164 md_reap_sync_thread(mddev);
8161 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 8165 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
8162 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 8166 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8167 clear_bit(MD_CHANGE_PENDING, &mddev->flags);
8163 goto unlock; 8168 goto unlock;
8164 } 8169 }
8165 8170
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index d222522c52e0..d132f06afdd1 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -470,8 +470,7 @@ static int multipath_run (struct mddev *mddev)
470 return 0; 470 return 0;
471 471
472out_free_conf: 472out_free_conf:
473 if (conf->pool) 473 mempool_destroy(conf->pool);
474 mempool_destroy(conf->pool);
475 kfree(conf->multipaths); 474 kfree(conf->multipaths);
476 kfree(conf); 475 kfree(conf);
477 mddev->private = NULL; 476 mddev->private = NULL;
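mempool_destroy() (like kmem_cache_destroy() in the raid5 hunk further down) is a no-op when passed NULL, so the "if (conf->pool)" guards are dead weight; the raid1 and raid10 hunks below make the same simplification. The idiom, in miniature:

#include <stdlib.h>

struct toy_pool { void *elements; };

/* NULL-safe destructor, free()-style: callers need no guard */
static void toy_pool_destroy(struct toy_pool *pool)
{
        if (!pool)
                return;
        free(pool->elements);
        free(pool);
}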
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 63e619b2f44e..f8e5db0cb5aa 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -376,12 +376,6 @@ static int raid0_run(struct mddev *mddev)
376 struct md_rdev *rdev; 376 struct md_rdev *rdev;
377 bool discard_supported = false; 377 bool discard_supported = false;
378 378
379 rdev_for_each(rdev, mddev) {
380 disk_stack_limits(mddev->gendisk, rdev->bdev,
381 rdev->data_offset << 9);
382 if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
383 discard_supported = true;
384 }
385 blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors); 379 blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
386 blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors); 380 blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
387 blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors); 381 blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
@@ -390,6 +384,12 @@ static int raid0_run(struct mddev *mddev)
390 blk_queue_io_opt(mddev->queue, 384 blk_queue_io_opt(mddev->queue,
391 (mddev->chunk_sectors << 9) * mddev->raid_disks); 385 (mddev->chunk_sectors << 9) * mddev->raid_disks);
392 386
387 rdev_for_each(rdev, mddev) {
388 disk_stack_limits(mddev->gendisk, rdev->bdev,
389 rdev->data_offset << 9);
390 if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
391 discard_supported = true;
392 }
393 if (!discard_supported) 393 if (!discard_supported)
394 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); 394 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
395 else 395 else
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 4517f06c41ba..049df6c4a8cc 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -881,8 +881,7 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
881 } 881 }
882 882
883 if (bio && bio_data_dir(bio) == WRITE) { 883 if (bio && bio_data_dir(bio) == WRITE) {
884 if (bio->bi_iter.bi_sector >= 884 if (bio->bi_iter.bi_sector >= conf->next_resync) {
885 conf->mddev->curr_resync_completed) {
886 if (conf->start_next_window == MaxSector) 885 if (conf->start_next_window == MaxSector)
887 conf->start_next_window = 886 conf->start_next_window =
888 conf->next_resync + 887 conf->next_resync +
@@ -1516,7 +1515,7 @@ static void close_sync(struct r1conf *conf)
1516 conf->r1buf_pool = NULL; 1515 conf->r1buf_pool = NULL;
1517 1516
1518 spin_lock_irq(&conf->resync_lock); 1517 spin_lock_irq(&conf->resync_lock);
1519 conf->next_resync = 0; 1518 conf->next_resync = MaxSector - 2 * NEXT_NORMALIO_DISTANCE;
1520 conf->start_next_window = MaxSector; 1519 conf->start_next_window = MaxSector;
1521 conf->current_window_requests += 1520 conf->current_window_requests +=
1522 conf->next_window_requests; 1521 conf->next_window_requests;
@@ -2843,8 +2842,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
2843 2842
2844 abort: 2843 abort:
2845 if (conf) { 2844 if (conf) {
2846 if (conf->r1bio_pool) 2845 mempool_destroy(conf->r1bio_pool);
2847 mempool_destroy(conf->r1bio_pool);
2848 kfree(conf->mirrors); 2846 kfree(conf->mirrors);
2849 safe_put_page(conf->tmppage); 2847 safe_put_page(conf->tmppage);
2850 kfree(conf->poolinfo); 2848 kfree(conf->poolinfo);
@@ -2946,8 +2944,7 @@ static void raid1_free(struct mddev *mddev, void *priv)
2946{ 2944{
2947 struct r1conf *conf = priv; 2945 struct r1conf *conf = priv;
2948 2946
2949 if (conf->r1bio_pool) 2947 mempool_destroy(conf->r1bio_pool);
2950 mempool_destroy(conf->r1bio_pool);
2951 kfree(conf->mirrors); 2948 kfree(conf->mirrors);
2952 safe_put_page(conf->tmppage); 2949 safe_put_page(conf->tmppage);
2953 kfree(conf->poolinfo); 2950 kfree(conf->poolinfo);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 0fc33eb88855..7c99a4037715 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -3486,8 +3486,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
3486 printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n", 3486 printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n",
3487 mdname(mddev)); 3487 mdname(mddev));
3488 if (conf) { 3488 if (conf) {
3489 if (conf->r10bio_pool) 3489 mempool_destroy(conf->r10bio_pool);
3490 mempool_destroy(conf->r10bio_pool);
3491 kfree(conf->mirrors); 3490 kfree(conf->mirrors);
3492 safe_put_page(conf->tmppage); 3491 safe_put_page(conf->tmppage);
3493 kfree(conf); 3492 kfree(conf);
@@ -3682,8 +3681,7 @@ static int run(struct mddev *mddev)
3682 3681
3683out_free_conf: 3682out_free_conf:
3684 md_unregister_thread(&mddev->thread); 3683 md_unregister_thread(&mddev->thread);
3685 if (conf->r10bio_pool) 3684 mempool_destroy(conf->r10bio_pool);
3686 mempool_destroy(conf->r10bio_pool);
3687 safe_put_page(conf->tmppage); 3685 safe_put_page(conf->tmppage);
3688 kfree(conf->mirrors); 3686 kfree(conf->mirrors);
3689 kfree(conf); 3687 kfree(conf);
@@ -3696,8 +3694,7 @@ static void raid10_free(struct mddev *mddev, void *priv)
3696{ 3694{
3697 struct r10conf *conf = priv; 3695 struct r10conf *conf = priv;
3698 3696
3699 if (conf->r10bio_pool) 3697 mempool_destroy(conf->r10bio_pool);
3700 mempool_destroy(conf->r10bio_pool);
3701 safe_put_page(conf->tmppage); 3698 safe_put_page(conf->tmppage);
3702 kfree(conf->mirrors); 3699 kfree(conf->mirrors);
3703 kfree(conf->mirrors_old); 3700 kfree(conf->mirrors_old);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 15ef2c641b2b..49bb8d3ff9be 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2271,8 +2271,7 @@ static void shrink_stripes(struct r5conf *conf)
2271 drop_one_stripe(conf)) 2271 drop_one_stripe(conf))
2272 ; 2272 ;
2273 2273
2274 if (conf->slab_cache) 2274 kmem_cache_destroy(conf->slab_cache);
2275 kmem_cache_destroy(conf->slab_cache);
2276 conf->slab_cache = NULL; 2275 conf->slab_cache = NULL;
2277} 2276}
2278 2277
@@ -3150,6 +3149,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
3150 spin_unlock_irq(&sh->stripe_lock); 3149 spin_unlock_irq(&sh->stripe_lock);
3151 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 3150 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
3152 wake_up(&conf->wait_for_overlap); 3151 wake_up(&conf->wait_for_overlap);
3152 if (bi)
3153 s->to_read--;
3153 while (bi && bi->bi_iter.bi_sector < 3154 while (bi && bi->bi_iter.bi_sector <
3154 sh->dev[i].sector + STRIPE_SECTORS) { 3155 sh->dev[i].sector + STRIPE_SECTORS) {
3155 struct bio *nextbi = 3156 struct bio *nextbi =
@@ -3169,6 +3170,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
3169 */ 3170 */
3170 clear_bit(R5_LOCKED, &sh->dev[i].flags); 3171 clear_bit(R5_LOCKED, &sh->dev[i].flags);
3171 } 3172 }
3173 s->to_write = 0;
3174 s->written = 0;
3172 3175
3173 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) 3176 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
3174 if (atomic_dec_and_test(&conf->pending_full_writes)) 3177 if (atomic_dec_and_test(&conf->pending_full_writes))
@@ -3300,7 +3303,7 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
3300 */ 3303 */
3301 return 0; 3304 return 0;
3302 3305
3303 for (i = 0; i < s->failed; i++) { 3306 for (i = 0; i < s->failed && i < 2; i++) {
3304 if (fdev[i]->towrite && 3307 if (fdev[i]->towrite &&
3305 !test_bit(R5_UPTODATE, &fdev[i]->flags) && 3308 !test_bit(R5_UPTODATE, &fdev[i]->flags) &&
3306 !test_bit(R5_OVERWRITE, &fdev[i]->flags)) 3309 !test_bit(R5_OVERWRITE, &fdev[i]->flags))
@@ -3324,7 +3327,7 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
3324 sh->sector < sh->raid_conf->mddev->recovery_cp) 3327 sh->sector < sh->raid_conf->mddev->recovery_cp)
3325 /* reconstruct-write isn't being forced */ 3328 /* reconstruct-write isn't being forced */
3326 return 0; 3329 return 0;
3327 for (i = 0; i < s->failed; i++) { 3330 for (i = 0; i < s->failed && i < 2; i++) {
3328 if (s->failed_num[i] != sh->pd_idx && 3331 if (s->failed_num[i] != sh->pd_idx &&
3329 s->failed_num[i] != sh->qd_idx && 3332 s->failed_num[i] != sh->qd_idx &&
3330 !test_bit(R5_UPTODATE, &fdev[i]->flags) && 3333 !test_bit(R5_UPTODATE, &fdev[i]->flags) &&
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
index 25868c2ec03e..02006f7109a8 100644
--- a/drivers/misc/cxl/sysfs.c
+++ b/drivers/misc/cxl/sysfs.c
@@ -592,6 +592,8 @@ int cxl_sysfs_afu_add(struct cxl_afu *afu)
592 592
 593 /* conditionally create the binary file for error info buffer */ 593 /* conditionally create the binary file for error info buffer */
594 if (afu->eb_len) { 594 if (afu->eb_len) {
595 sysfs_attr_init(&afu->attr_eb.attr);
596
595 afu->attr_eb.attr.name = "afu_err_buff"; 597 afu->attr_eb.attr.name = "afu_err_buff";
596 afu->attr_eb.attr.mode = S_IRUGO; 598 afu->attr_eb.attr.mode = S_IRUGO;
597 afu->attr_eb.size = afu->eb_len; 599 afu->attr_eb.size = afu->eb_len;
diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c
index 4b469cf9e60f..8504dbeacd3b 100644
--- a/drivers/misc/mei/debugfs.c
+++ b/drivers/misc/mei/debugfs.c
@@ -204,6 +204,8 @@ int mei_dbgfs_register(struct mei_device *dev, const char *name)
204 if (!dir) 204 if (!dir)
205 return -ENOMEM; 205 return -ENOMEM;
206 206
207 dev->dbgfs_dir = dir;
208
207 f = debugfs_create_file("meclients", S_IRUSR, dir, 209 f = debugfs_create_file("meclients", S_IRUSR, dir,
208 dev, &mei_dbgfs_fops_meclients); 210 dev, &mei_dbgfs_fops_meclients);
209 if (!f) { 211 if (!f) {
@@ -228,7 +230,6 @@ int mei_dbgfs_register(struct mei_device *dev, const char *name)
228 dev_err(dev->dev, "allow_fixed_address: registration failed\n"); 230 dev_err(dev->dev, "allow_fixed_address: registration failed\n");
229 goto err; 231 goto err;
230 } 232 }
231 dev->dbgfs_dir = dir;
232 return 0; 233 return 0;
233err: 234err:
234 mei_dbgfs_deregister(dev); 235 mei_dbgfs_deregister(dev);
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 0520064dc33b..a3eb20bdcd97 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -134,9 +134,11 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
134 int err = cmd->error; 134 int err = cmd->error;
135 135
136 /* Flag re-tuning needed on CRC errors */ 136 /* Flag re-tuning needed on CRC errors */
137 if (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) || 137 if ((cmd->opcode != MMC_SEND_TUNING_BLOCK &&
138 cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) &&
139 (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
138 (mrq->data && mrq->data->error == -EILSEQ) || 140 (mrq->data && mrq->data->error == -EILSEQ) ||
139 (mrq->stop && mrq->stop->error == -EILSEQ)) 141 (mrq->stop && mrq->stop->error == -EILSEQ)))
140 mmc_retune_needed(host); 142 mmc_retune_needed(host);
141 143
142 if (err && cmd->retries && mmc_host_is_spi(host)) { 144 if (err && cmd->retries && mmc_host_is_spi(host)) {
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index abd933b7029b..5466f25f0281 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -457,7 +457,7 @@ int mmc_of_parse(struct mmc_host *host)
457 0, &cd_gpio_invert); 457 0, &cd_gpio_invert);
458 if (!ret) 458 if (!ret)
459 dev_info(host->parent, "Got CD GPIO\n"); 459 dev_info(host->parent, "Got CD GPIO\n");
460 else if (ret != -ENOENT) 460 else if (ret != -ENOENT && ret != -ENOSYS)
461 return ret; 461 return ret;
462 462
463 /* 463 /*
@@ -481,7 +481,7 @@ int mmc_of_parse(struct mmc_host *host)
481 ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert); 481 ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert);
482 if (!ret) 482 if (!ret)
483 dev_info(host->parent, "Got WP GPIO\n"); 483 dev_info(host->parent, "Got WP GPIO\n");
484 else if (ret != -ENOENT) 484 else if (ret != -ENOENT && ret != -ENOSYS)
485 return ret; 485 return ret;
486 486
487 if (of_property_read_bool(np, "disable-wp")) 487 if (of_property_read_bool(np, "disable-wp"))
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 1420f29628c7..8cadd74e8407 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -28,6 +28,7 @@
28#include <linux/clk.h> 28#include <linux/clk.h>
29#include <linux/err.h> 29#include <linux/err.h>
30#include <linux/mmc/host.h> 30#include <linux/mmc/host.h>
31#include <linux/mmc/slot-gpio.h>
31#include <linux/io.h> 32#include <linux/io.h>
32#include <linux/regulator/consumer.h> 33#include <linux/regulator/consumer.h>
33#include <linux/gpio.h> 34#include <linux/gpio.h>
@@ -454,12 +455,8 @@ static int pxamci_get_ro(struct mmc_host *mmc)
454{ 455{
455 struct pxamci_host *host = mmc_priv(mmc); 456 struct pxamci_host *host = mmc_priv(mmc);
456 457
457 if (host->pdata && gpio_is_valid(host->pdata->gpio_card_ro)) { 458 if (host->pdata && gpio_is_valid(host->pdata->gpio_card_ro))
458 if (host->pdata->gpio_card_ro_invert) 459 return mmc_gpio_get_ro(mmc);
459 return !gpio_get_value(host->pdata->gpio_card_ro);
460 else
461 return gpio_get_value(host->pdata->gpio_card_ro);
462 }
463 if (host->pdata && host->pdata->get_ro) 460 if (host->pdata && host->pdata->get_ro)
464 return !!host->pdata->get_ro(mmc_dev(mmc)); 461 return !!host->pdata->get_ro(mmc_dev(mmc));
465 /* 462 /*
@@ -551,6 +548,7 @@ static void pxamci_enable_sdio_irq(struct mmc_host *host, int enable)
551 548
552static const struct mmc_host_ops pxamci_ops = { 549static const struct mmc_host_ops pxamci_ops = {
553 .request = pxamci_request, 550 .request = pxamci_request,
551 .get_cd = mmc_gpio_get_cd,
554 .get_ro = pxamci_get_ro, 552 .get_ro = pxamci_get_ro,
555 .set_ios = pxamci_set_ios, 553 .set_ios = pxamci_set_ios,
556 .enable_sdio_irq = pxamci_enable_sdio_irq, 554 .enable_sdio_irq = pxamci_enable_sdio_irq,
@@ -790,37 +788,31 @@ static int pxamci_probe(struct platform_device *pdev)
790 gpio_power = host->pdata->gpio_power; 788 gpio_power = host->pdata->gpio_power;
791 } 789 }
792 if (gpio_is_valid(gpio_power)) { 790 if (gpio_is_valid(gpio_power)) {
793 ret = gpio_request(gpio_power, "mmc card power"); 791 ret = devm_gpio_request(&pdev->dev, gpio_power,
792 "mmc card power");
794 if (ret) { 793 if (ret) {
795 dev_err(&pdev->dev, "Failed requesting gpio_power %d\n", gpio_power); 794 dev_err(&pdev->dev, "Failed requesting gpio_power %d\n",
795 gpio_power);
796 goto out; 796 goto out;
797 } 797 }
798 gpio_direction_output(gpio_power, 798 gpio_direction_output(gpio_power,
799 host->pdata->gpio_power_invert); 799 host->pdata->gpio_power_invert);
800 } 800 }
801 if (gpio_is_valid(gpio_ro)) { 801 if (gpio_is_valid(gpio_ro))
802 ret = gpio_request(gpio_ro, "mmc card read only"); 802 ret = mmc_gpio_request_ro(mmc, gpio_ro);
803 if (ret) { 803 if (ret) {
804 dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro); 804 dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro);
805 goto err_gpio_ro; 805 goto out;
806 } 806 } else {
807 gpio_direction_input(gpio_ro); 807 mmc->caps |= host->pdata->gpio_card_ro_invert ?
808 MMC_CAP2_RO_ACTIVE_HIGH : 0;
808 } 809 }
809 if (gpio_is_valid(gpio_cd)) {
810 ret = gpio_request(gpio_cd, "mmc card detect");
811 if (ret) {
812 dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_cd);
813 goto err_gpio_cd;
814 }
815 gpio_direction_input(gpio_cd);
816 810
817 ret = request_irq(gpio_to_irq(gpio_cd), pxamci_detect_irq, 811 if (gpio_is_valid(gpio_cd))
818 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, 812 ret = mmc_gpio_request_cd(mmc, gpio_cd, 0);
819 "mmc card detect", mmc); 813 if (ret) {
820 if (ret) { 814 dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_cd);
821 dev_err(&pdev->dev, "failed to request card detect IRQ\n"); 815 goto out;
822 goto err_request_irq;
823 }
824 } 816 }
825 817
826 if (host->pdata && host->pdata->init) 818 if (host->pdata && host->pdata->init)
@@ -835,13 +827,7 @@ static int pxamci_probe(struct platform_device *pdev)
835 827
836 return 0; 828 return 0;
837 829
838err_request_irq: 830out:
839 gpio_free(gpio_cd);
840err_gpio_cd:
841 gpio_free(gpio_ro);
842err_gpio_ro:
843 gpio_free(gpio_power);
844 out:
845 if (host) { 831 if (host) {
846 if (host->dma_chan_rx) 832 if (host->dma_chan_rx)
847 dma_release_channel(host->dma_chan_rx); 833 dma_release_channel(host->dma_chan_rx);
@@ -873,14 +859,6 @@ static int pxamci_remove(struct platform_device *pdev)
873 gpio_ro = host->pdata->gpio_card_ro; 859 gpio_ro = host->pdata->gpio_card_ro;
874 gpio_power = host->pdata->gpio_power; 860 gpio_power = host->pdata->gpio_power;
875 } 861 }
876 if (gpio_is_valid(gpio_cd)) {
877 free_irq(gpio_to_irq(gpio_cd), mmc);
878 gpio_free(gpio_cd);
879 }
880 if (gpio_is_valid(gpio_ro))
881 gpio_free(gpio_ro);
882 if (gpio_is_valid(gpio_power))
883 gpio_free(gpio_power);
884 if (host->vcc) 862 if (host->vcc)
885 regulator_put(host->vcc); 863 regulator_put(host->vcc);
886 864
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index a7b7a6771598..b981b8552e43 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -210,6 +210,16 @@
210#define SDXC_IDMAC_DES0_CES BIT(30) /* card error summary */ 210#define SDXC_IDMAC_DES0_CES BIT(30) /* card error summary */
211#define SDXC_IDMAC_DES0_OWN BIT(31) /* 1-idma owns it, 0-host owns it */ 211#define SDXC_IDMAC_DES0_OWN BIT(31) /* 1-idma owns it, 0-host owns it */
212 212
213#define SDXC_CLK_400K 0
214#define SDXC_CLK_25M 1
215#define SDXC_CLK_50M 2
216#define SDXC_CLK_50M_DDR 3
217
218struct sunxi_mmc_clk_delay {
219 u32 output;
220 u32 sample;
221};
222
213struct sunxi_idma_des { 223struct sunxi_idma_des {
214 u32 config; 224 u32 config;
215 u32 buf_size; 225 u32 buf_size;
@@ -229,6 +239,7 @@ struct sunxi_mmc_host {
229 struct clk *clk_mmc; 239 struct clk *clk_mmc;
230 struct clk *clk_sample; 240 struct clk *clk_sample;
231 struct clk *clk_output; 241 struct clk *clk_output;
242 const struct sunxi_mmc_clk_delay *clk_delays;
232 243
233 /* irq */ 244 /* irq */
234 spinlock_t lock; 245 spinlock_t lock;
@@ -654,25 +665,19 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host,
 
 	/* determine delays */
 	if (rate <= 400000) {
-		oclk_dly = 180;
-		sclk_dly = 42;
+		oclk_dly = host->clk_delays[SDXC_CLK_400K].output;
+		sclk_dly = host->clk_delays[SDXC_CLK_400K].sample;
 	} else if (rate <= 25000000) {
-		oclk_dly = 180;
-		sclk_dly = 75;
+		oclk_dly = host->clk_delays[SDXC_CLK_25M].output;
+		sclk_dly = host->clk_delays[SDXC_CLK_25M].sample;
 	} else if (rate <= 50000000) {
 		if (ios->timing == MMC_TIMING_UHS_DDR50) {
-			oclk_dly = 60;
-			sclk_dly = 120;
+			oclk_dly = host->clk_delays[SDXC_CLK_50M_DDR].output;
+			sclk_dly = host->clk_delays[SDXC_CLK_50M_DDR].sample;
 		} else {
-			oclk_dly = 90;
-			sclk_dly = 150;
+			oclk_dly = host->clk_delays[SDXC_CLK_50M].output;
+			sclk_dly = host->clk_delays[SDXC_CLK_50M].sample;
 		}
-	} else if (rate <= 100000000) {
-		oclk_dly = 6;
-		sclk_dly = 24;
-	} else if (rate <= 200000000) {
-		oclk_dly = 3;
-		sclk_dly = 12;
 	} else {
 		return -EINVAL;
 	}
@@ -871,6 +876,7 @@ static void sunxi_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
871static const struct of_device_id sunxi_mmc_of_match[] = { 876static const struct of_device_id sunxi_mmc_of_match[] = {
872 { .compatible = "allwinner,sun4i-a10-mmc", }, 877 { .compatible = "allwinner,sun4i-a10-mmc", },
873 { .compatible = "allwinner,sun5i-a13-mmc", }, 878 { .compatible = "allwinner,sun5i-a13-mmc", },
879 { .compatible = "allwinner,sun9i-a80-mmc", },
874 { /* sentinel */ } 880 { /* sentinel */ }
875}; 881};
876MODULE_DEVICE_TABLE(of, sunxi_mmc_of_match); 882MODULE_DEVICE_TABLE(of, sunxi_mmc_of_match);
@@ -884,6 +890,20 @@ static struct mmc_host_ops sunxi_mmc_ops = {
884 .hw_reset = sunxi_mmc_hw_reset, 890 .hw_reset = sunxi_mmc_hw_reset,
885}; 891};
886 892
893static const struct sunxi_mmc_clk_delay sunxi_mmc_clk_delays[] = {
894 [SDXC_CLK_400K] = { .output = 180, .sample = 180 },
895 [SDXC_CLK_25M] = { .output = 180, .sample = 75 },
896 [SDXC_CLK_50M] = { .output = 90, .sample = 120 },
897 [SDXC_CLK_50M_DDR] = { .output = 60, .sample = 120 },
898};
899
900static const struct sunxi_mmc_clk_delay sun9i_mmc_clk_delays[] = {
901 [SDXC_CLK_400K] = { .output = 180, .sample = 180 },
902 [SDXC_CLK_25M] = { .output = 180, .sample = 75 },
903 [SDXC_CLK_50M] = { .output = 150, .sample = 120 },
904 [SDXC_CLK_50M_DDR] = { .output = 90, .sample = 120 },
905};
906
887static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, 907static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
888 struct platform_device *pdev) 908 struct platform_device *pdev)
889{ 909{
@@ -895,6 +915,11 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
895 else 915 else
896 host->idma_des_size_bits = 16; 916 host->idma_des_size_bits = 16;
897 917
918 if (of_device_is_compatible(np, "allwinner,sun9i-a80-mmc"))
919 host->clk_delays = sun9i_mmc_clk_delays;
920 else
921 host->clk_delays = sunxi_mmc_clk_delays;
922
898 ret = mmc_regulator_get_supply(host->mmc); 923 ret = mmc_regulator_get_supply(host->mmc);
899 if (ret) { 924 if (ret) {
900 if (ret != -EPROBE_DEFER) 925 if (ret != -EPROBE_DEFER)
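The sunxi-mmc change above replaces hard-coded output/sample delays with per-SoC tables indexed by a speed-mode constant, selected from the DT compatible string at probe time. A minimal standalone C sketch of the same lookup pattern (hypothetical names; the delay values are the ones from the tables above):

#include <stdio.h>

enum { CLK_400K, CLK_25M, CLK_50M, CLK_50M_DDR, CLK_NR_MODES };

struct clk_delay { unsigned output, sample; };

static const struct clk_delay a10_delays[CLK_NR_MODES] = {
	[CLK_400K]    = { 180, 180 },
	[CLK_25M]     = { 180,  75 },
	[CLK_50M]     = {  90, 120 },
	[CLK_50M_DDR] = {  60, 120 },
};

static const struct clk_delay a80_delays[CLK_NR_MODES] = {
	[CLK_400K]    = { 180, 180 },
	[CLK_25M]     = { 180,  75 },
	[CLK_50M]     = { 150, 120 },
	[CLK_50M_DDR] = {  90, 120 },
};

/* Pick the mode from the requested rate, then read both delays at once. */
static int mode_for_rate(unsigned long rate, int ddr)
{
	if (rate <= 400000)
		return CLK_400K;
	if (rate <= 25000000)
		return CLK_25M;
	if (rate <= 50000000)
		return ddr ? CLK_50M_DDR : CLK_50M;
	return -1;	/* unsupported rate, mirrors the driver's -EINVAL */
}

int main(void)
{
	const struct clk_delay *tbl = a80_delays;	/* chosen per compatible */
	int mode = mode_for_rate(50000000, 0);

	if (mode >= 0)
		printf("output=%u sample=%u\n", tbl[mode].output, tbl[mode].sample);
	return 0;
}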
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 5bbd1f094f4e..1fc23e48fe8e 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -926,6 +926,11 @@ static int validate_vid_hdr(const struct ubi_device *ubi,
926 goto bad; 926 goto bad;
927 } 927 }
928 928
929 if (data_size > ubi->leb_size) {
930 ubi_err(ubi, "bad data_size");
931 goto bad;
932 }
933
929 if (vol_type == UBI_VID_STATIC) { 934 if (vol_type == UBI_VID_STATIC) {
930 /* 935 /*
931 * Although from high-level point of view static volumes may 936 * Although from high-level point of view static volumes may
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index 80bdd5b88bac..d85c19762160 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -649,6 +649,7 @@ static int init_volumes(struct ubi_device *ubi,
649 if (ubi->corr_peb_count) 649 if (ubi->corr_peb_count)
650 ubi_err(ubi, "%d PEBs are corrupted and not used", 650 ubi_err(ubi, "%d PEBs are corrupted and not used",
651 ubi->corr_peb_count); 651 ubi->corr_peb_count);
652 return -ENOSPC;
652 } 653 }
653 ubi->rsvd_pebs += reserved_pebs; 654 ubi->rsvd_pebs += reserved_pebs;
654 ubi->avail_pebs -= reserved_pebs; 655 ubi->avail_pebs -= reserved_pebs;
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 275d9fb6fe5c..eb4489f9082f 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1601,6 +1601,7 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1601 if (ubi->corr_peb_count) 1601 if (ubi->corr_peb_count)
1602 ubi_err(ubi, "%d PEBs are corrupted and not used", 1602 ubi_err(ubi, "%d PEBs are corrupted and not used",
1603 ubi->corr_peb_count); 1603 ubi->corr_peb_count);
1604 err = -ENOSPC;
1604 goto out_free; 1605 goto out_free;
1605 } 1606 }
1606 ubi->avail_pebs -= reserved_pebs; 1607 ubi->avail_pebs -= reserved_pebs;
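Both UBI hunks fix the same bug class: an error path that logs the problem but leaves the error code unset, so the caller observes success. A minimal standalone sketch, assuming a simplified reservation routine (hypothetical names):

#include <stdio.h>
#include <errno.h>

static int reserve_pebs(int avail, int wanted)
{
	int err = 0;

	if (avail < wanted) {
		fprintf(stderr, "not enough PEBs (%d < %d)\n", avail, wanted);
		err = -ENOSPC;	/* the line the fix adds */
		goto out;
	}
	/* ... commit the reservation ... */
out:
	return err;
}

int main(void)
{
	/* prints a negative errno (typically -28) instead of 0 */
	printf("reserve: %d\n", reserve_pebs(4, 8));
	return 0;
}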
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index 10f71c732b59..816d0e94961c 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -326,7 +326,7 @@ static void arcdev_setup(struct net_device *dev)
 	dev->type = ARPHRD_ARCNET;
 	dev->netdev_ops = &arcnet_netdev_ops;
 	dev->header_ops = &arcnet_header_ops;
-	dev->hard_header_len = sizeof(struct archdr);
+	dev->hard_header_len = sizeof(struct arc_hardware);
 	dev->mtu = choose_mtu();
 
 	dev->addr_len = ARCNET_ALEN;
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index 6f13f7206762..1f7dd927cc5e 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -2000,6 +2000,7 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
2000 */ 2000 */
2001 reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL); 2001 reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
2002 if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) { 2002 if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
2003 reg &= ~PORT_PCS_CTRL_UNFORCED;
2003 reg |= PORT_PCS_CTRL_FORCE_LINK | 2004 reg |= PORT_PCS_CTRL_FORCE_LINK |
2004 PORT_PCS_CTRL_LINK_UP | 2005 PORT_PCS_CTRL_LINK_UP |
2005 PORT_PCS_CTRL_DUPLEX_FULL | 2006 PORT_PCS_CTRL_DUPLEX_FULL |
@@ -2050,6 +2051,8 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
2050 reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA; 2051 reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA;
2051 else 2052 else
2052 reg |= PORT_CONTROL_FRAME_MODE_DSA; 2053 reg |= PORT_CONTROL_FRAME_MODE_DSA;
2054 reg |= PORT_CONTROL_FORWARD_UNKNOWN |
2055 PORT_CONTROL_FORWARD_UNKNOWN_MC;
2053 } 2056 }
2054 2057
2055 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || 2058 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index cfa37041ab71..c4bb8027b3fb 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -689,16 +689,24 @@ static int xgene_enet_phy_connect(struct net_device *ndev)
 		netdev_dbg(ndev, "No phy-handle found in DT\n");
 		return -ENODEV;
 	}
-		pdata->phy_dev = of_phy_find_device(phy_np);
-	}
 
-	phy_dev = pdata->phy_dev;
+		phy_dev = of_phy_connect(ndev, phy_np, &xgene_enet_adjust_link,
+					 0, pdata->phy_mode);
+		if (!phy_dev) {
+			netdev_err(ndev, "Could not connect to PHY\n");
+			return -ENODEV;
+		}
+
+		pdata->phy_dev = phy_dev;
+	} else {
+		phy_dev = pdata->phy_dev;
 
 	if (!phy_dev ||
 	    phy_connect_direct(ndev, phy_dev, &xgene_enet_adjust_link,
 			       pdata->phy_mode)) {
 		netdev_err(ndev, "Could not connect to PHY\n");
 		return -ENODEV;
+	}
 	}
 
 	pdata->phy_speed = SPEED_UNKNOWN;
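The xgene hunk switches the DT path from of_phy_find_device() plus phy_connect_direct() to of_phy_connect(), which looks up, attaches and configures the PHY in one call and returns NULL on failure. An illustrative kernel-style fragment of that call (assumes kernel headers and an existing adjust_link callback; not a standalone program):

#include <linux/errno.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>

static int example_connect_dt_phy(struct net_device *ndev,
				  struct device_node *phy_np,
				  phy_interface_t mode,
				  void (*adjust_link)(struct net_device *))
{
	struct phy_device *phy_dev;

	/* Resolves phy_np, attaches it to ndev and starts the PHY machinery. */
	phy_dev = of_phy_connect(ndev, phy_np, adjust_link, 0, mode);
	if (!phy_dev)
		return -ENODEV;	/* of_phy_connect() returns NULL on failure */
	return 0;
}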
diff --git a/drivers/net/ethernet/arc/emac_arc.c b/drivers/net/ethernet/arc/emac_arc.c
index f9cb99bfb511..ffd180570920 100644
--- a/drivers/net/ethernet/arc/emac_arc.c
+++ b/drivers/net/ethernet/arc/emac_arc.c
@@ -78,6 +78,7 @@ static const struct of_device_id emac_arc_dt_ids[] = {
78 { .compatible = "snps,arc-emac" }, 78 { .compatible = "snps,arc-emac" },
79 { /* Sentinel */ } 79 { /* Sentinel */ }
80}; 80};
81MODULE_DEVICE_TABLE(of, emac_arc_dt_ids);
81 82
82static struct platform_driver emac_arc_driver = { 83static struct platform_driver emac_arc_driver = {
83 .probe = emac_arc_probe, 84 .probe = emac_arc_probe,
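The same one-line fix recurs below for bcm_sysport, bcmgenet, gianfar_ptp, ks8851 and moxart: an of_device_id table that is not exported with MODULE_DEVICE_TABLE(of, ...) generates no module aliases, so udev/modprobe cannot autoload the driver when a matching DT node appears. A minimal kernel-style sketch with a hypothetical compatible string:

#include <linux/module.h>
#include <linux/of.h>

static const struct of_device_id example_dt_ids[] = {
	{ .compatible = "vendor,example-device" },	/* hypothetical */
	{ /* sentinel */ }
};
/* Emits the MODALIAS entries that make automatic module loading work. */
MODULE_DEVICE_TABLE(of, example_dt_ids);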
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index b9a5a97ed4dd..f1b5364f3521 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -2079,6 +2079,7 @@ static const struct of_device_id bcm_sysport_of_match[] = {
2079 { .compatible = "brcm,systemport" }, 2079 { .compatible = "brcm,systemport" },
2080 { /* sentinel */ } 2080 { /* sentinel */ }
2081}; 2081};
2082MODULE_DEVICE_TABLE(of, bcm_sysport_of_match);
2082 2083
2083static struct platform_driver bcm_sysport_driver = { 2084static struct platform_driver bcm_sysport_driver = {
2084 .probe = bcm_sysport_probe, 2085 .probe = bcm_sysport_probe,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index ba936635322a..b5e64b02200c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1946,6 +1946,7 @@ struct bnx2x {
1946 u16 vlan_cnt; 1946 u16 vlan_cnt;
1947 u16 vlan_credit; 1947 u16 vlan_credit;
1948 u16 vxlan_dst_port; 1948 u16 vxlan_dst_port;
1949 u8 vxlan_dst_port_count;
1949 bool accept_any_vlan; 1950 bool accept_any_vlan;
1950}; 1951};
1951 1952
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index e3da2bddf143..f1d62d5dbaff 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -3705,16 +3705,14 @@ out:
 
 void bnx2x_update_mfw_dump(struct bnx2x *bp)
 {
-	struct timeval epoc;
 	u32 drv_ver;
 	u32 valid_dump;
 
 	if (!SHMEM2_HAS(bp, drv_info))
 		return;
 
-	/* Update Driver load time */
-	do_gettimeofday(&epoc);
-	SHMEM2_WR(bp, drv_info.epoc, epoc.tv_sec);
+	/* Update Driver load time, possibly broken in y2038 */
+	SHMEM2_WR(bp, drv_info.epoc, (u32)ktime_get_real_seconds());
 
 	drv_ver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
 	SHMEM2_WR(bp, drv_info.drv_ver, drv_ver);
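This hunk drops the 32-bit struct timeval filled by do_gettimeofday() in favor of ktime_get_real_seconds(), which returns a time64_t; the value is still truncated to u32 for the shared-memory field, which the new comment flags as possibly broken around y2038. A kernel-style sketch of the replacement call (illustrative only):

#include <linux/timekeeping.h>

static u32 example_epoch_u32(void)
{
	/* time64_t narrowed to u32 for a legacy firmware field; the
	 * truncation, not the clock read, is the remaining y2038 risk.
	 */
	return (u32)ktime_get_real_seconds();
}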
@@ -10110,12 +10108,18 @@ static void __bnx2x_add_vxlan_port(struct bnx2x *bp, u16 port)
 	if (!netif_running(bp->dev))
 		return;
 
-	if (bp->vxlan_dst_port || !IS_PF(bp)) {
+	if (bp->vxlan_dst_port_count && bp->vxlan_dst_port == port) {
+		bp->vxlan_dst_port_count++;
+		return;
+	}
+
+	if (bp->vxlan_dst_port_count || !IS_PF(bp)) {
 		DP(BNX2X_MSG_SP, "Vxlan destination port limit reached\n");
 		return;
 	}
 
 	bp->vxlan_dst_port = port;
+	bp->vxlan_dst_port_count = 1;
 	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_ADD_VXLAN_PORT, 0);
 }
10121 10125
@@ -10130,10 +10134,14 @@ static void bnx2x_add_vxlan_port(struct net_device *netdev,
 
 static void __bnx2x_del_vxlan_port(struct bnx2x *bp, u16 port)
 {
-	if (!bp->vxlan_dst_port || bp->vxlan_dst_port != port || !IS_PF(bp)) {
+	if (!bp->vxlan_dst_port_count || bp->vxlan_dst_port != port ||
+	    !IS_PF(bp)) {
 		DP(BNX2X_MSG_SP, "Invalid vxlan port\n");
 		return;
 	}
+	bp->vxlan_dst_port--;
+	if (bp->vxlan_dst_port)
+		return;
 
 	if (netif_running(bp->dev)) {
 		bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_DEL_VXLAN_PORT, 0);
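These bnx2x hunks, like the be2net and qlcnic hunks further down, add reference counting around a single offloadable VXLAN UDP port: re-adding the same port takes another reference, and only the last delete tears the offload down. A standalone sketch of the pattern, with hypothetical names:

#include <stdio.h>

struct vxlan_state { unsigned short port; unsigned count; };

static void add_port(struct vxlan_state *s, unsigned short port)
{
	if (s->count && s->port == port) {
		s->count++;		/* same port: just take a reference */
		return;
	}
	if (s->count) {
		fprintf(stderr, "port limit reached\n");
		return;
	}
	s->port = port;
	s->count = 1;
	printf("offload enabled on %u\n", port);
}

static void del_port(struct vxlan_state *s, unsigned short port)
{
	if (!s->count || s->port != port)
		return;			/* unknown port: ignore */
	if (--s->count)
		return;			/* other users remain */
	printf("offload disabled on %u\n", port);
}

int main(void)
{
	struct vxlan_state s = { 0, 0 };

	add_port(&s, 4789);
	add_port(&s, 4789);	/* refcount -> 2 */
	del_port(&s, 4789);	/* still offloaded */
	del_port(&s, 4789);	/* now torn down */
	return 0;
}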
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index c9bd7f16018e..ff702a707a91 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -4319,8 +4319,16 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
 
 	/* RSS keys */
 	if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
-		memcpy(&data->rss_key[0], &p->rss_key[0],
-		       sizeof(data->rss_key));
+		u8 *dst = (u8 *)(data->rss_key) + sizeof(data->rss_key);
+		const u8 *src = (const u8 *)p->rss_key;
+		int i;
+
+		/* Apparently, bnx2x reads this array in reverse order
+		 * We need to byte swap rss_key to comply with Toeplitz specs.
+		 */
+		for (i = 0; i < sizeof(data->rss_key); i++)
+			*--dst = *src++;
+
 		caps |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
 	}
 
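The RSS hunk replaces a straight memcpy() of the Toeplitz key with a back-to-front copy, because the device consumes the key in reverse byte order. A standalone sketch of the reversal loop:

#include <stdio.h>

static void copy_key_reversed(unsigned char *dst_key, size_t dst_len,
			      const unsigned char *src_key)
{
	unsigned char *dst = dst_key + dst_len;	/* one past the end */
	size_t i;

	for (i = 0; i < dst_len; i++)
		*--dst = *src_key++;	/* dst[len - 1 - i] = src[i] */
}

int main(void)
{
	unsigned char src[4] = { 0x11, 0x22, 0x33, 0x44 };
	unsigned char dst[4];

	copy_key_reversed(dst, sizeof(dst), src);
	printf("%02x %02x %02x %02x\n", dst[0], dst[1], dst[2], dst[3]);
	/* prints: 44 33 22 11 */
	return 0;
}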
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index fadbd0088d3e..3bc701e4c59e 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -3155,6 +3155,7 @@ static const struct of_device_id bcmgenet_match[] = {
3155 { .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 }, 3155 { .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 },
3156 { }, 3156 { },
3157}; 3157};
3158MODULE_DEVICE_TABLE(of, bcmgenet_match);
3158 3159
3159static int bcmgenet_probe(struct platform_device *pdev) 3160static int bcmgenet_probe(struct platform_device *pdev)
3160{ 3161{
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index b7a0f7879de2..9e59663a6ead 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -1543,7 +1543,7 @@ bfa_flash_cmd_act_check(void __iomem *pci_bar)
 }
 
 /* Flush FLI data fifo. */
-static u32
+static int
 bfa_flash_fifo_flush(void __iomem *pci_bar)
 {
 	u32 i;
@@ -1573,11 +1573,11 @@ bfa_flash_fifo_flush(void __iomem *pci_bar)
 }
 
 /* Read flash status. */
-static u32
+static int
 bfa_flash_status_read(void __iomem *pci_bar)
 {
 	union bfa_flash_dev_status_reg	dev_status;
-	u32	status;
+	int	status;
 	u32	ret_status;
 	int	i;
 
@@ -1611,11 +1611,11 @@ bfa_flash_status_read(void __iomem *pci_bar)
 }
 
 /* Start flash read operation. */
-static u32
+static int
 bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len,
 		     char *buf)
 {
-	u32 status;
+	int status;
 
 	/* len must be mutiple of 4 and not exceeding fifo size */
 	if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0)
@@ -1703,7 +1703,8 @@ static enum bfa_status
 bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf,
 		   u32 len)
 {
-	u32 n, status;
+	u32 n;
+	int status;
 	u32 off, l, s, residue, fifo_sz;
 
 	residue = len;
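These bfa_ioc hunks change return types from u32 to int because the helpers return negative error codes; with an unsigned return type, a caller's "status < 0" test can never be true. The hip04 and r8169 hunks below make the same class of signedness fix. A standalone sketch of the failure mode:

#include <stdio.h>

/* Returns -22 (-EINVAL), but the unsigned type destroys the sign. */
static unsigned buggy_read(void) { return (unsigned)-22; }
static int      fixed_read(void) { return -22; }

int main(void)
{
	/* "unsigned < 0" is always false (compilers warn about this),
	 * so the error is silently ignored.
	 */
	if (buggy_read() < 0)
		printf("buggy: caught\n");
	else
		printf("buggy: error silently ignored\n");

	if (fixed_read() < 0)
		printf("fixed: caught\n");
	return 0;
}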
diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
index 5d0753cc7e73..04b0d16b210e 100644
--- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
+++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
@@ -2400,6 +2400,7 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
2400 q0->rcb->id = 0; 2400 q0->rcb->id = 0;
2401 q0->rx_packets = q0->rx_bytes = 0; 2401 q0->rx_packets = q0->rx_bytes = 0;
2402 q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0; 2402 q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0;
2403 q0->rxbuf_map_failed = 0;
2403 2404
2404 bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE, 2405 bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE,
2405 &dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[i]); 2406 &dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[i]);
@@ -2428,6 +2429,7 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
2428 : rx_cfg->q1_buf_size; 2429 : rx_cfg->q1_buf_size;
2429 q1->rx_packets = q1->rx_bytes = 0; 2430 q1->rx_packets = q1->rx_bytes = 0;
2430 q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0; 2431 q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0;
2432 q1->rxbuf_map_failed = 0;
2431 2433
2432 bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE, 2434 bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE,
2433 &hqpt_mem[i], &hsqpt_mem[i], 2435 &hqpt_mem[i], &hsqpt_mem[i],
diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h
index e0e797f2ea14..c438d032e8bf 100644
--- a/drivers/net/ethernet/brocade/bna/bna_types.h
+++ b/drivers/net/ethernet/brocade/bna/bna_types.h
@@ -587,6 +587,7 @@ struct bna_rxq {
587 u64 rx_bytes; 587 u64 rx_bytes;
588 u64 rx_packets_with_error; 588 u64 rx_packets_with_error;
589 u64 rxbuf_alloc_failed; 589 u64 rxbuf_alloc_failed;
590 u64 rxbuf_map_failed;
590}; 591};
591 592
592/* RxQ pair */ 593/* RxQ pair */
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 506047c38607..21a0cfc3e7ec 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -399,7 +399,13 @@ bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
399 } 399 }
400 400
401 dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset, 401 dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset,
402 unmap_q->map_size, DMA_FROM_DEVICE); 402 unmap_q->map_size, DMA_FROM_DEVICE);
403 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
404 put_page(page);
405 BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
406 rcb->rxq->rxbuf_map_failed++;
407 goto finishing;
408 }
403 409
404 unmap->page = page; 410 unmap->page = page;
405 unmap->page_offset = page_offset; 411 unmap->page_offset = page_offset;
@@ -454,8 +460,15 @@ bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
454 rcb->rxq->rxbuf_alloc_failed++; 460 rcb->rxq->rxbuf_alloc_failed++;
455 goto finishing; 461 goto finishing;
456 } 462 }
463
457 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data, 464 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
458 buff_sz, DMA_FROM_DEVICE); 465 buff_sz, DMA_FROM_DEVICE);
466 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
467 dev_kfree_skb_any(skb);
468 BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
469 rcb->rxq->rxbuf_map_failed++;
470 goto finishing;
471 }
459 472
460 unmap->skb = skb; 473 unmap->skb = skb;
461 dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr); 474 dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
@@ -3025,6 +3038,11 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
3025 unmap = head_unmap; 3038 unmap = head_unmap;
3026 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data, 3039 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
3027 len, DMA_TO_DEVICE); 3040 len, DMA_TO_DEVICE);
3041 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
3042 dev_kfree_skb_any(skb);
3043 BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
3044 return NETDEV_TX_OK;
3045 }
3028 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr); 3046 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
3029 txqent->vector[0].length = htons(len); 3047 txqent->vector[0].length = htons(len);
3030 dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr); 3048 dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr);
@@ -3056,6 +3074,15 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
3056 3074
3057 dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag, 3075 dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
3058 0, size, DMA_TO_DEVICE); 3076 0, size, DMA_TO_DEVICE);
3077 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
3078 /* Undo the changes starting at tcb->producer_index */
3079 bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
3080 tcb->producer_index);
3081 dev_kfree_skb_any(skb);
3082 BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
3083 return NETDEV_TX_OK;
3084 }
3085
3059 dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size); 3086 dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size);
3060 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr); 3087 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
3061 txqent->vector[vect_id].length = htons(size); 3088 txqent->vector[vect_id].length = htons(size);
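Each of the bnad.c hunks adds the mandatory dma_mapping_error() check after dma_map_single()/dma_map_page(): on failure the buffer is released, a map-failure counter is bumped, and the operation is unwound (a TX frame is dropped with NETDEV_TX_OK so the stack does not requeue a poisoned skb). An illustrative kernel-style fragment of the check, simplified and not the driver's actual helper:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int example_map_skb(struct device *dev, struct sk_buff *skb,
			   dma_addr_t *addr)
{
	*addr = dma_map_single(dev, skb->data, skb_headlen(skb),
			       DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *addr)) {
		/* Mapping can fail (e.g. IOMMU exhaustion); never hand the
		 * bogus address to hardware. Free the skb and report.
		 */
		dev_kfree_skb_any(skb);
		return -ENOMEM;
	}
	return 0;
}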
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index faedbf24777e..f4ed816b93ee 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -175,6 +175,7 @@ struct bnad_drv_stats {
175 u64 tx_skb_headlen_zero; 175 u64 tx_skb_headlen_zero;
176 u64 tx_skb_frag_zero; 176 u64 tx_skb_frag_zero;
177 u64 tx_skb_len_mismatch; 177 u64 tx_skb_len_mismatch;
178 u64 tx_skb_map_failed;
178 179
179 u64 hw_stats_updates; 180 u64 hw_stats_updates;
180 u64 netif_rx_dropped; 181 u64 netif_rx_dropped;
@@ -189,6 +190,7 @@ struct bnad_drv_stats {
189 u64 rx_unmap_q_alloc_failed; 190 u64 rx_unmap_q_alloc_failed;
190 191
191 u64 rxbuf_alloc_failed; 192 u64 rxbuf_alloc_failed;
193 u64 rxbuf_map_failed;
192}; 194};
193 195
194/* Complete driver stats */ 196/* Complete driver stats */
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index 2bdfc5dff4b1..0e4fdc3dd729 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -90,6 +90,7 @@ static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
90 "tx_skb_headlen_zero", 90 "tx_skb_headlen_zero",
91 "tx_skb_frag_zero", 91 "tx_skb_frag_zero",
92 "tx_skb_len_mismatch", 92 "tx_skb_len_mismatch",
93 "tx_skb_map_failed",
93 "hw_stats_updates", 94 "hw_stats_updates",
94 "netif_rx_dropped", 95 "netif_rx_dropped",
95 96
@@ -102,6 +103,7 @@ static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
102 "tx_unmap_q_alloc_failed", 103 "tx_unmap_q_alloc_failed",
103 "rx_unmap_q_alloc_failed", 104 "rx_unmap_q_alloc_failed",
104 "rxbuf_alloc_failed", 105 "rxbuf_alloc_failed",
106 "rxbuf_map_failed",
105 107
106 "mac_stats_clr_cnt", 108 "mac_stats_clr_cnt",
107 "mac_frame_64", 109 "mac_frame_64",
@@ -807,6 +809,7 @@ bnad_per_q_stats_fill(struct bnad *bnad, u64 *buf, int bi)
807 rx_packets_with_error; 809 rx_packets_with_error;
808 buf[bi++] = rcb->rxq-> 810 buf[bi++] = rcb->rxq->
809 rxbuf_alloc_failed; 811 rxbuf_alloc_failed;
812 buf[bi++] = rcb->rxq->rxbuf_map_failed;
810 buf[bi++] = rcb->producer_index; 813 buf[bi++] = rcb->producer_index;
811 buf[bi++] = rcb->consumer_index; 814 buf[bi++] = rcb->consumer_index;
812 } 815 }
@@ -821,6 +824,7 @@ bnad_per_q_stats_fill(struct bnad *bnad, u64 *buf, int bi)
821 rx_packets_with_error; 824 rx_packets_with_error;
822 buf[bi++] = rcb->rxq-> 825 buf[bi++] = rcb->rxq->
823 rxbuf_alloc_failed; 826 rxbuf_alloc_failed;
827 buf[bi++] = rcb->rxq->rxbuf_map_failed;
824 buf[bi++] = rcb->producer_index; 828 buf[bi++] = rcb->producer_index;
825 buf[bi++] = rcb->consumer_index; 829 buf[bi++] = rcb->consumer_index;
826 } 830 }
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index 8353a6cbfcc2..03ed00c49823 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -157,6 +157,11 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
157 CH_PCI_ID_TABLE_FENTRY(0x5090), /* Custom T540-CR */ 157 CH_PCI_ID_TABLE_FENTRY(0x5090), /* Custom T540-CR */
158 CH_PCI_ID_TABLE_FENTRY(0x5091), /* Custom T522-CR */ 158 CH_PCI_ID_TABLE_FENTRY(0x5091), /* Custom T522-CR */
159 CH_PCI_ID_TABLE_FENTRY(0x5092), /* Custom T520-CR */ 159 CH_PCI_ID_TABLE_FENTRY(0x5092), /* Custom T520-CR */
160 CH_PCI_ID_TABLE_FENTRY(0x5093), /* Custom T580-LP-CR */
161 CH_PCI_ID_TABLE_FENTRY(0x5094), /* Custom T540-CR */
162 CH_PCI_ID_TABLE_FENTRY(0x5095), /* Custom T540-CR-SO */
163 CH_PCI_ID_TABLE_FENTRY(0x5096), /* Custom T580-CR */
164 CH_PCI_ID_TABLE_FENTRY(0x5097), /* Custom T520-KR */
160 165
161 /* T6 adapters: 166 /* T6 adapters:
162 */ 167 */
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 0a27805cbbbd..821540913343 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -582,6 +582,7 @@ struct be_adapter {
582 u16 pvid; 582 u16 pvid;
583 __be16 vxlan_port; 583 __be16 vxlan_port;
584 int vxlan_port_count; 584 int vxlan_port_count;
585 int vxlan_port_aliases;
585 struct phy_info phy; 586 struct phy_info phy;
586 u8 wol_cap; 587 u8 wol_cap;
587 bool wol_en; 588 bool wol_en;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 12687bf52b95..7bf51a1a0a77 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -5176,6 +5176,11 @@ static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
5176 if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter)) 5176 if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
5177 return; 5177 return;
5178 5178
5179 if (adapter->vxlan_port == port && adapter->vxlan_port_count) {
5180 adapter->vxlan_port_aliases++;
5181 return;
5182 }
5183
5179 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) { 5184 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
5180 dev_info(dev, 5185 dev_info(dev,
5181 "Only one UDP port supported for VxLAN offloads\n"); 5186 "Only one UDP port supported for VxLAN offloads\n");
@@ -5226,6 +5231,11 @@ static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
5226 if (adapter->vxlan_port != port) 5231 if (adapter->vxlan_port != port)
5227 goto done; 5232 goto done;
5228 5233
5234 if (adapter->vxlan_port_aliases) {
5235 adapter->vxlan_port_aliases--;
5236 return;
5237 }
5238
5229 be_disable_vxlan_offloads(adapter); 5239 be_disable_vxlan_offloads(adapter);
5230 5240
5231 dev_info(&adapter->pdev->dev, 5241 dev_info(&adapter->pdev->dev,
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 4b69d061d90f..710715fcb23d 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1710,8 +1710,10 @@ static void gfar_configure_serdes(struct net_device *dev)
 	 * everything for us? Resetting it takes the link down and requires
 	 * several seconds for it to come back.
 	 */
-	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
+	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) {
+		put_device(&tbiphy->dev);
 		return;
+	}
 
 	/* Single clk mode, mii mode off(for serdes communication) */
 	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
@@ -1723,6 +1725,8 @@ static void gfar_configure_serdes(struct net_device *dev)
1723 phy_write(tbiphy, MII_BMCR, 1725 phy_write(tbiphy, MII_BMCR,
1724 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX | 1726 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
1725 BMCR_SPEED1000); 1727 BMCR_SPEED1000);
1728
1729 put_device(&tbiphy->dev);
1726} 1730}
1727 1731
1728static int __gfar_is_rx_idle(struct gfar_private *priv) 1732static int __gfar_is_rx_idle(struct gfar_private *priv)
@@ -1970,8 +1974,7 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
 	/* Install our interrupt handlers for Error,
 	 * Transmit, and Receive
 	 */
-	err = request_irq(gfar_irq(grp, ER)->irq, gfar_error,
-			  IRQF_NO_SUSPEND,
+	err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
 			  gfar_irq(grp, ER)->name, grp);
 	if (err < 0) {
 		netif_err(priv, intr, dev, "Can't get IRQ %d\n",
@@ -1979,6 +1982,8 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
1979 1982
1980 goto err_irq_fail; 1983 goto err_irq_fail;
1981 } 1984 }
1985 enable_irq_wake(gfar_irq(grp, ER)->irq);
1986
1982 err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0, 1987 err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
1983 gfar_irq(grp, TX)->name, grp); 1988 gfar_irq(grp, TX)->name, grp);
1984 if (err < 0) { 1989 if (err < 0) {
@@ -1994,14 +1999,14 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
 			goto rx_irq_fail;
 		}
 	} else {
-		err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt,
-				  IRQF_NO_SUSPEND,
+		err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
 				  gfar_irq(grp, TX)->name, grp);
 		if (err < 0) {
 			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
 				  gfar_irq(grp, TX)->irq);
 			goto err_irq_fail;
 		}
+		enable_irq_wake(gfar_irq(grp, TX)->irq);
 	}
 
 	return 0;
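The gianfar hunks drop IRQF_NO_SUSPEND, which only keeps an IRQ enabled across suspend, and instead call enable_irq_wake() after a successful request_irq(), making the interrupt a genuine wakeup source. A kernel-style sketch of the combined pattern (illustrative helper name):

#include <linux/interrupt.h>

static int example_request_wake_irq(unsigned int irq, irq_handler_t handler,
				    const char *name, void *data)
{
	int err = request_irq(irq, handler, 0, name, data);

	if (err)
		return err;
	enable_irq_wake(irq);	/* mark the IRQ as a system wakeup source */
	return 0;
}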
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index 8e3cd77aa347..664d0c261269 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -557,6 +557,7 @@ static const struct of_device_id match_table[] = {
557 { .compatible = "fsl,etsec-ptp" }, 557 { .compatible = "fsl,etsec-ptp" },
558 {}, 558 {},
559}; 559};
560MODULE_DEVICE_TABLE(of, match_table);
560 561
561static struct platform_driver gianfar_ptp_driver = { 562static struct platform_driver gianfar_ptp_driver = {
562 .driver = { 563 .driver = {
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 4dd40e057f40..650f7888e32b 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -1384,6 +1384,8 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth)
1384 value = phy_read(tbiphy, ENET_TBI_MII_CR); 1384 value = phy_read(tbiphy, ENET_TBI_MII_CR);
1385 value &= ~0x1000; /* Turn off autonegotiation */ 1385 value &= ~0x1000; /* Turn off autonegotiation */
1386 phy_write(tbiphy, ENET_TBI_MII_CR, value); 1386 phy_write(tbiphy, ENET_TBI_MII_CR, value);
1387
1388 put_device(&tbiphy->dev);
1387 } 1389 }
1388 1390
1389 init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2); 1391 init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2);
@@ -1702,8 +1704,10 @@ static void uec_configure_serdes(struct net_device *dev)
 	 * everything for us? Resetting it takes the link down and requires
 	 * several seconds for it to come back.
 	 */
-	if (phy_read(tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS)
+	if (phy_read(tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS) {
+		put_device(&tbiphy->dev);
 		return;
+	}
 
 	/* Single clk mode, mii mode off(for serdes communication) */
 	phy_write(tbiphy, ENET_TBI_MII_ANA, TBIANA_SETTINGS);
@@ -1711,6 +1715,8 @@ static void uec_configure_serdes(struct net_device *dev)
1711 phy_write(tbiphy, ENET_TBI_MII_TBICON, TBICON_CLK_SELECT); 1715 phy_write(tbiphy, ENET_TBI_MII_TBICON, TBICON_CLK_SELECT);
1712 1716
1713 phy_write(tbiphy, ENET_TBI_MII_CR, TBICR_SETTINGS); 1717 phy_write(tbiphy, ENET_TBI_MII_CR, TBICR_SETTINGS);
1718
1719 put_device(&tbiphy->dev);
1714} 1720}
1715 1721
1716/* Configure the PHY for dev. 1722/* Configure the PHY for dev.
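The put_device() additions in gianfar, ucc_geth and (below) mvneta share one cause: of_phy_find_device() returns a phy_device with a reference held on its embedded struct device, and every exit path, including the early returns, must drop that reference. An illustrative kernel-style fragment (assumes a TBI-style PHY and the struct device member layout of this kernel era):

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/mii.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>

static int example_poke_tbi(struct device_node *np)
{
	struct phy_device *tbiphy = of_phy_find_device(np);	/* takes a ref */

	if (!tbiphy)
		return -ENODEV;

	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) {
		put_device(&tbiphy->dev);	/* early exit: drop the ref too */
		return 0;
	}

	/* ... program the TBI PHY ... */

	put_device(&tbiphy->dev);
	return 0;
}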
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index cc2d8b4b18e3..253f8ed0537a 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -816,7 +816,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
 	struct net_device *ndev;
 	struct hip04_priv *priv;
 	struct resource *res;
-	unsigned int irq;
+	int irq;
 	int ret;
 
 	ndev = alloc_etherdev(sizeof(struct hip04_priv));
diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h
index 28df37420da9..ac02c675c59c 100644
--- a/drivers/net/ethernet/ibm/emac/core.h
+++ b/drivers/net/ethernet/ibm/emac/core.h
@@ -460,8 +460,8 @@ struct emac_ethtool_regs_subhdr {
 	u32 index;
 };
 
-#define EMAC_ETHTOOL_REGS_VER		0
-#define EMAC4_ETHTOOL_REGS_VER		1
-#define EMAC4SYNC_ETHTOOL_REGS_VER	2
+#define EMAC_ETHTOOL_REGS_VER		3
+#define EMAC4_ETHTOOL_REGS_VER		4
+#define EMAC4SYNC_ETHTOOL_REGS_VER	5
 
 #endif /* __IBM_NEWEMAC_CORE_H */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 3e0d20037675..62488a67149d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -946,6 +946,13 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
946 /* take the lock before we start messing with the ring */ 946 /* take the lock before we start messing with the ring */
947 mutex_lock(&hw->aq.arq_mutex); 947 mutex_lock(&hw->aq.arq_mutex);
948 948
949 if (hw->aq.arq.count == 0) {
950 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
951 "AQRX: Admin queue not initialized.\n");
952 ret_code = I40E_ERR_QUEUE_EMPTY;
953 goto clean_arq_element_err;
954 }
955
949 /* set next_to_use to head */ 956 /* set next_to_use to head */
950 ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK); 957 ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
951 if (ntu == ntc) { 958 if (ntu == ntc) {
@@ -1007,6 +1014,8 @@ clean_arq_element_out:
1007 /* Set pending if needed, unlock and return */ 1014 /* Set pending if needed, unlock and return */
1008 if (pending != NULL) 1015 if (pending != NULL)
1009 *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc); 1016 *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
1017
1018clean_arq_element_err:
1010 mutex_unlock(&hw->aq.arq_mutex); 1019 mutex_unlock(&hw->aq.arq_mutex);
1011 1020
1012 if (i40e_is_nvm_update_op(&e->desc)) { 1021 if (i40e_is_nvm_update_op(&e->desc)) {
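Both adminq hunks (i40e here and i40evf below) validate the ring right after taking the mutex and bail out through a label, so there is exactly one unlock point and the lock is never leaked on the early-error path. A standalone pthread sketch of the same shape, with hypothetical names:

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned queue_count;	/* 0 means "not initialized" */

static int clean_queue_element(void)
{
	int ret = 0;

	pthread_mutex_lock(&lock);
	if (queue_count == 0) {
		fprintf(stderr, "queue not initialized\n");
		ret = -1;
		goto out;	/* still reaches the single unlock below */
	}
	/* ... consume one element under the lock ... */
out:
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	return clean_queue_element() ? 1 : 0;
}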
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 851c1a159be8..2fdf978ae6a5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -2672,7 +2672,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
 	rx_ctx.lrxqthresh = 2;
 	rx_ctx.crcstrip = 1;
 	rx_ctx.l2tsel = 1;
-	rx_ctx.showiv = 1;
+	/* this controls whether VLAN is stripped from inner headers */
+	rx_ctx.showiv = 0;
 #ifdef I40E_FCOE
 	rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
 #endif
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
index f08450b90774..929d47152bf2 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -887,6 +887,13 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
887 /* take the lock before we start messing with the ring */ 887 /* take the lock before we start messing with the ring */
888 mutex_lock(&hw->aq.arq_mutex); 888 mutex_lock(&hw->aq.arq_mutex);
889 889
890 if (hw->aq.arq.count == 0) {
891 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
892 "AQRX: Admin queue not initialized.\n");
893 ret_code = I40E_ERR_QUEUE_EMPTY;
894 goto clean_arq_element_err;
895 }
896
890 /* set next_to_use to head */ 897 /* set next_to_use to head */
891 ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK); 898 ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
892 if (ntu == ntc) { 899 if (ntu == ntc) {
@@ -948,6 +955,8 @@ clean_arq_element_out:
948 /* Set pending if needed, unlock and return */ 955 /* Set pending if needed, unlock and return */
949 if (pending != NULL) 956 if (pending != NULL)
950 *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc); 957 *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
958
959clean_arq_element_err:
951 mutex_unlock(&hw->aq.arq_mutex); 960 mutex_unlock(&hw->aq.arq_mutex);
952 961
953 return ret_code; 962 return ret_code;
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index fe2299ac4f5c..514df76fc70f 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1479,6 +1479,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
1479 struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq); 1479 struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
1480 struct sk_buff *skb; 1480 struct sk_buff *skb;
1481 unsigned char *data; 1481 unsigned char *data;
1482 dma_addr_t phys_addr;
1482 u32 rx_status; 1483 u32 rx_status;
1483 int rx_bytes, err; 1484 int rx_bytes, err;
1484 1485
@@ -1486,6 +1487,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
1486 rx_status = rx_desc->status; 1487 rx_status = rx_desc->status;
1487 rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE); 1488 rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
1488 data = (unsigned char *)rx_desc->buf_cookie; 1489 data = (unsigned char *)rx_desc->buf_cookie;
1490 phys_addr = rx_desc->buf_phys_addr;
1489 1491
1490 if (!mvneta_rxq_desc_is_first_last(rx_status) || 1492 if (!mvneta_rxq_desc_is_first_last(rx_status) ||
1491 (rx_status & MVNETA_RXD_ERR_SUMMARY)) { 1493 (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
@@ -1534,7 +1536,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
 		if (!skb)
 			goto err_drop_frame;
 
-		dma_unmap_single(dev->dev.parent, rx_desc->buf_phys_addr,
+		dma_unmap_single(dev->dev.parent, phys_addr,
 				 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
 
 		rcvd_pkts++;
@@ -3173,6 +3175,8 @@ static int mvneta_probe(struct platform_device *pdev)
3173 struct phy_device *phy = of_phy_find_device(dn); 3175 struct phy_device *phy = of_phy_find_device(dn);
3174 3176
3175 mvneta_fixed_link_update(pp, phy); 3177 mvneta_fixed_link_update(pp, phy);
3178
3179 put_device(&phy->dev);
3176 } 3180 }
3177 3181
3178 return 0; 3182 return 0;
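The mvneta hunk snapshots buf_phys_addr at the top of the loop because, by the time the buffer is unmapped, the descriptor may already have been refilled and handed back to the device with a different buffer address. A standalone sketch of why the snapshot matters:

#include <stdio.h>

struct rx_desc { unsigned long buf_phys_addr; unsigned data_size; };

static void refill(struct rx_desc *d, unsigned long new_addr)
{
	d->buf_phys_addr = new_addr;	/* device now owns a new buffer */
}

int main(void)
{
	struct rx_desc d = { 0x1000, 1514 };
	unsigned long phys = d.buf_phys_addr;	/* snapshot before refill */

	refill(&d, 0x2000);
	/* Unmap the buffer we actually consumed, not the replacement. */
	printf("unmap %#lx (not %#lx)\n", phys, d.buf_phys_addr);
	return 0;
}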
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 4c7de8c44659..e7a5000aa12c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -1270,8 +1270,6 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
1270 rss_context->hash_fn = MLX4_RSS_HASH_TOP; 1270 rss_context->hash_fn = MLX4_RSS_HASH_TOP;
1271 memcpy(rss_context->rss_key, priv->rss_key, 1271 memcpy(rss_context->rss_key, priv->rss_key,
1272 MLX4_EN_RSS_KEY_SIZE); 1272 MLX4_EN_RSS_KEY_SIZE);
1273 netdev_rss_key_fill(rss_context->rss_key,
1274 MLX4_EN_RSS_KEY_SIZE);
1275 } else { 1273 } else {
1276 en_err(priv, "Unknown RSS hash function requested\n"); 1274 en_err(priv, "Unknown RSS hash function requested\n");
1277 err = -EINVAL; 1275 err = -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index bd9ea0d01aae..1d4e2e054647 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -1184,10 +1184,11 @@ out:
 	if (prot == MLX4_PROT_ETH) {
 		/* manage the steering entry for promisc mode */
 		if (new_entry)
-			new_steering_entry(dev, port, steer, index, qp->qpn);
+			err = new_steering_entry(dev, port, steer,
+						 index, qp->qpn);
 		else
-			existing_steering_entry(dev, port, steer,
-						index, qp->qpn);
+			err = existing_steering_entry(dev, port, steer,
+						      index, qp->qpn);
 	}
 	if (err && link && index != -1) {
 		if (index < dev->caps.num_mgms)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index aa0d5ffe92d8..9335e5ae18cc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -200,25 +200,3 @@ int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev)
200 200
201 return err; 201 return err;
202} 202}
203
204int mlx5_core_query_special_context(struct mlx5_core_dev *dev, u32 *rsvd_lkey)
205{
206 struct mlx5_cmd_query_special_contexts_mbox_in in;
207 struct mlx5_cmd_query_special_contexts_mbox_out out;
208 int err;
209
210 memset(&in, 0, sizeof(in));
211 memset(&out, 0, sizeof(out));
212 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
213 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
214 if (err)
215 return err;
216
217 if (out.hdr.status)
218 err = mlx5_cmd_status_to_err(&out.hdr);
219
220 *rsvd_lkey = be32_to_cpu(out.resd_lkey);
221
222 return err;
223}
224EXPORT_SYMBOL(mlx5_core_query_special_context);
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index 66d4ab703f45..60f43ec22175 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -1601,6 +1601,7 @@ static const struct of_device_id ks8851_match_table[] = {
1601 { .compatible = "micrel,ks8851" }, 1601 { .compatible = "micrel,ks8851" },
1602 { } 1602 { }
1603}; 1603};
1604MODULE_DEVICE_TABLE(of, ks8851_match_table);
1604 1605
1605static struct spi_driver ks8851_driver = { 1606static struct spi_driver ks8851_driver = {
1606 .driver = { 1607 .driver = {
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index becbb5f1f5a7..a10c928bbd6b 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -552,6 +552,7 @@ static const struct of_device_id moxart_mac_match[] = {
552 { .compatible = "moxa,moxart-mac" }, 552 { .compatible = "moxa,moxart-mac" },
553 { } 553 { }
554}; 554};
555MODULE_DEVICE_TABLE(of, moxart_mac_match);
555 556
556static struct platform_driver moxart_mac_driver = { 557static struct platform_driver moxart_mac_driver = {
557 .probe = moxart_mac_probe, 558 .probe = moxart_mac_probe,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 06bcc734fe8d..d6696cfa11d2 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -536,6 +536,7 @@ struct qlcnic_hardware_context {
536 u8 extend_lb_time; 536 u8 extend_lb_time;
537 u8 phys_port_id[ETH_ALEN]; 537 u8 phys_port_id[ETH_ALEN];
538 u8 lb_mode; 538 u8 lb_mode;
539 u8 vxlan_port_count;
539 u16 vxlan_port; 540 u16 vxlan_port;
540 struct device *hwmon_dev; 541 struct device *hwmon_dev;
541 u32 post_mode; 542 u32 post_mode;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 8b08b20e8b30..d4481454b5f8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -483,11 +483,17 @@ static void qlcnic_add_vxlan_port(struct net_device *netdev,
 	/* Adapter supports only one VXLAN port. Use very first port
 	 * for enabling offload
 	 */
-	if (!qlcnic_encap_rx_offload(adapter) || ahw->vxlan_port)
+	if (!qlcnic_encap_rx_offload(adapter))
 		return;
+	if (!ahw->vxlan_port_count) {
+		ahw->vxlan_port_count = 1;
+		ahw->vxlan_port = ntohs(port);
+		adapter->flags |= QLCNIC_ADD_VXLAN_PORT;
+		return;
+	}
+	if (ahw->vxlan_port == ntohs(port))
+		ahw->vxlan_port_count++;
 
-	ahw->vxlan_port = ntohs(port);
-	adapter->flags |= QLCNIC_ADD_VXLAN_PORT;
 }
492 498
493static void qlcnic_del_vxlan_port(struct net_device *netdev, 499static void qlcnic_del_vxlan_port(struct net_device *netdev,
@@ -496,11 +502,13 @@ static void qlcnic_del_vxlan_port(struct net_device *netdev,
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
 	struct qlcnic_hardware_context *ahw = adapter->ahw;
 
-	if (!qlcnic_encap_rx_offload(adapter) || !ahw->vxlan_port ||
+	if (!qlcnic_encap_rx_offload(adapter) || !ahw->vxlan_port_count ||
 	    (ahw->vxlan_port != ntohs(port)))
 		return;
 
-	adapter->flags |= QLCNIC_DEL_VXLAN_PORT;
+	ahw->vxlan_port_count--;
+	if (!ahw->vxlan_port_count)
+		adapter->flags |= QLCNIC_DEL_VXLAN_PORT;
 }
 
 static netdev_features_t qlcnic_features_check(struct sk_buff *skb,
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index d79e33b3c191..686334f4588d 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -157,6 +157,7 @@ enum {
157 NWayAdvert = 0x66, /* MII ADVERTISE */ 157 NWayAdvert = 0x66, /* MII ADVERTISE */
158 NWayLPAR = 0x68, /* MII LPA */ 158 NWayLPAR = 0x68, /* MII LPA */
159 NWayExpansion = 0x6A, /* MII Expansion */ 159 NWayExpansion = 0x6A, /* MII Expansion */
160 TxDmaOkLowDesc = 0x82, /* Low 16 bit address of a Tx descriptor. */
160 Config5 = 0xD8, /* Config5 */ 161 Config5 = 0xD8, /* Config5 */
161 TxPoll = 0xD9, /* Tell chip to check Tx descriptors for work */ 162 TxPoll = 0xD9, /* Tell chip to check Tx descriptors for work */
162 RxMaxSize = 0xDA, /* Max size of an Rx packet (8169 only) */ 163 RxMaxSize = 0xDA, /* Max size of an Rx packet (8169 only) */
@@ -341,6 +342,7 @@ struct cp_private {
341 unsigned tx_tail; 342 unsigned tx_tail;
342 struct cp_desc *tx_ring; 343 struct cp_desc *tx_ring;
343 struct sk_buff *tx_skb[CP_TX_RING_SIZE]; 344 struct sk_buff *tx_skb[CP_TX_RING_SIZE];
345 u32 tx_opts[CP_TX_RING_SIZE];
344 346
345 unsigned rx_buf_sz; 347 unsigned rx_buf_sz;
346 unsigned wol_enabled : 1; /* Is Wake-on-LAN enabled? */ 348 unsigned wol_enabled : 1; /* Is Wake-on-LAN enabled? */
@@ -665,7 +667,7 @@ static void cp_tx (struct cp_private *cp)
 		BUG_ON(!skb);
 
 		dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
-				 le32_to_cpu(txd->opts1) & 0xffff,
+				 cp->tx_opts[tx_tail] & 0xffff,
 				 PCI_DMA_TODEVICE);
 
 		if (status & LastFrag) {
@@ -733,7 +735,7 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 {
 	struct cp_private *cp = netdev_priv(dev);
 	unsigned entry;
-	u32 eor, flags;
+	u32 eor, opts1;
 	unsigned long intr_flags;
 	__le32 opts2;
 	int mss = 0;
@@ -753,6 +755,21 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
753 mss = skb_shinfo(skb)->gso_size; 755 mss = skb_shinfo(skb)->gso_size;
754 756
755 opts2 = cpu_to_le32(cp_tx_vlan_tag(skb)); 757 opts2 = cpu_to_le32(cp_tx_vlan_tag(skb));
758 opts1 = DescOwn;
759 if (mss)
760 opts1 |= LargeSend | ((mss & MSSMask) << MSSShift);
761 else if (skb->ip_summed == CHECKSUM_PARTIAL) {
762 const struct iphdr *ip = ip_hdr(skb);
763 if (ip->protocol == IPPROTO_TCP)
764 opts1 |= IPCS | TCPCS;
765 else if (ip->protocol == IPPROTO_UDP)
766 opts1 |= IPCS | UDPCS;
767 else {
768 WARN_ONCE(1,
769 "Net bug: asked to checksum invalid Legacy IP packet\n");
770 goto out_dma_error;
771 }
772 }
756 773
757 if (skb_shinfo(skb)->nr_frags == 0) { 774 if (skb_shinfo(skb)->nr_frags == 0) {
758 struct cp_desc *txd = &cp->tx_ring[entry]; 775 struct cp_desc *txd = &cp->tx_ring[entry];
@@ -768,31 +785,20 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 		txd->addr = cpu_to_le64(mapping);
 		wmb();
 
-		flags = eor | len | DescOwn | FirstFrag | LastFrag;
-
-		if (mss)
-			flags |= LargeSend | ((mss & MSSMask) << MSSShift);
-		else if (skb->ip_summed == CHECKSUM_PARTIAL) {
-			const struct iphdr *ip = ip_hdr(skb);
-			if (ip->protocol == IPPROTO_TCP)
-				flags |= IPCS | TCPCS;
-			else if (ip->protocol == IPPROTO_UDP)
-				flags |= IPCS | UDPCS;
-			else
-				WARN_ON(1);	/* we need a WARN() */
-		}
+		opts1 |= eor | len | FirstFrag | LastFrag;
 
-		txd->opts1 = cpu_to_le32(flags);
+		txd->opts1 = cpu_to_le32(opts1);
 		wmb();
 
 		cp->tx_skb[entry] = skb;
-		entry = NEXT_TX(entry);
+		cp->tx_opts[entry] = opts1;
+		netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
+			  entry, skb->len);
 	} else {
 		struct cp_desc *txd;
-		u32 first_len, first_eor;
+		u32 first_len, first_eor, ctrl;
 		dma_addr_t first_mapping;
 		int frag, first_entry = entry;
-		const struct iphdr *ip = ip_hdr(skb);
 
 		/* We must give this initial chunk to the device last.
 		 * Otherwise we could race with the device.
@@ -805,14 +811,14 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 			goto out_dma_error;
 
 		cp->tx_skb[entry] = skb;
-		entry = NEXT_TX(entry);
 
 		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
 			const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
 			u32 len;
-			u32 ctrl;
 			dma_addr_t mapping;
 
+			entry = NEXT_TX(entry);
+
 			len = skb_frag_size(this_frag);
 			mapping = dma_map_single(&cp->pdev->dev,
 						 skb_frag_address(this_frag),
@@ -824,19 +830,7 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 
 			eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
 
-			ctrl = eor | len | DescOwn;
-
-			if (mss)
-				ctrl |= LargeSend |
-					((mss & MSSMask) << MSSShift);
-			else if (skb->ip_summed == CHECKSUM_PARTIAL) {
-				if (ip->protocol == IPPROTO_TCP)
-					ctrl |= IPCS | TCPCS;
-				else if (ip->protocol == IPPROTO_UDP)
-					ctrl |= IPCS | UDPCS;
-				else
-					BUG();
-			}
+			ctrl = opts1 | eor | len;
 
 			if (frag == skb_shinfo(skb)->nr_frags - 1)
 				ctrl |= LastFrag;
@@ -849,8 +843,8 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 			txd->opts1 = cpu_to_le32(ctrl);
 			wmb();
 
+			cp->tx_opts[entry] = ctrl;
 			cp->tx_skb[entry] = skb;
-			entry = NEXT_TX(entry);
 		}
 
 		txd = &cp->tx_ring[first_entry];
@@ -858,27 +852,17 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 		txd->addr = cpu_to_le64(first_mapping);
 		wmb();
 
-		if (skb->ip_summed == CHECKSUM_PARTIAL) {
-			if (ip->protocol == IPPROTO_TCP)
-				txd->opts1 = cpu_to_le32(first_eor | first_len |
-							 FirstFrag | DescOwn |
-							 IPCS | TCPCS);
-			else if (ip->protocol == IPPROTO_UDP)
-				txd->opts1 = cpu_to_le32(first_eor | first_len |
-							 FirstFrag | DescOwn |
-							 IPCS | UDPCS);
-			else
-				BUG();
-		} else
-			txd->opts1 = cpu_to_le32(first_eor | first_len |
-						 FirstFrag | DescOwn);
+		ctrl = opts1 | first_eor | first_len | FirstFrag;
+		txd->opts1 = cpu_to_le32(ctrl);
 		wmb();
+
+		cp->tx_opts[first_entry] = ctrl;
+		netif_dbg(cp, tx_queued, cp->dev, "tx queued, slots %d-%d, skblen %d\n",
+			  first_entry, entry, skb->len);
 	}
-	cp->tx_head = entry;
+	cp->tx_head = NEXT_TX(entry);
 
 	netdev_sent_queue(dev, skb->len);
-	netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
-		  entry, skb->len);
 	if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
 		netif_stop_queue(dev);
 
@@ -1115,6 +1099,7 @@ static int cp_init_rings (struct cp_private *cp)
1115{ 1099{
1116 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE); 1100 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
1117 cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd); 1101 cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);
1102 memset(cp->tx_opts, 0, sizeof(cp->tx_opts));
1118 1103
1119 cp_init_rings_index(cp); 1104 cp_init_rings_index(cp);
1120 1105
@@ -1151,7 +1136,7 @@ static void cp_clean_rings (struct cp_private *cp)
 			desc = cp->rx_ring + i;
 			dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
 					 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
-			dev_kfree_skb(cp->rx_skb[i]);
+			dev_kfree_skb_any(cp->rx_skb[i]);
 		}
 	}
 
@@ -1164,7 +1149,7 @@ static void cp_clean_rings (struct cp_private *cp)
 					 le32_to_cpu(desc->opts1) & 0xffff,
 					 PCI_DMA_TODEVICE);
 			if (le32_to_cpu(desc->opts1) & LastFrag)
-				dev_kfree_skb(skb);
+				dev_kfree_skb_any(skb);
 			cp->dev->stats.tx_dropped++;
 		}
 	}
@@ -1172,6 +1157,7 @@ static void cp_clean_rings (struct cp_private *cp)
1172 1157
1173 memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE); 1158 memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
1174 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE); 1159 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
1160 memset(cp->tx_opts, 0, sizeof(cp->tx_opts));
1175 1161
1176 memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE); 1162 memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE);
1177 memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE); 1163 memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE);
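The switch to dev_kfree_skb_any() in cp_clean_rings() matters because the function is now also reached with IRQs disabled (the reworked timeout handler below calls it under spin_lock_irqsave()). A rough sketch of what the _any() variant does internally:

    /* Sketch: picking the context-safe skb free.  Plain dev_kfree_skb()
     * may only run in process/BH context; _any() checks and defers.
     */
    if (in_irq() || irqs_disabled())
            dev_kfree_skb_irq(skb);   /* defer the free to softirq */
    else
            dev_kfree_skb(skb);       /* normal consume path */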
@@ -1249,7 +1235,7 @@ static void cp_tx_timeout(struct net_device *dev)
1249{ 1235{
1250 struct cp_private *cp = netdev_priv(dev); 1236 struct cp_private *cp = netdev_priv(dev);
1251 unsigned long flags; 1237 unsigned long flags;
1252 int rc; 1238 int rc, i;
1253 1239
1254 netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n", 1240 netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n",
1255 cpr8(Cmd), cpr16(CpCmd), 1241 cpr8(Cmd), cpr16(CpCmd),
@@ -1257,13 +1243,26 @@ static void cp_tx_timeout(struct net_device *dev)
1257 1243
1258 spin_lock_irqsave(&cp->lock, flags); 1244 spin_lock_irqsave(&cp->lock, flags);
1259 1245
1246 netif_dbg(cp, tx_err, cp->dev, "TX ring head %d tail %d desc %x\n",
1247 cp->tx_head, cp->tx_tail, cpr16(TxDmaOkLowDesc));
1248 for (i = 0; i < CP_TX_RING_SIZE; i++) {
1249 netif_dbg(cp, tx_err, cp->dev,
1250 "TX slot %d @%p: %08x (%08x) %08x %llx %p\n",
1251 i, &cp->tx_ring[i], le32_to_cpu(cp->tx_ring[i].opts1),
1252 cp->tx_opts[i], le32_to_cpu(cp->tx_ring[i].opts2),
1253 le64_to_cpu(cp->tx_ring[i].addr),
1254 cp->tx_skb[i]);
1255 }
1256
1260 cp_stop_hw(cp); 1257 cp_stop_hw(cp);
1261 cp_clean_rings(cp); 1258 cp_clean_rings(cp);
1262 rc = cp_init_rings(cp); 1259 rc = cp_init_rings(cp);
1263 cp_start_hw(cp); 1260 cp_start_hw(cp);
1264 cp_enable_irq(cp); 1261 __cp_set_rx_mode(dev);
1262 cpw16_f(IntrMask, cp_norx_intr_mask);
1265 1263
1266 netif_wake_queue(dev); 1264 netif_wake_queue(dev);
1265 napi_schedule_irqoff(&cp->napi);
1267 1266
1268 spin_unlock_irqrestore(&cp->lock, flags); 1267 spin_unlock_irqrestore(&cp->lock, flags);
1269} 1268}
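The hunks above add a cp->tx_opts[] shadow of each descriptor's opts1 word. The hardware rewrites opts1 in place (clearing DescOwn on completion), so only a software copy can tell the timeout dump loop what was originally queued. A minimal sketch of the idea, with illustrative my_* names rather than the driver's:

    /* Sketch only: shadow a descriptor word the NIC may rewrite, so a
     * later debug dump can show what software actually programmed.
     */
    struct my_desc { __le32 opts1; };

    static struct my_desc my_ring[64];
    static u32 my_opts[64];                    /* survives hw writeback */

    static void my_queue_slot(unsigned int entry, u32 ctrl)
    {
            my_ring[entry].opts1 = cpu_to_le32(ctrl);
            wmb();                             /* publish before hw sees it */
            my_opts[entry] = ctrl;             /* debug copy for the dump */
    }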
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 2b32e0c5a0b4..b4f21232019a 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -6081,7 +6081,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
6081{ 6081{
6082 void __iomem *ioaddr = tp->mmio_addr; 6082 void __iomem *ioaddr = tp->mmio_addr;
6083 struct pci_dev *pdev = tp->pci_dev; 6083 struct pci_dev *pdev = tp->pci_dev;
6084 u16 rg_saw_cnt; 6084 int rg_saw_cnt;
6085 u32 data; 6085 u32 data;
6086 static const struct ephy_info e_info_8168h_1[] = { 6086 static const struct ephy_info e_info_8168h_1[] = {
6087 { 0x1e, 0x0800, 0x0001 }, 6087 { 0x1e, 0x0800, 0x0001 },
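Widening rg_saw_cnt from u16 to int is a signedness fix; the surrounding code is not in this hunk, but the likely motivation is that the value takes part in signed checks or can carry an error, and a u16 would silently truncate a negative into a large positive count:

    /* Sketch; read_counter() is a hypothetical stand-in. */
    static void demo(void)
    {
            int ret = read_counter();   /* may legitimately return -EIO */

            if (ret < 0)
                    return;             /* detectable only with a signed type */
            /* with u16, -EIO (0xfffffffa) truncates to 0xfffa: no error seen */
    }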
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index b735fa22ac95..ebf6abc4853f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -161,11 +161,16 @@ int stmmac_mdio_reset(struct mii_bus *bus)
161 161
162 if (!gpio_request(reset_gpio, "mdio-reset")) { 162 if (!gpio_request(reset_gpio, "mdio-reset")) {
163 gpio_direction_output(reset_gpio, active_low ? 1 : 0); 163 gpio_direction_output(reset_gpio, active_low ? 1 : 0);
164 udelay(data->delays[0]); 164 if (data->delays[0])
165 msleep(DIV_ROUND_UP(data->delays[0], 1000));
166
165 gpio_set_value(reset_gpio, active_low ? 0 : 1); 167 gpio_set_value(reset_gpio, active_low ? 0 : 1);
166 udelay(data->delays[1]); 168 if (data->delays[1])
169 msleep(DIV_ROUND_UP(data->delays[1], 1000));
170
167 gpio_set_value(reset_gpio, active_low ? 1 : 0); 171 gpio_set_value(reset_gpio, active_low ? 1 : 0);
168 udelay(data->delays[2]); 172 if (data->delays[2])
173 msleep(DIV_ROUND_UP(data->delays[2], 1000));
169 } 174 }
170 } 175 }
171#endif 176#endif
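The stmmac change converts the reset-GPIO delays from udelay() to msleep(): device-tree reset delays are routinely whole milliseconds, busy-waiting that long wastes a CPU, and udelay() has a fairly small upper bound on many architectures. DIV_ROUND_UP rounds the microsecond value up so the sleep is never shorter than requested. As a helper (mdio_reset_sleep_us() is hypothetical):

    #include <linux/delay.h>
    #include <linux/kernel.h>

    static void mdio_reset_sleep_us(u32 us)
    {
            if (us)
                    msleep(DIV_ROUND_UP(us, 1000)); /* e.g. 1500us -> 2ms */
    }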
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 53fe200e0b79..cc106d892e29 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -1756,7 +1756,8 @@ static const struct net_device_ops vnet_ops = {
1756#endif 1756#endif
1757}; 1757};
1758 1758
1759static struct vnet *vnet_new(const u64 *local_mac) 1759static struct vnet *vnet_new(const u64 *local_mac,
1760 struct vio_dev *vdev)
1760{ 1761{
1761 struct net_device *dev; 1762 struct net_device *dev;
1762 struct vnet *vp; 1763 struct vnet *vp;
@@ -1790,6 +1791,8 @@ static struct vnet *vnet_new(const u64 *local_mac)
1790 NETIF_F_HW_CSUM | NETIF_F_SG; 1791 NETIF_F_HW_CSUM | NETIF_F_SG;
1791 dev->features = dev->hw_features; 1792 dev->features = dev->hw_features;
1792 1793
1794 SET_NETDEV_DEV(dev, &vdev->dev);
1795
1793 err = register_netdev(dev); 1796 err = register_netdev(dev);
1794 if (err) { 1797 if (err) {
1795 pr_err("Cannot register net device, aborting\n"); 1798 pr_err("Cannot register net device, aborting\n");
@@ -1808,7 +1811,8 @@ err_out_free_dev:
1808 return ERR_PTR(err); 1811 return ERR_PTR(err);
1809} 1812}
1810 1813
1811static struct vnet *vnet_find_or_create(const u64 *local_mac) 1814static struct vnet *vnet_find_or_create(const u64 *local_mac,
1815 struct vio_dev *vdev)
1812{ 1816{
1813 struct vnet *iter, *vp; 1817 struct vnet *iter, *vp;
1814 1818
@@ -1821,7 +1825,7 @@ static struct vnet *vnet_find_or_create(const u64 *local_mac)
1821 } 1825 }
1822 } 1826 }
1823 if (!vp) 1827 if (!vp)
1824 vp = vnet_new(local_mac); 1828 vp = vnet_new(local_mac, vdev);
1825 mutex_unlock(&vnet_list_mutex); 1829 mutex_unlock(&vnet_list_mutex);
1826 1830
1827 return vp; 1831 return vp;
@@ -1848,7 +1852,8 @@ static void vnet_cleanup(void)
1848static const char *local_mac_prop = "local-mac-address"; 1852static const char *local_mac_prop = "local-mac-address";
1849 1853
1850static struct vnet *vnet_find_parent(struct mdesc_handle *hp, 1854static struct vnet *vnet_find_parent(struct mdesc_handle *hp,
1851 u64 port_node) 1855 u64 port_node,
1856 struct vio_dev *vdev)
1852{ 1857{
1853 const u64 *local_mac = NULL; 1858 const u64 *local_mac = NULL;
1854 u64 a; 1859 u64 a;
@@ -1869,7 +1874,7 @@ static struct vnet *vnet_find_parent(struct mdesc_handle *hp,
1869 if (!local_mac) 1874 if (!local_mac)
1870 return ERR_PTR(-ENODEV); 1875 return ERR_PTR(-ENODEV);
1871 1876
1872 return vnet_find_or_create(local_mac); 1877 return vnet_find_or_create(local_mac, vdev);
1873} 1878}
1874 1879
1875static struct ldc_channel_config vnet_ldc_cfg = { 1880static struct ldc_channel_config vnet_ldc_cfg = {
@@ -1923,7 +1928,7 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1923 1928
1924 hp = mdesc_grab(); 1929 hp = mdesc_grab();
1925 1930
1926 vp = vnet_find_parent(hp, vdev->mp); 1931 vp = vnet_find_parent(hp, vdev->mp, vdev);
1927 if (IS_ERR(vp)) { 1932 if (IS_ERR(vp)) {
1928 pr_err("Cannot find port parent vnet\n"); 1933 pr_err("Cannot find port parent vnet\n");
1929 err = PTR_ERR(vp); 1934 err = PTR_ERR(vp);
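Threading the vio_dev down through vnet_find_parent()/vnet_new() exists only so SET_NETDEV_DEV() can run before registration: that parents the netdev in the device model, creating the /sys/class/net/<if>/device link and letting udev tie the interface to its bus device. The ordering is the whole point:

    SET_NETDEV_DEV(dev, &vdev->dev);   /* set the parent first ...       */
    err = register_netdev(dev);        /* ... registration snapshots it  */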
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 1a5aca55ea9f..9f9832f0dea9 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -291,13 +291,6 @@ static int netcp_module_probe(struct netcp_device *netcp_device,
291 interface_list) { 291 interface_list) {
292 struct netcp_intf_modpriv *intf_modpriv; 292 struct netcp_intf_modpriv *intf_modpriv;
293 293
294 /* If interface not registered then register now */
295 if (!netcp_intf->netdev_registered)
296 ret = netcp_register_interface(netcp_intf);
297
298 if (ret)
299 return -ENODEV;
300
301 intf_modpriv = devm_kzalloc(dev, sizeof(*intf_modpriv), 294 intf_modpriv = devm_kzalloc(dev, sizeof(*intf_modpriv),
302 GFP_KERNEL); 295 GFP_KERNEL);
303 if (!intf_modpriv) 296 if (!intf_modpriv)
@@ -306,6 +299,11 @@ static int netcp_module_probe(struct netcp_device *netcp_device,
306 interface = of_parse_phandle(netcp_intf->node_interface, 299 interface = of_parse_phandle(netcp_intf->node_interface,
307 module->name, 0); 300 module->name, 0);
308 301
302 if (!interface) {
303 devm_kfree(dev, intf_modpriv);
304 continue;
305 }
306
309 intf_modpriv->netcp_priv = netcp_intf; 307 intf_modpriv->netcp_priv = netcp_intf;
310 intf_modpriv->netcp_module = module; 308 intf_modpriv->netcp_module = module;
311 list_add_tail(&intf_modpriv->intf_list, 309 list_add_tail(&intf_modpriv->intf_list,
@@ -323,6 +321,18 @@ static int netcp_module_probe(struct netcp_device *netcp_device,
323 continue; 321 continue;
324 } 322 }
325 } 323 }
324
325 /* Now register the interface with netdev */
326 list_for_each_entry(netcp_intf,
327 &netcp_device->interface_head,
328 interface_list) {
329 /* If interface not registered then register now */
330 if (!netcp_intf->netdev_registered) {
331 ret = netcp_register_interface(netcp_intf);
332 if (ret)
333 return -ENODEV;
334 }
335 }
326 return 0; 336 return 0;
327} 337}
328 338
@@ -357,7 +367,6 @@ int netcp_register_module(struct netcp_module *module)
357 if (ret < 0) 367 if (ret < 0)
358 goto fail; 368 goto fail;
359 } 369 }
360
361 mutex_unlock(&netcp_modules_lock); 370 mutex_unlock(&netcp_modules_lock);
362 return 0; 371 return 0;
363 372
@@ -796,7 +805,7 @@ static void netcp_rxpool_free(struct netcp_intf *netcp)
796 netcp->rx_pool = NULL; 805 netcp->rx_pool = NULL;
797} 806}
798 807
799static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq) 808static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
800{ 809{
801 struct knav_dma_desc *hwdesc; 810 struct knav_dma_desc *hwdesc;
802 unsigned int buf_len, dma_sz; 811 unsigned int buf_len, dma_sz;
@@ -810,7 +819,7 @@ static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
810 hwdesc = knav_pool_desc_get(netcp->rx_pool); 819 hwdesc = knav_pool_desc_get(netcp->rx_pool);
811 if (IS_ERR_OR_NULL(hwdesc)) { 820 if (IS_ERR_OR_NULL(hwdesc)) {
812 dev_dbg(netcp->ndev_dev, "out of rx pool desc\n"); 821 dev_dbg(netcp->ndev_dev, "out of rx pool desc\n");
813 return; 822 return -ENOMEM;
814 } 823 }
815 824
816 if (likely(fdq == 0)) { 825 if (likely(fdq == 0)) {
@@ -862,25 +871,26 @@ static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
862 knav_pool_desc_map(netcp->rx_pool, hwdesc, sizeof(*hwdesc), &dma, 871 knav_pool_desc_map(netcp->rx_pool, hwdesc, sizeof(*hwdesc), &dma,
863 &dma_sz); 872 &dma_sz);
864 knav_queue_push(netcp->rx_fdq[fdq], dma, sizeof(*hwdesc), 0); 873 knav_queue_push(netcp->rx_fdq[fdq], dma, sizeof(*hwdesc), 0);
865 return; 874 return 0;
866 875
867fail: 876fail:
868 knav_pool_desc_put(netcp->rx_pool, hwdesc); 877 knav_pool_desc_put(netcp->rx_pool, hwdesc);
878 return -ENOMEM;
869} 879}
870 880
871/* Refill Rx FDQ with descriptors & attached buffers */ 881/* Refill Rx FDQ with descriptors & attached buffers */
872static void netcp_rxpool_refill(struct netcp_intf *netcp) 882static void netcp_rxpool_refill(struct netcp_intf *netcp)
873{ 883{
874 u32 fdq_deficit[KNAV_DMA_FDQ_PER_CHAN] = {0}; 884 u32 fdq_deficit[KNAV_DMA_FDQ_PER_CHAN] = {0};
875 int i; 885 int i, ret = 0;
876 886
877 /* Calculate the FDQ deficit and refill */ 887 /* Calculate the FDQ deficit and refill */
878 for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_fdq[i]; i++) { 888 for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_fdq[i]; i++) {
879 fdq_deficit[i] = netcp->rx_queue_depths[i] - 889 fdq_deficit[i] = netcp->rx_queue_depths[i] -
880 knav_queue_get_count(netcp->rx_fdq[i]); 890 knav_queue_get_count(netcp->rx_fdq[i]);
881 891
882 while (fdq_deficit[i]--) 892 while (fdq_deficit[i]-- && !ret)
883 netcp_allocate_rx_buf(netcp, i); 893 ret = netcp_allocate_rx_buf(netcp, i);
884 } /* end for fdqs */ 894 } /* end for fdqs */
885} 895}
886 896
@@ -893,12 +903,12 @@ static int netcp_rx_poll(struct napi_struct *napi, int budget)
893 903
894 packets = netcp_process_rx_packets(netcp, budget); 904 packets = netcp_process_rx_packets(netcp, budget);
895 905
906 netcp_rxpool_refill(netcp);
896 if (packets < budget) { 907 if (packets < budget) {
897 napi_complete(&netcp->rx_napi); 908 napi_complete(&netcp->rx_napi);
898 knav_queue_enable_notify(netcp->rx_queue); 909 knav_queue_enable_notify(netcp->rx_queue);
899 } 910 }
900 911
901 netcp_rxpool_refill(netcp);
902 return packets; 912 return packets;
903} 913}
904 914
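Moving netcp_rxpool_refill() ahead of napi_complete() closes a small starvation window: once NAPI completes and RX notifications are re-armed, a burst can arrive immediately, and if the free-descriptor queues were left empty there is nothing scheduled to replenish them. The general poll shape, with illustrative my_* names:

    static int my_poll(struct napi_struct *napi, int budget)
    {
            int work = my_process_rx(napi, budget);

            my_refill_rx();                 /* top up before re-arming IRQs */
            if (work < budget) {
                    napi_complete(napi);
                    my_enable_rx_notify();  /* now safe: buffers are posted */
            }
            return work;
    }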
@@ -1384,7 +1394,6 @@ static void netcp_addr_sweep_del(struct netcp_intf *netcp)
1384 continue; 1394 continue;
1385 dev_dbg(netcp->ndev_dev, "deleting address %pM, type %x\n", 1395 dev_dbg(netcp->ndev_dev, "deleting address %pM, type %x\n",
1386 naddr->addr, naddr->type); 1396 naddr->addr, naddr->type);
1387 mutex_lock(&netcp_modules_lock);
1388 for_each_module(netcp, priv) { 1397 for_each_module(netcp, priv) {
1389 module = priv->netcp_module; 1398 module = priv->netcp_module;
1390 if (!module->del_addr) 1399 if (!module->del_addr)
@@ -1393,7 +1402,6 @@ static void netcp_addr_sweep_del(struct netcp_intf *netcp)
1393 naddr); 1402 naddr);
1394 WARN_ON(error); 1403 WARN_ON(error);
1395 } 1404 }
1396 mutex_unlock(&netcp_modules_lock);
1397 netcp_addr_del(netcp, naddr); 1405 netcp_addr_del(netcp, naddr);
1398 } 1406 }
1399} 1407}
@@ -1410,7 +1418,7 @@ static void netcp_addr_sweep_add(struct netcp_intf *netcp)
1410 continue; 1418 continue;
1411 dev_dbg(netcp->ndev_dev, "adding address %pM, type %x\n", 1419 dev_dbg(netcp->ndev_dev, "adding address %pM, type %x\n",
1412 naddr->addr, naddr->type); 1420 naddr->addr, naddr->type);
1413 mutex_lock(&netcp_modules_lock); 1421
1414 for_each_module(netcp, priv) { 1422 for_each_module(netcp, priv) {
1415 module = priv->netcp_module; 1423 module = priv->netcp_module;
1416 if (!module->add_addr) 1424 if (!module->add_addr)
@@ -1418,7 +1426,6 @@ static void netcp_addr_sweep_add(struct netcp_intf *netcp)
1418 error = module->add_addr(priv->module_priv, naddr); 1426 error = module->add_addr(priv->module_priv, naddr);
1419 WARN_ON(error); 1427 WARN_ON(error);
1420 } 1428 }
1421 mutex_unlock(&netcp_modules_lock);
1422 } 1429 }
1423} 1430}
1424 1431
@@ -1432,6 +1439,7 @@ static void netcp_set_rx_mode(struct net_device *ndev)
1432 ndev->flags & IFF_ALLMULTI || 1439 ndev->flags & IFF_ALLMULTI ||
1433 netdev_mc_count(ndev) > NETCP_MAX_MCAST_ADDR); 1440 netdev_mc_count(ndev) > NETCP_MAX_MCAST_ADDR);
1434 1441
1442 spin_lock(&netcp->lock);
1435 /* first clear all marks */ 1443 /* first clear all marks */
1436 netcp_addr_clear_mark(netcp); 1444 netcp_addr_clear_mark(netcp);
1437 1445
@@ -1450,6 +1458,7 @@ static void netcp_set_rx_mode(struct net_device *ndev)
1450 /* finally sweep and callout into modules */ 1458 /* finally sweep and callout into modules */
1451 netcp_addr_sweep_del(netcp); 1459 netcp_addr_sweep_del(netcp);
1452 netcp_addr_sweep_add(netcp); 1460 netcp_addr_sweep_add(netcp);
1461 spin_unlock(&netcp->lock);
1453} 1462}
1454 1463
1455static void netcp_free_navigator_resources(struct netcp_intf *netcp) 1464static void netcp_free_navigator_resources(struct netcp_intf *netcp)
@@ -1614,7 +1623,6 @@ static int netcp_ndo_open(struct net_device *ndev)
1614 goto fail; 1623 goto fail;
1615 } 1624 }
1616 1625
1617 mutex_lock(&netcp_modules_lock);
1618 for_each_module(netcp, intf_modpriv) { 1626 for_each_module(netcp, intf_modpriv) {
1619 module = intf_modpriv->netcp_module; 1627 module = intf_modpriv->netcp_module;
1620 if (module->open) { 1628 if (module->open) {
@@ -1625,7 +1633,6 @@ static int netcp_ndo_open(struct net_device *ndev)
1625 } 1633 }
1626 } 1634 }
1627 } 1635 }
1628 mutex_unlock(&netcp_modules_lock);
1629 1636
1630 napi_enable(&netcp->rx_napi); 1637 napi_enable(&netcp->rx_napi);
1631 napi_enable(&netcp->tx_napi); 1638 napi_enable(&netcp->tx_napi);
@@ -1642,7 +1649,6 @@ fail_open:
1642 if (module->close) 1649 if (module->close)
1643 module->close(intf_modpriv->module_priv, ndev); 1650 module->close(intf_modpriv->module_priv, ndev);
1644 } 1651 }
1645 mutex_unlock(&netcp_modules_lock);
1646 1652
1647fail: 1653fail:
1648 netcp_free_navigator_resources(netcp); 1654 netcp_free_navigator_resources(netcp);
@@ -1666,7 +1672,6 @@ static int netcp_ndo_stop(struct net_device *ndev)
1666 napi_disable(&netcp->rx_napi); 1672 napi_disable(&netcp->rx_napi);
1667 napi_disable(&netcp->tx_napi); 1673 napi_disable(&netcp->tx_napi);
1668 1674
1669 mutex_lock(&netcp_modules_lock);
1670 for_each_module(netcp, intf_modpriv) { 1675 for_each_module(netcp, intf_modpriv) {
1671 module = intf_modpriv->netcp_module; 1676 module = intf_modpriv->netcp_module;
1672 if (module->close) { 1677 if (module->close) {
@@ -1675,7 +1680,6 @@ static int netcp_ndo_stop(struct net_device *ndev)
1675 dev_err(netcp->ndev_dev, "Close failed\n"); 1680 dev_err(netcp->ndev_dev, "Close failed\n");
1676 } 1681 }
1677 } 1682 }
1678 mutex_unlock(&netcp_modules_lock);
1679 1683
1680 /* Recycle Rx descriptors from completion queue */ 1684 /* Recycle Rx descriptors from completion queue */
1681 netcp_empty_rx_queue(netcp); 1685 netcp_empty_rx_queue(netcp);
@@ -1703,7 +1707,6 @@ static int netcp_ndo_ioctl(struct net_device *ndev,
1703 if (!netif_running(ndev)) 1707 if (!netif_running(ndev))
1704 return -EINVAL; 1708 return -EINVAL;
1705 1709
1706 mutex_lock(&netcp_modules_lock);
1707 for_each_module(netcp, intf_modpriv) { 1710 for_each_module(netcp, intf_modpriv) {
1708 module = intf_modpriv->netcp_module; 1711 module = intf_modpriv->netcp_module;
1709 if (!module->ioctl) 1712 if (!module->ioctl)
@@ -1719,7 +1722,6 @@ static int netcp_ndo_ioctl(struct net_device *ndev,
1719 } 1722 }
1720 1723
1721out: 1724out:
1722 mutex_unlock(&netcp_modules_lock);
1723 return (ret == 0) ? 0 : err; 1725 return (ret == 0) ? 0 : err;
1724} 1726}
1725 1727
@@ -1754,11 +1756,12 @@ static int netcp_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
1754 struct netcp_intf *netcp = netdev_priv(ndev); 1756 struct netcp_intf *netcp = netdev_priv(ndev);
1755 struct netcp_intf_modpriv *intf_modpriv; 1757 struct netcp_intf_modpriv *intf_modpriv;
1756 struct netcp_module *module; 1758 struct netcp_module *module;
1759 unsigned long flags;
1757 int err = 0; 1760 int err = 0;
1758 1761
1759 dev_dbg(netcp->ndev_dev, "adding rx vlan id: %d\n", vid); 1762 dev_dbg(netcp->ndev_dev, "adding rx vlan id: %d\n", vid);
1760 1763
1761 mutex_lock(&netcp_modules_lock); 1764 spin_lock_irqsave(&netcp->lock, flags);
1762 for_each_module(netcp, intf_modpriv) { 1765 for_each_module(netcp, intf_modpriv) {
1763 module = intf_modpriv->netcp_module; 1766 module = intf_modpriv->netcp_module;
1764 if ((module->add_vid) && (vid != 0)) { 1767 if ((module->add_vid) && (vid != 0)) {
@@ -1770,7 +1773,8 @@ static int netcp_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
1770 } 1773 }
1771 } 1774 }
1772 } 1775 }
1773 mutex_unlock(&netcp_modules_lock); 1776 spin_unlock_irqrestore(&netcp->lock, flags);
1777
1774 return err; 1778 return err;
1775} 1779}
1776 1780
@@ -1779,11 +1783,12 @@ static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
1779 struct netcp_intf *netcp = netdev_priv(ndev); 1783 struct netcp_intf *netcp = netdev_priv(ndev);
1780 struct netcp_intf_modpriv *intf_modpriv; 1784 struct netcp_intf_modpriv *intf_modpriv;
1781 struct netcp_module *module; 1785 struct netcp_module *module;
1786 unsigned long flags;
1782 int err = 0; 1787 int err = 0;
1783 1788
1784 dev_dbg(netcp->ndev_dev, "removing rx vlan id: %d\n", vid); 1789 dev_dbg(netcp->ndev_dev, "removing rx vlan id: %d\n", vid);
1785 1790
1786 mutex_lock(&netcp_modules_lock); 1791 spin_lock_irqsave(&netcp->lock, flags);
1787 for_each_module(netcp, intf_modpriv) { 1792 for_each_module(netcp, intf_modpriv) {
1788 module = intf_modpriv->netcp_module; 1793 module = intf_modpriv->netcp_module;
1789 if (module->del_vid) { 1794 if (module->del_vid) {
@@ -1795,7 +1800,7 @@ static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
1795 } 1800 }
1796 } 1801 }
1797 } 1802 }
1798 mutex_unlock(&netcp_modules_lock); 1803 spin_unlock_irqrestore(&netcp->lock, flags);
1799 return err; 1804 return err;
1800} 1805}
1801 1806
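The mutex_lock(&netcp_modules_lock) calls removed throughout these paths were unsafe where they stood: ndo_set_rx_mode() and the VLAN add/kill callbacks can run in atomic context, and a mutex may sleep. The patch substitutes the per-interface netcp->lock spinlock, using the irqsave variant in the VLAN paths; the open/stop/ioctl paths simply drop the module-list mutex, presumably because the module list is stable by then:

    unsigned long flags;

    spin_lock_irqsave(&netcp->lock, flags);   /* safe in atomic context */
    /* walk the attached modules; callbacks must not sleep here */
    spin_unlock_irqrestore(&netcp->lock, flags);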
@@ -2040,7 +2045,6 @@ static int netcp_probe(struct platform_device *pdev)
2040 struct device_node *child, *interfaces; 2045 struct device_node *child, *interfaces;
2041 struct netcp_device *netcp_device; 2046 struct netcp_device *netcp_device;
2042 struct device *dev = &pdev->dev; 2047 struct device *dev = &pdev->dev;
2043 struct netcp_module *module;
2044 int ret; 2048 int ret;
2045 2049
2046 if (!node) { 2050 if (!node) {
@@ -2087,14 +2091,6 @@ static int netcp_probe(struct platform_device *pdev)
2087 /* Add the device instance to the list */ 2091 /* Add the device instance to the list */
2088 list_add_tail(&netcp_device->device_list, &netcp_devices); 2092 list_add_tail(&netcp_device->device_list, &netcp_devices);
2089 2093
2090 /* Probe & attach any modules already registered */
2091 mutex_lock(&netcp_modules_lock);
2092 for_each_netcp_module(module) {
2093 ret = netcp_module_probe(netcp_device, module);
2094 if (ret < 0)
2095 dev_err(dev, "module(%s) probe failed\n", module->name);
2096 }
2097 mutex_unlock(&netcp_modules_lock);
2098 return 0; 2094 return 0;
2099 2095
2100probe_quit_interface: 2096probe_quit_interface:
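netcp_module_probe() also gains a second pass: interfaces are registered with the net core only after every module has attached (and module probing is no longer kicked off from netcp_probe() at all, presumably moving to the module-registration path), so a netdev is never visible to userspace half-wired. The two-pass shape, with a hypothetical attach_module():

    list_for_each_entry(netcp_intf, &netcp_device->interface_head,
                        interface_list)
            attach_module(netcp_intf);               /* pass 1: wire modules */

    list_for_each_entry(netcp_intf, &netcp_device->interface_head,
                        interface_list)
            if (!netcp_intf->netdev_registered &&
                netcp_register_interface(netcp_intf)) /* pass 2: go live */
                    return -ENODEV;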
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 6f16d6aaf7b7..6bff8d82ceab 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -77,6 +77,7 @@
77#define GBENU_ALE_OFFSET 0x1e000 77#define GBENU_ALE_OFFSET 0x1e000
78#define GBENU_HOST_PORT_NUM 0 78#define GBENU_HOST_PORT_NUM 0
79#define GBENU_NUM_ALE_ENTRIES 1024 79#define GBENU_NUM_ALE_ENTRIES 1024
80#define GBENU_SGMII_MODULE_SIZE 0x100
80 81
81/* 10G Ethernet SS defines */ 82/* 10G Ethernet SS defines */
82#define XGBE_MODULE_NAME "netcp-xgbe" 83#define XGBE_MODULE_NAME "netcp-xgbe"
@@ -149,8 +150,8 @@
149#define XGBE_STATS2_MODULE 2 150#define XGBE_STATS2_MODULE 2
150 151
151/* s: 0-based slave_port */ 152/* s: 0-based slave_port */
152#define SGMII_BASE(s) \ 153#define SGMII_BASE(d, s) \
153 (((s) < 2) ? gbe_dev->sgmii_port_regs : gbe_dev->sgmii_port34_regs) 154 (((s) < 2) ? (d)->sgmii_port_regs : (d)->sgmii_port34_regs)
154 155
155#define GBE_TX_QUEUE 648 156#define GBE_TX_QUEUE 648
156#define GBE_TXHOOK_ORDER 0 157#define GBE_TXHOOK_ORDER 0
@@ -1997,13 +1998,8 @@ static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev,
1997 return; 1998 return;
1998 1999
1999 if (!SLAVE_LINK_IS_XGMII(slave)) { 2000 if (!SLAVE_LINK_IS_XGMII(slave)) {
2000 if (gbe_dev->ss_version == GBE_SS_VERSION_14) 2001 sgmii_link_state =
2001 sgmii_link_state = 2002 netcp_sgmii_get_port_link(SGMII_BASE(gbe_dev, sp), sp);
2002 netcp_sgmii_get_port_link(SGMII_BASE(sp), sp);
2003 else
2004 sgmii_link_state =
2005 netcp_sgmii_get_port_link(
2006 gbe_dev->sgmii_port_regs, sp);
2007 } 2003 }
2008 2004
2009 phy_link_state = gbe_phy_link_status(slave); 2005 phy_link_state = gbe_phy_link_status(slave);
@@ -2100,17 +2096,11 @@ static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
2100static void gbe_sgmii_rtreset(struct gbe_priv *priv, 2096static void gbe_sgmii_rtreset(struct gbe_priv *priv,
2101 struct gbe_slave *slave, bool set) 2097 struct gbe_slave *slave, bool set)
2102{ 2098{
2103 void __iomem *sgmii_port_regs;
2104
2105 if (SLAVE_LINK_IS_XGMII(slave)) 2099 if (SLAVE_LINK_IS_XGMII(slave))
2106 return; 2100 return;
2107 2101
2108 if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2)) 2102 netcp_sgmii_rtreset(SGMII_BASE(priv, slave->slave_num),
2109 sgmii_port_regs = priv->sgmii_port34_regs; 2103 slave->slave_num, set);
2110 else
2111 sgmii_port_regs = priv->sgmii_port_regs;
2112
2113 netcp_sgmii_rtreset(sgmii_port_regs, slave->slave_num, set);
2114} 2104}
2115 2105
2116static void gbe_slave_stop(struct gbe_intf *intf) 2106static void gbe_slave_stop(struct gbe_intf *intf)
@@ -2136,17 +2126,12 @@ static void gbe_slave_stop(struct gbe_intf *intf)
2136 2126
2137static void gbe_sgmii_config(struct gbe_priv *priv, struct gbe_slave *slave) 2127static void gbe_sgmii_config(struct gbe_priv *priv, struct gbe_slave *slave)
2138{ 2128{
2139 void __iomem *sgmii_port_regs; 2129 if (SLAVE_LINK_IS_XGMII(slave))
2140 2130 return;
2141 sgmii_port_regs = priv->sgmii_port_regs;
2142 if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2))
2143 sgmii_port_regs = priv->sgmii_port34_regs;
2144 2131
2145 if (!SLAVE_LINK_IS_XGMII(slave)) { 2132 netcp_sgmii_reset(SGMII_BASE(priv, slave->slave_num), slave->slave_num);
2146 netcp_sgmii_reset(sgmii_port_regs, slave->slave_num); 2133 netcp_sgmii_config(SGMII_BASE(priv, slave->slave_num), slave->slave_num,
2147 netcp_sgmii_config(sgmii_port_regs, slave->slave_num, 2134 slave->link_interface);
2148 slave->link_interface);
2149 }
2150} 2135}
2151 2136
2152static int gbe_slave_open(struct gbe_intf *gbe_intf) 2137static int gbe_slave_open(struct gbe_intf *gbe_intf)
@@ -2997,6 +2982,14 @@ static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
2997 gbe_dev->switch_regs = regs; 2982 gbe_dev->switch_regs = regs;
2998 2983
2999 gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET; 2984 gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET;
2985
2986 /* Although sgmii modules are mem mapped to one contiguous
2987 * region on GBENU devices, setting sgmii_port34_regs allows
2988 * consistent code when accessing sgmii api
2989 */
2990 gbe_dev->sgmii_port34_regs = gbe_dev->sgmii_port_regs +
2991 (2 * GBENU_SGMII_MODULE_SIZE);
2992
3000 gbe_dev->host_port_regs = gbe_dev->switch_regs + GBENU_HOST_PORT_OFFSET; 2993 gbe_dev->host_port_regs = gbe_dev->switch_regs + GBENU_HOST_PORT_OFFSET;
3001 2994
3002 for (i = 0; i < (gbe_dev->max_num_ports); i++) 2995 for (i = 0; i < (gbe_dev->max_num_ports); i++)
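Several of the netcp_ethss cleanups hinge on the reworked SGMII_BASE(): it now takes the device explicitly instead of silently capturing a local named gbe_dev, and pointing sgmii_port34_regs into the contiguous GBENU block lets every SS version go through the same selector:

    /* s is the 0-based slave port; (d) makes the device explicit. */
    #define SGMII_BASE(d, s) \
            (((s) < 2) ? (d)->sgmii_port_regs : (d)->sgmii_port34_regs)

    /* usage no longer depends on which local holds the device: */
    void __iomem *regs = SGMII_BASE(priv, slave->slave_num);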
diff --git a/drivers/net/ethernet/via/Kconfig b/drivers/net/ethernet/via/Kconfig
index 2f1264b882b9..d3d094742a7e 100644
--- a/drivers/net/ethernet/via/Kconfig
+++ b/drivers/net/ethernet/via/Kconfig
@@ -17,7 +17,7 @@ if NET_VENDOR_VIA
17 17
18config VIA_RHINE 18config VIA_RHINE
19 tristate "VIA Rhine support" 19 tristate "VIA Rhine support"
20 depends on (PCI || OF_IRQ) 20 depends on PCI || (OF_IRQ && GENERIC_PCI_IOMAP)
21 depends on HAS_DMA 21 depends on HAS_DMA
22 select CRC32 22 select CRC32
23 select MII 23 select MII
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 6008eee01a33..cf468c87ce57 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -828,6 +828,8 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
828 if (!phydev) 828 if (!phydev)
829 dev_info(dev, 829 dev_info(dev,
830 "MDIO of the phy is not registered yet\n"); 830 "MDIO of the phy is not registered yet\n");
831 else
832 put_device(&phydev->dev);
831 return 0; 833 return 0;
832 } 834 }
833 835
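The emaclite fix balances a reference: the lookup that produced phydev takes a reference on its struct device, and since this branch only uses the result as a presence test, the reference must be dropped before returning. The pattern, with my_find_phy() standing in for the actual lookup (which is outside this hunk):

    phydev = my_find_phy(lp->phy_node);
    if (!phydev)
            dev_info(dev, "MDIO of the phy is not registered yet\n");
    else
            put_device(&phydev->dev);  /* lookup took a ref; we only peeked */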
diff --git a/drivers/net/fjes/fjes_hw.c b/drivers/net/fjes/fjes_hw.c
index b5f4a78da828..2d3848c9dc35 100644
--- a/drivers/net/fjes/fjes_hw.c
+++ b/drivers/net/fjes/fjes_hw.c
@@ -1011,11 +1011,11 @@ static void fjes_hw_update_zone_task(struct work_struct *work)
1011 set_bit(epidx, &irq_bit); 1011 set_bit(epidx, &irq_bit);
1012 break; 1012 break;
1013 } 1013 }
1014 }
1015
1016 hw->ep_shm_info[epidx].es_status = info[epidx].es_status;
1017 hw->ep_shm_info[epidx].zone = info[epidx].zone;
1018 1014
1015 hw->ep_shm_info[epidx].es_status =
1016 info[epidx].es_status;
1017 hw->ep_shm_info[epidx].zone = info[epidx].zone;
1018 }
1019 break; 1019 break;
1020 } 1020 }
1021 1021
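The fjes hunk is a scoping fix: the es_status/zone updates previously sat just outside the iteration braces, so they ran once with whatever epidx held after the loop instead of once per endpoint. Reduced to its shape:

    for (epidx = 0; epidx < max_epid; epidx++) {
            /* per-endpoint checks elided */
            hw->ep_shm_info[epidx].es_status = info[epidx].es_status;
            hw->ep_shm_info[epidx].zone = info[epidx].zone;
    }       /* the assignments belong inside, not after, this brace */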
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index da3259ce7c8d..8f5c02eed47d 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -126,6 +126,8 @@ static void geneve_rx(struct geneve_sock *gs, struct sk_buff *skb)
126 __be32 addr; 126 __be32 addr;
127 int err; 127 int err;
128 128
129 iph = ip_hdr(skb); /* outer IP header... */
130
129 if (gs->collect_md) { 131 if (gs->collect_md) {
130 static u8 zero_vni[3]; 132 static u8 zero_vni[3];
131 133
@@ -133,7 +135,6 @@ static void geneve_rx(struct geneve_sock *gs, struct sk_buff *skb)
133 addr = 0; 135 addr = 0;
134 } else { 136 } else {
135 vni = gnvh->vni; 137 vni = gnvh->vni;
136 iph = ip_hdr(skb); /* Still outer IP header... */
137 addr = iph->saddr; 138 addr = iph->saddr;
138 } 139 }
139 140
@@ -178,7 +179,6 @@ static void geneve_rx(struct geneve_sock *gs, struct sk_buff *skb)
178 179
179 skb_reset_network_header(skb); 180 skb_reset_network_header(skb);
180 181
181 iph = ip_hdr(skb); /* Now inner IP header... */
182 err = IP_ECN_decapsulate(iph, skb); 182 err = IP_ECN_decapsulate(iph, skb);
183 183
184 if (unlikely(err)) { 184 if (unlikely(err)) {
@@ -626,6 +626,7 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
626 struct geneve_sock *gs = geneve->sock; 626 struct geneve_sock *gs = geneve->sock;
627 struct ip_tunnel_info *info = NULL; 627 struct ip_tunnel_info *info = NULL;
628 struct rtable *rt = NULL; 628 struct rtable *rt = NULL;
629 const struct iphdr *iip; /* interior IP header */
629 struct flowi4 fl4; 630 struct flowi4 fl4;
630 __u8 tos, ttl; 631 __u8 tos, ttl;
631 __be16 sport; 632 __be16 sport;
@@ -653,6 +654,8 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
653 sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); 654 sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
654 skb_reset_mac_header(skb); 655 skb_reset_mac_header(skb);
655 656
657 iip = ip_hdr(skb);
658
656 if (info) { 659 if (info) {
657 const struct ip_tunnel_key *key = &info->key; 660 const struct ip_tunnel_key *key = &info->key;
658 u8 *opts = NULL; 661 u8 *opts = NULL;
@@ -668,19 +671,16 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
668 if (unlikely(err)) 671 if (unlikely(err))
669 goto err; 672 goto err;
670 673
671 tos = key->tos; 674 tos = ip_tunnel_ecn_encap(key->tos, iip, skb);
672 ttl = key->ttl; 675 ttl = key->ttl;
673 df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0; 676 df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
674 } else { 677 } else {
675 const struct iphdr *iip; /* interior IP header */
676
677 udp_csum = false; 678 udp_csum = false;
678 err = geneve_build_skb(rt, skb, 0, geneve->vni, 679 err = geneve_build_skb(rt, skb, 0, geneve->vni,
679 0, NULL, udp_csum); 680 0, NULL, udp_csum);
680 if (unlikely(err)) 681 if (unlikely(err))
681 goto err; 682 goto err;
682 683
683 iip = ip_hdr(skb);
684 tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, iip, skb); 684 tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, iip, skb);
685 ttl = geneve->ttl; 685 ttl = geneve->ttl;
686 if (!ttl && IN_MULTICAST(ntohl(fl4.daddr))) 686 if (!ttl && IN_MULTICAST(ntohl(fl4.daddr)))
@@ -748,12 +748,8 @@ static void geneve_setup(struct net_device *dev)
748 dev->features |= NETIF_F_RXCSUM; 748 dev->features |= NETIF_F_RXCSUM;
749 dev->features |= NETIF_F_GSO_SOFTWARE; 749 dev->features |= NETIF_F_GSO_SOFTWARE;
750 750
751 dev->vlan_features = dev->features;
752 dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
753
754 dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM; 751 dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
755 dev->hw_features |= NETIF_F_GSO_SOFTWARE; 752 dev->hw_features |= NETIF_F_GSO_SOFTWARE;
756 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
757 753
758 netif_keep_dst(dev); 754 netif_keep_dst(dev);
759 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE; 755 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
@@ -819,7 +815,7 @@ static struct geneve_dev *geneve_find_dev(struct geneve_net *gn,
819 815
820static int geneve_configure(struct net *net, struct net_device *dev, 816static int geneve_configure(struct net *net, struct net_device *dev,
821 __be32 rem_addr, __u32 vni, __u8 ttl, __u8 tos, 817 __be32 rem_addr, __u32 vni, __u8 ttl, __u8 tos,
822 __u16 dst_port, bool metadata) 818 __be16 dst_port, bool metadata)
823{ 819{
824 struct geneve_net *gn = net_generic(net, geneve_net_id); 820 struct geneve_net *gn = net_generic(net, geneve_net_id);
825 struct geneve_dev *t, *geneve = netdev_priv(dev); 821 struct geneve_dev *t, *geneve = netdev_priv(dev);
@@ -844,10 +840,10 @@ static int geneve_configure(struct net *net, struct net_device *dev,
844 840
845 geneve->ttl = ttl; 841 geneve->ttl = ttl;
846 geneve->tos = tos; 842 geneve->tos = tos;
847 geneve->dst_port = htons(dst_port); 843 geneve->dst_port = dst_port;
848 geneve->collect_md = metadata; 844 geneve->collect_md = metadata;
849 845
850 t = geneve_find_dev(gn, htons(dst_port), rem_addr, geneve->vni, 846 t = geneve_find_dev(gn, dst_port, rem_addr, geneve->vni,
851 &tun_on_same_port, &tun_collect_md); 847 &tun_on_same_port, &tun_collect_md);
852 if (t) 848 if (t)
853 return -EBUSY; 849 return -EBUSY;
@@ -871,7 +867,7 @@ static int geneve_configure(struct net *net, struct net_device *dev,
871static int geneve_newlink(struct net *net, struct net_device *dev, 867static int geneve_newlink(struct net *net, struct net_device *dev,
872 struct nlattr *tb[], struct nlattr *data[]) 868 struct nlattr *tb[], struct nlattr *data[])
873{ 869{
874 __u16 dst_port = GENEVE_UDP_PORT; 870 __be16 dst_port = htons(GENEVE_UDP_PORT);
875 __u8 ttl = 0, tos = 0; 871 __u8 ttl = 0, tos = 0;
876 bool metadata = false; 872 bool metadata = false;
877 __be32 rem_addr; 873 __be32 rem_addr;
@@ -890,7 +886,7 @@ static int geneve_newlink(struct net *net, struct net_device *dev,
890 tos = nla_get_u8(data[IFLA_GENEVE_TOS]); 886 tos = nla_get_u8(data[IFLA_GENEVE_TOS]);
891 887
892 if (data[IFLA_GENEVE_PORT]) 888 if (data[IFLA_GENEVE_PORT])
893 dst_port = nla_get_u16(data[IFLA_GENEVE_PORT]); 889 dst_port = nla_get_be16(data[IFLA_GENEVE_PORT]);
894 890
895 if (data[IFLA_GENEVE_COLLECT_METADATA]) 891 if (data[IFLA_GENEVE_COLLECT_METADATA])
896 metadata = true; 892 metadata = true;
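The dst_port changes keep the UDP port in network byte order (__be16) from netlink attribute to wire, so the only byte swap left is for the default constant and sparse's endianness checking stays meaningful end to end:

    __be16 dst_port = htons(GENEVE_UDP_PORT);   /* host constant, swap once */

    if (data[IFLA_GENEVE_PORT])
            dst_port = nla_get_be16(data[IFLA_GENEVE_PORT]); /* already BE */

    /* dumping back out needs no conversion either: */
    if (nla_put_be16(skb, IFLA_GENEVE_PORT, dst_port))
            goto nla_put_failure;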
@@ -913,7 +909,7 @@ static size_t geneve_get_size(const struct net_device *dev)
913 nla_total_size(sizeof(struct in_addr)) + /* IFLA_GENEVE_REMOTE */ 909 nla_total_size(sizeof(struct in_addr)) + /* IFLA_GENEVE_REMOTE */
914 nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TTL */ 910 nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TTL */
915 nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TOS */ 911 nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TOS */
916 nla_total_size(sizeof(__u16)) + /* IFLA_GENEVE_PORT */ 912 nla_total_size(sizeof(__be16)) + /* IFLA_GENEVE_PORT */
917 nla_total_size(0) + /* IFLA_GENEVE_COLLECT_METADATA */ 913 nla_total_size(0) + /* IFLA_GENEVE_COLLECT_METADATA */
918 0; 914 0;
919} 915}
@@ -935,7 +931,7 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
935 nla_put_u8(skb, IFLA_GENEVE_TOS, geneve->tos)) 931 nla_put_u8(skb, IFLA_GENEVE_TOS, geneve->tos))
936 goto nla_put_failure; 932 goto nla_put_failure;
937 933
938 if (nla_put_u16(skb, IFLA_GENEVE_PORT, ntohs(geneve->dst_port))) 934 if (nla_put_be16(skb, IFLA_GENEVE_PORT, geneve->dst_port))
939 goto nla_put_failure; 935 goto nla_put_failure;
940 936
941 if (geneve->collect_md) { 937 if (geneve->collect_md) {
@@ -975,7 +971,7 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
975 if (IS_ERR(dev)) 971 if (IS_ERR(dev))
976 return dev; 972 return dev;
977 973
978 err = geneve_configure(net, dev, 0, 0, 0, 0, dst_port, true); 974 err = geneve_configure(net, dev, 0, 0, 0, 0, htons(dst_port), true);
979 if (err) { 975 if (err) {
980 free_netdev(dev); 976 free_netdev(dev);
981 return ERR_PTR(err); 977 return ERR_PTR(err);
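The geneve ECN fix is about ordering: geneve_build_skb() pushes the outer headers, after which ip_hdr(skb) no longer points at the passenger packet, so the inner header must be read first. Hoisting iip also lets the collect-metadata path feed the inner TOS into ip_tunnel_ecn_encap(), which the old code skipped. The shape, with my_build_encap() as an illustrative stand-in:

    const struct iphdr *iip = ip_hdr(skb);     /* inner header, pre-encap */

    err = my_build_encap(skb);                 /* pushes outer headers */
    if (!err)
            tos = ip_tunnel_ecn_encap(key_tos, iip, skb); /* ECN from inner */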
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index 58ae11a14bb6..64bb44d5d867 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -1031,7 +1031,6 @@ static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 baud)
1031static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed) 1031static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed)
1032{ 1032{
1033 struct ali_ircc_cb *self = priv; 1033 struct ali_ircc_cb *self = priv;
1034 unsigned long flags;
1035 int iobase; 1034 int iobase;
1036 int fcr; /* FIFO control reg */ 1035 int fcr; /* FIFO control reg */
1037 int lcr; /* Line control reg */ 1036 int lcr; /* Line control reg */
@@ -1061,8 +1060,6 @@ static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed)
1061 /* Update accounting for new speed */ 1060 /* Update accounting for new speed */
1062 self->io.speed = speed; 1061 self->io.speed = speed;
1063 1062
1064 spin_lock_irqsave(&self->lock, flags);
1065
1066 divisor = 115200/speed; 1063 divisor = 115200/speed;
1067 1064
1068 fcr = UART_FCR_ENABLE_FIFO; 1065 fcr = UART_FCR_ENABLE_FIFO;
@@ -1089,9 +1086,6 @@ static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed)
1089 /* without this, the connection will be broken after coming back from FIR speed, 1086 /* without this, the connection will be broken after coming back from FIR speed,
1090 but with this, the SIR connection is harder to establish */ 1087 but with this, the SIR connection is harder to establish */
1091 outb((UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2), iobase+UART_MCR); 1088 outb((UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2), iobase+UART_MCR);
1092
1093 spin_unlock_irqrestore(&self->lock, flags);
1094
1095} 1089}
1096 1090
1097static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed) 1091static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed)
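Dropping the spin_lock_irqsave() pair from ali_ircc_sir_change_speed() only makes sense if the caller already holds self->lock, which is the presumable rationale here (the call sites are outside this hunk): Linux spinlocks are not recursive, so re-taking the lock on the same CPU deadlocks:

    spin_lock_irqsave(&self->lock, flags);
    ali_ircc_sir_change_speed(self, speed);  /* must not take self->lock */
    spin_unlock_irqrestore(&self->lock, flags);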
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index edd77342773a..248478c6f6e4 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -1111,10 +1111,10 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
1111 return 0; 1111 return 0;
1112 1112
1113 case TUNSETSNDBUF: 1113 case TUNSETSNDBUF:
1114 if (get_user(u, up)) 1114 if (get_user(s, sp))
1115 return -EFAULT; 1115 return -EFAULT;
1116 1116
1117 q->sk.sk_sndbuf = u; 1117 q->sk.sk_sndbuf = s;
1118 return 0; 1118 return 0;
1119 1119
1120 case TUNGETVNETHDRSZ: 1120 case TUNGETVNETHDRSZ:
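The macvtap TUNSETSNDBUF fix is about signedness: sk_sndbuf is a signed int, and reading the user value through an unsigned variable let huge values wrap into negative buffer sizes. Reading through a matching int, assuming s and sp are declared in the surrounding ioctl as the hunk implies:

    int s;
    int __user *sp = argp;     /* argp: the ioctl's user pointer */

    if (get_user(s, sp))
            return -EFAULT;
    q->sk.sk_sndbuf = s;       /* types now agree with sk_sndbuf */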
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index fb1299c6326e..e23bf5b90e17 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -220,7 +220,7 @@ int fixed_phy_update_state(struct phy_device *phydev,
220 struct fixed_mdio_bus *fmb = &platform_fmb; 220 struct fixed_mdio_bus *fmb = &platform_fmb;
221 struct fixed_phy *fp; 221 struct fixed_phy *fp;
222 222
223 if (!phydev || !phydev->bus) 223 if (!phydev || phydev->bus != fmb->mii_bus)
224 return -EINVAL; 224 return -EINVAL;
225 225
226 list_for_each_entry(fp, &fmb->phys, node) { 226 list_for_each_entry(fp, &fmb->phys, node) {
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index e6897b6a8a53..5de8d5827536 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -785,6 +785,7 @@ static int marvell_read_status(struct phy_device *phydev)
785 int adv; 785 int adv;
786 int err; 786 int err;
787 int lpa; 787 int lpa;
788 int lpagb;
788 int status = 0; 789 int status = 0;
789 790
790 /* Update the link, but return if there 791 /* Update the link, but return if there
@@ -802,10 +803,17 @@ static int marvell_read_status(struct phy_device *phydev)
802 if (lpa < 0) 803 if (lpa < 0)
803 return lpa; 804 return lpa;
804 805
806 lpagb = phy_read(phydev, MII_STAT1000);
807 if (lpagb < 0)
808 return lpagb;
809
805 adv = phy_read(phydev, MII_ADVERTISE); 810 adv = phy_read(phydev, MII_ADVERTISE);
806 if (adv < 0) 811 if (adv < 0)
807 return adv; 812 return adv;
808 813
814 phydev->lp_advertising = mii_stat1000_to_ethtool_lpa_t(lpagb) |
815 mii_lpa_to_ethtool_lpa_t(lpa);
816
809 lpa &= adv; 817 lpa &= adv;
810 818
811 if (status & MII_M1011_PHY_STATUS_FULLDUPLEX) 819 if (status & MII_M1011_PHY_STATUS_FULLDUPLEX)
@@ -853,6 +861,7 @@ static int marvell_read_status(struct phy_device *phydev)
853 phydev->speed = SPEED_10; 861 phydev->speed = SPEED_10;
854 862
855 phydev->pause = phydev->asym_pause = 0; 863 phydev->pause = phydev->asym_pause = 0;
864 phydev->lp_advertising = 0;
856 } 865 }
857 866
858 return 0; 867 return 0;
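The marvell change populates phydev->lp_advertising, which ethtool reports as the link partner's abilities. Gigabit bits live in MII_STAT1000 and the 10/100 plus pause bits in MII_LPA; the mii_*_to_ethtool helpers translate both into link-mode masks, and the forced-speed branch zeroes the field since nothing was negotiated:

    int lpa = phy_read(phydev, MII_LPA);
    int lpagb = phy_read(phydev, MII_STAT1000);

    if (lpa >= 0 && lpagb >= 0)
            phydev->lp_advertising =
                    mii_stat1000_to_ethtool_lpa_t(lpagb) |
                    mii_lpa_to_ethtool_lpa_t(lpa);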
diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c
index 6a52a7f0fa0d..4bde5e728fe0 100644
--- a/drivers/net/phy/mdio-bcm-unimac.c
+++ b/drivers/net/phy/mdio-bcm-unimac.c
@@ -244,6 +244,7 @@ static const struct of_device_id unimac_mdio_ids[] = {
244 { .compatible = "brcm,unimac-mdio", }, 244 { .compatible = "brcm,unimac-mdio", },
245 { /* sentinel */ }, 245 { /* sentinel */ },
246}; 246};
247MODULE_DEVICE_TABLE(of, unimac_mdio_ids);
247 248
248static struct platform_driver unimac_mdio_driver = { 249static struct platform_driver unimac_mdio_driver = {
249 .driver = { 250 .driver = {
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 7dc21e56a7aa..3bc9f03349f3 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -261,6 +261,7 @@ static const struct of_device_id mdio_gpio_of_match[] = {
261 { .compatible = "virtual,mdio-gpio", }, 261 { .compatible = "virtual,mdio-gpio", },
262 { /* sentinel */ } 262 { /* sentinel */ }
263}; 263};
264MODULE_DEVICE_TABLE(of, mdio_gpio_of_match);
264 265
265static struct platform_driver mdio_gpio_driver = { 266static struct platform_driver mdio_gpio_driver = {
266 .probe = mdio_gpio_probe, 267 .probe = mdio_gpio_probe,
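Both MDIO drivers gain MODULE_DEVICE_TABLE(of, ...). Without it the OF match table still drives binding, but it is never exported in the module's alias list, so udev cannot autoload the module when a matching device-tree node shows up. The boilerplate, with a hypothetical driver:

    #include <linux/mod_devicetable.h>
    #include <linux/module.h>

    static const struct of_device_id demo_of_match[] = {
            { .compatible = "vendor,demo-mdio", },  /* illustrative */
            { /* sentinel */ }
    };
    MODULE_DEVICE_TABLE(of, demo_of_match);         /* emits modalias info */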
diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c
index 4d4d25efc1e1..280c7c311f72 100644
--- a/drivers/net/phy/mdio-mux.c
+++ b/drivers/net/phy/mdio-mux.c
@@ -113,18 +113,18 @@ int mdio_mux_init(struct device *dev,
113 if (!parent_bus_node) 113 if (!parent_bus_node)
114 return -ENODEV; 114 return -ENODEV;
115 115
116 parent_bus = of_mdio_find_bus(parent_bus_node);
117 if (parent_bus == NULL) {
118 ret_val = -EPROBE_DEFER;
119 goto err_parent_bus;
120 }
121
122 pb = devm_kzalloc(dev, sizeof(*pb), GFP_KERNEL); 116 pb = devm_kzalloc(dev, sizeof(*pb), GFP_KERNEL);
123 if (pb == NULL) { 117 if (pb == NULL) {
124 ret_val = -ENOMEM; 118 ret_val = -ENOMEM;
125 goto err_parent_bus; 119 goto err_parent_bus;
126 } 120 }
127 121
122 parent_bus = of_mdio_find_bus(parent_bus_node);
123 if (parent_bus == NULL) {
124 ret_val = -EPROBE_DEFER;
125 goto err_parent_bus;
126 }
127
128 pb->switch_data = data; 128 pb->switch_data = data;
129 pb->switch_fn = switch_fn; 129 pb->switch_fn = switch_fn;
130 pb->current_child = -1; 130 pb->current_child = -1;
@@ -173,6 +173,10 @@ int mdio_mux_init(struct device *dev,
173 dev_info(dev, "Version " DRV_VERSION "\n"); 173 dev_info(dev, "Version " DRV_VERSION "\n");
174 return 0; 174 return 0;
175 } 175 }
176
177 /* balance the reference of_mdio_find_bus() took */
178 put_device(&pb->mii_bus->dev);
179
176err_parent_bus: 180err_parent_bus:
177 of_node_put(parent_bus_node); 181 of_node_put(parent_bus_node);
178 return ret_val; 182 return ret_val;
@@ -189,6 +193,9 @@ void mdio_mux_uninit(void *mux_handle)
189 mdiobus_free(cb->mii_bus); 193 mdiobus_free(cb->mii_bus);
190 cb = cb->next; 194 cb = cb->next;
191 } 195 }
196
197 /* balance the reference of_mdio_find_bus() in mdio_mux_init() took */
198 put_device(&pb->mii_bus->dev);
192} 199}
193EXPORT_SYMBOL_GPL(mdio_mux_uninit); 200EXPORT_SYMBOL_GPL(mdio_mux_uninit);
194 201
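The mdio-mux changes pair every of_mdio_find_bus() with a put_device(): the pb allocation is moved first so an -ENOMEM cannot leak the freshly taken reference, the success path keeps the reference until mdio_mux_uninit(), and the late error path drops it before unwinding. The acquire/release pairing:

    parent_bus = of_mdio_find_bus(parent_bus_node);  /* takes a device ref */
    if (!parent_bus)
            return -EPROBE_DEFER;

    /* on every exit path once the bus is no longer needed: */
    put_device(&parent_bus->dev);                    /* balance the ref */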
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 02a4615b65f8..12f44c53cc8e 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -167,7 +167,9 @@ static int of_mdio_bus_match(struct device *dev, const void *mdio_bus_np)
167 * of_mdio_find_bus - Given an mii_bus node, find the mii_bus. 167 * of_mdio_find_bus - Given an mii_bus node, find the mii_bus.
168 * @mdio_bus_np: Pointer to the mii_bus. 168 * @mdio_bus_np: Pointer to the mii_bus.
169 * 169 *
170 * Returns a pointer to the mii_bus, or NULL if none found. 170 * Returns a reference to the mii_bus, or NULL if none found. The
171 * embedded struct device will have its reference count incremented,
172 * and this must be put once the bus is finished with.
171 * 173 *
172 * Because the association of a device_node and mii_bus is made via 174 * Because the association of a device_node and mii_bus is made via
173 * of_mdiobus_register(), the mii_bus cannot be found before it is 175 * of_mdiobus_register(), the mii_bus cannot be found before it is
@@ -234,15 +236,18 @@ static inline void of_mdiobus_link_phydev(struct mii_bus *mdio,
234#endif 236#endif
235 237
236/** 238/**
237 * mdiobus_register - bring up all the PHYs on a given bus and attach them to bus 239 * __mdiobus_register - bring up all the PHYs on a given bus and attach them to bus
238 * @bus: target mii_bus 240 * @bus: target mii_bus
241 * @owner: module containing bus accessor functions
239 * 242 *
240 * Description: Called by a bus driver to bring up all the PHYs 243 * Description: Called by a bus driver to bring up all the PHYs
241 * on a given bus, and attach them to the bus. 244 * on a given bus, and attach them to the bus. Drivers should use
245 * mdiobus_register() rather than __mdiobus_register() unless they
246 * need to pass a specific owner module.
242 * 247 *
243 * Returns 0 on success or < 0 on error. 248 * Returns 0 on success or < 0 on error.
244 */ 249 */
245int mdiobus_register(struct mii_bus *bus) 250int __mdiobus_register(struct mii_bus *bus, struct module *owner)
246{ 251{
247 int i, err; 252 int i, err;
248 253
@@ -253,6 +258,7 @@ int mdiobus_register(struct mii_bus *bus)
253 BUG_ON(bus->state != MDIOBUS_ALLOCATED && 258 BUG_ON(bus->state != MDIOBUS_ALLOCATED &&
254 bus->state != MDIOBUS_UNREGISTERED); 259 bus->state != MDIOBUS_UNREGISTERED);
255 260
261 bus->owner = owner;
256 bus->dev.parent = bus->parent; 262 bus->dev.parent = bus->parent;
257 bus->dev.class = &mdio_bus_class; 263 bus->dev.class = &mdio_bus_class;
258 bus->dev.groups = NULL; 264 bus->dev.groups = NULL;
@@ -288,13 +294,16 @@ int mdiobus_register(struct mii_bus *bus)
288 294
289error: 295error:
290 while (--i >= 0) { 296 while (--i >= 0) {
291 if (bus->phy_map[i]) 297 struct phy_device *phydev = bus->phy_map[i];
292 device_unregister(&bus->phy_map[i]->dev); 298 if (phydev) {
299 phy_device_remove(phydev);
300 phy_device_free(phydev);
301 }
293 } 302 }
294 device_del(&bus->dev); 303 device_del(&bus->dev);
295 return err; 304 return err;
296} 305}
297EXPORT_SYMBOL(mdiobus_register); 306EXPORT_SYMBOL(__mdiobus_register);
298 307
299void mdiobus_unregister(struct mii_bus *bus) 308void mdiobus_unregister(struct mii_bus *bus)
300{ 309{
@@ -304,9 +313,11 @@ void mdiobus_unregister(struct mii_bus *bus)
304 bus->state = MDIOBUS_UNREGISTERED; 313 bus->state = MDIOBUS_UNREGISTERED;
305 314
306 for (i = 0; i < PHY_MAX_ADDR; i++) { 315 for (i = 0; i < PHY_MAX_ADDR; i++) {
307 if (bus->phy_map[i]) 316 struct phy_device *phydev = bus->phy_map[i];
308 device_unregister(&bus->phy_map[i]->dev); 317 if (phydev) {
309 bus->phy_map[i] = NULL; 318 phy_device_remove(phydev);
319 phy_device_free(phydev);
320 }
310 } 321 }
311 device_del(&bus->dev); 322 device_del(&bus->dev);
312} 323}
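Recording bus->owner in __mdiobus_register() is what lets phy_attach_direct(), later in this series, pin the bus driver's module for as long as a PHY is attached. Existing callers keep using mdiobus_register(); presumably the matching header change, not shown in this diff, turns it into the usual wrapper:

    /* Assumed header counterpart (not part of this hunk): */
    #define mdiobus_register(bus) __mdiobus_register(bus, THIS_MODULE)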
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index c0f211127274..f761288abe66 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -384,6 +384,24 @@ int phy_device_register(struct phy_device *phydev)
384EXPORT_SYMBOL(phy_device_register); 384EXPORT_SYMBOL(phy_device_register);
385 385
386/** 386/**
387 * phy_device_remove - Remove a previously registered phy device from the MDIO bus
388 * @phydev: phy_device structure to remove
389 *
390 * This doesn't free the phy_device itself, it merely reverses the effects
391 * of phy_device_register(). Use phy_device_free() to free the device
392 * after calling this function.
393 */
394void phy_device_remove(struct phy_device *phydev)
395{
396 struct mii_bus *bus = phydev->bus;
397 int addr = phydev->addr;
398
399 device_del(&phydev->dev);
400 bus->phy_map[addr] = NULL;
401}
402EXPORT_SYMBOL(phy_device_remove);
403
404/**
387 * phy_find_first - finds the first PHY device on the bus 405 * phy_find_first - finds the first PHY device on the bus
388 * @bus: the target MII bus 406 * @bus: the target MII bus
389 */ 407 */
@@ -578,14 +596,22 @@ EXPORT_SYMBOL(phy_init_hw);
578 * generic driver is used. The phy_device is given a ptr to 596 * generic driver is used. The phy_device is given a ptr to
579 * the attaching device, and given a callback for link status 597 * the attaching device, and given a callback for link status
580 * change. The phy_device is returned to the attaching driver. 598 * change. The phy_device is returned to the attaching driver.
599 * This function takes a reference on the phy device.
581 */ 600 */
582int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, 601int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
583 u32 flags, phy_interface_t interface) 602 u32 flags, phy_interface_t interface)
584{ 603{
604 struct mii_bus *bus = phydev->bus;
585 struct device *d = &phydev->dev; 605 struct device *d = &phydev->dev;
586 struct module *bus_module;
587 int err; 606 int err;
588 607
608 if (!try_module_get(bus->owner)) {
609 dev_err(&dev->dev, "failed to get the bus module\n");
610 return -EIO;
611 }
612
613 get_device(d);
614
589 /* Assume that if there is no driver, that it doesn't 615 /* Assume that if there is no driver, that it doesn't
590 * exist, and we should use the genphy driver. 616 * exist, and we should use the genphy driver.
591 */ 617 */
@@ -600,20 +626,13 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
600 err = device_bind_driver(d); 626 err = device_bind_driver(d);
601 627
602 if (err) 628 if (err)
603 return err; 629 goto error;
604 } 630 }
605 631
606 if (phydev->attached_dev) { 632 if (phydev->attached_dev) {
607 dev_err(&dev->dev, "PHY already attached\n"); 633 dev_err(&dev->dev, "PHY already attached\n");
608 return -EBUSY; 634 err = -EBUSY;
609 } 635 goto error;
610
611 /* Increment the bus module reference count */
612 bus_module = phydev->bus->dev.driver ?
613 phydev->bus->dev.driver->owner : NULL;
614 if (!try_module_get(bus_module)) {
615 dev_err(&dev->dev, "failed to get the bus module\n");
616 return -EIO;
617 } 636 }
618 637
619 phydev->attached_dev = dev; 638 phydev->attached_dev = dev;
@@ -636,6 +655,11 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
636 phy_resume(phydev); 655 phy_resume(phydev);
637 656
638 return err; 657 return err;
658
659error:
660 put_device(d);
661 module_put(bus->owner);
662 return err;
639} 663}
640EXPORT_SYMBOL(phy_attach_direct); 664EXPORT_SYMBOL(phy_attach_direct);
641 665
@@ -677,14 +701,15 @@ EXPORT_SYMBOL(phy_attach);
677/** 701/**
678 * phy_detach - detach a PHY device from its network device 702 * phy_detach - detach a PHY device from its network device
679 * @phydev: target phy_device struct 703 * @phydev: target phy_device struct
704 *
705 * This detaches the phy device from its network device and the phy
706 * driver, and drops the reference count taken in phy_attach_direct().
680 */ 707 */
681void phy_detach(struct phy_device *phydev) 708void phy_detach(struct phy_device *phydev)
682{ 709{
710 struct mii_bus *bus;
683 int i; 711 int i;
684 712
685 if (phydev->bus->dev.driver)
686 module_put(phydev->bus->dev.driver->owner);
687
688 phydev->attached_dev->phydev = NULL; 713 phydev->attached_dev->phydev = NULL;
689 phydev->attached_dev = NULL; 714 phydev->attached_dev = NULL;
690 phy_suspend(phydev); 715 phy_suspend(phydev);
@@ -700,6 +725,15 @@ void phy_detach(struct phy_device *phydev)
700 break; 725 break;
701 } 726 }
702 } 727 }
728
729 /*
730 * The phydev might go away on the put_device() below, so avoid
731 * a use-after-free bug by reading the underlying bus first.
732 */
733 bus = phydev->bus;
734
735 put_device(&phydev->dev);
736 module_put(bus->owner);
703} 737}
704EXPORT_SYMBOL(phy_detach); 738EXPORT_SYMBOL(phy_detach);
705 739
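phy_attach_direct()/phy_detach() now form a strict pairing: attach takes try_module_get(bus->owner) plus get_device() on the PHY, and detach (or the attach error path) releases both. The subtle part, called out in the added comment, is release order: put_device() may drop the final reference, so anything still needed from phydev must be read first:

    struct mii_bus *bus = phydev->bus;  /* save before the final put */

    put_device(&phydev->dev);           /* phydev may be freed here */
    module_put(bus->owner);             /* safe: uses the saved pointer */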
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 17cad185169d..76cad712ddb2 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -66,7 +66,6 @@
66#define PHY_ID_VSC8244 0x000fc6c0 66#define PHY_ID_VSC8244 0x000fc6c0
67#define PHY_ID_VSC8514 0x00070670 67#define PHY_ID_VSC8514 0x00070670
68#define PHY_ID_VSC8574 0x000704a0 68#define PHY_ID_VSC8574 0x000704a0
69#define PHY_ID_VSC8641 0x00070431
70#define PHY_ID_VSC8662 0x00070660 69#define PHY_ID_VSC8662 0x00070660
71#define PHY_ID_VSC8221 0x000fc550 70#define PHY_ID_VSC8221 0x000fc550
72#define PHY_ID_VSC8211 0x000fc4b0 71#define PHY_ID_VSC8211 0x000fc4b0
@@ -273,18 +272,6 @@ static struct phy_driver vsc82xx_driver[] = {
273 .config_intr = &vsc82xx_config_intr, 272 .config_intr = &vsc82xx_config_intr,
274 .driver = { .owner = THIS_MODULE,}, 273 .driver = { .owner = THIS_MODULE,},
275}, { 274}, {
276 .phy_id = PHY_ID_VSC8641,
277 .name = "Vitesse VSC8641",
278 .phy_id_mask = 0x000ffff0,
279 .features = PHY_GBIT_FEATURES,
280 .flags = PHY_HAS_INTERRUPT,
281 .config_init = &vsc824x_config_init,
282 .config_aneg = &vsc82x4_config_aneg,
283 .read_status = &genphy_read_status,
284 .ack_interrupt = &vsc824x_ack_interrupt,
285 .config_intr = &vsc82xx_config_intr,
286 .driver = { .owner = THIS_MODULE,},
287}, {
288 .phy_id = PHY_ID_VSC8662, 275 .phy_id = PHY_ID_VSC8662,
289 .name = "Vitesse VSC8662", 276 .name = "Vitesse VSC8662",
290 .phy_id_mask = 0x000ffff0, 277 .phy_id_mask = 0x000ffff0,
@@ -331,7 +318,6 @@ static struct mdio_device_id __maybe_unused vitesse_tbl[] = {
331 { PHY_ID_VSC8244, 0x000fffc0 }, 318 { PHY_ID_VSC8244, 0x000fffc0 },
332 { PHY_ID_VSC8514, 0x000ffff0 }, 319 { PHY_ID_VSC8514, 0x000ffff0 },
333 { PHY_ID_VSC8574, 0x000ffff0 }, 320 { PHY_ID_VSC8574, 0x000ffff0 },
334 { PHY_ID_VSC8641, 0x000ffff0 },
335 { PHY_ID_VSC8662, 0x000ffff0 }, 321 { PHY_ID_VSC8662, 0x000ffff0 },
336 { PHY_ID_VSC8221, 0x000ffff0 }, 322 { PHY_ID_VSC8221, 0x000ffff0 },
337 { PHY_ID_VSC8211, 0x000ffff0 }, 323 { PHY_ID_VSC8211, 0x000ffff0 },
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 0481daf9201a..ed00446759b2 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -2755,6 +2755,7 @@ static struct ppp *ppp_create_interface(struct net *net, int unit,
2755 */ 2755 */
2756 dev_net_set(dev, net); 2756 dev_net_set(dev, net);
2757 2757
2758 rtnl_lock();
2758 mutex_lock(&pn->all_ppp_mutex); 2759 mutex_lock(&pn->all_ppp_mutex);
2759 2760
2760 if (unit < 0) { 2761 if (unit < 0) {
@@ -2785,7 +2786,7 @@ static struct ppp *ppp_create_interface(struct net *net, int unit,
2785 ppp->file.index = unit; 2786 ppp->file.index = unit;
2786 sprintf(dev->name, "ppp%d", unit); 2787 sprintf(dev->name, "ppp%d", unit);
2787 2788
2788 ret = register_netdev(dev); 2789 ret = register_netdevice(dev);
2789 if (ret != 0) { 2790 if (ret != 0) {
2790 unit_put(&pn->units_idr, unit); 2791 unit_put(&pn->units_idr, unit);
2791 netdev_err(ppp->dev, "PPP: couldn't register device %s (%d)\n", 2792 netdev_err(ppp->dev, "PPP: couldn't register device %s (%d)\n",
@@ -2797,6 +2798,7 @@ static struct ppp *ppp_create_interface(struct net *net, int unit,
2797 2798
2798 atomic_inc(&ppp_unit_count); 2799 atomic_inc(&ppp_unit_count);
2799 mutex_unlock(&pn->all_ppp_mutex); 2800 mutex_unlock(&pn->all_ppp_mutex);
2801 rtnl_unlock();
2800 2802
2801 *retp = 0; 2803 *retp = 0;
2802 return ppp; 2804 return ppp;
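Switching ppp_create_interface() to register_netdevice() under an explicit rtnl_lock() keeps RTNL held across both unit-number allocation and registration, so nothing can race in and claim the same ppp<N> name in between; plain register_netdev() would try to take RTNL itself and deadlock here:

    rtnl_lock();
    mutex_lock(&pn->all_ppp_mutex);

    ret = register_netdevice(dev);   /* caller must already hold RTNL */

    mutex_unlock(&pn->all_ppp_mutex);
    rtnl_unlock();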
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 1610b79ae386..fbb9325d1f6e 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -583,4 +583,15 @@ config USB_VL600
583 583
584 http://ubuntuforums.org/showpost.php?p=10589647&postcount=17 584 http://ubuntuforums.org/showpost.php?p=10589647&postcount=17
585 585
586config USB_NET_CH9200
587 tristate "QingHeng CH9200 USB ethernet support"
588 depends on USB_USBNET
589 select MII
590 help
591 Choose this option if you have a USB ethernet adapter with a QingHeng
592 CH9200 chipset.
593
594 To compile this driver as a module, choose M here: the
595 module will be called ch9200.
596
586endif # USB_NET_DRIVERS 597endif # USB_NET_DRIVERS
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index cf6a0e610a7f..b5f04068dbe4 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -38,4 +38,4 @@ obj-$(CONFIG_USB_NET_HUAWEI_CDC_NCM) += huawei_cdc_ncm.o
38obj-$(CONFIG_USB_VL600) += lg-vl600.o 38obj-$(CONFIG_USB_VL600) += lg-vl600.o
39obj-$(CONFIG_USB_NET_QMI_WWAN) += qmi_wwan.o 39obj-$(CONFIG_USB_NET_QMI_WWAN) += qmi_wwan.o
40obj-$(CONFIG_USB_NET_CDC_MBIM) += cdc_mbim.o 40obj-$(CONFIG_USB_NET_CDC_MBIM) += cdc_mbim.o
41 41obj-$(CONFIG_USB_NET_CH9200) += ch9200.o
diff --git a/drivers/net/usb/ch9200.c b/drivers/net/usb/ch9200.c
new file mode 100644
index 000000000000..5e151e6a3e09
--- /dev/null
+++ b/drivers/net/usb/ch9200.c
@@ -0,0 +1,432 @@
1/*
2 * USB 10M/100M ethernet adapter
3 *
4 * This file is licensed under the terms of the GNU General Public License
5 * version 2. This program is licensed "as is" without any warranty of any
6 * kind, whether express or implied
7 *
8 */
9
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/sched.h>
13#include <linux/stddef.h>
14#include <linux/init.h>
15#include <linux/netdevice.h>
16#include <linux/etherdevice.h>
17#include <linux/ethtool.h>
18#include <linux/mii.h>
19#include <linux/usb.h>
20#include <linux/crc32.h>
21#include <linux/usb/usbnet.h>
22#include <linux/slab.h>
23
24#define CH9200_VID 0x1A86
25#define CH9200_PID_E092 0xE092
26
27#define CTRL_TIMEOUT_MS 1000
28
29#define CONTROL_TIMEOUT_MS 1000
30
31#define REQUEST_READ 0x0E
32#define REQUEST_WRITE 0x0F
33
34/* Address space:
35 * 00-63 : MII
36 * 64-128: MAC
37 *
38 * Note: all accesses must be 16-bit
39 */
40
41#define MAC_REG_CTRL 64
42#define MAC_REG_STATUS 66
43#define MAC_REG_INTERRUPT_MASK 68
44#define MAC_REG_PHY_COMMAND 70
45#define MAC_REG_PHY_DATA 72
46#define MAC_REG_STATION_L 74
47#define MAC_REG_STATION_M 76
48#define MAC_REG_STATION_H 78
49#define MAC_REG_HASH_L 80
50#define MAC_REG_HASH_M1 82
51#define MAC_REG_HASH_M2 84
52#define MAC_REG_HASH_H 86
53#define MAC_REG_THRESHOLD 88
54#define MAC_REG_FIFO_DEPTH 90
55#define MAC_REG_PAUSE 92
56#define MAC_REG_FLOW_CONTROL 94
57
58/* Control register bits
59 *
60 * Note: bits 13 and 15 are reserved
61 */
62#define LOOPBACK (0x01 << 14)
63#define BASE100X (0x01 << 12)
64#define MBPS_10 (0x01 << 11)
65#define DUPLEX_MODE (0x01 << 10)
66#define PAUSE_FRAME (0x01 << 9)
67#define PROMISCUOUS (0x01 << 8)
68#define MULTICAST (0x01 << 7)
69#define BROADCAST (0x01 << 6)
70#define HASH (0x01 << 5)
71#define APPEND_PAD (0x01 << 4)
72#define APPEND_CRC (0x01 << 3)
73#define TRANSMITTER_ACTION (0x01 << 2)
74#define RECEIVER_ACTION (0x01 << 1)
75#define DMA_ACTION (0x01 << 0)
76
77/* Status register bits
78 *
79 * Note: bits 7-15 are reserved
80 */
81#define ALIGNMENT (0x01 << 6)
82#define FIFO_OVER_RUN (0x01 << 5)
83#define FIFO_UNDER_RUN (0x01 << 4)
84#define RX_ERROR (0x01 << 3)
85#define RX_COMPLETE (0x01 << 2)
86#define TX_ERROR (0x01 << 1)
87#define TX_COMPLETE (0x01 << 0)
88
89/* FIFO depth register bits
90 *
91 * Note: bits 6 and 14 are reserved
92 */
93
94#define ETH_TXBD (0x01 << 15)
95#define ETN_TX_FIFO_DEPTH (0x01 << 8)
96#define ETH_RXBD (0x01 << 7)
97#define ETH_RX_FIFO_DEPTH (0x01 << 0)
98
99static int control_read(struct usbnet *dev,
100 unsigned char request, unsigned short value,
101 unsigned short index, void *data, unsigned short size,
102 int timeout)
103{
104 unsigned char *buf = NULL;
105 unsigned char request_type;
106 int err = 0;
107
108 if (request == REQUEST_READ)
109 request_type = (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_OTHER);
110 else
111 request_type = (USB_DIR_IN | USB_TYPE_VENDOR |
112 USB_RECIP_DEVICE);
113
114 netdev_dbg(dev->net, "Control_read() index=0x%02x size=%d\n",
115 index, size);
116
117 buf = kmalloc(size, GFP_KERNEL);
118 if (!buf) {
119 err = -ENOMEM;
120 goto err_out;
121 }
122
123 err = usb_control_msg(dev->udev,
124 usb_rcvctrlpipe(dev->udev, 0),
125 request, request_type, value, index, buf, size,
126 timeout);
127 if (err == size)
128 memcpy(data, buf, size);
129 else if (err >= 0)
130 err = -EINVAL;
131 kfree(buf);
132
133 return err;
134
135err_out:
136 return err;
137}
138
139static int control_write(struct usbnet *dev, unsigned char request,
140 unsigned short value, unsigned short index,
141 void *data, unsigned short size, int timeout)
142{
143 unsigned char *buf = NULL;
144 unsigned char request_type;
145 int err = 0;
146
147 if (request == REQUEST_WRITE)
148 request_type = (USB_DIR_OUT | USB_TYPE_VENDOR |
149 USB_RECIP_OTHER);
150 else
151 request_type = (USB_DIR_OUT | USB_TYPE_VENDOR |
152 USB_RECIP_DEVICE);
153
154 netdev_dbg(dev->net, "Control_write() index=0x%02x size=%d\n",
155 index, size);
156
157 if (data) {
158 buf = kmalloc(size, GFP_KERNEL);
159 if (!buf) {
160 err = -ENOMEM;
161 goto err_out;
162 }
163 memcpy(buf, data, size);
164 }
165
166 err = usb_control_msg(dev->udev,
167 usb_sndctrlpipe(dev->udev, 0),
168 request, request_type, value, index, buf, size,
169 timeout);
170 if (err >= 0 && err < size)
171 err = -EINVAL;
172 kfree(buf);
173
174	return err < 0 ? err : 0;
175
176err_out:
177 return err;
178}
179
180static int ch9200_mdio_read(struct net_device *netdev, int phy_id, int loc)
181{
182 struct usbnet *dev = netdev_priv(netdev);
183 unsigned char buff[2];
184
185 netdev_dbg(netdev, "ch9200_mdio_read phy_id:%02x loc:%02x\n",
186 phy_id, loc);
187
188 if (phy_id != 0)
189 return -ENODEV;
190
191 control_read(dev, REQUEST_READ, 0, loc * 2, buff, 0x02,
192 CONTROL_TIMEOUT_MS);
193
194 return (buff[0] | buff[1] << 8);
195}
196
197static void ch9200_mdio_write(struct net_device *netdev,
198 int phy_id, int loc, int val)
199{
200 struct usbnet *dev = netdev_priv(netdev);
201 unsigned char buff[2];
202
203 netdev_dbg(netdev, "ch9200_mdio_write() phy_id=%02x loc:%02x\n",
204 phy_id, loc);
205
206 if (phy_id != 0)
207 return;
208
209 buff[0] = (unsigned char)val;
210 buff[1] = (unsigned char)(val >> 8);
211
212 control_write(dev, REQUEST_WRITE, 0, loc * 2, buff, 0x02,
213 CONTROL_TIMEOUT_MS);
214}
215
216static int ch9200_link_reset(struct usbnet *dev)
217{
218 struct ethtool_cmd ecmd;
219
220 mii_check_media(&dev->mii, 1, 1);
221 mii_ethtool_gset(&dev->mii, &ecmd);
222
223 netdev_dbg(dev->net, "link_reset() speed:%d duplex:%d\n",
224 ecmd.speed, ecmd.duplex);
225
226 return 0;
227}
228
229static void ch9200_status(struct usbnet *dev, struct urb *urb)
230{
231 int link;
232 unsigned char *buf;
233
234 if (urb->actual_length < 16)
235 return;
236
237 buf = urb->transfer_buffer;
238 link = !!(buf[0] & 0x01);
239
240 if (link) {
241 netif_carrier_on(dev->net);
242 usbnet_defer_kevent(dev, EVENT_LINK_RESET);
243 } else {
244 netif_carrier_off(dev->net);
245 }
246}
247
248static struct sk_buff *ch9200_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
249 gfp_t flags)
250{
251 int i = 0;
252 int len = 0;
253 int tx_overhead = 0;
254
255 tx_overhead = 0x40;
256
257 len = skb->len;
258 if (skb_headroom(skb) < tx_overhead) {
259 struct sk_buff *skb2;
260
261 skb2 = skb_copy_expand(skb, tx_overhead, 0, flags);
262 dev_kfree_skb_any(skb);
263 skb = skb2;
264 if (!skb)
265 return NULL;
266 }
267
268 __skb_push(skb, tx_overhead);
269	/* usbnet adds padding if the length is a multiple of the USB
270	 * packet size; if so, adjust the length value in the header.
271 */
272 if ((skb->len % dev->maxpacket) == 0)
273 len++;
274
275 skb->data[0] = len;
276 skb->data[1] = len >> 8;
277 skb->data[2] = 0x00;
278 skb->data[3] = 0x80;
279
280 for (i = 4; i < 48; i++)
281 skb->data[i] = 0x00;
282
283 skb->data[48] = len;
284 skb->data[49] = len >> 8;
285 skb->data[50] = 0x00;
286 skb->data[51] = 0x80;
287
288 for (i = 52; i < 64; i++)
289 skb->data[i] = 0x00;
290
291 return skb;
292}
293
294static int ch9200_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
295{
296 int len = 0;
297 int rx_overhead = 0;
298
299 rx_overhead = 64;
300
301 if (unlikely(skb->len < rx_overhead)) {
302 dev_err(&dev->udev->dev, "unexpected tiny rx frame\n");
303 return 0;
304 }
305
306 len = (skb->data[skb->len - 16] | skb->data[skb->len - 15] << 8);
307 skb_trim(skb, len);
308
309 return 1;
310}
311
312static int get_mac_address(struct usbnet *dev, unsigned char *data)
313{
314 int err = 0;
315 unsigned char mac_addr[0x06];
316 int rd_mac_len = 0;
317
318 netdev_dbg(dev->net, "get_mac_address:\n\tusbnet VID:%0x PID:%0x\n",
319 dev->udev->descriptor.idVendor,
320 dev->udev->descriptor.idProduct);
321
322 memset(mac_addr, 0, sizeof(mac_addr));
323 rd_mac_len = control_read(dev, REQUEST_READ, 0,
324 MAC_REG_STATION_L, mac_addr, 0x02,
325 CONTROL_TIMEOUT_MS);
326 rd_mac_len += control_read(dev, REQUEST_READ, 0, MAC_REG_STATION_M,
327 mac_addr + 2, 0x02, CONTROL_TIMEOUT_MS);
328 rd_mac_len += control_read(dev, REQUEST_READ, 0, MAC_REG_STATION_H,
329 mac_addr + 4, 0x02, CONTROL_TIMEOUT_MS);
330 if (rd_mac_len != ETH_ALEN)
331 err = -EINVAL;
332
333 data[0] = mac_addr[5];
334 data[1] = mac_addr[4];
335 data[2] = mac_addr[3];
336 data[3] = mac_addr[2];
337 data[4] = mac_addr[1];
338 data[5] = mac_addr[0];
339
340 return err;
341}
342
343static int ch9200_bind(struct usbnet *dev, struct usb_interface *intf)
344{
345 int retval = 0;
346 unsigned char data[2];
347
348 retval = usbnet_get_endpoints(dev, intf);
349 if (retval)
350 return retval;
351
352 dev->mii.dev = dev->net;
353 dev->mii.mdio_read = ch9200_mdio_read;
354 dev->mii.mdio_write = ch9200_mdio_write;
355 dev->mii.reg_num_mask = 0x1f;
356
357 dev->mii.phy_id_mask = 0x1f;
358
359 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
360 dev->rx_urb_size = 24 * 64 + 16;
361 mii_nway_restart(&dev->mii);
362
363 data[0] = 0x01;
364 data[1] = 0x0F;
365 retval = control_write(dev, REQUEST_WRITE, 0, MAC_REG_THRESHOLD, data,
366 0x02, CONTROL_TIMEOUT_MS);
367
368 data[0] = 0xA0;
369 data[1] = 0x90;
370 retval = control_write(dev, REQUEST_WRITE, 0, MAC_REG_FIFO_DEPTH, data,
371 0x02, CONTROL_TIMEOUT_MS);
372
373 data[0] = 0x30;
374 data[1] = 0x00;
375 retval = control_write(dev, REQUEST_WRITE, 0, MAC_REG_PAUSE, data,
376 0x02, CONTROL_TIMEOUT_MS);
377
378 data[0] = 0x17;
379 data[1] = 0xD8;
380 retval = control_write(dev, REQUEST_WRITE, 0, MAC_REG_FLOW_CONTROL,
381 data, 0x02, CONTROL_TIMEOUT_MS);
382
383 /* Undocumented register */
384 data[0] = 0x01;
385 data[1] = 0x00;
386 retval = control_write(dev, REQUEST_WRITE, 0, 254, data, 0x02,
387 CONTROL_TIMEOUT_MS);
388
389 data[0] = 0x5F;
390 data[1] = 0x0D;
391 retval = control_write(dev, REQUEST_WRITE, 0, MAC_REG_CTRL, data, 0x02,
392 CONTROL_TIMEOUT_MS);
393
394 retval = get_mac_address(dev, dev->net->dev_addr);
395
396 return retval;
397}
398
399static const struct driver_info ch9200_info = {
400 .description = "CH9200 USB to Network Adaptor",
401 .flags = FLAG_ETHER,
402 .bind = ch9200_bind,
403 .rx_fixup = ch9200_rx_fixup,
404 .tx_fixup = ch9200_tx_fixup,
405 .status = ch9200_status,
406 .link_reset = ch9200_link_reset,
407 .reset = ch9200_link_reset,
408};
409
410static const struct usb_device_id ch9200_products[] = {
411 {
412 USB_DEVICE(0x1A86, 0xE092),
413 .driver_info = (unsigned long)&ch9200_info,
414 },
415 {},
416};
417
418MODULE_DEVICE_TABLE(usb, ch9200_products);
419
420static struct usb_driver ch9200_driver = {
421 .name = "ch9200",
422 .id_table = ch9200_products,
423 .probe = usbnet_probe,
424 .disconnect = usbnet_disconnect,
425 .suspend = usbnet_suspend,
426 .resume = usbnet_resume,
427};
428
429module_usb_driver(ch9200_driver);
430
431MODULE_DESCRIPTION("QinHeng CH9200 USB Network device");
432MODULE_LICENSE("GPL");
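
Every CH9200 register access in the driver above is a 16-bit vendor control transfer. As a hedged usage sketch (not part of the driver itself), reading one MAC register through the helper looks like this; the little-endian byte order matches what ch9200_mdio_read() does:

    u16 ctrl;
    unsigned char buf[2];
    int ret;

    /* Registers are 16 bits wide; MAC space starts at offset 64. */
    ret = control_read(dev, REQUEST_READ, 0, MAC_REG_CTRL,
                       buf, 0x02, CONTROL_TIMEOUT_MS);
    if (ret < 0)
        return ret;

    ctrl = buf[0] | (buf[1] << 8);    /* little-endian on the wire */
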
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index e7094fbd7568..488c6f50df73 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -193,7 +193,8 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
193 .flowi4_oif = vrf_dev->ifindex, 193 .flowi4_oif = vrf_dev->ifindex,
194 .flowi4_iif = LOOPBACK_IFINDEX, 194 .flowi4_iif = LOOPBACK_IFINDEX,
195 .flowi4_tos = RT_TOS(ip4h->tos), 195 .flowi4_tos = RT_TOS(ip4h->tos),
196 .flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_VRFSRC, 196 .flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_VRFSRC |
197 FLOWI_FLAG_SKIP_NH_OIF,
197 .daddr = ip4h->daddr, 198 .daddr = ip4h->daddr,
198 }; 199 };
199 200
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index cf8b7f0473b3..bbac1d35ed4e 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2392,10 +2392,6 @@ static void vxlan_setup(struct net_device *dev)
2392 2392
2393 eth_hw_addr_random(dev); 2393 eth_hw_addr_random(dev);
2394 ether_setup(dev); 2394 ether_setup(dev);
2395 if (vxlan->default_dst.remote_ip.sa.sa_family == AF_INET6)
2396 dev->needed_headroom = ETH_HLEN + VXLAN6_HEADROOM;
2397 else
2398 dev->needed_headroom = ETH_HLEN + VXLAN_HEADROOM;
2399 2395
2400 dev->netdev_ops = &vxlan_netdev_ops; 2396 dev->netdev_ops = &vxlan_netdev_ops;
2401 dev->destructor = free_netdev; 2397 dev->destructor = free_netdev;
@@ -2640,8 +2636,11 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
2640 dst->remote_ip.sa.sa_family = AF_INET; 2636 dst->remote_ip.sa.sa_family = AF_INET;
2641 2637
2642 if (dst->remote_ip.sa.sa_family == AF_INET6 || 2638 if (dst->remote_ip.sa.sa_family == AF_INET6 ||
2643 vxlan->cfg.saddr.sa.sa_family == AF_INET6) 2639 vxlan->cfg.saddr.sa.sa_family == AF_INET6) {
2640 if (!IS_ENABLED(CONFIG_IPV6))
2641 return -EPFNOSUPPORT;
2644 use_ipv6 = true; 2642 use_ipv6 = true;
2643 }
2645 2644
2646 if (conf->remote_ifindex) { 2645 if (conf->remote_ifindex) {
2647 struct net_device *lowerdev 2646 struct net_device *lowerdev
@@ -2670,8 +2669,12 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
2670 2669
2671 dev->needed_headroom = lowerdev->hard_header_len + 2670 dev->needed_headroom = lowerdev->hard_header_len +
2672 (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM); 2671 (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
2673 } else if (use_ipv6) 2672 } else if (use_ipv6) {
2674 vxlan->flags |= VXLAN_F_IPV6; 2673 vxlan->flags |= VXLAN_F_IPV6;
2674 dev->needed_headroom = ETH_HLEN + VXLAN6_HEADROOM;
2675 } else {
2676 dev->needed_headroom = ETH_HLEN + VXLAN_HEADROOM;
2677 }
2675 2678
2676 memcpy(&vxlan->cfg, conf, sizeof(*conf)); 2679 memcpy(&vxlan->cfg, conf, sizeof(*conf));
2677 if (!vxlan->cfg.dst_port) 2680 if (!vxlan->cfg.dst_port)
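
Moving the headroom assignment out of vxlan_setup() and into vxlan_dev_configure() is what makes the value trustworthy: at setup time the remote address family is not yet known, so the old code guessed. After the change the decision is made once the configuration has been parsed; schematically (all names from the hunks above):

    /* Reserve room for the full encapsulation: outer Ethernet (or the
     * lower device's header) + outer IPv4/IPv6 + UDP + VXLAN header.
     */
    if (lowerdev)
        dev->needed_headroom = lowerdev->hard_header_len +
                               (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
    else
        dev->needed_headroom = ETH_HLEN +
                               (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
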
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 1350fa25cdb0..a87a868fed64 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -197,7 +197,8 @@ static int of_phy_match(struct device *dev, void *phy_np)
197 * of_phy_find_device - Given a PHY node, find the phy_device 197 * of_phy_find_device - Given a PHY node, find the phy_device
198 * @phy_np: Pointer to the phy's device tree node 198 * @phy_np: Pointer to the phy's device tree node
199 * 199 *
200 * Returns a pointer to the phy_device. 200 * If successful, returns a pointer to the phy_device with the embedded
201 * struct device refcount incremented by one, or NULL on failure.
201 */ 202 */
202struct phy_device *of_phy_find_device(struct device_node *phy_np) 203struct phy_device *of_phy_find_device(struct device_node *phy_np)
203{ 204{
@@ -217,7 +218,9 @@ EXPORT_SYMBOL(of_phy_find_device);
217 * @hndlr: Link state callback for the network device 218 * @hndlr: Link state callback for the network device
218 * @iface: PHY data interface type 219 * @iface: PHY data interface type
219 * 220 *
220 * Returns a pointer to the phy_device if successful. NULL otherwise 221 * If successful, returns a pointer to the phy_device with the embedded
222 * struct device refcount incremented by one, or NULL on failure. The
223 * refcount must be dropped by calling phy_disconnect() or phy_detach().
221 */ 224 */
222struct phy_device *of_phy_connect(struct net_device *dev, 225struct phy_device *of_phy_connect(struct net_device *dev,
223 struct device_node *phy_np, 226 struct device_node *phy_np,
@@ -225,13 +228,19 @@ struct phy_device *of_phy_connect(struct net_device *dev,
225 phy_interface_t iface) 228 phy_interface_t iface)
226{ 229{
227 struct phy_device *phy = of_phy_find_device(phy_np); 230 struct phy_device *phy = of_phy_find_device(phy_np);
231 int ret;
228 232
229 if (!phy) 233 if (!phy)
230 return NULL; 234 return NULL;
231 235
232 phy->dev_flags = flags; 236 phy->dev_flags = flags;
233 237
234 return phy_connect_direct(dev, phy, hndlr, iface) ? NULL : phy; 238 ret = phy_connect_direct(dev, phy, hndlr, iface);
239
240 /* refcount is held by phy_connect_direct() on success */
241 put_device(&phy->dev);
242
243 return ret ? NULL : phy;
235} 244}
236EXPORT_SYMBOL(of_phy_connect); 245EXPORT_SYMBOL(of_phy_connect);
237 246
@@ -241,17 +250,27 @@ EXPORT_SYMBOL(of_phy_connect);
241 * @phy_np: Node pointer for the PHY 250 * @phy_np: Node pointer for the PHY
242 * @flags: flags to pass to the PHY 251 * @flags: flags to pass to the PHY
243 * @iface: PHY data interface type 252 * @iface: PHY data interface type
253 *
254 * If successful, returns a pointer to the phy_device with the embedded
255 * struct device refcount incremented by one, or NULL on failure. The
256 * refcount must be dropped by calling phy_disconnect() or phy_detach().
244 */ 257 */
245struct phy_device *of_phy_attach(struct net_device *dev, 258struct phy_device *of_phy_attach(struct net_device *dev,
246 struct device_node *phy_np, u32 flags, 259 struct device_node *phy_np, u32 flags,
247 phy_interface_t iface) 260 phy_interface_t iface)
248{ 261{
249 struct phy_device *phy = of_phy_find_device(phy_np); 262 struct phy_device *phy = of_phy_find_device(phy_np);
263 int ret;
250 264
251 if (!phy) 265 if (!phy)
252 return NULL; 266 return NULL;
253 267
254 return phy_attach_direct(dev, phy, flags, iface) ? NULL : phy; 268 ret = phy_attach_direct(dev, phy, flags, iface);
269
270 /* refcount is held by phy_attach_direct() on success */
271 put_device(&phy->dev);
272
273 return ret ? NULL : phy;
255} 274}
256EXPORT_SYMBOL(of_phy_attach); 275EXPORT_SYMBOL(of_phy_attach);
257 276
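
The new kernel-doc above spells out a reference-counting contract that direct callers of of_phy_find_device() must honor themselves; of_phy_connect() and of_phy_attach() now do so internally by dropping their lookup reference once phy_connect_direct()/phy_attach_direct() holds its own. A minimal caller-side sketch under that contract:

    struct phy_device *phy;

    phy = of_phy_find_device(phy_np);
    if (!phy)
        return -ENODEV;

    /* ... use the PHY ... */

    /* The lookup took a reference on the embedded struct device,
     * so every successful of_phy_find_device() needs a matching put.
     */
    put_device(&phy->dev);
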
diff --git a/drivers/of/of_pci_irq.c b/drivers/of/of_pci_irq.c
index 1710d9dc7fc2..2306313c0029 100644
--- a/drivers/of/of_pci_irq.c
+++ b/drivers/of/of_pci_irq.c
@@ -38,8 +38,8 @@ int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq
38 */ 38 */
39 rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin); 39 rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin);
40 if (rc != 0) 40 if (rc != 0)
41 return rc; 41 goto err;
42 /* No pin, exit */ 42 /* No pin, exit with no error message. */
43 if (pin == 0) 43 if (pin == 0)
44 return -ENODEV; 44 return -ENODEV;
45 45
@@ -53,8 +53,10 @@ int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq
53 ppnode = pci_bus_to_OF_node(pdev->bus); 53 ppnode = pci_bus_to_OF_node(pdev->bus);
54 54
55 /* No node for host bridge ? give up */ 55 /* No node for host bridge ? give up */
56 if (ppnode == NULL) 56 if (ppnode == NULL) {
57 return -EINVAL; 57 rc = -EINVAL;
58 goto err;
59 }
58 } else { 60 } else {
59 /* We found a P2P bridge, check if it has a node */ 61 /* We found a P2P bridge, check if it has a node */
60 ppnode = pci_device_to_OF_node(ppdev); 62 ppnode = pci_device_to_OF_node(ppdev);
@@ -86,7 +88,13 @@ int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq
86 out_irq->args[0] = pin; 88 out_irq->args[0] = pin;
87 laddr[0] = cpu_to_be32((pdev->bus->number << 16) | (pdev->devfn << 8)); 89 laddr[0] = cpu_to_be32((pdev->bus->number << 16) | (pdev->devfn << 8));
88 laddr[1] = laddr[2] = cpu_to_be32(0); 90 laddr[1] = laddr[2] = cpu_to_be32(0);
89 return of_irq_parse_raw(laddr, out_irq); 91 rc = of_irq_parse_raw(laddr, out_irq);
92 if (rc)
93 goto err;
94 return 0;
95err:
96 dev_err(&pdev->dev, "of_irq_parse_pci() failed with rc=%d\n", rc);
97 return rc;
90} 98}
91EXPORT_SYMBOL_GPL(of_irq_parse_pci); 99EXPORT_SYMBOL_GPL(of_irq_parse_pci);
92 100
@@ -105,10 +113,8 @@ int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin)
105 int ret; 113 int ret;
106 114
107 ret = of_irq_parse_pci(dev, &oirq); 115 ret = of_irq_parse_pci(dev, &oirq);
108 if (ret) { 116 if (ret)
109 dev_err(&dev->dev, "of_irq_parse_pci() failed with rc=%d\n", ret);
110 return 0; /* Proper return code 0 == NO_IRQ */ 117 return 0; /* Proper return code 0 == NO_IRQ */
111 }
112 118
113 return irq_create_of_mapping(&oirq); 119 return irq_create_of_mapping(&oirq);
114} 120}
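
With the error message folded into of_irq_parse_pci() itself, of_irq_parse_and_map_pci() keeps its simple contract: any failure maps to 0, which callers treat as NO_IRQ. A hedged sketch of a typical caller (the slot/pin arguments are unused by the OF parsing path):

    int irq = of_irq_parse_and_map_pci(pdev, 0, 0);

    if (irq == 0)        /* 0 == NO_IRQ: no usable interrupt */
        return -ENODEV;
    pdev->irq = irq;
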
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index baec33c4e698..a0580afe1713 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -560,6 +560,9 @@ dino_fixup_bus(struct pci_bus *bus)
560 } else if (bus->parent) { 560 } else if (bus->parent) {
561 int i; 561 int i;
562 562
563 pci_read_bridge_bases(bus);
564
565
563 for(i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) { 566 for(i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
564 if((bus->self->resource[i].flags & 567 if((bus->self->resource[i].flags &
565 (IORESOURCE_IO | IORESOURCE_MEM)) == 0) 568 (IORESOURCE_IO | IORESOURCE_MEM)) == 0)
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index 7b9e89ba0465..a32c1f6c252c 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -693,6 +693,7 @@ lba_fixup_bus(struct pci_bus *bus)
693 if (bus->parent) { 693 if (bus->parent) {
694 int i; 694 int i;
695 /* PCI-PCI Bridge */ 695 /* PCI-PCI Bridge */
696 pci_read_bridge_bases(bus);
696 for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) 697 for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++)
697 pci_claim_bridge_resource(bus->self, i); 698 pci_claim_bridge_resource(bus->self, i);
698 } else { 699 } else {
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 769f7e35f1a2..59ac36fe7c42 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -442,7 +442,8 @@ static const struct pci_vpd_ops pci_vpd_pci22_ops = {
442static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count, 442static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
443 void *arg) 443 void *arg)
444{ 444{
445 struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn)); 445 struct pci_dev *tdev = pci_get_slot(dev->bus,
446 PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
446 ssize_t ret; 447 ssize_t ret;
447 448
448 if (!tdev) 449 if (!tdev)
@@ -456,7 +457,8 @@ static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
456static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count, 457static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
457 const void *arg) 458 const void *arg)
458{ 459{
459 struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn)); 460 struct pci_dev *tdev = pci_get_slot(dev->bus,
461 PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
460 ssize_t ret; 462 ssize_t ret;
461 463
462 if (!tdev) 464 if (!tdev)
@@ -473,22 +475,6 @@ static const struct pci_vpd_ops pci_vpd_f0_ops = {
473 .release = pci_vpd_pci22_release, 475 .release = pci_vpd_pci22_release,
474}; 476};
475 477
476static int pci_vpd_f0_dev_check(struct pci_dev *dev)
477{
478 struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
479 int ret = 0;
480
481 if (!tdev)
482 return -ENODEV;
483 if (!tdev->vpd || !tdev->multifunction ||
484 dev->class != tdev->class || dev->vendor != tdev->vendor ||
485 dev->device != tdev->device)
486 ret = -ENODEV;
487
488 pci_dev_put(tdev);
489 return ret;
490}
491
492int pci_vpd_pci22_init(struct pci_dev *dev) 478int pci_vpd_pci22_init(struct pci_dev *dev)
493{ 479{
494 struct pci_vpd_pci22 *vpd; 480 struct pci_vpd_pci22 *vpd;
@@ -497,12 +483,7 @@ int pci_vpd_pci22_init(struct pci_dev *dev)
497 cap = pci_find_capability(dev, PCI_CAP_ID_VPD); 483 cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
498 if (!cap) 484 if (!cap)
499 return -ENODEV; 485 return -ENODEV;
500 if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0) {
501 int ret = pci_vpd_f0_dev_check(dev);
502 486
503 if (ret)
504 return ret;
505 }
506 vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC); 487 vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
507 if (!vpd) 488 if (!vpd)
508 return -ENOMEM; 489 return -ENOMEM;
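
The pci_get_slot() fix above is macro arithmetic: pci_get_slot() compares against a full devfn, while PCI_SLOT() returns only the slot number with the function bits shifted away, so the old argument only happened to be a valid devfn for slot 0, function 0. A worked example using the definitions from include/uapi/linux/pci.h:

    #define PCI_DEVFN(slot, func)   ((((slot) & 0x1f) << 3) | ((func) & 0x07))
    #define PCI_SLOT(devfn)         (((devfn) >> 3) & 0x1f)

    /* dev is function 4 in slot 3, so dev->devfn == 0x1c:
     *
     *   PCI_SLOT(0x1c)               == 0x03  (a slot number, not a devfn)
     *   PCI_DEVFN(PCI_SLOT(0x1c), 0) == 0x18  (devfn of the function-0 sibling)
     *
     * Passing 0x03 would look up device 00.3 instead of 03.0.
     */
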
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 6fbd3f2b5992..d3346d23963b 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -256,6 +256,8 @@ bool pci_bus_clip_resource(struct pci_dev *dev, int idx)
256 256
257 res->start = start; 257 res->start = start;
258 res->end = end; 258 res->end = end;
259 res->flags &= ~IORESOURCE_UNSET;
260 orig_res.flags &= ~IORESOURCE_UNSET;
259 dev_printk(KERN_DEBUG, &dev->dev, "%pR clipped to %pR\n", 261 dev_printk(KERN_DEBUG, &dev->dev, "%pR clipped to %pR\n",
260 &orig_res, res); 262 &orig_res, res);
261 263
diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c
index 367e28fa7564..c4f64bfee551 100644
--- a/drivers/pci/host/pci-rcar-gen2.c
+++ b/drivers/pci/host/pci-rcar-gen2.c
@@ -362,6 +362,7 @@ static int rcar_pci_probe(struct platform_device *pdev)
362static struct of_device_id rcar_pci_of_match[] = { 362static struct of_device_id rcar_pci_of_match[] = {
363 { .compatible = "renesas,pci-r8a7790", }, 363 { .compatible = "renesas,pci-r8a7790", },
364 { .compatible = "renesas,pci-r8a7791", }, 364 { .compatible = "renesas,pci-r8a7791", },
365 { .compatible = "renesas,pci-r8a7794", },
365 { }, 366 { },
366}; 367};
367 368
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index dd652f2ae03d..108a3118ace7 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -299,9 +299,10 @@ static long local_pci_probe(void *_ddi)
299 * Unbound PCI devices are always put in D0, regardless of 299 * Unbound PCI devices are always put in D0, regardless of
300 * runtime PM status. During probe, the device is set to 300 * runtime PM status. During probe, the device is set to
301 * active and the usage count is incremented. If the driver 301 * active and the usage count is incremented. If the driver
302 * supports runtime PM, it should call pm_runtime_put_noidle() 302 * supports runtime PM, it should call pm_runtime_put_noidle(),
303 * in its probe routine and pm_runtime_get_noresume() in its 303 * or any other runtime PM helper function decrementing the usage
304 * remove routine. 304 * count, in its probe routine and pm_runtime_get_noresume() in
305 * its remove routine.
305 */ 306 */
306 pm_runtime_get_sync(dev); 307 pm_runtime_get_sync(dev);
307 pci_dev->driver = pci_drv; 308 pci_dev->driver = pci_drv;
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 0b2be174d981..8361d27e5eca 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -676,15 +676,20 @@ static struct irq_domain *pci_host_bridge_msi_domain(struct pci_bus *bus)
676static void pci_set_bus_msi_domain(struct pci_bus *bus) 676static void pci_set_bus_msi_domain(struct pci_bus *bus)
677{ 677{
678 struct irq_domain *d; 678 struct irq_domain *d;
679 struct pci_bus *b;
679 680
680 /* 681 /*
681 * Either bus is the root, and we must obtain it from the 682 * The bus can be a root bus, a subordinate bus, or a virtual bus
682 * firmware, or we inherit it from the bridge device. 683 * created by an SR-IOV device. Walk up to the first bridge device
684 * found or derive the domain from the host bridge.
683 */ 685 */
684 if (pci_is_root_bus(bus)) 686 for (b = bus, d = NULL; !d && !pci_is_root_bus(b); b = b->parent) {
685 d = pci_host_bridge_msi_domain(bus); 687 if (b->self)
686 else 688 d = dev_get_msi_domain(&b->self->dev);
687 d = dev_get_msi_domain(&bus->self->dev); 689 }
690
691 if (!d)
692 d = pci_host_bridge_msi_domain(b);
688 693
689 dev_set_msi_domain(&bus->dev, d); 694 dev_set_msi_domain(&bus->dev, d);
690} 695}
@@ -855,9 +860,6 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
855 child->bridge_ctl = bctl; 860 child->bridge_ctl = bctl;
856 } 861 }
857 862
858 /* Read and initialize bridge resources */
859 pci_read_bridge_bases(child);
860
861 cmax = pci_scan_child_bus(child); 863 cmax = pci_scan_child_bus(child);
862 if (cmax > subordinate) 864 if (cmax > subordinate)
863 dev_warn(&dev->dev, "bridge has subordinate %02x but max busn %02x\n", 865 dev_warn(&dev->dev, "bridge has subordinate %02x but max busn %02x\n",
@@ -918,9 +920,6 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
918 920
919 if (!is_cardbus) { 921 if (!is_cardbus) {
920 child->bridge_ctl = bctl; 922 child->bridge_ctl = bctl;
921
922 /* Read and initialize bridge resources */
923 pci_read_bridge_bases(child);
924 max = pci_scan_child_bus(child); 923 max = pci_scan_child_bus(child);
925 } else { 924 } else {
926 /* 925 /*
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 6a30252cd79f..b03373fd05ca 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1907,11 +1907,27 @@ static void quirk_netmos(struct pci_dev *dev)
1907DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID, 1907DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID,
1908 PCI_CLASS_COMMUNICATION_SERIAL, 8, quirk_netmos); 1908 PCI_CLASS_COMMUNICATION_SERIAL, 8, quirk_netmos);
1909 1909
1910/*
1911 * Quirk non-zero PCI functions to route VPD access through function 0 for
1912 * devices that share VPD resources between functions. The functions are
1913 * expected to be identical devices.
1914 */
1910static void quirk_f0_vpd_link(struct pci_dev *dev) 1915static void quirk_f0_vpd_link(struct pci_dev *dev)
1911{ 1916{
1912 if (!dev->multifunction || !PCI_FUNC(dev->devfn)) 1917 struct pci_dev *f0;
1918
1919 if (!PCI_FUNC(dev->devfn))
1913 return; 1920 return;
1914 dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0; 1921
1922 f0 = pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
1923 if (!f0)
1924 return;
1925
1926 if (f0->vpd && dev->class == f0->class &&
1927 dev->vendor == f0->vendor && dev->device == f0->device)
1928 dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0;
1929
1930 pci_dev_put(f0);
1915} 1931}
1916DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, 1932DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
1917 PCI_CLASS_NETWORK_ETHERNET, 8, quirk_f0_vpd_link); 1933 PCI_CLASS_NETWORK_ETHERNET, 8, quirk_f0_vpd_link);
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
index 738adfa5332b..52ea605f8130 100644
--- a/drivers/regulator/anatop-regulator.c
+++ b/drivers/regulator/anatop-regulator.c
@@ -318,6 +318,7 @@ static const struct of_device_id of_anatop_regulator_match_tbl[] = {
318 { .compatible = "fsl,anatop-regulator", }, 318 { .compatible = "fsl,anatop-regulator", },
319 { /* end */ } 319 { /* end */ }
320}; 320};
321MODULE_DEVICE_TABLE(of, of_anatop_regulator_match_tbl);
321 322
322static struct platform_driver anatop_regulator_driver = { 323static struct platform_driver anatop_regulator_driver = {
323 .driver = { 324 .driver = {
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 7a85ac9e32c5..7849187d91ae 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1394,15 +1394,15 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
1394 return 0; 1394 return 0;
1395 1395
1396 r = regulator_dev_lookup(dev, rdev->supply_name, &ret); 1396 r = regulator_dev_lookup(dev, rdev->supply_name, &ret);
1397 if (ret == -ENODEV) {
1398 /*
1399 * No supply was specified for this regulator and
1400 * there will never be one.
1401 */
1402 return 0;
1403 }
1404
1405 if (!r) { 1397 if (!r) {
1398 if (ret == -ENODEV) {
1399 /*
1400 * No supply was specified for this regulator and
1401 * there will never be one.
1402 */
1403 return 0;
1404 }
1405
1406 if (have_full_constraints()) { 1406 if (have_full_constraints()) {
1407 r = dummy_regulator_rdev; 1407 r = dummy_regulator_rdev;
1408 } else { 1408 } else {
@@ -1422,11 +1422,10 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
1422 return ret; 1422 return ret;
1423 1423
1424 /* Cascade always-on state to supply */ 1424 /* Cascade always-on state to supply */
1425 if (_regulator_is_enabled(rdev)) { 1425 if (_regulator_is_enabled(rdev) && rdev->supply) {
1426 ret = regulator_enable(rdev->supply); 1426 ret = regulator_enable(rdev->supply);
1427 if (ret < 0) { 1427 if (ret < 0) {
1428 if (rdev->supply) 1428 _regulator_put(rdev->supply);
1429 _regulator_put(rdev->supply);
1430 return ret; 1429 return ret;
1431 } 1430 }
1432 } 1431 }
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
index 464018de7e97..7bba8b747f30 100644
--- a/drivers/regulator/gpio-regulator.c
+++ b/drivers/regulator/gpio-regulator.c
@@ -394,6 +394,7 @@ static const struct of_device_id regulator_gpio_of_match[] = {
394 { .compatible = "regulator-gpio", }, 394 { .compatible = "regulator-gpio", },
395 {}, 395 {},
396}; 396};
397MODULE_DEVICE_TABLE(of, regulator_gpio_of_match);
397#endif 398#endif
398 399
399static struct platform_driver gpio_regulator_driver = { 400static struct platform_driver gpio_regulator_driver = {
diff --git a/drivers/regulator/pbias-regulator.c b/drivers/regulator/pbias-regulator.c
index 4fa7bcaf454e..f9d74d63be7c 100644
--- a/drivers/regulator/pbias-regulator.c
+++ b/drivers/regulator/pbias-regulator.c
@@ -45,6 +45,10 @@ struct pbias_regulator_data {
45 int voltage; 45 int voltage;
46}; 46};
47 47
48struct pbias_of_data {
49 unsigned int offset;
50};
51
48static const unsigned int pbias_volt_table[] = { 52static const unsigned int pbias_volt_table[] = {
49 1800000, 53 1800000,
50 3000000 54 3000000
@@ -102,8 +106,35 @@ static struct of_regulator_match pbias_matches[] = {
102}; 106};
103#define PBIAS_NUM_REGS ARRAY_SIZE(pbias_matches) 107#define PBIAS_NUM_REGS ARRAY_SIZE(pbias_matches)
104 108
109/* Offset from SCM general area (and syscon) base */
110
111static const struct pbias_of_data pbias_of_data_omap2 = {
112 .offset = 0x230,
113};
114
115static const struct pbias_of_data pbias_of_data_omap3 = {
116 .offset = 0x2b0,
117};
118
119static const struct pbias_of_data pbias_of_data_omap4 = {
120 .offset = 0x60,
121};
122
123static const struct pbias_of_data pbias_of_data_omap5 = {
124 .offset = 0x60,
125};
126
127static const struct pbias_of_data pbias_of_data_dra7 = {
128 .offset = 0xe00,
129};
130
105static const struct of_device_id pbias_of_match[] = { 131static const struct of_device_id pbias_of_match[] = {
106 { .compatible = "ti,pbias-omap", }, 132 { .compatible = "ti,pbias-omap", },
133 { .compatible = "ti,pbias-omap2", .data = &pbias_of_data_omap2, },
134 { .compatible = "ti,pbias-omap3", .data = &pbias_of_data_omap3, },
135 { .compatible = "ti,pbias-omap4", .data = &pbias_of_data_omap4, },
136 { .compatible = "ti,pbias-omap5", .data = &pbias_of_data_omap5, },
137 { .compatible = "ti,pbias-dra7", .data = &pbias_of_data_dra7, },
107 {}, 138 {},
108}; 139};
109MODULE_DEVICE_TABLE(of, pbias_of_match); 140MODULE_DEVICE_TABLE(of, pbias_of_match);
@@ -118,6 +149,9 @@ static int pbias_regulator_probe(struct platform_device *pdev)
118 const struct pbias_reg_info *info; 149 const struct pbias_reg_info *info;
119 int ret = 0; 150 int ret = 0;
120 int count, idx, data_idx = 0; 151 int count, idx, data_idx = 0;
152 const struct of_device_id *match;
153 const struct pbias_of_data *data;
154 unsigned int offset;
121 155
122 count = of_regulator_match(&pdev->dev, np, pbias_matches, 156 count = of_regulator_match(&pdev->dev, np, pbias_matches,
123 PBIAS_NUM_REGS); 157 PBIAS_NUM_REGS);
@@ -133,6 +167,20 @@ static int pbias_regulator_probe(struct platform_device *pdev)
133 if (IS_ERR(syscon)) 167 if (IS_ERR(syscon))
134 return PTR_ERR(syscon); 168 return PTR_ERR(syscon);
135 169
170 match = of_match_device(of_match_ptr(pbias_of_match), &pdev->dev);
171 if (match && match->data) {
172 data = match->data;
173 offset = data->offset;
174 } else {
175 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
176 if (!res)
177 return -EINVAL;
178
179 offset = res->start;
180 dev_WARN(&pdev->dev,
181 "using legacy dt data for pbias offset\n");
182 }
183
136 cfg.regmap = syscon; 184 cfg.regmap = syscon;
137 cfg.dev = &pdev->dev; 185 cfg.dev = &pdev->dev;
138 186
@@ -145,10 +193,6 @@ static int pbias_regulator_probe(struct platform_device *pdev)
145 if (!info) 193 if (!info)
146 return -ENODEV; 194 return -ENODEV;
147 195
148 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
149 if (!res)
150 return -EINVAL;
151
152 drvdata[data_idx].syscon = syscon; 196 drvdata[data_idx].syscon = syscon;
153 drvdata[data_idx].info = info; 197 drvdata[data_idx].info = info;
154 drvdata[data_idx].desc.name = info->name; 198 drvdata[data_idx].desc.name = info->name;
@@ -158,9 +202,9 @@ static int pbias_regulator_probe(struct platform_device *pdev)
158 drvdata[data_idx].desc.volt_table = pbias_volt_table; 202 drvdata[data_idx].desc.volt_table = pbias_volt_table;
159 drvdata[data_idx].desc.n_voltages = 2; 203 drvdata[data_idx].desc.n_voltages = 2;
160 drvdata[data_idx].desc.enable_time = info->enable_time; 204 drvdata[data_idx].desc.enable_time = info->enable_time;
161 drvdata[data_idx].desc.vsel_reg = res->start; 205 drvdata[data_idx].desc.vsel_reg = offset;
162 drvdata[data_idx].desc.vsel_mask = info->vmode; 206 drvdata[data_idx].desc.vsel_mask = info->vmode;
163 drvdata[data_idx].desc.enable_reg = res->start; 207 drvdata[data_idx].desc.enable_reg = offset;
164 drvdata[data_idx].desc.enable_mask = info->enable_mask; 208 drvdata[data_idx].desc.enable_mask = info->enable_mask;
165 drvdata[data_idx].desc.enable_val = info->enable; 209 drvdata[data_idx].desc.enable_val = info->enable;
166 drvdata[data_idx].desc.disable_val = info->disable_val; 210 drvdata[data_idx].desc.disable_val = info->disable_val;
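
The pbias rework above is the standard per-compatible data pattern: each of_device_id entry carries a pointer to SoC-specific constants, and probe retrieves it with of_match_device(), falling back to the legacy MEM resource when the DT is old. A minimal, self-contained sketch of the mechanism (the my_* names are hypothetical, not from this driver):

    struct my_of_data {
        unsigned int offset;            /* SoC-specific register offset */
    };

    static const struct my_of_data my_data_v1 = { .offset = 0x230 };

    static const struct of_device_id my_of_match[] = {
        { .compatible = "vendor,ip-v1", .data = &my_data_v1 },
        { /* sentinel */ },
    };

    static int my_probe(struct platform_device *pdev)
    {
        const struct of_device_id *match;
        const struct my_of_data *data;

        match = of_match_device(my_of_match, &pdev->dev);
        if (!match || !match->data)
            return -EINVAL;             /* or fall back to legacy data */

        data = match->data;
        /* ... use data->offset ... */
        return 0;
    }
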
diff --git a/drivers/regulator/tps65218-regulator.c b/drivers/regulator/tps65218-regulator.c
index 7f97223f95c5..a02c1b961039 100644
--- a/drivers/regulator/tps65218-regulator.c
+++ b/drivers/regulator/tps65218-regulator.c
@@ -73,7 +73,7 @@ static const struct regulator_linear_range dcdc4_ranges[] = {
73}; 73};
74 74
75static struct tps_info tps65218_pmic_regs[] = { 75static struct tps_info tps65218_pmic_regs[] = {
76 TPS65218_INFO(DCDC1, "DCDC1", 850000, 167500), 76 TPS65218_INFO(DCDC1, "DCDC1", 850000, 1675000),
77 TPS65218_INFO(DCDC2, "DCDC2", 850000, 1675000), 77 TPS65218_INFO(DCDC2, "DCDC2", 850000, 1675000),
78 TPS65218_INFO(DCDC3, "DCDC3", 900000, 3400000), 78 TPS65218_INFO(DCDC3, "DCDC3", 900000, 3400000),
79 TPS65218_INFO(DCDC4, "DCDC4", 1175000, 3400000), 79 TPS65218_INFO(DCDC4, "DCDC4", 1175000, 3400000),
diff --git a/drivers/regulator/vexpress.c b/drivers/regulator/vexpress.c
index bed9d3ee4198..c810cbbd463f 100644
--- a/drivers/regulator/vexpress.c
+++ b/drivers/regulator/vexpress.c
@@ -103,6 +103,7 @@ static const struct of_device_id vexpress_regulator_of_match[] = {
103 { .compatible = "arm,vexpress-volt", }, 103 { .compatible = "arm,vexpress-volt", },
104 { } 104 { }
105}; 105};
106MODULE_DEVICE_TABLE(of, vexpress_regulator_of_match);
106 107
107static struct platform_driver vexpress_regulator_driver = { 108static struct platform_driver vexpress_regulator_driver = {
108 .probe = vexpress_regulator_probe, 109 .probe = vexpress_regulator_probe,
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index cbfc5990052b..126a48c6431e 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1957,7 +1957,7 @@ static int scsi_mq_prep_fn(struct request *req)
1957static void scsi_mq_done(struct scsi_cmnd *cmd) 1957static void scsi_mq_done(struct scsi_cmnd *cmd)
1958{ 1958{
1959 trace_scsi_dispatch_cmd_done(cmd); 1959 trace_scsi_dispatch_cmd_done(cmd);
1960 blk_mq_complete_request(cmd->request); 1960 blk_mq_complete_request(cmd->request, cmd->request->errors);
1961} 1961}
1962 1962
1963static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx, 1963static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index bf9ed380bb1c..63318e2afba1 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -1720,6 +1720,7 @@ static int atmel_spi_runtime_resume(struct device *dev)
1720 return clk_prepare_enable(as->clk); 1720 return clk_prepare_enable(as->clk);
1721} 1721}
1722 1722
1723#ifdef CONFIG_PM_SLEEP
1723static int atmel_spi_suspend(struct device *dev) 1724static int atmel_spi_suspend(struct device *dev)
1724{ 1725{
1725 struct spi_master *master = dev_get_drvdata(dev); 1726 struct spi_master *master = dev_get_drvdata(dev);
@@ -1756,6 +1757,7 @@ static int atmel_spi_resume(struct device *dev)
1756 1757
1757 return ret; 1758 return ret;
1758} 1759}
1760#endif
1759 1761
1760static const struct dev_pm_ops atmel_spi_pm_ops = { 1762static const struct dev_pm_ops atmel_spi_pm_ops = {
1761 SET_SYSTEM_SLEEP_PM_OPS(atmel_spi_suspend, atmel_spi_resume) 1763 SET_SYSTEM_SLEEP_PM_OPS(atmel_spi_suspend, atmel_spi_resume)
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index e7874a6171ec..3e8eeb23d4e9 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -386,14 +386,14 @@ static bool bcm2835_spi_can_dma(struct spi_master *master,
386 /* otherwise we only allow transfers within the same page 386 /* otherwise we only allow transfers within the same page
387 * to avoid wasting time on dma_mapping when it is not practical 387 * to avoid wasting time on dma_mapping when it is not practical
388 */ 388 */
389 if (((size_t)tfr->tx_buf & PAGE_MASK) + tfr->len > PAGE_SIZE) { 389 if (((size_t)tfr->tx_buf & (PAGE_SIZE - 1)) + tfr->len > PAGE_SIZE) {
390 dev_warn_once(&spi->dev, 390 dev_warn_once(&spi->dev,
391 "Unaligned spi tx-transfer bridging page\n"); 391 "Unaligned spi tx-transfer bridging page\n");
392 return false; 392 return false;
393 } 393 }
394 if (((size_t)tfr->rx_buf & PAGE_MASK) + tfr->len > PAGE_SIZE) { 394 if (((size_t)tfr->rx_buf & (PAGE_SIZE - 1)) + tfr->len > PAGE_SIZE) {
395 dev_warn_once(&spi->dev, 395 dev_warn_once(&spi->dev,
396 "Unaligned spi tx-transfer bridging page\n"); 396 "Unaligned spi rx-transfer bridging page\n");
397 return false; 397 return false;
398 } 398 }
399 399
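
The bcm2835 fix above is pure mask arithmetic: PAGE_MASK keeps the page-aligned high bits, so the old test added the page *base* to the transfer length instead of the offset within the page, rejecting nearly every buffer. For 4 KiB pages:

    /* PAGE_MASK == ~(PAGE_SIZE - 1), i.e. ~0xfff for 4 KiB pages:
     *
     *   addr & PAGE_MASK        -> page base,   0x12345678 -> 0x12345000
     *   addr & (PAGE_SIZE - 1)  -> page offset, 0x12345678 -> 0x00000678
     *
     * The can_dma test needs "offset + len > PAGE_SIZE" to detect a
     * transfer crossing a page boundary; with the page base, the sum
     * was almost always larger than PAGE_SIZE.
     */
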
diff --git a/drivers/spi/spi-meson-spifc.c b/drivers/spi/spi-meson-spifc.c
index 5468fc70dbf8..2465259f6241 100644
--- a/drivers/spi/spi-meson-spifc.c
+++ b/drivers/spi/spi-meson-spifc.c
@@ -444,6 +444,7 @@ static const struct of_device_id meson_spifc_dt_match[] = {
444 { .compatible = "amlogic,meson6-spifc", }, 444 { .compatible = "amlogic,meson6-spifc", },
445 { }, 445 { },
446}; 446};
447MODULE_DEVICE_TABLE(of, meson_spifc_dt_match);
447 448
448static struct platform_driver meson_spifc_driver = { 449static struct platform_driver meson_spifc_driver = {
449 .probe = meson_spifc_probe, 450 .probe = meson_spifc_probe,
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 5f6315c47920..ecb6c58238c4 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -85,7 +85,7 @@ struct mtk_spi {
85 void __iomem *base; 85 void __iomem *base;
86 u32 state; 86 u32 state;
87 u32 pad_sel; 87 u32 pad_sel;
88 struct clk *spi_clk, *parent_clk; 88 struct clk *parent_clk, *sel_clk, *spi_clk;
89 struct spi_transfer *cur_transfer; 89 struct spi_transfer *cur_transfer;
90 u32 xfer_len; 90 u32 xfer_len;
91 struct scatterlist *tx_sgl, *rx_sgl; 91 struct scatterlist *tx_sgl, *rx_sgl;
@@ -173,22 +173,6 @@ static void mtk_spi_config(struct mtk_spi *mdata,
173 writel(mdata->pad_sel, mdata->base + SPI_PAD_SEL_REG); 173 writel(mdata->pad_sel, mdata->base + SPI_PAD_SEL_REG);
174} 174}
175 175
176static int mtk_spi_prepare_hardware(struct spi_master *master)
177{
178 struct spi_transfer *trans;
179 struct mtk_spi *mdata = spi_master_get_devdata(master);
180 struct spi_message *msg = master->cur_msg;
181
182 trans = list_first_entry(&msg->transfers, struct spi_transfer,
183 transfer_list);
184 if (!trans->cs_change) {
185 mdata->state = MTK_SPI_IDLE;
186 mtk_spi_reset(mdata);
187 }
188
189 return 0;
190}
191
192static int mtk_spi_prepare_message(struct spi_master *master, 176static int mtk_spi_prepare_message(struct spi_master *master,
193 struct spi_message *msg) 177 struct spi_message *msg)
194{ 178{
@@ -228,11 +212,15 @@ static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
228 struct mtk_spi *mdata = spi_master_get_devdata(spi->master); 212 struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
229 213
230 reg_val = readl(mdata->base + SPI_CMD_REG); 214 reg_val = readl(mdata->base + SPI_CMD_REG);
231 if (!enable) 215 if (!enable) {
232 reg_val |= SPI_CMD_PAUSE_EN; 216 reg_val |= SPI_CMD_PAUSE_EN;
233 else 217 writel(reg_val, mdata->base + SPI_CMD_REG);
218 } else {
234 reg_val &= ~SPI_CMD_PAUSE_EN; 219 reg_val &= ~SPI_CMD_PAUSE_EN;
235 writel(reg_val, mdata->base + SPI_CMD_REG); 220 writel(reg_val, mdata->base + SPI_CMD_REG);
221 mdata->state = MTK_SPI_IDLE;
222 mtk_spi_reset(mdata);
223 }
236} 224}
237 225
238static void mtk_spi_prepare_transfer(struct spi_master *master, 226static void mtk_spi_prepare_transfer(struct spi_master *master,
@@ -509,7 +497,6 @@ static int mtk_spi_probe(struct platform_device *pdev)
509 master->mode_bits = SPI_CPOL | SPI_CPHA; 497 master->mode_bits = SPI_CPOL | SPI_CPHA;
510 498
511 master->set_cs = mtk_spi_set_cs; 499 master->set_cs = mtk_spi_set_cs;
512 master->prepare_transfer_hardware = mtk_spi_prepare_hardware;
513 master->prepare_message = mtk_spi_prepare_message; 500 master->prepare_message = mtk_spi_prepare_message;
514 master->transfer_one = mtk_spi_transfer_one; 501 master->transfer_one = mtk_spi_transfer_one;
515 master->can_dma = mtk_spi_can_dma; 502 master->can_dma = mtk_spi_can_dma;
@@ -576,13 +563,6 @@ static int mtk_spi_probe(struct platform_device *pdev)
576 goto err_put_master; 563 goto err_put_master;
577 } 564 }
578 565
579 mdata->spi_clk = devm_clk_get(&pdev->dev, "spi-clk");
580 if (IS_ERR(mdata->spi_clk)) {
581 ret = PTR_ERR(mdata->spi_clk);
582 dev_err(&pdev->dev, "failed to get spi-clk: %d\n", ret);
583 goto err_put_master;
584 }
585
586 mdata->parent_clk = devm_clk_get(&pdev->dev, "parent-clk"); 566 mdata->parent_clk = devm_clk_get(&pdev->dev, "parent-clk");
587 if (IS_ERR(mdata->parent_clk)) { 567 if (IS_ERR(mdata->parent_clk)) {
588 ret = PTR_ERR(mdata->parent_clk); 568 ret = PTR_ERR(mdata->parent_clk);
@@ -590,13 +570,27 @@ static int mtk_spi_probe(struct platform_device *pdev)
590 goto err_put_master; 570 goto err_put_master;
591 } 571 }
592 572
573 mdata->sel_clk = devm_clk_get(&pdev->dev, "sel-clk");
574 if (IS_ERR(mdata->sel_clk)) {
575 ret = PTR_ERR(mdata->sel_clk);
576 dev_err(&pdev->dev, "failed to get sel-clk: %d\n", ret);
577 goto err_put_master;
578 }
579
580 mdata->spi_clk = devm_clk_get(&pdev->dev, "spi-clk");
581 if (IS_ERR(mdata->spi_clk)) {
582 ret = PTR_ERR(mdata->spi_clk);
583 dev_err(&pdev->dev, "failed to get spi-clk: %d\n", ret);
584 goto err_put_master;
585 }
586
593 ret = clk_prepare_enable(mdata->spi_clk); 587 ret = clk_prepare_enable(mdata->spi_clk);
594 if (ret < 0) { 588 if (ret < 0) {
595 dev_err(&pdev->dev, "failed to enable spi_clk (%d)\n", ret); 589 dev_err(&pdev->dev, "failed to enable spi_clk (%d)\n", ret);
596 goto err_put_master; 590 goto err_put_master;
597 } 591 }
598 592
599 ret = clk_set_parent(mdata->spi_clk, mdata->parent_clk); 593 ret = clk_set_parent(mdata->sel_clk, mdata->parent_clk);
600 if (ret < 0) { 594 if (ret < 0) {
601 dev_err(&pdev->dev, "failed to clk_set_parent (%d)\n", ret); 595 dev_err(&pdev->dev, "failed to clk_set_parent (%d)\n", ret);
602 goto err_disable_clk; 596 goto err_disable_clk;
@@ -630,7 +624,6 @@ static int mtk_spi_remove(struct platform_device *pdev)
630 pm_runtime_disable(&pdev->dev); 624 pm_runtime_disable(&pdev->dev);
631 625
632 mtk_spi_reset(mdata); 626 mtk_spi_reset(mdata);
633 clk_disable_unprepare(mdata->spi_clk);
634 spi_master_put(master); 627 spi_master_put(master);
635 628
636 return 0; 629 return 0;
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index fdd791977041..a8ef38ebb9c9 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -654,6 +654,10 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
654 if (!(sccr1_reg & SSCR1_TIE)) 654 if (!(sccr1_reg & SSCR1_TIE))
655 mask &= ~SSSR_TFS; 655 mask &= ~SSSR_TFS;
656 656
657 /* Ignore RX timeout interrupt if it is disabled */
658 if (!(sccr1_reg & SSCR1_TINTE))
659 mask &= ~SSSR_TINT;
660
657 if (!(status & mask)) 661 if (!(status & mask))
658 return IRQ_NONE; 662 return IRQ_NONE;
659 663
diff --git a/drivers/spi/spi-xtensa-xtfpga.c b/drivers/spi/spi-xtensa-xtfpga.c
index 2e32ea2f194f..be6155cba9de 100644
--- a/drivers/spi/spi-xtensa-xtfpga.c
+++ b/drivers/spi/spi-xtensa-xtfpga.c
@@ -34,13 +34,13 @@ struct xtfpga_spi {
34static inline void xtfpga_spi_write32(const struct xtfpga_spi *spi, 34static inline void xtfpga_spi_write32(const struct xtfpga_spi *spi,
35 unsigned addr, u32 val) 35 unsigned addr, u32 val)
36{ 36{
37 iowrite32(val, spi->regs + addr); 37 __raw_writel(val, spi->regs + addr);
38} 38}
39 39
40static inline unsigned int xtfpga_spi_read32(const struct xtfpga_spi *spi, 40static inline unsigned int xtfpga_spi_read32(const struct xtfpga_spi *spi,
41 unsigned addr) 41 unsigned addr)
42{ 42{
43 return ioread32(spi->regs + addr); 43 return __raw_readl(spi->regs + addr);
44} 44}
45 45
46static inline void xtfpga_spi_wait_busy(struct xtfpga_spi *xspi) 46static inline void xtfpga_spi_wait_busy(struct xtfpga_spi *xspi)
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 3abb3903f2ad..a5f53de813d3 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1610,8 +1610,7 @@ static struct class spi_master_class = {
1610 * 1610 *
1611 * The caller is responsible for assigning the bus number and initializing 1611 * The caller is responsible for assigning the bus number and initializing
1612 * the master's methods before calling spi_register_master(); and (after errors 1612 * the master's methods before calling spi_register_master(); and (after errors
1613 * adding the device) calling spi_master_put() and kfree() to prevent a memory 1613 * adding the device) calling spi_master_put() to prevent a memory leak.
1614 * leak.
1615 */ 1614 */
1616struct spi_master *spi_alloc_master(struct device *dev, unsigned size) 1615struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
1617{ 1616{
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index fba92a526531..ef008e52f953 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -651,7 +651,8 @@ static int spidev_release(struct inode *inode, struct file *filp)
651 kfree(spidev->rx_buffer); 651 kfree(spidev->rx_buffer);
652 spidev->rx_buffer = NULL; 652 spidev->rx_buffer = NULL;
653 653
654 spidev->speed_hz = spidev->spi->max_speed_hz; 654 if (spidev->spi)
655 spidev->speed_hz = spidev->spi->max_speed_hz;
655 656
656 /* ... after we unbound from the underlying device? */ 657 /* ... after we unbound from the underlying device? */
657 spin_lock_irq(&spidev->spi_lock); 658 spin_lock_irq(&spidev->spi_lock);
diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO
index 20288fc53946..8f3ac37bfe12 100644
--- a/drivers/staging/android/TODO
+++ b/drivers/staging/android/TODO
@@ -5,5 +5,25 @@ TODO:
5 - add proper arch dependencies as needed 5 - add proper arch dependencies as needed
6 - audit userspace interfaces to make sure they are sane 6 - audit userspace interfaces to make sure they are sane
7 7
8
9ion/
10 - Remove ION_IOC_SYNC: Flushing for devices should be purely a kernel internal
11 interface on top of dma-buf. flush_for_device needs to be added to dma-buf
12 first.
13 - Remove ION_IOC_CUSTOM: Atm used for cache flushing for cpu access in some
14 vendor trees. Should be replaced with an ioctl on the dma-buf to expose the
15 begin/end_cpu_access hooks to userspace.
16 - Clarify the tricks ion plays with explicitly managing coherency behind the
17 dma api's back (this is absolutely needed for high-perf gpu drivers): Add an
18 explicit coherency management mode to flush_for_device to be used by drivers
19 which want to manage caches themselves and which indicates whether cpu caches
20 need flushing.
21 - With those removed there's probably no use for ION_IOC_IMPORT anymore either
22 since ion would just be the central allocator for shared buffers.
23 - Add dt-binding to expose cma regions as ion heaps, with the rule that any
24 such cma regions must already be used by some device for dma. I.e. ion only
25 exposes existing cma regions and doesn't unnecessarily reserve memory when
26 booting a system which doesn't use ion.
27
8Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc: 28Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc:
9Arve Hjønnevåg <arve@android.com> and Riley Andrews <riandrews@android.com> 29Arve Hjønnevåg <arve@android.com> and Riley Andrews <riandrews@android.com>
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 217aa537c4eb..6e8d8392ca38 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -1179,13 +1179,13 @@ struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
1179 mutex_unlock(&client->lock); 1179 mutex_unlock(&client->lock);
1180 goto end; 1180 goto end;
1181 } 1181 }
1182 mutex_unlock(&client->lock);
1183 1182
1184 handle = ion_handle_create(client, buffer); 1183 handle = ion_handle_create(client, buffer);
1185 if (IS_ERR(handle)) 1184 if (IS_ERR(handle)) {
1185 mutex_unlock(&client->lock);
1186 goto end; 1186 goto end;
1187 }
1187 1188
1188 mutex_lock(&client->lock);
1189 ret = ion_handle_add(client, handle); 1189 ret = ion_handle_add(client, handle);
1190 mutex_unlock(&client->lock); 1190 mutex_unlock(&client->lock);
1191 if (ret) { 1191 if (ret) {
diff --git a/drivers/staging/fbtft/fb_uc1611.c b/drivers/staging/fbtft/fb_uc1611.c
index 32f3a9d921d6..5cafa50d1fac 100644
--- a/drivers/staging/fbtft/fb_uc1611.c
+++ b/drivers/staging/fbtft/fb_uc1611.c
@@ -76,7 +76,7 @@ static int init_display(struct fbtft_par *par)
76 76
77 /* Set CS active high */ 77 /* Set CS active high */
78 par->spi->mode |= SPI_CS_HIGH; 78 par->spi->mode |= SPI_CS_HIGH;
79 ret = par->spi->master->setup(par->spi); 79 ret = spi_setup(par->spi);
80 if (ret) { 80 if (ret) {
81 dev_err(par->info->device, "Could not set SPI_CS_HIGH\n"); 81 dev_err(par->info->device, "Could not set SPI_CS_HIGH\n");
82 return ret; 82 return ret;
diff --git a/drivers/staging/fbtft/fb_watterott.c b/drivers/staging/fbtft/fb_watterott.c
index 88fb2c0132d5..8eae6ef25846 100644
--- a/drivers/staging/fbtft/fb_watterott.c
+++ b/drivers/staging/fbtft/fb_watterott.c
@@ -169,7 +169,7 @@ static int init_display(struct fbtft_par *par)
169 /* enable SPI interface by having CS and MOSI low during reset */ 169 /* enable SPI interface by having CS and MOSI low during reset */
170 save_mode = par->spi->mode; 170 save_mode = par->spi->mode;
171 par->spi->mode |= SPI_CS_HIGH; 171 par->spi->mode |= SPI_CS_HIGH;
172 ret = par->spi->master->setup(par->spi); /* set CS inactive low */ 172 ret = spi_setup(par->spi); /* set CS inactive low */
173 if (ret) { 173 if (ret) {
174 dev_err(par->info->device, "Could not set SPI_CS_HIGH\n"); 174 dev_err(par->info->device, "Could not set SPI_CS_HIGH\n");
175 return ret; 175 return ret;
@@ -180,7 +180,7 @@ static int init_display(struct fbtft_par *par)
180 par->fbtftops.reset(par); 180 par->fbtftops.reset(par);
181 mdelay(1000); 181 mdelay(1000);
182 par->spi->mode = save_mode; 182 par->spi->mode = save_mode;
183 ret = par->spi->master->setup(par->spi); 183 ret = spi_setup(par->spi);
184 if (ret) { 184 if (ret) {
185 dev_err(par->info->device, "Could not restore SPI mode\n"); 185 dev_err(par->info->device, "Could not restore SPI mode\n");
186 return ret; 186 return ret;
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
index 23392eb6799e..7f5fa3d1cab0 100644
--- a/drivers/staging/fbtft/fbtft-core.c
+++ b/drivers/staging/fbtft/fbtft-core.c
@@ -1436,15 +1436,11 @@ int fbtft_probe_common(struct fbtft_display *display,
1436 1436
1437 /* 9-bit SPI setup */ 1437 /* 9-bit SPI setup */
1438 if (par->spi && display->buswidth == 9) { 1438 if (par->spi && display->buswidth == 9) {
1439 par->spi->bits_per_word = 9; 1439 if (par->spi->master->bits_per_word_mask & SPI_BPW_MASK(9)) {
1440 ret = par->spi->master->setup(par->spi); 1440 par->spi->bits_per_word = 9;
1441 if (ret) { 1441 } else {
1442 dev_warn(&par->spi->dev, 1442 dev_warn(&par->spi->dev,
1443 "9-bit SPI not available, emulating using 8-bit.\n"); 1443 "9-bit SPI not available, emulating using 8-bit.\n");
1444 par->spi->bits_per_word = 8;
1445 ret = par->spi->master->setup(par->spi);
1446 if (ret)
1447 goto out_release;
1448 /* allocate buffer with room for dc bits */ 1444 /* allocate buffer with room for dc bits */
1449 par->extra = devm_kzalloc(par->info->device, 1445 par->extra = devm_kzalloc(par->info->device,
1450 par->txbuf.len + (par->txbuf.len / 8) + 8, 1446 par->txbuf.len + (par->txbuf.len / 8) + 8,
diff --git a/drivers/staging/fbtft/flexfb.c b/drivers/staging/fbtft/flexfb.c
index c763efc5de7d..3f380a0086c3 100644
--- a/drivers/staging/fbtft/flexfb.c
+++ b/drivers/staging/fbtft/flexfb.c
@@ -463,15 +463,12 @@ static int flexfb_probe_common(struct spi_device *sdev,
463 } 463 }
464 par->fbtftops.write_register = fbtft_write_reg8_bus9; 464 par->fbtftops.write_register = fbtft_write_reg8_bus9;
465 par->fbtftops.write_vmem = fbtft_write_vmem16_bus9; 465 par->fbtftops.write_vmem = fbtft_write_vmem16_bus9;
466 sdev->bits_per_word = 9; 466 if (par->spi->master->bits_per_word_mask
467 ret = sdev->master->setup(sdev); 467 & SPI_BPW_MASK(9)) {
468 if (ret) { 468 par->spi->bits_per_word = 9;
469 } else {
469 dev_warn(dev, 470 dev_warn(dev,
470 "9-bit SPI not available, emulating using 8-bit.\n"); 471 "9-bit SPI not available, emulating using 8-bit.\n");
471 sdev->bits_per_word = 8;
472 ret = sdev->master->setup(sdev);
473 if (ret)
474 goto out_release;
475 /* allocate buffer with room for dc bits */ 472 /* allocate buffer with room for dc bits */
476 par->extra = devm_kzalloc(par->info->device, 473 par->extra = devm_kzalloc(par->info->device,
477 par->txbuf.len + (par->txbuf.len / 8) + 8, 474 par->txbuf.len + (par->txbuf.len / 8) + 8,
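
Both the fbtft-core and flexfb hunks switch from probing with setup() to consulting the controller's advertised capability bitmap: SPI_BPW_MASK(9) tests bits_per_word_mask for native 9-bit word support, and the 8-bit emulation path (with its dc-bit buffer) is taken only when the bit is absent. A hedged sketch of the check:

    #include <linux/spi/spi.h>

    /* Sketch: prefer native 9-bit SPI words; otherwise emulate with 8-bit. */
    static bool use_native_9bit(struct spi_device *spi)
    {
            if (spi->master->bits_per_word_mask & SPI_BPW_MASK(9)) {
                    spi->bits_per_word = 9;
                    return true;
            }
            spi->bits_per_word = 8; /* caller allocates room for dc bits */
            return false;
    }
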
diff --git a/drivers/staging/lustre/README.txt b/drivers/staging/lustre/README.txt
index cf0ca50ff83b..0676243eea9e 100644
--- a/drivers/staging/lustre/README.txt
+++ b/drivers/staging/lustre/README.txt
@@ -14,10 +14,8 @@ Unlike shared disk storage cluster filesystems (e.g. OCFS2, GFS, GPFS),
14Lustre has independent Metadata and Data servers that clients can access 14Lustre has independent Metadata and Data servers that clients can access
15in parallel to maximize performance. 15in parallel to maximize performance.
16 16
17In order to use Lustre client you will need to download lustre client 17In order to use Lustre client you will need to download the "lustre-client"
18tools from 18package that contains the userspace tools from http://lustre.org/download/
19https://downloads.hpdd.intel.com/public/lustre/latest-feature-release/
20the package name is lustre-client.
21 19
22You will need to install and configure your Lustre servers separately. 20You will need to install and configure your Lustre servers separately.
23 21
@@ -76,12 +74,10 @@ Mount Options
76 74
77More Information 75More Information
78================ 76================
79You can get more information at 77You can get more information at the Lustre website: http://wiki.lustre.org/
80OpenSFS website: http://lustre.opensfs.org/about/
81Intel HPDD wiki: https://wiki.hpdd.intel.com
82 78
83Out of tree Lustre client and server code is available at: 79Source for the userspace tools and out-of-tree client and server code
84http://git.whamcloud.com/fs/lustre-release.git 80is available at: http://git.hpdd.intel.com/fs/lustre-release.git
85 81
86Latest binary packages: 82Latest binary packages:
87http://lustre.opensfs.org/download-lustre/ 83http://lustre.org/download/
diff --git a/drivers/staging/most/Kconfig b/drivers/staging/most/Kconfig
index d50de03de7b9..0b9b9b539f70 100644
--- a/drivers/staging/most/Kconfig
+++ b/drivers/staging/most/Kconfig
@@ -1,5 +1,6 @@
1menuconfig MOST 1menuconfig MOST
2 tristate "MOST driver" 2 tristate "MOST driver"
3 depends on HAS_DMA
3 select MOSTCORE 4 select MOSTCORE
4 default n 5 default n
5 ---help--- 6 ---help---
diff --git a/drivers/staging/most/hdm-dim2/Kconfig b/drivers/staging/most/hdm-dim2/Kconfig
index 1d4ad1d67758..fc548769479b 100644
--- a/drivers/staging/most/hdm-dim2/Kconfig
+++ b/drivers/staging/most/hdm-dim2/Kconfig
@@ -5,6 +5,7 @@
5config HDM_DIM2 5config HDM_DIM2
6 tristate "DIM2 HDM" 6 tristate "DIM2 HDM"
7 depends on AIM_NETWORK 7 depends on AIM_NETWORK
8 depends on HAS_IOMEM
8 9
9 ---help--- 10 ---help---
10	  Say Y here if you want to connect via MediaLB to a network transceiver. 11	  Say Y here if you want to connect via MediaLB to a network transceiver.
diff --git a/drivers/staging/most/hdm-usb/Kconfig b/drivers/staging/most/hdm-usb/Kconfig
index a482c3fdf34b..ec1546312ee6 100644
--- a/drivers/staging/most/hdm-usb/Kconfig
+++ b/drivers/staging/most/hdm-usb/Kconfig
@@ -4,7 +4,7 @@
4 4
5config HDM_USB 5config HDM_USB
6 tristate "USB HDM" 6 tristate "USB HDM"
7 depends on USB 7 depends on USB && NET
8 select AIM_NETWORK 8 select AIM_NETWORK
9 ---help--- 9 ---help---
10	  Say Y here if you want to connect via USB to a network transceiver. 10	  Say Y here if you want to connect via USB to a network transceiver.
diff --git a/drivers/staging/most/mostcore/Kconfig b/drivers/staging/most/mostcore/Kconfig
index 38abf1b21b66..47172546d728 100644
--- a/drivers/staging/most/mostcore/Kconfig
+++ b/drivers/staging/most/mostcore/Kconfig
@@ -4,6 +4,7 @@
4 4
5config MOSTCORE 5config MOSTCORE
6 tristate "MOST Core" 6 tristate "MOST Core"
7 depends on HAS_DMA
7 8
8 ---help--- 9 ---help---
9 Say Y here if you want to enable MOST support. 10 Say Y here if you want to enable MOST support.
diff --git a/drivers/staging/unisys/visorbus/Makefile b/drivers/staging/unisys/visorbus/Makefile
index fa27ee5f336c..fc790e7592fc 100644
--- a/drivers/staging/unisys/visorbus/Makefile
+++ b/drivers/staging/unisys/visorbus/Makefile
@@ -10,4 +10,3 @@ visorbus-y += visorchipset.o
10visorbus-y += periodic_work.o 10visorbus-y += periodic_work.o
11 11
12ccflags-y += -Idrivers/staging/unisys/include 12ccflags-y += -Idrivers/staging/unisys/include
13ccflags-y += -Idrivers/staging/unisys/visorutil
diff --git a/drivers/staging/unisys/visorbus/visorbus_main.c b/drivers/staging/unisys/visorbus/visorbus_main.c
index 2309f5f2b238..a272b48bab28 100644
--- a/drivers/staging/unisys/visorbus/visorbus_main.c
+++ b/drivers/staging/unisys/visorbus/visorbus_main.c
@@ -37,6 +37,8 @@ static int visorbus_debugref;
37#define POLLJIFFIES_TESTWORK 100 37#define POLLJIFFIES_TESTWORK 100
38#define POLLJIFFIES_NORMALCHANNEL 10 38#define POLLJIFFIES_NORMALCHANNEL 10
39 39
40static int busreg_rc = -ENODEV; /* stores the result from bus registration */
41
40static int visorbus_uevent(struct device *xdev, struct kobj_uevent_env *env); 42static int visorbus_uevent(struct device *xdev, struct kobj_uevent_env *env);
41static int visorbus_match(struct device *xdev, struct device_driver *xdrv); 43static int visorbus_match(struct device *xdev, struct device_driver *xdrv);
42static void fix_vbus_dev_info(struct visor_device *visordev); 44static void fix_vbus_dev_info(struct visor_device *visordev);
@@ -863,6 +865,9 @@ int visorbus_register_visor_driver(struct visor_driver *drv)
863{ 865{
864 int rc = 0; 866 int rc = 0;
865 867
868 if (busreg_rc < 0)
869 return -ENODEV; /*can't register on a nonexistent bus*/
870
866 drv->driver.name = drv->name; 871 drv->driver.name = drv->name;
867 drv->driver.bus = &visorbus_type; 872 drv->driver.bus = &visorbus_type;
868 drv->driver.probe = visordriver_probe_device; 873 drv->driver.probe = visordriver_probe_device;
@@ -885,6 +890,8 @@ int visorbus_register_visor_driver(struct visor_driver *drv)
885 if (rc < 0) 890 if (rc < 0)
886 return rc; 891 return rc;
887 rc = register_driver_attributes(drv); 892 rc = register_driver_attributes(drv);
893 if (rc < 0)
894 driver_unregister(&drv->driver);
888 return rc; 895 return rc;
889} 896}
890EXPORT_SYMBOL_GPL(visorbus_register_visor_driver); 897EXPORT_SYMBOL_GPL(visorbus_register_visor_driver);
@@ -1260,10 +1267,8 @@ remove_bus_instance(struct visor_device *dev)
1260static int 1267static int
1261create_bus_type(void) 1268create_bus_type(void)
1262{ 1269{
1263 int rc = 0; 1270 busreg_rc = bus_register(&visorbus_type);
1264 1271 return busreg_rc;
1265 rc = bus_register(&visorbus_type);
1266 return rc;
1267} 1272}
1268 1273
1269/** Remove the one-and-only one instance of the visor bus type (visorbus_type). 1274/** Remove the one-and-only one instance of the visor bus type (visorbus_type).
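
The visorbus change stashes the bus_register() result in busreg_rc so visorbus_register_visor_driver() can refuse drivers when the bus never came up, and it now unwinds with driver_unregister() if attribute registration fails. The shape of the pattern, sketched with a hypothetical attribute helper (add_driver_attrs is a stand-in, not a visorbus function):

    #include <linux/device.h>

    static struct bus_type my_bus_type = { .name = "mybus" };
    static int busreg_rc = -ENODEV; /* set once at bus creation */

    static int my_bus_init(void)
    {
            busreg_rc = bus_register(&my_bus_type);
            return busreg_rc;
    }

    static int my_register_driver(struct device_driver *drv)
    {
            int rc;

            if (busreg_rc < 0)
                    return -ENODEV; /* can't register on a nonexistent bus */

            rc = driver_register(drv);
            if (rc < 0)
                    return rc;

            rc = add_driver_attrs(drv);     /* hypothetical attribute step */
            if (rc < 0)
                    driver_unregister(drv); /* undo registration on failure */
            return rc;
    }
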
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
index 8c9da7ea7845..9d3c1e282062 100644
--- a/drivers/staging/unisys/visornic/visornic_main.c
+++ b/drivers/staging/unisys/visornic/visornic_main.c
@@ -1189,16 +1189,16 @@ visornic_rx(struct uiscmdrsp *cmdrsp)
1189 spin_lock_irqsave(&devdata->priv_lock, flags); 1189 spin_lock_irqsave(&devdata->priv_lock, flags);
1190 atomic_dec(&devdata->num_rcvbuf_in_iovm); 1190 atomic_dec(&devdata->num_rcvbuf_in_iovm);
1191 1191
1192 /* update rcv stats - call it with priv_lock held */
1193 devdata->net_stats.rx_packets++;
1194 devdata->net_stats.rx_bytes = skb->len;
1195
1196 /* set length to how much was ACTUALLY received - 1192 /* set length to how much was ACTUALLY received -
1197 * NOTE: rcv_done_len includes actual length of data rcvd 1193 * NOTE: rcv_done_len includes actual length of data rcvd
1198 * including ethhdr 1194 * including ethhdr
1199 */ 1195 */
1200 skb->len = cmdrsp->net.rcv.rcv_done_len; 1196 skb->len = cmdrsp->net.rcv.rcv_done_len;
1201 1197
1198 /* update rcv stats - call it with priv_lock held */
1199 devdata->net_stats.rx_packets++;
1200 devdata->net_stats.rx_bytes += skb->len;
1201
1202 /* test enabled while holding lock */ 1202 /* test enabled while holding lock */
1203 if (!(devdata->enabled && devdata->enab_dis_acked)) { 1203 if (!(devdata->enabled && devdata->enab_dis_acked)) {
1204 /* don't process it unless we're in enable mode and until 1204 /* don't process it unless we're in enable mode and until
@@ -1924,13 +1924,16 @@ static int visornic_probe(struct visor_device *dev)
1924 "%s debugfs_create_dir %s failed\n", 1924 "%s debugfs_create_dir %s failed\n",
1925 __func__, netdev->name); 1925 __func__, netdev->name);
1926 err = -ENOMEM; 1926 err = -ENOMEM;
1927 goto cleanup_xmit_cmdrsp; 1927 goto cleanup_register_netdev;
1928 } 1928 }
1929 1929
1930 dev_info(&dev->device, "%s success netdev=%s\n", 1930 dev_info(&dev->device, "%s success netdev=%s\n",
1931 __func__, netdev->name); 1931 __func__, netdev->name);
1932 return 0; 1932 return 0;
1933 1933
1934cleanup_register_netdev:
1935 unregister_netdev(netdev);
1936
1934cleanup_napi_add: 1937cleanup_napi_add:
1935 del_timer_sync(&devdata->irq_poll_timer); 1938 del_timer_sync(&devdata->irq_poll_timer);
1936 netif_napi_del(&devdata->napi); 1939 netif_napi_del(&devdata->napi);
@@ -2128,8 +2131,9 @@ static int visornic_init(void)
2128 if (!dev_num_pool) 2131 if (!dev_num_pool)
2129 goto cleanup_workqueue; 2132 goto cleanup_workqueue;
2130 2133
2131 visorbus_register_visor_driver(&visornic_driver); 2134 err = visorbus_register_visor_driver(&visornic_driver);
2132 return 0; 2135 if (!err)
2136 return 0;
2133 2137
2134cleanup_workqueue: 2138cleanup_workqueue:
2135 if (visornic_timeout_reset_workqueue) { 2139 if (visornic_timeout_reset_workqueue) {
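
Three fixes land in visornic: rx_bytes now accumulates with += instead of being overwritten, and only after skb->len has been trimmed to rcv_done_len so the counter reflects what was actually received; visornic_probe() gains a cleanup_register_netdev label so a debugfs failure unregisters the netdev before the later teardown steps; and visornic_init() finally checks the return value of visorbus_register_visor_driver(). The error-ladder convention, sketched (the debugfs parent pointer is an assumed name):

    err = register_netdev(netdev);
    if (err)
            goto cleanup_napi_add;

    devdata->eth_debugfs_dir = debugfs_create_dir(netdev->name,
                                                  visornic_debugfs_dir);
    if (!devdata->eth_debugfs_dir) {
            err = -ENOMEM;
            goto cleanup_register_netdev;   /* undo the newest step first */
    }
    return 0;

    cleanup_register_netdev:
            unregister_netdev(netdev);
    cleanup_napi_add:
            del_timer_sync(&devdata->irq_poll_timer);
            netif_napi_del(&devdata->napi);

Each label undoes exactly the steps that succeeded before the failure point, in reverse order.
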
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index e8a52f7d6204..51d1734d5390 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -407,6 +407,7 @@ int iscsi_create_default_params(struct iscsi_param_list **param_list_ptr)
407 TYPERANGE_UTF8, USE_INITIAL_ONLY); 407 TYPERANGE_UTF8, USE_INITIAL_ONLY);
408 if (!param) 408 if (!param)
409 goto out; 409 goto out;
410
410 /* 411 /*
411 * Extra parameters for ISER from RFC-5046 412 * Extra parameters for ISER from RFC-5046
412 */ 413 */
@@ -496,9 +497,9 @@ int iscsi_set_keys_to_negotiate(
496 } else if (!strcmp(param->name, SESSIONTYPE)) { 497 } else if (!strcmp(param->name, SESSIONTYPE)) {
497 SET_PSTATE_NEGOTIATE(param); 498 SET_PSTATE_NEGOTIATE(param);
498 } else if (!strcmp(param->name, IFMARKER)) { 499 } else if (!strcmp(param->name, IFMARKER)) {
499 SET_PSTATE_NEGOTIATE(param); 500 SET_PSTATE_REJECT(param);
500 } else if (!strcmp(param->name, OFMARKER)) { 501 } else if (!strcmp(param->name, OFMARKER)) {
501 SET_PSTATE_NEGOTIATE(param); 502 SET_PSTATE_REJECT(param);
502 } else if (!strcmp(param->name, IFMARKINT)) { 503 } else if (!strcmp(param->name, IFMARKINT)) {
503 SET_PSTATE_REJECT(param); 504 SET_PSTATE_REJECT(param);
504 } else if (!strcmp(param->name, OFMARKINT)) { 505 } else if (!strcmp(param->name, OFMARKINT)) {
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index dcc424ac35d4..88ea4e4f124b 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -62,22 +62,13 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
62 struct se_session *se_sess = se_cmd->se_sess; 62 struct se_session *se_sess = se_cmd->se_sess;
63 struct se_node_acl *nacl = se_sess->se_node_acl; 63 struct se_node_acl *nacl = se_sess->se_node_acl;
64 struct se_dev_entry *deve; 64 struct se_dev_entry *deve;
65 sense_reason_t ret = TCM_NO_SENSE;
65 66
66 rcu_read_lock(); 67 rcu_read_lock();
67 deve = target_nacl_find_deve(nacl, unpacked_lun); 68 deve = target_nacl_find_deve(nacl, unpacked_lun);
68 if (deve) { 69 if (deve) {
69 atomic_long_inc(&deve->total_cmds); 70 atomic_long_inc(&deve->total_cmds);
70 71
71 if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
72 (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
73 pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
74 " Access for 0x%08llx\n",
75 se_cmd->se_tfo->get_fabric_name(),
76 unpacked_lun);
77 rcu_read_unlock();
78 return TCM_WRITE_PROTECTED;
79 }
80
81 if (se_cmd->data_direction == DMA_TO_DEVICE) 72 if (se_cmd->data_direction == DMA_TO_DEVICE)
82 atomic_long_add(se_cmd->data_length, 73 atomic_long_add(se_cmd->data_length,
83 &deve->write_bytes); 74 &deve->write_bytes);
@@ -93,6 +84,17 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
93 84
94 percpu_ref_get(&se_lun->lun_ref); 85 percpu_ref_get(&se_lun->lun_ref);
95 se_cmd->lun_ref_active = true; 86 se_cmd->lun_ref_active = true;
87
88 if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
89 (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
90 pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
91 " Access for 0x%08llx\n",
92 se_cmd->se_tfo->get_fabric_name(),
93 unpacked_lun);
94 rcu_read_unlock();
95 ret = TCM_WRITE_PROTECTED;
96 goto ref_dev;
97 }
96 } 98 }
97 rcu_read_unlock(); 99 rcu_read_unlock();
98 100
@@ -109,12 +111,6 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
109 unpacked_lun); 111 unpacked_lun);
110 return TCM_NON_EXISTENT_LUN; 112 return TCM_NON_EXISTENT_LUN;
111 } 113 }
112 /*
113 * Force WRITE PROTECT for virtual LUN 0
114 */
115 if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
116 (se_cmd->data_direction != DMA_NONE))
117 return TCM_WRITE_PROTECTED;
118 114
119 se_lun = se_sess->se_tpg->tpg_virt_lun0; 115 se_lun = se_sess->se_tpg->tpg_virt_lun0;
120 se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0; 116 se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
@@ -123,6 +119,15 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
123 119
124 percpu_ref_get(&se_lun->lun_ref); 120 percpu_ref_get(&se_lun->lun_ref);
125 se_cmd->lun_ref_active = true; 121 se_cmd->lun_ref_active = true;
122
123 /*
124 * Force WRITE PROTECT for virtual LUN 0
125 */
126 if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
127 (se_cmd->data_direction != DMA_NONE)) {
128 ret = TCM_WRITE_PROTECTED;
129 goto ref_dev;
130 }
126 } 131 }
127 /* 132 /*
128 * RCU reference protected by percpu se_lun->lun_ref taken above that 133 * RCU reference protected by percpu se_lun->lun_ref taken above that
@@ -130,6 +135,7 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
130 * pointer can be kfree_rcu() by the final se_lun->lun_group put via 135 * pointer can be kfree_rcu() by the final se_lun->lun_group put via
131 * target_core_fabric_configfs.c:target_fabric_port_release 136 * target_core_fabric_configfs.c:target_fabric_port_release
132 */ 137 */
138ref_dev:
133 se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev); 139 se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
134 atomic_long_inc(&se_cmd->se_dev->num_cmds); 140 atomic_long_inc(&se_cmd->se_dev->num_cmds);
135 141
@@ -140,7 +146,7 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
140 atomic_long_add(se_cmd->data_length, 146 atomic_long_add(se_cmd->data_length,
141 &se_cmd->se_dev->read_bytes); 147 &se_cmd->se_dev->read_bytes);
142 148
143 return 0; 149 return ret;
144} 150}
145EXPORT_SYMBOL(transport_lookup_cmd_lun); 151EXPORT_SYMBOL(transport_lookup_cmd_lun);
146 152
@@ -427,8 +433,6 @@ void core_disable_device_list_for_node(
427 433
428 hlist_del_rcu(&orig->link); 434 hlist_del_rcu(&orig->link);
429 clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags); 435 clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
430 rcu_assign_pointer(orig->se_lun, NULL);
431 rcu_assign_pointer(orig->se_lun_acl, NULL);
432 orig->lun_flags = 0; 436 orig->lun_flags = 0;
433 orig->creation_time = 0; 437 orig->creation_time = 0;
434 orig->attach_count--; 438 orig->attach_count--;
@@ -439,6 +443,9 @@ void core_disable_device_list_for_node(
439 kref_put(&orig->pr_kref, target_pr_kref_release); 443 kref_put(&orig->pr_kref, target_pr_kref_release);
440 wait_for_completion(&orig->pr_comp); 444 wait_for_completion(&orig->pr_comp);
441 445
446 rcu_assign_pointer(orig->se_lun, NULL);
447 rcu_assign_pointer(orig->se_lun_acl, NULL);
448
442 kfree_rcu(orig, rcu_head); 449 kfree_rcu(orig, rcu_head);
443 450
444 core_scsi3_free_pr_reg_from_nacl(dev, nacl); 451 core_scsi3_free_pr_reg_from_nacl(dev, nacl);
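
transport_lookup_cmd_lun() now takes the percpu se_lun->lun_ref and marks lun_ref_active before deciding whether the command hits a write-protected LUN; the sense code is carried in ret and both paths fall through the ref_dev label, so device statistics and se_cmd->se_dev are set up either way. Relatedly, core_disable_device_list_for_node() defers clearing the RCU-visible se_lun/se_lun_acl pointers until the PR kref has drained. A compressed sketch of the acquire-refs-first, report-late shape (the predicates are hypothetical shorthand for the checks in the hunk):

    sense_reason_t ret = TCM_NO_SENSE;

    percpu_ref_get(&se_lun->lun_ref);       /* take the ref unconditionally */
    se_cmd->lun_ref_active = true;

    if (cmd_is_write(se_cmd) && lun_is_read_only(deve)) {
            ret = TCM_WRITE_PROTECTED;
            goto ref_dev;                   /* still do the bookkeeping */
    }
    /* normal path continues */

    ref_dev:
    se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
    atomic_long_inc(&se_cmd->se_dev->num_cmds);
    return ret;
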
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c
index 9522960c7fdd..22390e0e046c 100644
--- a/drivers/target/target_core_hba.c
+++ b/drivers/target/target_core_hba.c
@@ -187,5 +187,5 @@ core_delete_hba(struct se_hba *hba)
187 187
188bool target_sense_desc_format(struct se_device *dev) 188bool target_sense_desc_format(struct se_device *dev)
189{ 189{
190 return dev->transport->get_blocks(dev) > U32_MAX; 190 return (dev) ? dev->transport->get_blocks(dev) > U32_MAX : false;
191} 191}
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 5a9982f5d5d6..0f19e11acac2 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -105,6 +105,8 @@ static int iblock_configure_device(struct se_device *dev)
105 mode = FMODE_READ|FMODE_EXCL; 105 mode = FMODE_READ|FMODE_EXCL;
106 if (!ib_dev->ibd_readonly) 106 if (!ib_dev->ibd_readonly)
107 mode |= FMODE_WRITE; 107 mode |= FMODE_WRITE;
108 else
109 dev->dev_flags |= DF_READ_ONLY;
108 110
109 bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev); 111 bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
110 if (IS_ERR(bd)) { 112 if (IS_ERR(bd)) {
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 5ab7100de17e..e7933115087a 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -618,7 +618,7 @@ static struct t10_pr_registration *__core_scsi3_do_alloc_registration(
618 struct se_device *dev, 618 struct se_device *dev,
619 struct se_node_acl *nacl, 619 struct se_node_acl *nacl,
620 struct se_lun *lun, 620 struct se_lun *lun,
621 struct se_dev_entry *deve, 621 struct se_dev_entry *dest_deve,
622 u64 mapped_lun, 622 u64 mapped_lun,
623 unsigned char *isid, 623 unsigned char *isid,
624 u64 sa_res_key, 624 u64 sa_res_key,
@@ -640,7 +640,29 @@ static struct t10_pr_registration *__core_scsi3_do_alloc_registration(
640 INIT_LIST_HEAD(&pr_reg->pr_reg_atp_mem_list); 640 INIT_LIST_HEAD(&pr_reg->pr_reg_atp_mem_list);
641 atomic_set(&pr_reg->pr_res_holders, 0); 641 atomic_set(&pr_reg->pr_res_holders, 0);
642 pr_reg->pr_reg_nacl = nacl; 642 pr_reg->pr_reg_nacl = nacl;
643 pr_reg->pr_reg_deve = deve; 643 /*
644 * For destination registrations for ALL_TG_PT=1 and SPEC_I_PT=1,
645 * the se_dev_entry->pr_ref will have been already obtained by
646 * core_get_se_deve_from_rtpi() or __core_scsi3_alloc_registration().
647 *
648 * Otherwise, locate se_dev_entry now and obtain a reference until
649 * registration completes in __core_scsi3_add_registration().
650 */
651 if (dest_deve) {
652 pr_reg->pr_reg_deve = dest_deve;
653 } else {
654 rcu_read_lock();
655 pr_reg->pr_reg_deve = target_nacl_find_deve(nacl, mapped_lun);
656 if (!pr_reg->pr_reg_deve) {
657 rcu_read_unlock();
658 pr_err("Unable to locate PR deve %s mapped_lun: %llu\n",
659 nacl->initiatorname, mapped_lun);
660 kmem_cache_free(t10_pr_reg_cache, pr_reg);
661 return NULL;
662 }
663 kref_get(&pr_reg->pr_reg_deve->pr_kref);
664 rcu_read_unlock();
665 }
644 pr_reg->pr_res_mapped_lun = mapped_lun; 666 pr_reg->pr_res_mapped_lun = mapped_lun;
645 pr_reg->pr_aptpl_target_lun = lun->unpacked_lun; 667 pr_reg->pr_aptpl_target_lun = lun->unpacked_lun;
646 pr_reg->tg_pt_sep_rtpi = lun->lun_rtpi; 668 pr_reg->tg_pt_sep_rtpi = lun->lun_rtpi;
@@ -936,17 +958,29 @@ static int __core_scsi3_check_aptpl_registration(
936 !(strcmp(pr_reg->pr_tport, t_port)) && 958 !(strcmp(pr_reg->pr_tport, t_port)) &&
937 (pr_reg->pr_reg_tpgt == tpgt) && 959 (pr_reg->pr_reg_tpgt == tpgt) &&
938 (pr_reg->pr_aptpl_target_lun == target_lun)) { 960 (pr_reg->pr_aptpl_target_lun == target_lun)) {
961 /*
962 * Obtain the ->pr_reg_deve pointer + reference, that
963 * is released by __core_scsi3_add_registration() below.
964 */
965 rcu_read_lock();
966 pr_reg->pr_reg_deve = target_nacl_find_deve(nacl, mapped_lun);
967 if (!pr_reg->pr_reg_deve) {
968 pr_err("Unable to locate PR APTPL %s mapped_lun:"
969 " %llu\n", nacl->initiatorname, mapped_lun);
970 rcu_read_unlock();
971 continue;
972 }
973 kref_get(&pr_reg->pr_reg_deve->pr_kref);
974 rcu_read_unlock();
939 975
940 pr_reg->pr_reg_nacl = nacl; 976 pr_reg->pr_reg_nacl = nacl;
941 pr_reg->tg_pt_sep_rtpi = lun->lun_rtpi; 977 pr_reg->tg_pt_sep_rtpi = lun->lun_rtpi;
942
943 list_del(&pr_reg->pr_reg_aptpl_list); 978 list_del(&pr_reg->pr_reg_aptpl_list);
944 spin_unlock(&pr_tmpl->aptpl_reg_lock); 979 spin_unlock(&pr_tmpl->aptpl_reg_lock);
945 /* 980 /*
946 * At this point all of the pointers in *pr_reg will 981 * At this point all of the pointers in *pr_reg will
947 * be setup, so go ahead and add the registration. 982 * be setup, so go ahead and add the registration.
948 */ 983 */
949
950 __core_scsi3_add_registration(dev, nacl, pr_reg, 0, 0); 984 __core_scsi3_add_registration(dev, nacl, pr_reg, 0, 0);
951 /* 985 /*
952 * If this registration is the reservation holder, 986 * If this registration is the reservation holder,
@@ -1044,18 +1078,11 @@ static void __core_scsi3_add_registration(
1044 1078
1045 __core_scsi3_dump_registration(tfo, dev, nacl, pr_reg, register_type); 1079 __core_scsi3_dump_registration(tfo, dev, nacl, pr_reg, register_type);
1046 spin_unlock(&pr_tmpl->registration_lock); 1080 spin_unlock(&pr_tmpl->registration_lock);
1047
1048 rcu_read_lock();
1049 deve = pr_reg->pr_reg_deve;
1050 if (deve)
1051 set_bit(DEF_PR_REG_ACTIVE, &deve->deve_flags);
1052 rcu_read_unlock();
1053
1054 /* 1081 /*
1055 * Skip extra processing for ALL_TG_PT=0 or REGISTER_AND_MOVE. 1082 * Skip extra processing for ALL_TG_PT=0 or REGISTER_AND_MOVE.
1056 */ 1083 */
1057 if (!pr_reg->pr_reg_all_tg_pt || register_move) 1084 if (!pr_reg->pr_reg_all_tg_pt || register_move)
1058 return; 1085 goto out;
1059 /* 1086 /*
1060 * Walk pr_reg->pr_reg_atp_list and add registrations for ALL_TG_PT=1 1087 * Walk pr_reg->pr_reg_atp_list and add registrations for ALL_TG_PT=1
1061 * allocated in __core_scsi3_alloc_registration() 1088 * allocated in __core_scsi3_alloc_registration()
@@ -1075,19 +1102,31 @@ static void __core_scsi3_add_registration(
1075 __core_scsi3_dump_registration(tfo, dev, nacl_tmp, pr_reg_tmp, 1102 __core_scsi3_dump_registration(tfo, dev, nacl_tmp, pr_reg_tmp,
1076 register_type); 1103 register_type);
1077 spin_unlock(&pr_tmpl->registration_lock); 1104 spin_unlock(&pr_tmpl->registration_lock);
1078 1105 /*
1106 * Drop configfs group dependency reference and deve->pr_kref
1107 * obtained from __core_scsi3_alloc_registration() code.
1108 */
1079 rcu_read_lock(); 1109 rcu_read_lock();
1080 deve = pr_reg_tmp->pr_reg_deve; 1110 deve = pr_reg_tmp->pr_reg_deve;
1081 if (deve) 1111 if (deve) {
1082 set_bit(DEF_PR_REG_ACTIVE, &deve->deve_flags); 1112 set_bit(DEF_PR_REG_ACTIVE, &deve->deve_flags);
1113 core_scsi3_lunacl_undepend_item(deve);
1114 pr_reg_tmp->pr_reg_deve = NULL;
1115 }
1083 rcu_read_unlock(); 1116 rcu_read_unlock();
1084
1085 /*
1086 * Drop configfs group dependency reference from
1087 * __core_scsi3_alloc_registration()
1088 */
1089 core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve);
1090 } 1117 }
1118out:
1119 /*
1120 * Drop deve->pr_kref obtained in __core_scsi3_do_alloc_registration()
1121 */
1122 rcu_read_lock();
1123 deve = pr_reg->pr_reg_deve;
1124 if (deve) {
1125 set_bit(DEF_PR_REG_ACTIVE, &deve->deve_flags);
1126 kref_put(&deve->pr_kref, target_pr_kref_release);
1127 pr_reg->pr_reg_deve = NULL;
1128 }
1129 rcu_read_unlock();
1091} 1130}
1092 1131
1093static int core_scsi3_alloc_registration( 1132static int core_scsi3_alloc_registration(
@@ -1785,9 +1824,11 @@ core_scsi3_decode_spec_i_port(
1785 dest_node_acl->initiatorname, i_buf, (dest_se_deve) ? 1824 dest_node_acl->initiatorname, i_buf, (dest_se_deve) ?
1786 dest_se_deve->mapped_lun : 0); 1825 dest_se_deve->mapped_lun : 0);
1787 1826
1788 if (!dest_se_deve) 1827 if (!dest_se_deve) {
1828 kref_put(&local_pr_reg->pr_reg_deve->pr_kref,
1829 target_pr_kref_release);
1789 continue; 1830 continue;
1790 1831 }
1791 core_scsi3_lunacl_undepend_item(dest_se_deve); 1832 core_scsi3_lunacl_undepend_item(dest_se_deve);
1792 core_scsi3_nodeacl_undepend_item(dest_node_acl); 1833 core_scsi3_nodeacl_undepend_item(dest_node_acl);
1793 core_scsi3_tpg_undepend_item(dest_tpg); 1834 core_scsi3_tpg_undepend_item(dest_tpg);
@@ -1823,9 +1864,11 @@ out:
1823 1864
1824 kmem_cache_free(t10_pr_reg_cache, dest_pr_reg); 1865 kmem_cache_free(t10_pr_reg_cache, dest_pr_reg);
1825 1866
1826 if (!dest_se_deve) 1867 if (!dest_se_deve) {
1868 kref_put(&local_pr_reg->pr_reg_deve->pr_kref,
1869 target_pr_kref_release);
1827 continue; 1870 continue;
1828 1871 }
1829 core_scsi3_lunacl_undepend_item(dest_se_deve); 1872 core_scsi3_lunacl_undepend_item(dest_se_deve);
1830 core_scsi3_nodeacl_undepend_item(dest_node_acl); 1873 core_scsi3_nodeacl_undepend_item(dest_node_acl);
1831 core_scsi3_tpg_undepend_item(dest_tpg); 1874 core_scsi3_tpg_undepend_item(dest_tpg);
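
The persistent-reservation rework makes every pr_reg->pr_reg_deve assignment hold its own reference: the deve is looked up under rcu_read_lock(), pinned with kref_get(&deve->pr_kref) before the lock drops, and released in __core_scsi3_add_registration() via kref_put(..., target_pr_kref_release) once DEF_PR_REG_ACTIVE is set. The lookup-and-pin idiom from the hunk, in isolation:

    rcu_read_lock();
    deve = target_nacl_find_deve(nacl, mapped_lun);
    if (!deve) {
            rcu_read_unlock();
            kmem_cache_free(t10_pr_reg_cache, pr_reg);
            return NULL;
    }
    kref_get(&deve->pr_kref);       /* pin the deve past the RCU section */
    rcu_read_unlock();
    pr_reg->pr_reg_deve = deve;

Without the kref, the deve could be freed by kfree_rcu() as soon as the read-side critical section ended, leaving pr_reg_deve dangling.
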
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 2d0381dd105c..5fb9dd7f08bb 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -668,7 +668,10 @@ int core_tpg_add_lun(
668 list_add_tail(&lun->lun_dev_link, &dev->dev_sep_list); 668 list_add_tail(&lun->lun_dev_link, &dev->dev_sep_list);
669 spin_unlock(&dev->se_port_lock); 669 spin_unlock(&dev->se_port_lock);
670 670
671 lun->lun_access = lun_access; 671 if (dev->dev_flags & DF_READ_ONLY)
672 lun->lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
673 else
674 lun->lun_access = lun_access;
672 if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) 675 if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
673 hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist); 676 hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist);
674 mutex_unlock(&tpg->tpg_lun_mutex); 677 mutex_unlock(&tpg->tpg_lun_mutex);
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 039004400987..5aabc4bc0d75 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -163,7 +163,7 @@ config THERMAL_EMULATION
163 163
164config HISI_THERMAL 164config HISI_THERMAL
165 tristate "Hisilicon thermal driver" 165 tristate "Hisilicon thermal driver"
166 depends on ARCH_HISI && CPU_THERMAL && OF 166 depends on (ARCH_HISI && CPU_THERMAL && OF) || COMPILE_TEST
167 help 167 help
168 Enable this to plug hisilicon's thermal sensor driver into the Linux 168 Enable this to plug hisilicon's thermal sensor driver into the Linux
169 thermal framework. cpufreq is used as the cooling device to throttle 169 thermal framework. cpufreq is used as the cooling device to throttle
@@ -182,7 +182,7 @@ config IMX_THERMAL
182 182
183config SPEAR_THERMAL 183config SPEAR_THERMAL
184 bool "SPEAr thermal sensor driver" 184 bool "SPEAr thermal sensor driver"
185 depends on PLAT_SPEAR 185 depends on PLAT_SPEAR || COMPILE_TEST
186 depends on OF 186 depends on OF
187 help 187 help
188 Enable this to plug the SPEAr thermal sensor driver into the Linux 188 Enable this to plug the SPEAr thermal sensor driver into the Linux
@@ -190,7 +190,7 @@ config SPEAR_THERMAL
190 190
191config ROCKCHIP_THERMAL 191config ROCKCHIP_THERMAL
192 tristate "Rockchip thermal driver" 192 tristate "Rockchip thermal driver"
193 depends on ARCH_ROCKCHIP 193 depends on ARCH_ROCKCHIP || COMPILE_TEST
194 depends on RESET_CONTROLLER 194 depends on RESET_CONTROLLER
195 help 195 help
196 Rockchip thermal driver provides support for Temperature sensor 196 Rockchip thermal driver provides support for Temperature sensor
@@ -208,7 +208,7 @@ config RCAR_THERMAL
208 208
209config KIRKWOOD_THERMAL 209config KIRKWOOD_THERMAL
210 tristate "Temperature sensor on Marvell Kirkwood SoCs" 210 tristate "Temperature sensor on Marvell Kirkwood SoCs"
211 depends on MACH_KIRKWOOD 211 depends on MACH_KIRKWOOD || COMPILE_TEST
212 depends on OF 212 depends on OF
213 help 213 help
214 Support for the Kirkwood thermal sensor driver into the Linux thermal 214 Support for the Kirkwood thermal sensor driver into the Linux thermal
@@ -216,7 +216,7 @@ config KIRKWOOD_THERMAL
216 216
217config DOVE_THERMAL 217config DOVE_THERMAL
218 tristate "Temperature sensor on Marvell Dove SoCs" 218 tristate "Temperature sensor on Marvell Dove SoCs"
219 depends on ARCH_DOVE || MACH_DOVE 219 depends on ARCH_DOVE || MACH_DOVE || COMPILE_TEST
220 depends on OF 220 depends on OF
221 help 221 help
222 Support for the Dove thermal sensor driver in the Linux thermal 222 Support for the Dove thermal sensor driver in the Linux thermal
@@ -234,7 +234,7 @@ config DB8500_THERMAL
234 234
235config ARMADA_THERMAL 235config ARMADA_THERMAL
236 tristate "Armada 370/XP thermal management" 236 tristate "Armada 370/XP thermal management"
237 depends on ARCH_MVEBU 237 depends on ARCH_MVEBU || COMPILE_TEST
238 depends on OF 238 depends on OF
239 help 239 help
240 Enable this option if you want to have support for thermal management 240 Enable this option if you want to have support for thermal management
@@ -349,11 +349,12 @@ config INTEL_PCH_THERMAL
349 programmable trip points and other information. 349 programmable trip points and other information.
350 350
351menu "Texas Instruments thermal drivers" 351menu "Texas Instruments thermal drivers"
352depends on ARCH_HAS_BANDGAP || COMPILE_TEST
352source "drivers/thermal/ti-soc-thermal/Kconfig" 353source "drivers/thermal/ti-soc-thermal/Kconfig"
353endmenu 354endmenu
354 355
355menu "Samsung thermal drivers" 356menu "Samsung thermal drivers"
356depends on ARCH_EXYNOS 357depends on ARCH_EXYNOS || COMPILE_TEST
357source "drivers/thermal/samsung/Kconfig" 358source "drivers/thermal/samsung/Kconfig"
358endmenu 359endmenu
359 360
@@ -364,7 +365,7 @@ endmenu
364 365
365config QCOM_SPMI_TEMP_ALARM 366config QCOM_SPMI_TEMP_ALARM
366 tristate "Qualcomm SPMI PMIC Temperature Alarm" 367 tristate "Qualcomm SPMI PMIC Temperature Alarm"
367 depends on OF && SPMI && IIO 368 depends on OF && (SPMI || COMPILE_TEST) && IIO
368 select REGMAP_SPMI 369 select REGMAP_SPMI
369 help 370 help
370 This enables a thermal sysfs driver for Qualcomm plug-and-play (QPNP) 371 This enables a thermal sysfs driver for Qualcomm plug-and-play (QPNP)
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 620dcd405ff6..42c6f71bdcc1 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -262,7 +262,9 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
262 * efficiently. Power is stored in mW, frequency in KHz. The 262 * efficiently. Power is stored in mW, frequency in KHz. The
263 * resulting table is in ascending order. 263 * resulting table is in ascending order.
264 * 264 *
265 * Return: 0 on success, -E* on error. 265 * Return: 0 on success, -EINVAL if there are no OPPs for any CPUs,
266 * -ENOMEM if we run out of memory or -EAGAIN if an OPP was
267 * added/enabled while the function was executing.
266 */ 268 */
267static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device, 269static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device,
268 u32 capacitance) 270 u32 capacitance)
@@ -273,8 +275,6 @@ static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device,
273 int num_opps = 0, cpu, i, ret = 0; 275 int num_opps = 0, cpu, i, ret = 0;
274 unsigned long freq; 276 unsigned long freq;
275 277
276 rcu_read_lock();
277
278 for_each_cpu(cpu, &cpufreq_device->allowed_cpus) { 278 for_each_cpu(cpu, &cpufreq_device->allowed_cpus) {
279 dev = get_cpu_device(cpu); 279 dev = get_cpu_device(cpu);
280 if (!dev) { 280 if (!dev) {
@@ -284,24 +284,20 @@ static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device,
284 } 284 }
285 285
286 num_opps = dev_pm_opp_get_opp_count(dev); 286 num_opps = dev_pm_opp_get_opp_count(dev);
287 if (num_opps > 0) { 287 if (num_opps > 0)
288 break; 288 break;
289 } else if (num_opps < 0) { 289 else if (num_opps < 0)
290 ret = num_opps; 290 return num_opps;
291 goto unlock;
292 }
293 } 291 }
294 292
295 if (num_opps == 0) { 293 if (num_opps == 0)
296 ret = -EINVAL; 294 return -EINVAL;
297 goto unlock;
298 }
299 295
300 power_table = kcalloc(num_opps, sizeof(*power_table), GFP_KERNEL); 296 power_table = kcalloc(num_opps, sizeof(*power_table), GFP_KERNEL);
301 if (!power_table) { 297 if (!power_table)
302 ret = -ENOMEM; 298 return -ENOMEM;
303 goto unlock; 299
304 } 300 rcu_read_lock();
305 301
306 for (freq = 0, i = 0; 302 for (freq = 0, i = 0;
307 opp = dev_pm_opp_find_freq_ceil(dev, &freq), !IS_ERR(opp); 303 opp = dev_pm_opp_find_freq_ceil(dev, &freq), !IS_ERR(opp);
@@ -309,6 +305,12 @@ static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device,
309 u32 freq_mhz, voltage_mv; 305 u32 freq_mhz, voltage_mv;
310 u64 power; 306 u64 power;
311 307
308 if (i >= num_opps) {
309 rcu_read_unlock();
310 ret = -EAGAIN;
311 goto free_power_table;
312 }
313
312 freq_mhz = freq / 1000000; 314 freq_mhz = freq / 1000000;
313 voltage_mv = dev_pm_opp_get_voltage(opp) / 1000; 315 voltage_mv = dev_pm_opp_get_voltage(opp) / 1000;
314 316
@@ -326,17 +328,22 @@ static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device,
326 power_table[i].power = power; 328 power_table[i].power = power;
327 } 329 }
328 330
329 if (i == 0) { 331 rcu_read_unlock();
332
333 if (i != num_opps) {
330 ret = PTR_ERR(opp); 334 ret = PTR_ERR(opp);
331 goto unlock; 335 goto free_power_table;
332 } 336 }
333 337
334 cpufreq_device->cpu_dev = dev; 338 cpufreq_device->cpu_dev = dev;
335 cpufreq_device->dyn_power_table = power_table; 339 cpufreq_device->dyn_power_table = power_table;
336 cpufreq_device->dyn_power_table_entries = i; 340 cpufreq_device->dyn_power_table_entries = i;
337 341
338unlock: 342 return 0;
339 rcu_read_unlock(); 343
344free_power_table:
345 kfree(power_table);
346
340 return ret; 347 return ret;
341} 348}
342 349
@@ -847,7 +854,7 @@ __cpufreq_cooling_register(struct device_node *np,
847 ret = get_idr(&cpufreq_idr, &cpufreq_dev->id); 854 ret = get_idr(&cpufreq_idr, &cpufreq_dev->id);
848 if (ret) { 855 if (ret) {
849 cool_dev = ERR_PTR(ret); 856 cool_dev = ERR_PTR(ret);
850 goto free_table; 857 goto free_power_table;
851 } 858 }
852 859
853 snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d", 860 snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
@@ -889,6 +896,8 @@ __cpufreq_cooling_register(struct device_node *np,
889 896
890remove_idr: 897remove_idr:
891 release_idr(&cpufreq_idr, cpufreq_dev->id); 898 release_idr(&cpufreq_idr, cpufreq_dev->id);
899free_power_table:
900 kfree(cpufreq_dev->dyn_power_table);
892free_table: 901free_table:
893 kfree(cpufreq_dev->freq_table); 902 kfree(cpufreq_dev->freq_table);
894free_time_in_idle_timestamp: 903free_time_in_idle_timestamp:
@@ -1039,6 +1048,7 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
1039 1048
1040 thermal_cooling_device_unregister(cpufreq_dev->cool_dev); 1049 thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
1041 release_idr(&cpufreq_idr, cpufreq_dev->id); 1050 release_idr(&cpufreq_idr, cpufreq_dev->id);
1051 kfree(cpufreq_dev->dyn_power_table);
1042 kfree(cpufreq_dev->time_in_idle_timestamp); 1052 kfree(cpufreq_dev->time_in_idle_timestamp);
1043 kfree(cpufreq_dev->time_in_idle); 1053 kfree(cpufreq_dev->time_in_idle);
1044 kfree(cpufreq_dev->freq_table); 1054 kfree(cpufreq_dev->freq_table);
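
build_dyn_power_table() used to hold rcu_read_lock() across kcalloc(), which may sleep; the rework counts OPPs and allocates first, enters the RCU section only for the dev_pm_opp_find_freq_ceil() walk, and returns -EAGAIN if more OPPs appear than were counted. The narrowed critical section, sketched (table filling condensed to a comment):

    num_opps = dev_pm_opp_get_opp_count(dev);
    if (num_opps < 0)
            return num_opps;
    if (num_opps == 0)
            return -EINVAL;

    power_table = kcalloc(num_opps, sizeof(*power_table), GFP_KERNEL);
    if (!power_table)                       /* allocation done outside RCU */
            return -ENOMEM;

    rcu_read_lock();                        /* only the OPP walk needs RCU */
    for (freq = 0, i = 0;
         opp = dev_pm_opp_find_freq_ceil(dev, &freq), !IS_ERR(opp);
         freq++, i++) {
            if (i >= num_opps) {            /* an OPP was added meanwhile */
                    rcu_read_unlock();
                    kfree(power_table);
                    return -EAGAIN;
            }
            /* fill power_table[i] from freq and dev_pm_opp_get_voltage(opp) */
    }
    rcu_read_unlock();

The same series also plugs two leaks of dyn_power_table: on the get_idr() failure path and in cpufreq_cooling_unregister().
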
diff --git a/drivers/thermal/db8500_cpufreq_cooling.c b/drivers/thermal/db8500_cpufreq_cooling.c
index 607b62c7e611..e58bd0b658b5 100644
--- a/drivers/thermal/db8500_cpufreq_cooling.c
+++ b/drivers/thermal/db8500_cpufreq_cooling.c
@@ -72,6 +72,7 @@ static const struct of_device_id db8500_cpufreq_cooling_match[] = {
72 { .compatible = "stericsson,db8500-cpufreq-cooling" }, 72 { .compatible = "stericsson,db8500-cpufreq-cooling" },
73 {}, 73 {},
74}; 74};
75MODULE_DEVICE_TABLE(of, db8500_cpufreq_cooling_match);
75#endif 76#endif
76 77
77static struct platform_driver db8500_cpufreq_cooling_driver = { 78static struct platform_driver db8500_cpufreq_cooling_driver = {
diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c
index 9c8a7aad0252..e570ff084add 100644
--- a/drivers/thermal/power_allocator.c
+++ b/drivers/thermal/power_allocator.c
@@ -24,6 +24,8 @@
24 24
25#include "thermal_core.h" 25#include "thermal_core.h"
26 26
27#define INVALID_TRIP -1
28
27#define FRAC_BITS 10 29#define FRAC_BITS 10
28#define int_to_frac(x) ((x) << FRAC_BITS) 30#define int_to_frac(x) ((x) << FRAC_BITS)
29#define frac_to_int(x) ((x) >> FRAC_BITS) 31#define frac_to_int(x) ((x) >> FRAC_BITS)
@@ -56,16 +58,21 @@ static inline s64 div_frac(s64 x, s64 y)
56 58
57/** 59/**
58 * struct power_allocator_params - parameters for the power allocator governor 60 * struct power_allocator_params - parameters for the power allocator governor
61 * @allocated_tzp: whether we have allocated tzp for this thermal zone and
62 * it needs to be freed on unbind
59 * @err_integral: accumulated error in the PID controller. 63 * @err_integral: accumulated error in the PID controller.
60 * @prev_err: error in the previous iteration of the PID controller. 64 * @prev_err: error in the previous iteration of the PID controller.
61 * Used to calculate the derivative term. 65 * Used to calculate the derivative term.
62 * @trip_switch_on: first passive trip point of the thermal zone. The 66 * @trip_switch_on: first passive trip point of the thermal zone. The
63 * governor switches on when this trip point is crossed. 67 * governor switches on when this trip point is crossed.
68 * If the thermal zone only has one passive trip point,
69 * @trip_switch_on should be INVALID_TRIP.
64 * @trip_max_desired_temperature: last passive trip point of the thermal 70 * @trip_max_desired_temperature: last passive trip point of the thermal
65 * zone. The temperature we are 71 * zone. The temperature we are
66 * controlling for. 72 * controlling for.
67 */ 73 */
68struct power_allocator_params { 74struct power_allocator_params {
75 bool allocated_tzp;
69 s64 err_integral; 76 s64 err_integral;
70 s32 prev_err; 77 s32 prev_err;
71 int trip_switch_on; 78 int trip_switch_on;
@@ -73,6 +80,98 @@ struct power_allocator_params {
73}; 80};
74 81
75/** 82/**
83 * estimate_sustainable_power() - Estimate the sustainable power of a thermal zone
84 * @tz: thermal zone we are operating in
85 *
86 * For thermal zones that don't provide a sustainable_power in their
87 * thermal_zone_params, estimate one. Calculate it using the minimum
88 * power of all the cooling devices as that gives a valid value that
89 * can give some degree of functionality. For optimal performance of
90 * this governor, provide a sustainable_power in the thermal zone's
91 * thermal_zone_params.
92 */
93static u32 estimate_sustainable_power(struct thermal_zone_device *tz)
94{
95 u32 sustainable_power = 0;
96 struct thermal_instance *instance;
97 struct power_allocator_params *params = tz->governor_data;
98
99 list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
100 struct thermal_cooling_device *cdev = instance->cdev;
101 u32 min_power;
102
103 if (instance->trip != params->trip_max_desired_temperature)
104 continue;
105
106 if (power_actor_get_min_power(cdev, tz, &min_power))
107 continue;
108
109 sustainable_power += min_power;
110 }
111
112 return sustainable_power;
113}
114
115/**
116 * estimate_pid_constants() - Estimate the constants for the PID controller
117 * @tz: thermal zone for which to estimate the constants
118 * @sustainable_power: sustainable power for the thermal zone
119 * @trip_switch_on: trip point number for the switch on temperature
120 * @control_temp: target temperature for the power allocator governor
121 * @force: whether to force the update of the constants
122 *
123 * This function is used to update the estimation of the PID
124 * controller constants in struct thermal_zone_parameters.
125 * Sustainable power is provided in case it was estimated. The
126 * estimated sustainable_power should not be stored in the
127 * thermal_zone_parameters so it has to be passed explicitly to this
128 * function.
129 *
130 * If @force is not set, the values in the thermal zone's parameters
131 * are preserved if they are not zero. If @force is set, the values
132 * in thermal zone's parameters are overwritten.
133 */
134static void estimate_pid_constants(struct thermal_zone_device *tz,
135 u32 sustainable_power, int trip_switch_on,
136 int control_temp, bool force)
137{
138 int ret;
139 int switch_on_temp;
140 u32 temperature_threshold;
141
142 ret = tz->ops->get_trip_temp(tz, trip_switch_on, &switch_on_temp);
143 if (ret)
144 switch_on_temp = 0;
145
146 temperature_threshold = control_temp - switch_on_temp;
147 /*
148 * estimate_pid_constants() tries to find appropriate default
149 * values for thermal zones that don't provide them. If a
150 * system integrator has configured a thermal zone with two
151 * passive trip points at the same temperature, that person
 152 * hasn't put any effort into setting up the thermal zone properly
153 * so just give up.
154 */
155 if (!temperature_threshold)
156 return;
157
158 if (!tz->tzp->k_po || force)
159 tz->tzp->k_po = int_to_frac(sustainable_power) /
160 temperature_threshold;
161
162 if (!tz->tzp->k_pu || force)
163 tz->tzp->k_pu = int_to_frac(2 * sustainable_power) /
164 temperature_threshold;
165
166 if (!tz->tzp->k_i || force)
167 tz->tzp->k_i = int_to_frac(10) / 1000;
168 /*
169 * The default for k_d and integral_cutoff is 0, so we can
170 * leave them as they are.
171 */
172}
173
174/**
76 * pid_controller() - PID controller 175 * pid_controller() - PID controller
77 * @tz: thermal zone we are operating in 176 * @tz: thermal zone we are operating in
78 * @current_temp: the current temperature in millicelsius 177 * @current_temp: the current temperature in millicelsius
@@ -98,10 +197,20 @@ static u32 pid_controller(struct thermal_zone_device *tz,
98{ 197{
99 s64 p, i, d, power_range; 198 s64 p, i, d, power_range;
100 s32 err, max_power_frac; 199 s32 err, max_power_frac;
200 u32 sustainable_power;
101 struct power_allocator_params *params = tz->governor_data; 201 struct power_allocator_params *params = tz->governor_data;
102 202
103 max_power_frac = int_to_frac(max_allocatable_power); 203 max_power_frac = int_to_frac(max_allocatable_power);
104 204
205 if (tz->tzp->sustainable_power) {
206 sustainable_power = tz->tzp->sustainable_power;
207 } else {
208 sustainable_power = estimate_sustainable_power(tz);
209 estimate_pid_constants(tz, sustainable_power,
210 params->trip_switch_on, control_temp,
211 true);
212 }
213
105 err = control_temp - current_temp; 214 err = control_temp - current_temp;
106 err = int_to_frac(err); 215 err = int_to_frac(err);
107 216
@@ -139,7 +248,7 @@ static u32 pid_controller(struct thermal_zone_device *tz,
139 power_range = p + i + d; 248 power_range = p + i + d;
140 249
141 /* feed-forward the known sustainable dissipatable power */ 250 /* feed-forward the known sustainable dissipatable power */
142 power_range = tz->tzp->sustainable_power + frac_to_int(power_range); 251 power_range = sustainable_power + frac_to_int(power_range);
143 252
144 power_range = clamp(power_range, (s64)0, (s64)max_allocatable_power); 253 power_range = clamp(power_range, (s64)0, (s64)max_allocatable_power);
145 254
@@ -247,6 +356,11 @@ static int allocate_power(struct thermal_zone_device *tz,
247 } 356 }
248 } 357 }
249 358
359 if (!num_actors) {
360 ret = -ENODEV;
361 goto unlock;
362 }
363
250 /* 364 /*
251 * We need to allocate five arrays of the same size: 365 * We need to allocate five arrays of the same size:
252 * req_power, max_power, granted_power, extra_actor_power and 366 * req_power, max_power, granted_power, extra_actor_power and
@@ -340,43 +454,66 @@ unlock:
340 return ret; 454 return ret;
341} 455}
342 456
343static int get_governor_trips(struct thermal_zone_device *tz, 457/**
344 struct power_allocator_params *params) 458 * get_governor_trips() - get the number of the two trip points that are key for this governor
459 * @tz: thermal zone to operate on
460 * @params: pointer to private data for this governor
461 *
462 * The power allocator governor works optimally with two trips points:
463 * a "switch on" trip point and a "maximum desired temperature". These
464 * are defined as the first and last passive trip points.
465 *
466 * If there is only one trip point, then that's considered to be the
467 * "maximum desired temperature" trip point and the governor is always
468 * on. If there are no passive or active trip points, then the
469 * governor won't do anything. In fact, its throttle function
470 * won't be called at all.
471 */
472static void get_governor_trips(struct thermal_zone_device *tz,
473 struct power_allocator_params *params)
345{ 474{
346 int i, ret, last_passive; 475 int i, last_active, last_passive;
347 bool found_first_passive; 476 bool found_first_passive;
348 477
349 found_first_passive = false; 478 found_first_passive = false;
350 last_passive = -1; 479 last_active = INVALID_TRIP;
351 ret = -EINVAL; 480 last_passive = INVALID_TRIP;
352 481
353 for (i = 0; i < tz->trips; i++) { 482 for (i = 0; i < tz->trips; i++) {
354 enum thermal_trip_type type; 483 enum thermal_trip_type type;
484 int ret;
355 485
356 ret = tz->ops->get_trip_type(tz, i, &type); 486 ret = tz->ops->get_trip_type(tz, i, &type);
357 if (ret) 487 if (ret) {
358 return ret; 488 dev_warn(&tz->device,
489 "Failed to get trip point %d type: %d\n", i,
490 ret);
491 continue;
492 }
359 493
360 if (!found_first_passive) { 494 if (type == THERMAL_TRIP_PASSIVE) {
361 if (type == THERMAL_TRIP_PASSIVE) { 495 if (!found_first_passive) {
362 params->trip_switch_on = i; 496 params->trip_switch_on = i;
363 found_first_passive = true; 497 found_first_passive = true;
498 } else {
499 last_passive = i;
364 } 500 }
365 } else if (type == THERMAL_TRIP_PASSIVE) { 501 } else if (type == THERMAL_TRIP_ACTIVE) {
366 last_passive = i; 502 last_active = i;
367 } else { 503 } else {
368 break; 504 break;
369 } 505 }
370 } 506 }
371 507
372 if (last_passive != -1) { 508 if (last_passive != INVALID_TRIP) {
373 params->trip_max_desired_temperature = last_passive; 509 params->trip_max_desired_temperature = last_passive;
374 ret = 0; 510 } else if (found_first_passive) {
511 params->trip_max_desired_temperature = params->trip_switch_on;
512 params->trip_switch_on = INVALID_TRIP;
375 } else { 513 } else {
376 ret = -EINVAL; 514 params->trip_switch_on = INVALID_TRIP;
515 params->trip_max_desired_temperature = last_active;
377 } 516 }
378
379 return ret;
380} 517}
381 518
382static void reset_pid_controller(struct power_allocator_params *params) 519static void reset_pid_controller(struct power_allocator_params *params)
@@ -405,60 +542,45 @@ static void allow_maximum_power(struct thermal_zone_device *tz)
405 * power_allocator_bind() - bind the power_allocator governor to a thermal zone 542 * power_allocator_bind() - bind the power_allocator governor to a thermal zone
406 * @tz: thermal zone to bind it to 543 * @tz: thermal zone to bind it to
407 * 544 *
408 * Check that the thermal zone is valid for this governor, that is, it 545 * Initialize the PID controller parameters and bind it to the thermal
409 * has two thermal trips. If so, initialize the PID controller 546 * zone.
410 * parameters and bind it to the thermal zone.
411 * 547 *
412 * Return: 0 on success, -EINVAL if the trips were invalid or -ENOMEM 548 * Return: 0 on success, or -ENOMEM if we ran out of memory.
413 * if we ran out of memory.
414 */ 549 */
415static int power_allocator_bind(struct thermal_zone_device *tz) 550static int power_allocator_bind(struct thermal_zone_device *tz)
416{ 551{
417 int ret; 552 int ret;
418 struct power_allocator_params *params; 553 struct power_allocator_params *params;
419 int switch_on_temp, control_temp; 554 int control_temp;
420 u32 temperature_threshold;
421
422 if (!tz->tzp || !tz->tzp->sustainable_power) {
423 dev_err(&tz->device,
424 "power_allocator: missing sustainable_power\n");
425 return -EINVAL;
426 }
427 555
428 params = kzalloc(sizeof(*params), GFP_KERNEL); 556 params = kzalloc(sizeof(*params), GFP_KERNEL);
429 if (!params) 557 if (!params)
430 return -ENOMEM; 558 return -ENOMEM;
431 559
432 ret = get_governor_trips(tz, params); 560 if (!tz->tzp) {
433 if (ret) { 561 tz->tzp = kzalloc(sizeof(*tz->tzp), GFP_KERNEL);
434 dev_err(&tz->device, 562 if (!tz->tzp) {
435 "thermal zone %s has wrong trip setup for power allocator\n", 563 ret = -ENOMEM;
436 tz->type); 564 goto free_params;
437 goto free; 565 }
438 }
439 566
440 ret = tz->ops->get_trip_temp(tz, params->trip_switch_on, 567 params->allocated_tzp = true;
441 &switch_on_temp); 568 }
442 if (ret)
443 goto free;
444 569
445 ret = tz->ops->get_trip_temp(tz, params->trip_max_desired_temperature, 570 if (!tz->tzp->sustainable_power)
446 &control_temp); 571 dev_warn(&tz->device, "power_allocator: sustainable_power will be estimated\n");
447 if (ret)
448 goto free;
449 572
450 temperature_threshold = control_temp - switch_on_temp; 573 get_governor_trips(tz, params);
451 574
452 tz->tzp->k_po = tz->tzp->k_po ?: 575 if (tz->trips > 0) {
453 int_to_frac(tz->tzp->sustainable_power) / temperature_threshold; 576 ret = tz->ops->get_trip_temp(tz,
454 tz->tzp->k_pu = tz->tzp->k_pu ?: 577 params->trip_max_desired_temperature,
455 int_to_frac(2 * tz->tzp->sustainable_power) / 578 &control_temp);
456 temperature_threshold; 579 if (!ret)
457 tz->tzp->k_i = tz->tzp->k_i ?: int_to_frac(10) / 1000; 580 estimate_pid_constants(tz, tz->tzp->sustainable_power,
458 /* 581 params->trip_switch_on,
459 * The default for k_d and integral_cutoff is 0, so we can 582 control_temp, false);
460 * leave them as they are. 583 }
461 */
462 584
463 reset_pid_controller(params); 585 reset_pid_controller(params);
464 586
@@ -466,14 +588,23 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
466 588
467 return 0; 589 return 0;
468 590
469free: 591free_params:
470 kfree(params); 592 kfree(params);
593
471 return ret; 594 return ret;
472} 595}
473 596
474static void power_allocator_unbind(struct thermal_zone_device *tz) 597static void power_allocator_unbind(struct thermal_zone_device *tz)
475{ 598{
599 struct power_allocator_params *params = tz->governor_data;
600
476 dev_dbg(&tz->device, "Unbinding from thermal zone %d\n", tz->id); 601 dev_dbg(&tz->device, "Unbinding from thermal zone %d\n", tz->id);
602
603 if (params->allocated_tzp) {
604 kfree(tz->tzp);
605 tz->tzp = NULL;
606 }
607
477 kfree(tz->governor_data); 608 kfree(tz->governor_data);
478 tz->governor_data = NULL; 609 tz->governor_data = NULL;
479} 610}
@@ -499,13 +630,7 @@ static int power_allocator_throttle(struct thermal_zone_device *tz, int trip)
499 630
500 ret = tz->ops->get_trip_temp(tz, params->trip_switch_on, 631 ret = tz->ops->get_trip_temp(tz, params->trip_switch_on,
501 &switch_on_temp); 632 &switch_on_temp);
502 if (ret) { 633 if (!ret && (current_temp < switch_on_temp)) {
503 dev_warn(&tz->device,
504 "Failed to get switch on temperature: %d\n", ret);
505 return ret;
506 }
507
508 if (current_temp < switch_on_temp) {
509 tz->passive = 0; 634 tz->passive = 0;
510 reset_pid_controller(params); 635 reset_pid_controller(params);
511 allow_maximum_power(tz); 636 allow_maximum_power(tz);
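
When a zone provides no sustainable_power, the governor now estimates it as the sum of the cooling devices' minimum powers and derives the PID constants from it in FRAC_BITS fixed point: k_po is sustainable_power over the switch-on-to-control temperature window, k_pu twice that, and k_i a fixed 0.01. A worked example of the arithmetic (the input values are illustrative, not from the driver):

    #define FRAC_BITS 10
    #define int_to_frac(x) ((x) << FRAC_BITS)

    /* e.g. sustainable_power = 3500 mW, control - switch_on = 10000 mC */
    s32 k_po = int_to_frac(3500) / 10000;           /* 358/1024 ~= 0.35 */
    s32 k_pu = int_to_frac(2 * 3500) / 10000;       /* 716/1024 ~= 0.70 */
    s32 k_i  = int_to_frac(10) / 1000;              /* 10/1024  ~= 0.01 */

Because pid_controller() re-runs estimate_pid_constants(..., force=true) on every throttle pass while the estimate is in use, the constants track the current set of cooling devices rather than being computed once at bind time.
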
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 5e5fc7015c7f..d9e525cc9c1c 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -1013,6 +1013,34 @@ int power_actor_get_max_power(struct thermal_cooling_device *cdev,
1013} 1013}
1014 1014
1015/** 1015/**
 1016 * power_actor_get_min_power() - get the minimum power that a cdev can consume
1017 * @cdev: pointer to &thermal_cooling_device
1018 * @tz: a valid thermal zone device pointer
1019 * @min_power: pointer in which to store the minimum power
1020 *
1021 * Calculate the minimum power consumption in milliwatts that the
1022 * cooling device can currently consume and store it in @min_power.
1023 *
1024 * Return: 0 on success, -EINVAL if @cdev doesn't support the
1025 * power_actor API or -E* on other error.
1026 */
1027int power_actor_get_min_power(struct thermal_cooling_device *cdev,
1028 struct thermal_zone_device *tz, u32 *min_power)
1029{
1030 unsigned long max_state;
1031 int ret;
1032
1033 if (!cdev_is_power_actor(cdev))
1034 return -EINVAL;
1035
1036 ret = cdev->ops->get_max_state(cdev, &max_state);
1037 if (ret)
1038 return ret;
1039
1040 return cdev->ops->state2power(cdev, tz, max_state, min_power);
1041}
1042
1043/**
1016 * power_actor_set_power() - limit the maximum power that a cooling device can consume 1044 * power_actor_set_power() - limit the maximum power that a cooling device can consume
1017 * @cdev: pointer to &thermal_cooling_device 1045 * @cdev: pointer to &thermal_cooling_device
1018 * @instance: thermal instance to update 1046 * @instance: thermal instance to update
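
power_actor_get_min_power() mirrors the existing power_actor_get_max_power(): it fetches the cdev's deepest state with get_max_state() and converts it through state2power(), since the deepest cooling state corresponds to the lowest power the actor can be held to. A typical call site, as the power-allocator governor uses it when summing a floor estimate (sketch):

    u32 min_power;

    /* each power actor contributes its floor to the estimate */
    if (!power_actor_get_min_power(instance->cdev, tz, &min_power))
            sustainable_power += min_power;
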
diff --git a/drivers/thermal/ti-soc-thermal/Kconfig b/drivers/thermal/ti-soc-thermal/Kconfig
index bd4c7beba679..cb6686ff09ae 100644
--- a/drivers/thermal/ti-soc-thermal/Kconfig
+++ b/drivers/thermal/ti-soc-thermal/Kconfig
@@ -1,7 +1,5 @@
1config TI_SOC_THERMAL 1config TI_SOC_THERMAL
2 tristate "Texas Instruments SoCs temperature sensor driver" 2 tristate "Texas Instruments SoCs temperature sensor driver"
3 depends on THERMAL
4 depends on ARCH_HAS_BANDGAP
5 help 3 help
6 If you say yes here you get support for the Texas Instruments 4 If you say yes here you get support for the Texas Instruments
7 OMAP4460+ on die bandgap temperature sensor support. The register 5 OMAP4460+ on die bandgap temperature sensor support. The register
@@ -24,7 +22,7 @@ config TI_THERMAL
24config OMAP4_THERMAL 22config OMAP4_THERMAL
25 bool "Texas Instruments OMAP4 thermal support" 23 bool "Texas Instruments OMAP4 thermal support"
26 depends on TI_SOC_THERMAL 24 depends on TI_SOC_THERMAL
27 depends on ARCH_OMAP4 25 depends on ARCH_OMAP4 || COMPILE_TEST
28 help 26 help
29 If you say yes here you get thermal support for the Texas Instruments 27 If you say yes here you get thermal support for the Texas Instruments
30 OMAP4 SoC family. The current chip supported are: 28 OMAP4 SoC family. The current chip supported are:
@@ -38,7 +36,7 @@ config OMAP4_THERMAL
38config OMAP5_THERMAL 36config OMAP5_THERMAL
39 bool "Texas Instruments OMAP5 thermal support" 37 bool "Texas Instruments OMAP5 thermal support"
40 depends on TI_SOC_THERMAL 38 depends on TI_SOC_THERMAL
41 depends on SOC_OMAP5 39 depends on SOC_OMAP5 || COMPILE_TEST
42 help 40 help
43 If you say yes here you get thermal support for the Texas Instruments 41 If you say yes here you get thermal support for the Texas Instruments
44 OMAP5 SoC family. The current chip supported are: 42 OMAP5 SoC family. The current chip supported are:
@@ -50,7 +48,7 @@ config OMAP5_THERMAL
50config DRA752_THERMAL 48config DRA752_THERMAL
51 bool "Texas Instruments DRA752 thermal support" 49 bool "Texas Instruments DRA752 thermal support"
52 depends on TI_SOC_THERMAL 50 depends on TI_SOC_THERMAL
53 depends on SOC_DRA7XX 51 depends on SOC_DRA7XX || COMPILE_TEST
54 help 52 help
55 If you say yes here you get thermal support for the Texas Instruments 53 If you say yes here you get thermal support for the Texas Instruments
56 DRA752 SoC family. The current chip supported are: 54 DRA752 SoC family. The current chip supported are:
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index c68fe1222c16..20a41f7de76f 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -643,7 +643,7 @@ static struct pci_device_id nhi_ids[] = {
643 { 643 {
644 .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0, 644 .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
645 .vendor = PCI_VENDOR_ID_INTEL, .device = 0x156c, 645 .vendor = PCI_VENDOR_ID_INTEL, .device = 0x156c,
646 .subvendor = 0x2222, .subdevice = 0x1111, 646 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
647 }, 647 },
648 { 0,} 648 { 0,}
649}; 649};
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 54e6c8ddef5d..b1e0ba3e525b 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -2910,3 +2910,5 @@ int serial8250_console_setup(struct uart_port *port, char *options, bool probe)
2910} 2910}
2911 2911
2912#endif /* CONFIG_SERIAL_8250_CONSOLE */ 2912#endif /* CONFIG_SERIAL_8250_CONSOLE */
2913
2914MODULE_LICENSE("GPL");
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index 867e9f3f3859..dcc50c878159 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -61,7 +61,7 @@ static const struct of_device_id ci_hdrc_imx_dt_ids[] = {
         { .compatible = "fsl,imx27-usb", .data = &imx27_usb_data},
         { .compatible = "fsl,imx6q-usb", .data = &imx6q_usb_data},
         { .compatible = "fsl,imx6sl-usb", .data = &imx6sl_usb_data},
-        { .compatible = "fsl,imx6sx-usb", .data = &imx6sl_usb_data},
+        { .compatible = "fsl,imx6sx-usb", .data = &imx6sx_usb_data},
         { /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, ci_hdrc_imx_dt_ids);
diff --git a/drivers/usb/chipidea/ci_hdrc_usb2.c b/drivers/usb/chipidea/ci_hdrc_usb2.c
index 9eae1a16cef9..4456d2cf80ff 100644
--- a/drivers/usb/chipidea/ci_hdrc_usb2.c
+++ b/drivers/usb/chipidea/ci_hdrc_usb2.c
@@ -12,6 +12,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/of_platform.h>
 #include <linux/phy/phy.h>
 #include <linux/platform_device.h>
 #include <linux/usb/chipidea.h>
@@ -30,18 +31,36 @@ static const struct ci_hdrc_platform_data ci_default_pdata = {
         .flags = CI_HDRC_DISABLE_STREAMING,
 };
 
+static struct ci_hdrc_platform_data ci_zynq_pdata = {
+        .capoffset = DEF_CAPOFFSET,
+};
+
+static const struct of_device_id ci_hdrc_usb2_of_match[] = {
+        { .compatible = "chipidea,usb2"},
+        { .compatible = "xlnx,zynq-usb-2.20a", .data = &ci_zynq_pdata},
+        { }
+};
+MODULE_DEVICE_TABLE(of, ci_hdrc_usb2_of_match);
+
 static int ci_hdrc_usb2_probe(struct platform_device *pdev)
 {
         struct device *dev = &pdev->dev;
         struct ci_hdrc_usb2_priv *priv;
         struct ci_hdrc_platform_data *ci_pdata = dev_get_platdata(dev);
         int ret;
+        const struct of_device_id *match;
 
         if (!ci_pdata) {
                 ci_pdata = devm_kmalloc(dev, sizeof(*ci_pdata), GFP_KERNEL);
                 *ci_pdata = ci_default_pdata;   /* struct copy */
         }
 
+        match = of_match_device(ci_hdrc_usb2_of_match, &pdev->dev);
+        if (match && match->data) {
+                /* struct copy */
+                *ci_pdata = *(struct ci_hdrc_platform_data *)match->data;
+        }
+
         priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
         if (!priv)
                 return -ENOMEM;
@@ -96,12 +115,6 @@ static int ci_hdrc_usb2_remove(struct platform_device *pdev)
         return 0;
 }
 
-static const struct of_device_id ci_hdrc_usb2_of_match[] = {
-        { .compatible = "chipidea,usb2" },
-        { }
-};
-MODULE_DEVICE_TABLE(of, ci_hdrc_usb2_of_match);
-
 static struct platform_driver ci_hdrc_usb2_driver = {
         .probe  = ci_hdrc_usb2_probe,
         .remove = ci_hdrc_usb2_remove,
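The ci_hdrc_usb2 change above moves the OF match table ahead of probe() so a per-compatible platform-data pointer can be attached to each entry and copied over the defaults. A rough stand-alone sketch of that lookup-by-compatible pattern (names and the capoffset value are invented for illustration):

#include <stdio.h>
#include <string.h>

/* Per-device configuration selected by a "compatible" string. */
struct pdata { unsigned int capoffset; };

static const struct pdata zynq_pdata = { .capoffset = 0x100 }; /* illustrative */

struct of_id { const char *compatible; const void *data; };

static const struct of_id of_match[] = {
        { "chipidea,usb2", NULL },
        { "xlnx,zynq-usb-2.20a", &zynq_pdata },
        { NULL, NULL }  /* sentinel terminates the table */
};

static const struct of_id *match_node(const char *compatible)
{
        for (const struct of_id *id = of_match; id->compatible; id++)
                if (!strcmp(id->compatible, compatible))
                        return id;
        return NULL;
}

int main(void)
{
        const struct of_id *m = match_node("xlnx,zynq-usb-2.20a");

        /* Only override defaults when the matched entry carries data. */
        if (m && m->data)
                printf("capoffset=0x%x\n",
                       ((const struct pdata *)m->data)->capoffset);
        return 0;
}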
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index a637da25dda0..8223fe73ea85 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -656,6 +656,44 @@ __acquires(hwep->lock)
         return 0;
 }
 
+static int _ep_set_halt(struct usb_ep *ep, int value, bool check_transfer)
+{
+        struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
+        int direction, retval = 0;
+        unsigned long flags;
+
+        if (ep == NULL || hwep->ep.desc == NULL)
+                return -EINVAL;
+
+        if (usb_endpoint_xfer_isoc(hwep->ep.desc))
+                return -EOPNOTSUPP;
+
+        spin_lock_irqsave(hwep->lock, flags);
+
+        if (value && hwep->dir == TX && check_transfer &&
+                !list_empty(&hwep->qh.queue) &&
+                        !usb_endpoint_xfer_control(hwep->ep.desc)) {
+                spin_unlock_irqrestore(hwep->lock, flags);
+                return -EAGAIN;
+        }
+
+        direction = hwep->dir;
+        do {
+                retval |= hw_ep_set_halt(hwep->ci, hwep->num, hwep->dir, value);
+
+                if (!value)
+                        hwep->wedge = 0;
+
+                if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
+                        hwep->dir = (hwep->dir == TX) ? RX : TX;
+
+        } while (hwep->dir != direction);
+
+        spin_unlock_irqrestore(hwep->lock, flags);
+        return retval;
+}
+
+
 /**
  * _gadget_stop_activity: stops all USB activity, flushes & disables all endpts
  * @gadget: gadget
@@ -1051,7 +1089,7 @@ __acquires(ci->lock)
                 num += ci->hw_ep_max / 2;
 
         spin_unlock(&ci->lock);
-        err = usb_ep_set_halt(&ci->ci_hw_ep[num].ep);
+        err = _ep_set_halt(&ci->ci_hw_ep[num].ep, 1, false);
         spin_lock(&ci->lock);
         if (!err)
                 isr_setup_status_phase(ci);
@@ -1117,8 +1155,8 @@ delegate:
 
         if (err < 0) {
                 spin_unlock(&ci->lock);
-                if (usb_ep_set_halt(&hwep->ep))
-                        dev_err(ci->dev, "error: ep_set_halt\n");
+                if (_ep_set_halt(&hwep->ep, 1, false))
+                        dev_err(ci->dev, "error: _ep_set_halt\n");
                 spin_lock(&ci->lock);
         }
 }
@@ -1149,9 +1187,9 @@ __acquires(ci->lock)
                         err = isr_setup_status_phase(ci);
                         if (err < 0) {
                                 spin_unlock(&ci->lock);
-                                if (usb_ep_set_halt(&hwep->ep))
+                                if (_ep_set_halt(&hwep->ep, 1, false))
                                         dev_err(ci->dev,
-                                                "error: ep_set_halt\n");
+                                                "error: _ep_set_halt\n");
                                 spin_lock(&ci->lock);
                         }
                 }
@@ -1397,41 +1435,7 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
  */
 static int ep_set_halt(struct usb_ep *ep, int value)
 {
-        struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
-        int direction, retval = 0;
-        unsigned long flags;
-
-        if (ep == NULL || hwep->ep.desc == NULL)
-                return -EINVAL;
-
-        if (usb_endpoint_xfer_isoc(hwep->ep.desc))
-                return -EOPNOTSUPP;
-
-        spin_lock_irqsave(hwep->lock, flags);
-
-#ifndef STALL_IN
-        /* g_file_storage MS compliant but g_zero fails chapter 9 compliance */
-        if (value && hwep->type == USB_ENDPOINT_XFER_BULK && hwep->dir == TX &&
-            !list_empty(&hwep->qh.queue)) {
-                spin_unlock_irqrestore(hwep->lock, flags);
-                return -EAGAIN;
-        }
-#endif
-
-        direction = hwep->dir;
-        do {
-                retval |= hw_ep_set_halt(hwep->ci, hwep->num, hwep->dir, value);
-
-                if (!value)
-                        hwep->wedge = 0;
-
-                if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
-                        hwep->dir = (hwep->dir == TX) ? RX : TX;
-
-        } while (hwep->dir != direction);
-
-        spin_unlock_irqrestore(hwep->lock, flags);
-        return retval;
+        return _ep_set_halt(ep, value, true);
 }
 
 /**
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index b2a540b43f97..b9ddf0c1ffe5 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -112,7 +112,7 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
                                 cfgno, inum, asnum, ep->desc.bEndpointAddress);
                 ep->ss_ep_comp.bmAttributes = 16;
         } else if (usb_endpoint_xfer_isoc(&ep->desc) &&
-                        desc->bmAttributes > 2) {
+                        USB_SS_MULT(desc->bmAttributes) > 3) {
                 dev_warn(ddev, "Isoc endpoint has Mult of %d in "
                                 "config %d interface %d altsetting %d ep %d: "
                                 "setting to 3\n", desc->bmAttributes + 1,
@@ -121,7 +121,8 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
         }
 
         if (usb_endpoint_xfer_isoc(&ep->desc))
-                max_tx = (desc->bMaxBurst + 1) * (desc->bmAttributes + 1) *
+                max_tx = (desc->bMaxBurst + 1) *
+                        (USB_SS_MULT(desc->bmAttributes)) *
                         usb_endpoint_maxp(&ep->desc);
         else if (usb_endpoint_xfer_int(&ep->desc))
                 max_tx = usb_endpoint_maxp(&ep->desc) *
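Both hunks above replace raw uses of bmAttributes with USB_SS_MULT(), which masks the two Mult bits before adding one, so reserved high bits can no longer inflate the result. A small worked example (the macro body matches the kernel's ch9.h definition; the descriptor values are made up):

#include <stdio.h>

/* Mult lives in the low two bits of bmAttributes; the usable value is
 * bits + 1, which is what USB_SS_MULT() expands to in the kernel headers. */
#define USB_SS_MULT(p) (1 + ((p) & 0x3))

int main(void)
{
        unsigned int bMaxBurst = 15, bmAttributes = 2, maxp = 1024;

        /* The old formula used bmAttributes + 1 on the raw byte; the fix
         * masks to the Mult field first, so stray upper bits are ignored. */
        unsigned int max_tx = (bMaxBurst + 1) * USB_SS_MULT(bmAttributes) * maxp;

        printf("mult=%u max_tx=%u\n", USB_SS_MULT(bmAttributes), max_tx);
        /* prints: mult=3 max_tx=49152 */
        return 0;
}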
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index a5a1b7c45743..22e9606d8e08 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -514,8 +514,6 @@ static int dwc3_omap_probe(struct platform_device *pdev)
                 goto err1;
         }
 
-        dwc3_omap_enable_irqs(omap);
-
         ret = dwc3_omap_extcon_register(omap);
         if (ret < 0)
                 goto err2;
@@ -526,6 +524,8 @@ static int dwc3_omap_probe(struct platform_device *pdev)
                 goto err3;
         }
 
+        dwc3_omap_enable_irqs(omap);
+
         return 0;
 
 err3:
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 0c25704dcb6b..1e8bdf817811 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2665,8 +2665,6 @@ static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
         int i;
         irqreturn_t ret = IRQ_NONE;
 
-        spin_lock(&dwc->lock);
-
         for (i = 0; i < dwc->num_event_buffers; i++) {
                 irqreturn_t status;
 
@@ -2675,8 +2673,6 @@ static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
                         ret = status;
         }
 
-        spin_unlock(&dwc->lock);
-
         return ret;
 }
 
diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c
index 978435a51038..6399c106a3a5 100644
--- a/drivers/usb/gadget/epautoconf.c
+++ b/drivers/usb/gadget/epautoconf.c
@@ -186,6 +186,7 @@ void usb_ep_autoconfig_reset (struct usb_gadget *gadget)
 
         list_for_each_entry (ep, &gadget->ep_list, ep_list) {
                 ep->claimed = false;
+                ep->driver_data = NULL;
         }
         gadget->in_epnum = 0;
         gadget->out_epnum = 0;
diff --git a/drivers/usb/gadget/udc/amd5536udc.c b/drivers/usb/gadget/udc/amd5536udc.c
index fdacddb18c00..175ca93fe5e2 100644
--- a/drivers/usb/gadget/udc/amd5536udc.c
+++ b/drivers/usb/gadget/udc/amd5536udc.c
@@ -3138,8 +3138,8 @@ static void udc_pci_remove(struct pci_dev *pdev)
         writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
         if (dev->irq_registered)
                 free_irq(pdev->irq, dev);
-        if (dev->regs)
-                iounmap(dev->regs);
+        if (dev->virt_addr)
+                iounmap(dev->virt_addr);
         if (dev->mem_region)
                 release_mem_region(pci_resource_start(pdev, 0),
                                 pci_resource_len(pdev, 0));
@@ -3226,17 +3226,13 @@ static int udc_pci_probe(
 
         /* init */
         dev = kzalloc(sizeof(struct udc), GFP_KERNEL);
-        if (!dev) {
-                retval = -ENOMEM;
-                goto finished;
-        }
+        if (!dev)
+                return -ENOMEM;
 
         /* pci setup */
         if (pci_enable_device(pdev) < 0) {
-                kfree(dev);
-                dev = NULL;
                 retval = -ENODEV;
-                goto finished;
+                goto err_pcidev;
         }
         dev->active = 1;
 
@@ -3246,28 +3242,22 @@ static int udc_pci_probe(
 
         if (!request_mem_region(resource, len, name)) {
                 dev_dbg(&pdev->dev, "pci device used already\n");
-                kfree(dev);
-                dev = NULL;
                 retval = -EBUSY;
-                goto finished;
+                goto err_memreg;
         }
         dev->mem_region = 1;
 
         dev->virt_addr = ioremap_nocache(resource, len);
         if (dev->virt_addr == NULL) {
                 dev_dbg(&pdev->dev, "start address cannot be mapped\n");
-                kfree(dev);
-                dev = NULL;
                 retval = -EFAULT;
-                goto finished;
+                goto err_ioremap;
         }
 
         if (!pdev->irq) {
                 dev_err(&pdev->dev, "irq not set\n");
-                kfree(dev);
-                dev = NULL;
                 retval = -ENODEV;
-                goto finished;
+                goto err_irq;
         }
 
         spin_lock_init(&dev->lock);
@@ -3283,10 +3273,8 @@ static int udc_pci_probe(
 
         if (request_irq(pdev->irq, udc_irq, IRQF_SHARED, name, dev) != 0) {
                 dev_dbg(&pdev->dev, "request_irq(%d) fail\n", pdev->irq);
-                kfree(dev);
-                dev = NULL;
                 retval = -EBUSY;
-                goto finished;
+                goto err_irq;
         }
         dev->irq_registered = 1;
 
@@ -3314,8 +3302,17 @@ static int udc_pci_probe(
         return 0;
 
 finished:
-        if (dev)
-                udc_pci_remove(pdev);
+        udc_pci_remove(pdev);
+        return retval;
+
+err_irq:
+        iounmap(dev->virt_addr);
+err_ioremap:
+        release_mem_region(resource, len);
+err_memreg:
+        pci_disable_device(pdev);
+err_pcidev:
+        kfree(dev);
         return retval;
 }
 
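Taken together, the amd5536udc hunks convert ad-hoc "kfree(dev); dev = NULL;" cleanup at every failure site into a single unwind ladder. A minimal user-space sketch of that idiom, with invented resource names (resources are released in reverse order of acquisition, and each label undoes only what already succeeded):

#include <stdio.h>
#include <stdlib.h>

static void *acquire(const char *what) { printf("acquire %s\n", what); return malloc(1); }
static void release(const char *what, void *p) { printf("release %s\n", what); free(p); }

static int probe(void)
{
        void *a, *b, *c;
        int ret = -1;

        a = acquire("A");
        if (!a)
                return -1;
        b = acquire("B");
        if (!b)
                goto err_a;
        c = acquire("C");
        if (!c)
                goto err_b;

        return 0;        /* success: everything stays acquired */

err_b:
        release("B", b); /* undo step B, then fall through to undo A */
err_a:
        release("A", a);
        return ret;
}

int main(void) { return probe(); }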
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 3dfada8d6061..f0f2b066ac08 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -2002,6 +2002,17 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
                 ep->udc = udc;
                 INIT_LIST_HEAD(&ep->queue);
 
+                if (ep->index == 0) {
+                        ep->ep.caps.type_control = true;
+                } else {
+                        ep->ep.caps.type_iso = ep->can_isoc;
+                        ep->ep.caps.type_bulk = true;
+                        ep->ep.caps.type_int = true;
+                }
+
+                ep->ep.caps.dir_in = true;
+                ep->ep.caps.dir_out = true;
+
                 if (i)
                         list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
 
diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c
index 5c8f4effb62a..ccb9c213cc9f 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_core.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_core.c
@@ -324,8 +324,7 @@ static void bdc_mem_free(struct bdc *bdc)
                         bdc->scratchpad.buff, bdc->scratchpad.sp_dma);
 
         /* Destroy the dma pools */
-        if (bdc->bd_table_pool)
-                dma_pool_destroy(bdc->bd_table_pool);
+        dma_pool_destroy(bdc->bd_table_pool);
 
         /* Free the bdc_ep array */
         kfree(bdc->bdc_ep_array);
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index 1379ad40d864..27af0f008b57 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -1348,6 +1348,7 @@ static int transfer(struct dummy_hcd *dum_hcd, struct urb *urb,
 {
         struct dummy            *dum = dum_hcd->dum;
         struct dummy_request    *req;
+        int                     sent = 0;
 
 top:
         /* if there's no request queued, the device is NAKing; return */
@@ -1385,12 +1386,15 @@ top:
                 if (len == 0)
                         break;
 
-                /* use an extra pass for the final short packet */
-                if (len > ep->ep.maxpacket) {
-                        rescan = 1;
-                        len -= (len % ep->ep.maxpacket);
+                /* send multiple of maxpacket first, then remainder */
+                if (len >= ep->ep.maxpacket) {
+                        is_short = 0;
+                        if (len % ep->ep.maxpacket)
+                                rescan = 1;
+                        len -= len % ep->ep.maxpacket;
+                } else {
+                        is_short = 1;
                 }
-                is_short = (len % ep->ep.maxpacket) != 0;
 
                 len = dummy_perform_transfer(urb, req, len);
 
@@ -1399,6 +1403,7 @@ top:
                         req->req.status = len;
                 } else {
                         limit -= len;
+                        sent += len;
                         urb->actual_length += len;
                         req->req.actual += len;
                 }
@@ -1421,7 +1426,7 @@ top:
                                         *status = -EOVERFLOW;
                                 else
                                         *status = 0;
-                        } else if (!to_host) {
+                        } else {
                                 *status = 0;
                                 if (host_len > dev_len)
                                         req->req.status = -EOVERFLOW;
@@ -1429,15 +1434,24 @@ top:
                                 req->req.status = 0;
                         }
 
-                /* many requests terminate without a short packet */
+                /*
+                 * many requests terminate without a short packet.
+                 * send a zlp if demanded by flags.
+                 */
                 } else {
-                        if (req->req.length == req->req.actual
-                                        && !req->req.zero)
-                                req->req.status = 0;
-                        if (urb->transfer_buffer_length == urb->actual_length
-                                        && !(urb->transfer_flags
-                                                & URB_ZERO_PACKET))
-                                *status = 0;
+                        if (req->req.length == req->req.actual) {
+                                if (req->req.zero && to_host)
+                                        rescan = 1;
+                                else
+                                        req->req.status = 0;
+                        }
+                        if (urb->transfer_buffer_length == urb->actual_length) {
+                                if (urb->transfer_flags & URB_ZERO_PACKET &&
+                                    !to_host)
+                                        rescan = 1;
+                                else
+                                        *status = 0;
+                        }
                 }
 
         /* device side completion --> continuable */
@@ -1460,7 +1474,7 @@ top:
                 if (rescan)
                         goto top;
         }
-        return limit;
+        return sent;
 }
 
 static int periodic_bytes(struct dummy *dum, struct dummy_ep *ep)
@@ -1890,7 +1904,7 @@ restart:
         default:
 treat_control_like_bulk:
                 ep->last_io = jiffies;
-                total = transfer(dum_hcd, urb, ep, limit, &status);
+                total -= transfer(dum_hcd, urb, ep, limit, &status);
                 break;
         }
 
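The dummy_hcd hunks change the transfer loop to send whole max-packet multiples first and to report only the bytes actually sent. A stand-alone sketch of the new packetization rule (the values are arbitrary; an exact multiple is "not short" because a zero-length packet may still follow):

#include <stdio.h>

int main(void)
{
        unsigned int maxpacket = 512, len = 1300;
        int is_short, rescan;

        if (len >= maxpacket) {
                is_short = 0;
                rescan = (len % maxpacket) != 0;  /* remainder on a later pass */
                len -= len % maxpacket;
        } else {
                is_short = 1;
                rescan = 0;
        }
        printf("send %u bytes, is_short=%d, rescan=%d\n", len, is_short, rescan);
        /* prints: send 1024 bytes, is_short=0, rescan=1 */
        return 0;
}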
diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c
index 8aa2593c2c36..b9429bc42511 100644
--- a/drivers/usb/gadget/udc/gr_udc.c
+++ b/drivers/usb/gadget/udc/gr_udc.c
@@ -2117,8 +2117,7 @@ static int gr_remove(struct platform_device *pdev)
                 return -EBUSY;
 
         gr_dfs_delete(dev);
-        if (dev->desc_pool)
-                dma_pool_destroy(dev->desc_pool);
+        dma_pool_destroy(dev->desc_pool);
         platform_set_drvdata(pdev, NULL);
 
         gr_free_request(&dev->epi[0].ep, &dev->ep0reqi->req);
diff --git a/drivers/usb/gadget/udc/mv_u3d_core.c b/drivers/usb/gadget/udc/mv_u3d_core.c
index 4c489692745e..dafe74eb9ade 100644
--- a/drivers/usb/gadget/udc/mv_u3d_core.c
+++ b/drivers/usb/gadget/udc/mv_u3d_core.c
@@ -1767,8 +1767,7 @@ static int mv_u3d_remove(struct platform_device *dev)
         usb_del_gadget_udc(&u3d->gadget);
 
         /* free memory allocated in probe */
-        if (u3d->trb_pool)
-                dma_pool_destroy(u3d->trb_pool);
+        dma_pool_destroy(u3d->trb_pool);
 
         if (u3d->ep_context)
                 dma_free_coherent(&dev->dev, u3d->ep_context_size,
diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c
index 339af51df57d..81b6229c7805 100644
--- a/drivers/usb/gadget/udc/mv_udc_core.c
+++ b/drivers/usb/gadget/udc/mv_udc_core.c
@@ -2100,8 +2100,7 @@ static int mv_udc_remove(struct platform_device *pdev)
         }
 
         /* free memory allocated in probe */
-        if (udc->dtd_pool)
-                dma_pool_destroy(udc->dtd_pool);
+        dma_pool_destroy(udc->dtd_pool);
 
         if (udc->ep_dqh)
                 dma_free_coherent(&pdev->dev, udc->ep_dqh_size,
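The gr_udc, mv_u3d and mv_udc hunks all rely on dma_pool_destroy() being a no-op when passed NULL, so the callers' guards can go. A tiny sketch of that NULL-tolerant destructor convention (a user-space stand-in, not the DMA pool API; free() behaves the same way):

#include <stdlib.h>

struct pool { int dummy; };

static void pool_destroy(struct pool *p)
{
        if (!p)          /* tolerate NULL so every caller doesn't have to */
                return;
        free(p);
}

int main(void)
{
        struct pool *p = NULL;

        pool_destroy(p); /* safe even though the pool was never created */
        p = malloc(sizeof(*p));
        pool_destroy(p);
        return 0;
}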
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 9a8c936cd42c..41f841fa6c4d 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1498,10 +1498,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
          * use Event Data TRBs, and we don't chain in a link TRB on short
          * transfers, we're basically dividing by 1.
          *
-         * xHCI 1.0 specification indicates that the Average TRB Length should
-         * be set to 8 for control endpoints.
+         * xHCI 1.0 and 1.1 specification indicates that the Average TRB Length
+         * should be set to 8 for control endpoints.
          */
-        if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version == 0x100)
+        if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100)
                 ep_ctx->tx_info |= cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(8));
         else
                 ep_ctx->tx_info |=
@@ -1792,8 +1792,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
         int size;
         int i, j, num_ports;
 
-        if (timer_pending(&xhci->cmd_timer))
-                del_timer_sync(&xhci->cmd_timer);
+        del_timer_sync(&xhci->cmd_timer);
 
         /* Free the Event Ring Segment Table and the actual Event Ring */
         size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
@@ -2321,6 +2320,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 
         INIT_LIST_HEAD(&xhci->cmd_list);
 
+        /* init command timeout timer */
+        setup_timer(&xhci->cmd_timer, xhci_handle_command_timeout,
+                    (unsigned long)xhci);
+
         page_size = readl(&xhci->op_regs->page_size);
         xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                         "Supported page size register = 0x%x", page_size);
@@ -2505,10 +2508,6 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
                         "Wrote ERST address to ir_set 0.");
         xhci_print_ir_set(xhci, 0);
 
-        /* init command timeout timer */
-        setup_timer(&xhci->cmd_timer, xhci_handle_command_timeout,
-                    (unsigned long)xhci);
-
         /*
          * XXX: Might need to set the Interrupter Moderation Register to
          * something other than the default (~1ms minimum between interrupts).
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 5590eac2b22d..c79d33676672 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -180,51 +180,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
                                 "QUIRK: Resetting on resume");
 }
 
-/*
- * In some Intel xHCI controllers, in order to get D3 working,
- * through a vendor specific SSIC CONFIG register at offset 0x883c,
- * SSIC PORT need to be marked as "unused" before putting xHCI
- * into D3. After D3 exit, the SSIC port need to be marked as "used".
- * Without this change, xHCI might not enter D3 state.
- * Make sure PME works on some Intel xHCI controllers by writing 1 to clear
- * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4
- */
-static void xhci_pme_quirk(struct usb_hcd *hcd, bool suspend)
-{
-        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
-        struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
-        u32 val;
-        void __iomem *reg;
-
-        if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
-                 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
-
-                reg = (void __iomem *) xhci->cap_regs + PORT2_SSIC_CONFIG_REG2;
-
-                /* Notify SSIC that SSIC profile programming is not done */
-                val = readl(reg) & ~PROG_DONE;
-                writel(val, reg);
-
-                /* Mark SSIC port as unused(suspend) or used(resume) */
-                val = readl(reg);
-                if (suspend)
-                        val |= SSIC_PORT_UNUSED;
-                else
-                        val &= ~SSIC_PORT_UNUSED;
-                writel(val, reg);
-
-                /* Notify SSIC that SSIC profile programming is done */
-                val = readl(reg) | PROG_DONE;
-                writel(val, reg);
-                readl(reg);
-        }
-
-        reg = (void __iomem *) xhci->cap_regs + 0x80a4;
-        val = readl(reg);
-        writel(val | BIT(28), reg);
-        readl(reg);
-}
-
 #ifdef CONFIG_ACPI
 static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev)
 {
@@ -345,6 +300,51 @@ static void xhci_pci_remove(struct pci_dev *dev)
 }
 
 #ifdef CONFIG_PM
+/*
+ * In some Intel xHCI controllers, in order to get D3 working,
+ * through a vendor specific SSIC CONFIG register at offset 0x883c,
+ * SSIC PORT need to be marked as "unused" before putting xHCI
+ * into D3. After D3 exit, the SSIC port need to be marked as "used".
+ * Without this change, xHCI might not enter D3 state.
+ * Make sure PME works on some Intel xHCI controllers by writing 1 to clear
+ * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4
+ */
+static void xhci_pme_quirk(struct usb_hcd *hcd, bool suspend)
+{
+        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+        struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
+        u32 val;
+        void __iomem *reg;
+
+        if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+                 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
+
+                reg = (void __iomem *) xhci->cap_regs + PORT2_SSIC_CONFIG_REG2;
+
+                /* Notify SSIC that SSIC profile programming is not done */
+                val = readl(reg) & ~PROG_DONE;
+                writel(val, reg);
+
+                /* Mark SSIC port as unused(suspend) or used(resume) */
+                val = readl(reg);
+                if (suspend)
+                        val |= SSIC_PORT_UNUSED;
+                else
+                        val &= ~SSIC_PORT_UNUSED;
+                writel(val, reg);
+
+                /* Notify SSIC that SSIC profile programming is done */
+                val = readl(reg) | PROG_DONE;
+                writel(val, reg);
+                readl(reg);
+        }
+
+        reg = (void __iomem *) xhci->cap_regs + 0x80a4;
+        val = readl(reg);
+        writel(val | BIT(28), reg);
+        readl(reg);
+}
+
 static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
 {
         struct xhci_hcd *xhci = hcd_to_xhci(hcd);
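The xhci-pci hunks move xhci_pme_quirk() unchanged into the #ifdef CONFIG_PM block where its only callers live, so non-PM builds neither compile it nor warn about an unused static function. A compact sketch of that pattern (CONFIG_PM here is just a stand-in macro; try building with and without -DCONFIG_PM):

#include <stdio.h>

#ifdef CONFIG_PM
/* Fenced by the same ifdef as its callers: no unused-function warning
 * when the PM paths are configured out. */
static void pm_only_helper(void)
{
        puts("applying suspend/resume quirk");
}

static void suspend_path(void)
{
        pm_only_helper();
}
#endif

int main(void)
{
#ifdef CONFIG_PM
        suspend_path();
#endif
        return 0;
}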
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index a47a1e897086..43291f93afeb 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -302,6 +302,15 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
         ret = xhci_handshake(&xhci->op_regs->cmd_ring,
                         CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
         if (ret < 0) {
+                /* we are about to kill xhci, give it one more chance */
+                xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
+                              &xhci->op_regs->cmd_ring);
+                udelay(1000);
+                ret = xhci_handshake(&xhci->op_regs->cmd_ring,
+                                     CMD_RING_RUNNING, 0, 3 * 1000 * 1000);
+                if (ret == 0)
+                        return 0;
+
                 xhci_err(xhci, "Stopped the command ring failed, "
                                 "maybe the host is dead\n");
                 xhci->xhc_state |= XHCI_STATE_DYING;
@@ -3461,8 +3470,8 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
         if (start_cycle == 0)
                 field |= 0x1;
 
-        /* xHCI 1.0 6.4.1.2.1: Transfer Type field */
-        if (xhci->hci_version == 0x100) {
+        /* xHCI 1.0/1.1 6.4.1.2.1: Transfer Type field */
+        if (xhci->hci_version >= 0x100) {
                 if (urb->transfer_buffer_length > 0) {
                         if (setup->bRequestType & USB_DIR_IN)
                                 field |= TRB_TX_TYPE(TRB_DATA_IN);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 6b0f4a47e402..9957bd96d4bc 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -146,7 +146,8 @@ static int xhci_start(struct xhci_hcd *xhci)
                         "waited %u microseconds.\n",
                         XHCI_MAX_HALT_USEC);
         if (!ret)
-                xhci->xhc_state &= ~XHCI_STATE_HALTED;
+                xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING);
+
         return ret;
 }
 
@@ -654,15 +655,6 @@ int xhci_run(struct usb_hcd *hcd)
 }
 EXPORT_SYMBOL_GPL(xhci_run);
 
-static void xhci_only_stop_hcd(struct usb_hcd *hcd)
-{
-        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
-
-        spin_lock_irq(&xhci->lock);
-        xhci_halt(xhci);
-        spin_unlock_irq(&xhci->lock);
-}
-
 /*
  * Stop xHCI driver.
  *
@@ -677,12 +669,14 @@ void xhci_stop(struct usb_hcd *hcd)
         u32 temp;
         struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 
-        if (!usb_hcd_is_primary_hcd(hcd)) {
-                xhci_only_stop_hcd(xhci->shared_hcd);
+        if (xhci->xhc_state & XHCI_STATE_HALTED)
                 return;
-        }
 
+        mutex_lock(&xhci->mutex);
         spin_lock_irq(&xhci->lock);
+        xhci->xhc_state |= XHCI_STATE_HALTED;
+        xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
+
         /* Make sure the xHC is halted for a USB3 roothub
          * (xhci_stop() could be called as part of failed init).
          */
@@ -717,6 +711,7 @@ void xhci_stop(struct usb_hcd *hcd)
         xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                         "xhci_stop completed - status = %x",
                         readl(&xhci->op_regs->status));
+        mutex_unlock(&xhci->mutex);
 }
 
 /*
@@ -3793,6 +3788,9 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
 
         mutex_lock(&xhci->mutex);
 
+        if (xhci->xhc_state)    /* dying or halted */
+                goto out;
+
         if (!udev->slot_id) {
                 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
                                 "Bad Slot ID %d", udev->slot_id);
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 514a6cdaeff6..4a518ff12310 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1051,6 +1051,7 @@ void musb_start(struct musb *musb)
          *  (c) peripheral initiates, using SRP
          */
         if (musb->port_mode != MUSB_PORT_MODE_HOST &&
+            musb->xceiv->otg->state != OTG_STATE_A_WAIT_BCON &&
             (devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) {
                 musb->is_active = 1;
         } else {
@@ -2448,6 +2449,9 @@ static int musb_suspend(struct device *dev)
         struct musb     *musb = dev_to_musb(dev);
         unsigned long   flags;
 
+        musb_platform_disable(musb);
+        musb_generic_disable(musb);
+
         spin_lock_irqsave(&musb->lock, flags);
 
         if (is_peripheral_active(musb)) {
@@ -2501,6 +2505,9 @@ static int musb_resume(struct device *dev)
         pm_runtime_disable(dev);
         pm_runtime_set_active(dev);
         pm_runtime_enable(dev);
+
+        musb_start(musb);
+
         return 0;
 }
 
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
index d07cafb7d5f5..e499b862a946 100644
--- a/drivers/usb/musb/musb_cppi41.c
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -551,6 +551,9 @@ static int cppi41_dma_channel_abort(struct dma_channel *channel)
         } else {
                 cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);
 
+                /* delay to drain to cppi dma pipeline for isoch */
+                udelay(250);
+
                 csr = musb_readw(epio, MUSB_RXCSR);
                 csr &= ~(MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_DMAENAB);
                 musb_writew(epio, MUSB_RXCSR, csr);
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index a0cfead6150f..84512d1d5eee 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -225,8 +225,11 @@ static void dsps_musb_enable(struct musb *musb)
 
         dsps_writel(reg_base, wrp->epintr_set, epmask);
         dsps_writel(reg_base, wrp->coreintr_set, coremask);
-        /* start polling for ID change. */
-        mod_timer(&glue->timer, jiffies + msecs_to_jiffies(wrp->poll_timeout));
+        /* start polling for ID change in dual-role idle mode */
+        if (musb->xceiv->otg->state == OTG_STATE_B_IDLE &&
+            musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE)
+                mod_timer(&glue->timer, jiffies +
+                          msecs_to_jiffies(wrp->poll_timeout));
         dsps_musb_try_idle(musb, 0);
 }
 
diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c
index 39168fe9b406..b2685e75a683 100644
--- a/drivers/usb/musb/ux500.c
+++ b/drivers/usb/musb/ux500.c
@@ -379,6 +379,8 @@ static const struct of_device_id ux500_match[] = {
         {}
 };
 
+MODULE_DEVICE_TABLE(of, ux500_match);
+
 static struct platform_driver ux500_driver = {
         .probe  = ux500_probe,
         .remove = ux500_remove,
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 7d3beee2a587..173132416170 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -155,7 +155,7 @@ config USB_MSM_OTG
 config USB_QCOM_8X16_PHY
         tristate "Qualcomm APQ8016/MSM8916 on-chip USB PHY controller support"
         depends on ARCH_QCOM || COMPILE_TEST
-        depends on RESET_CONTROLLER
+        depends on RESET_CONTROLLER && EXTCON
         select USB_PHY
         select USB_ULPI_VIEWPORT
         help
diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c
index ec6ecd03269c..5320cb8642cb 100644
--- a/drivers/usb/phy/phy-generic.c
+++ b/drivers/usb/phy/phy-generic.c
@@ -232,7 +232,8 @@ int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop,
                 clk_rate = pdata->clk_rate;
                 needs_vcc = pdata->needs_vcc;
                 if (gpio_is_valid(pdata->gpio_reset)) {
-                        err = devm_gpio_request_one(dev, pdata->gpio_reset, 0,
+                        err = devm_gpio_request_one(dev, pdata->gpio_reset,
+                                                    GPIOF_ACTIVE_LOW,
                                                     dev_name(dev));
                         if (!err)
                                 nop->gpiod_reset =
diff --git a/drivers/usb/phy/phy-isp1301.c b/drivers/usb/phy/phy-isp1301.c
index 8a55b37d1a02..db68156568e6 100644
--- a/drivers/usb/phy/phy-isp1301.c
+++ b/drivers/usb/phy/phy-isp1301.c
@@ -31,6 +31,7 @@ static const struct i2c_device_id isp1301_id[] = {
         { "isp1301", 0 },
         { }
 };
+MODULE_DEVICE_TABLE(i2c, isp1301_id);
 
 static struct i2c_client *isp1301_i2c_client;
 
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 6d1941a2396a..6956c4f62216 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -278,6 +278,10 @@ static void option_instat_callback(struct urb *urb);
 #define ZTE_PRODUCT_MF622                       0x0001
 #define ZTE_PRODUCT_MF628                       0x0015
 #define ZTE_PRODUCT_MF626                       0x0031
+#define ZTE_PRODUCT_ZM8620_X                    0x0396
+#define ZTE_PRODUCT_ME3620_MBIM                 0x0426
+#define ZTE_PRODUCT_ME3620_X                    0x1432
+#define ZTE_PRODUCT_ME3620_L                    0x1433
 #define ZTE_PRODUCT_AC2726                      0xfff1
 #define ZTE_PRODUCT_MG880                       0xfffd
 #define ZTE_PRODUCT_CDMA_TECH                   0xfffe
@@ -544,6 +548,18 @@ static const struct option_blacklist_info zte_mc2716_z_blacklist = {
         .sendsetup = BIT(1) | BIT(2) | BIT(3),
 };
 
+static const struct option_blacklist_info zte_me3620_mbim_blacklist = {
+        .reserved = BIT(2) | BIT(3) | BIT(4),
+};
+
+static const struct option_blacklist_info zte_me3620_xl_blacklist = {
+        .reserved = BIT(3) | BIT(4) | BIT(5),
+};
+
+static const struct option_blacklist_info zte_zm8620_x_blacklist = {
+        .reserved = BIT(3) | BIT(4) | BIT(5),
+};
+
 static const struct option_blacklist_info huawei_cdc12_blacklist = {
         .reserved = BIT(1) | BIT(2),
 };
@@ -1591,6 +1607,14 @@ static const struct usb_device_id option_ids[] = {
                 .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist },
         { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff),
                 .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist },
+        { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_L),
+                .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist },
+        { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_MBIM),
+                .driver_info = (kernel_ulong_t)&zte_me3620_mbim_blacklist },
+        { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_X),
+                .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist },
+        { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ZM8620_X),
+                .driver_info = (kernel_ulong_t)&zte_zm8620_x_blacklist },
         { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
         { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
         { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
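The new ZTE entries above attach "reserved" interface masks built with BIT(). As a rough sketch of how such a mask partitions a device's interfaces (the skip/bind framing is a simplification of what the option driver does with reserved interfaces, which are typically non-serial functions it must leave alone):

#include <stdio.h>

#define BIT(n) (1u << (n))

int main(void)
{
        /* Same shape as zte_zm8620_x_blacklist in the diff above. */
        unsigned int reserved = BIT(3) | BIT(4) | BIT(5);

        for (unsigned int ifnum = 0; ifnum < 6; ifnum++)
                printf("interface %u: %s\n", ifnum,
                       (reserved & BIT(ifnum)) ? "skip" : "bind");
        return 0;
}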
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index 6c3734d2b45a..d3ea90bef84d 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -80,6 +80,8 @@ static int whiteheat_firmware_download(struct usb_serial *serial,
 static int  whiteheat_firmware_attach(struct usb_serial *serial);
 
 /* function prototypes for the Connect Tech WhiteHEAT serial converter */
+static int  whiteheat_probe(struct usb_serial *serial,
+                                const struct usb_device_id *id);
 static int  whiteheat_attach(struct usb_serial *serial);
 static void whiteheat_release(struct usb_serial *serial);
 static int  whiteheat_port_probe(struct usb_serial_port *port);
@@ -116,6 +118,7 @@ static struct usb_serial_driver whiteheat_device = {
         .description =          "Connect Tech - WhiteHEAT",
         .id_table =             id_table_std,
         .num_ports =            4,
+        .probe =                whiteheat_probe,
         .attach =               whiteheat_attach,
         .release =              whiteheat_release,
         .port_probe =           whiteheat_port_probe,
@@ -217,6 +220,34 @@ static int whiteheat_firmware_attach(struct usb_serial *serial)
 /*****************************************************************************
  * Connect Tech's White Heat serial driver functions
  *****************************************************************************/
+
+static int whiteheat_probe(struct usb_serial *serial,
+                                const struct usb_device_id *id)
+{
+        struct usb_host_interface *iface_desc;
+        struct usb_endpoint_descriptor *endpoint;
+        size_t num_bulk_in = 0;
+        size_t num_bulk_out = 0;
+        size_t min_num_bulk;
+        unsigned int i;
+
+        iface_desc = serial->interface->cur_altsetting;
+
+        for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
+                endpoint = &iface_desc->endpoint[i].desc;
+                if (usb_endpoint_is_bulk_in(endpoint))
+                        ++num_bulk_in;
+                if (usb_endpoint_is_bulk_out(endpoint))
+                        ++num_bulk_out;
+        }
+
+        min_num_bulk = COMMAND_PORT + 1;
+        if (num_bulk_in < min_num_bulk || num_bulk_out < min_num_bulk)
+                return -ENODEV;
+
+        return 0;
+}
+
 static int whiteheat_attach(struct usb_serial *serial)
 {
         struct usb_serial_port *command_port;
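whiteheat_probe() above counts bulk endpoints before attach() ever indexes the port array, refusing devices that advertise too few. A self-contained sketch of the same validation step (simplified descriptors; COMMAND_PORT is assumed to be 4 here, giving five required bulk pairs):

#include <stdio.h>

enum ep_type { BULK_IN, BULK_OUT, OTHER };

/* Refuse to bind when a broken or malicious device reports fewer bulk
 * endpoint pairs than the driver will later index. */
static int probe_ok(const enum ep_type *eps, unsigned int n, unsigned int need)
{
        unsigned int in = 0, out = 0;

        for (unsigned int i = 0; i < n; i++) {
                if (eps[i] == BULK_IN)
                        in++;
                else if (eps[i] == BULK_OUT)
                        out++;
        }
        return in >= need && out >= need;
}

int main(void)
{
        enum ep_type dev[] = { BULK_IN, BULK_OUT, BULK_IN, BULK_OUT,
                               BULK_IN, BULK_OUT, BULK_IN, BULK_OUT,
                               BULK_IN, BULK_OUT };

        printf("%s\n", probe_ok(dev, 10, 5) ? "bind" : "-ENODEV");
        return 0;
}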
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index c68edc16aa54..79e1aa1b0959 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -817,8 +817,9 @@ config ITCO_WDT
         tristate "Intel TCO Timer/Watchdog"
         depends on (X86 || IA64) && PCI
         select WATCHDOG_CORE
+        depends on I2C || I2C=n
         select LPC_ICH if !EXPERT
-        select I2C_I801 if !EXPERT
+        select I2C_I801 if !EXPERT && I2C
         ---help---
           Hardware driver for the intel TCO timer based watchdog devices.
           These drivers are included in the Intel 82801 I/O Controller
diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c
index 66c3e656a616..8a5ce5b5a0b6 100644
--- a/drivers/watchdog/bcm2835_wdt.c
+++ b/drivers/watchdog/bcm2835_wdt.c
@@ -36,6 +36,13 @@
 #define PM_RSTC_WRCFG_FULL_RESET        0x00000020
 #define PM_RSTC_RESET                   0x00000102
 
+/*
+ * The Raspberry Pi firmware uses the RSTS register to know which partition
+ * to boot from. The partition value is spread into bits 0, 2, 4, 6, 8, 10.
+ * Partition 63 is a special partition used by the firmware to indicate halt.
+ */
+#define PM_RSTS_RASPBERRYPI_HALT        0x555
+
 #define SECS_TO_WDOG_TICKS(x) ((x) << 16)
 #define WDOG_TICKS_TO_SECS(x) ((x) >> 16)
 
@@ -151,8 +158,7 @@ static void bcm2835_power_off(void)
          * hard reset.
          */
         val = readl_relaxed(wdt->base + PM_RSTS);
-        val &= PM_RSTC_WRCFG_CLR;
-        val |= PM_PASSWORD | PM_RSTS_HADWRH_SET;
+        val |= PM_PASSWORD | PM_RSTS_RASPBERRYPI_HALT;
         writel_relaxed(val, wdt->base + PM_RSTS);
 
         /* Continue with normal reset mechanism */
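The halt value 0x555 follows from the comment added above: the partition number's six bits are spread across the even bit positions, and partition 63 sets all of them. A quick check of that encoding:

#include <stdio.h>

/* Spread a 6-bit partition number across bits 0, 2, 4, 6, 8 and 10,
 * as the RSTS register expects. */
static unsigned int spread_partition(unsigned int part)
{
        unsigned int out = 0;

        for (int bit = 0; bit < 6; bit++)
                if (part & (1u << bit))
                        out |= 1u << (2 * bit);
        return out;
}

int main(void)
{
        printf("0x%x\n", spread_partition(63));  /* 0x555, the halt value */
        return 0;
}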
diff --git a/drivers/watchdog/gef_wdt.c b/drivers/watchdog/gef_wdt.c
index cc1bdfc2ff71..006e2348022c 100644
--- a/drivers/watchdog/gef_wdt.c
+++ b/drivers/watchdog/gef_wdt.c
@@ -303,6 +303,7 @@ static const struct of_device_id gef_wdt_ids[] = {
         },
         {},
 };
+MODULE_DEVICE_TABLE(of, gef_wdt_ids);
 
 static struct platform_driver gef_wdt_driver = {
         .driver = {
diff --git a/drivers/watchdog/mena21_wdt.c b/drivers/watchdog/mena21_wdt.c
index 69013007dc47..098fa9c34d6d 100644
--- a/drivers/watchdog/mena21_wdt.c
+++ b/drivers/watchdog/mena21_wdt.c
@@ -253,6 +253,7 @@ static const struct of_device_id a21_wdt_ids[] = {
         { .compatible = "men,a021-wdt" },
         { },
 };
+MODULE_DEVICE_TABLE(of, a21_wdt_ids);
 
 static struct platform_driver a21_wdt_driver = {
         .probe = a21_wdt_probe,
diff --git a/drivers/watchdog/moxart_wdt.c b/drivers/watchdog/moxart_wdt.c
index 2789da2c0515..60b0605bd7e6 100644
--- a/drivers/watchdog/moxart_wdt.c
+++ b/drivers/watchdog/moxart_wdt.c
@@ -168,6 +168,7 @@ static const struct of_device_id moxart_watchdog_match[] = {
         { .compatible = "moxa,moxart-watchdog" },
         { },
 };
+MODULE_DEVICE_TABLE(of, moxart_watchdog_match);
 
 static struct platform_driver moxart_wdt_driver = {
         .probe = moxart_wdt_probe,