path: root/drivers
Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/acpi_lpss.c2
-rw-r--r--drivers/acpi/acpica/psargs.c9
-rw-r--r--drivers/acpi/nfit.c105
-rw-r--r--drivers/acpi/pci_irq.c17
-rw-r--r--drivers/acpi/pci_link.c128
-rw-r--r--drivers/acpi/video_detect.c8
-rw-r--r--drivers/android/binder.c2
-rw-r--r--drivers/ata/ahci.c69
-rw-r--r--drivers/ata/ahci.h6
-rw-r--r--drivers/ata/ahci_brcmstb.c1
-rw-r--r--drivers/ata/ahci_xgene.c85
-rw-r--r--drivers/ata/libahci.c90
-rw-r--r--drivers/ata/libata-core.c1
-rw-r--r--drivers/ata/libata-scsi.c11
-rw-r--r--drivers/ata/libata-sff.c35
-rw-r--r--drivers/ata/pata_rb532_cf.c11
-rw-r--r--drivers/base/component.c49
-rw-r--r--drivers/base/platform-msi.c2
-rw-r--r--drivers/base/platform.c13
-rw-r--r--drivers/base/power/common.c2
-rw-r--r--drivers/base/power/domain.c54
-rw-r--r--drivers/base/property.c8
-rw-r--r--drivers/base/regmap/regmap-mmio.c16
-rw-r--r--drivers/block/floppy.c67
-rw-r--r--drivers/block/null_blk.c8
-rw-r--r--drivers/block/xen-blkfront.c74
-rw-r--r--drivers/bus/Kconfig2
-rw-r--r--drivers/bus/vexpress-config.c4
-rw-r--r--drivers/char/hpet.c2
-rw-r--r--drivers/char/hw_random/Kconfig1
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c3
-rw-r--r--drivers/char/random.c22
-rw-r--r--drivers/clk/Makefile2
-rw-r--r--drivers/clk/clk-gpio.c2
-rw-r--r--drivers/clk/clk-scpi.c2
-rw-r--r--drivers/clk/mvebu/dove-divider.c2
-rw-r--r--drivers/clk/qcom/gcc-apq8084.c1
-rw-r--r--drivers/clk/qcom/gcc-ipq806x.c1
-rw-r--r--drivers/clk/qcom/gcc-msm8660.c1
-rw-r--r--drivers/clk/qcom/gcc-msm8916.c1
-rw-r--r--drivers/clk/qcom/gcc-msm8960.c2
-rw-r--r--drivers/clk/qcom/gcc-msm8974.c1
-rw-r--r--drivers/clk/qcom/lcc-ipq806x.c1
-rw-r--r--drivers/clk/qcom/lcc-msm8960.c1
-rw-r--r--drivers/clk/qcom/mmcc-apq8084.c1
-rw-r--r--drivers/clk/qcom/mmcc-msm8960.c2
-rw-r--r--drivers/clk/qcom/mmcc-msm8974.c1
-rw-r--r--drivers/clk/rockchip/clk-rk3036.c26
-rw-r--r--drivers/clk/rockchip/clk-rk3368.c26
-rw-r--r--drivers/clk/tegra/clk-emc.c6
-rw-r--r--drivers/clk/tegra/clk-id.h1
-rw-r--r--drivers/clk/tegra/clk-pll.c50
-rw-r--r--drivers/clk/tegra/clk-tegra-periph.c5
-rw-r--r--drivers/clk/tegra/clk-tegra-super-gen4.c6
-rw-r--r--drivers/clk/tegra/clk-tegra210.c132
-rw-r--r--drivers/clk/ti/dpll3xxx.c3
-rw-r--r--drivers/clk/versatile/clk-icst.c3
-rw-r--r--drivers/clocksource/Kconfig12
-rw-r--r--drivers/clocksource/tcb_clksrc.c3
-rw-r--r--drivers/cpufreq/Kconfig1
-rw-r--r--drivers/cpufreq/Kconfig.arm4
-rw-r--r--drivers/cpufreq/cpufreq-dt.c15
-rw-r--r--drivers/cpufreq/cpufreq.c6
-rw-r--r--drivers/cpufreq/cpufreq_governor.c11
-rw-r--r--drivers/cpufreq/mt8173-cpufreq.c1
-rw-r--r--drivers/cpufreq/pxa2xx-cpufreq.c2
-rw-r--r--drivers/cpuidle/coupled.c1
-rw-r--r--drivers/cpuidle/cpuidle.c2
-rw-r--r--drivers/crypto/atmel-aes.c16
-rw-r--r--drivers/crypto/atmel-sha.c23
-rw-r--r--drivers/crypto/caam/ctrl.c4
-rw-r--r--drivers/crypto/marvell/cesa.c2
-rw-r--r--drivers/devfreq/tegra-devfreq.c2
-rw-r--r--drivers/dma/at_xdmac.c42
-rw-r--r--drivers/dma/dw/core.c15
-rw-r--r--drivers/dma/dw/pci.c4
-rw-r--r--drivers/dma/edma.c41
-rw-r--r--drivers/dma/fsldma.c2
-rw-r--r--drivers/dma/ioat/dma.c34
-rw-r--r--drivers/dma/pxa_dma.c8
-rw-r--r--drivers/edac/sb_edac.c2
-rw-r--r--drivers/firmware/efi/efivars.c35
-rw-r--r--drivers/firmware/efi/vars.c144
-rw-r--r--drivers/gpio/gpio-altera.c5
-rw-r--r--drivers/gpio/gpio-davinci.c7
-rw-r--r--drivers/gpio/gpio-rcar.c42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h91
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c160
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_dp.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ci_dpm.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.c157
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_dpm.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c103
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c56
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c53
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/iceland_smc.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/kv_dpm.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_dpm.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v2_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c13
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c2
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h32
-rw-r--r--drivers/gpu/drm/amd/include/cgs_common.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/amd_powerplay.c10
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c1
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c4
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c18
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c19
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c41
-rw-r--r--drivers/gpu/drm/ast/ast_main.c2
-rw-r--r--drivers/gpu/drm/drm_atomic.c44
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c26
-rw-r--r--drivers/gpu/drm/drm_crtc.c49
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c242
-rw-r--r--drivers/gpu/drm/drm_irq.c73
-rw-r--r--drivers/gpu/drm/etnaviv/common.xml.h59
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.c1
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.h2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_dump.c6
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.c36
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.h1
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c10
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c189
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.h9
-rw-r--r--drivers/gpu/drm/etnaviv/state_hi.xml.h26
-rw-r--r--drivers/gpu/drm/exynos/Kconfig2
-rw-r--r--drivers/gpu/drm/exynos/exynos5433_drm_decon.c8
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp_core.c55
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c7
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.c32
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_mic.c72
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c8
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c6
-rw-r--r--drivers/gpu/drm/i2c/adv7511.c48
-rw-r--r--drivers/gpu/drm/i2c/adv7511.h12
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c10
-rw-r--r--drivers/gpu/drm/i915/Kconfig1
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c28
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c4
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h5
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c3
-rw-r--r--drivers/gpu/drm/i915/i915_gem_userptr.c3
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h17
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c4
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c13
-rw-r--r--drivers/gpu/drm/i915/intel_csr.c2
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c115
-rw-r--r--drivers/gpu/drm/i915/intel_display.c119
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c32
-rw-r--r--drivers/gpu/drm/i915/intel_dp_link_training.c45
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h3
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.c13
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_panel_vbt.c21
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c14
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c2
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c1
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c14
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c21
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c2
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c153
-rw-r--r--drivers/gpu/drm/imx/ipuv3-crtc.c2
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_platform.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c40
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h6
-rw-r--r--drivers/gpu/drm/qxl/qxl_ioctl.c3
-rw-r--r--drivers/gpu/drm/qxl/qxl_prime.c2
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c20
-rw-r--r--drivers/gpu/drm/radeon/dce6_afmt.c12
-rw-r--r--drivers/gpu/drm/radeon/evergreen_hdmi.c10
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h5
-rw-r--r--drivers/gpu/drm/radeon/radeon.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c49
-rw-r--r--drivers/gpu/drm/radeon/radeon_audio.c20
-rw-r--r--drivers/gpu/drm/radeon/radeon_audio.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c25
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_sa.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c2
-rw-r--r--drivers/gpu/drm/radeon/vce_v1_0.c12
-rw-r--r--drivers/gpu/drm/rockchip/Makefile8
-rw-r--r--drivers/gpu/drm/rockchip/dw-mipi-dsi.c3
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c4
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fb.c24
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h11
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c9
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c16
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c16
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h13
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c65
-rw-r--r--drivers/gpu/drm/vc4/vc4_irq.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_render_cl.c22
-rw-r--r--drivers/gpu/drm/vc4/vc4_v3d.c51
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c2
-rw-r--r--drivers/gpu/host1x/bus.c2
-rw-r--r--drivers/gpu/host1x/dev.c7
-rw-r--r--drivers/gpu/host1x/dev.h1
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c31
-rw-r--r--drivers/hwmon/ads1015.c2
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c11
-rw-r--r--drivers/hwmon/fam15h_power.c10
-rw-r--r--drivers/hwmon/gpio-fan.c7
-rw-r--r--drivers/hwspinlock/hwspinlock_core.c4
-rw-r--r--drivers/i2c/busses/i2c-brcmstb.c3
-rw-r--r--drivers/i2c/busses/i2c-designware-core.c3
-rw-r--r--drivers/i2c/busses/i2c-i801.c2
-rw-r--r--drivers/i2c/busses/i2c-omap.c4
-rw-r--r--drivers/i2c/busses/i2c-piix4.c50
-rw-r--r--drivers/i2c/busses/i2c-uniphier-f.c2
-rw-r--r--drivers/i2c/busses/i2c-uniphier.c2
-rw-r--r--drivers/iio/accel/Kconfig1
-rw-r--r--drivers/iio/adc/Kconfig3
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c2
-rw-r--r--drivers/iio/dac/mcp4725.c1
-rw-r--r--drivers/iio/humidity/dht11.c8
-rw-r--r--drivers/iio/imu/adis_buffer.c2
-rw-r--r--drivers/iio/imu/inv_mpu6050/Kconfig2
-rw-r--r--drivers/iio/inkern.c2
-rw-r--r--drivers/iio/light/acpi-als.c6
-rw-r--r--drivers/iio/light/ltr501.c2
-rw-r--r--drivers/iio/pressure/mpl115.c2
-rw-r--r--drivers/iio/proximity/pulsedlight-lidar-lite-v2.c2
-rw-r--r--drivers/infiniband/core/device.c1
-rw-r--r--drivers/infiniband/core/sa_query.c2
-rw-r--r--drivers/infiniband/core/sysfs.c7
-rw-r--r--drivers/infiniband/core/ud_header.c7
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c9
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c63
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c7
-rw-r--r--drivers/infiniband/hw/mlx5/main.c8
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c32
-rw-r--r--drivers/infiniband/hw/mlx5/srq.c41
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma.h3
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c6
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.c16
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.h2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c25
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c24
-rw-r--r--drivers/input/joystick/xpad.c1
-rw-r--r--drivers/input/keyboard/adp5589-keys.c7
-rw-r--r--drivers/input/keyboard/cap11xx.c8
-rw-r--r--drivers/input/misc/Kconfig2
-rw-r--r--drivers/input/misc/sirfsoc-onkey.c2
-rw-r--r--drivers/input/mouse/vmmouse.c13
-rw-r--r--drivers/input/serio/serio.c2
-rw-r--r--drivers/input/touchscreen/colibri-vf50-ts.c1
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c18
-rw-r--r--drivers/iommu/amd_iommu.c6
-rw-r--r--drivers/iommu/amd_iommu_init.c63
-rw-r--r--drivers/iommu/dmar.c7
-rw-r--r--drivers/iommu/intel-iommu.c6
-rw-r--r--drivers/iommu/intel-svm.c37
-rw-r--r--drivers/iommu/intel_irq_remapping.c2
-rw-r--r--drivers/iommu/io-pgtable-arm.c1
-rw-r--r--drivers/irqchip/Kconfig1
-rw-r--r--drivers/irqchip/irq-atmel-aic-common.c2
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c41
-rw-r--r--drivers/irqchip/irq-gic.c13
-rw-r--r--drivers/irqchip/irq-mxs.c1
-rw-r--r--drivers/irqchip/irq-s3c24xx.c2
-rw-r--r--drivers/irqchip/irq-sun4i.c1
-rw-r--r--drivers/isdn/gigaset/ser-gigaset.c9
-rw-r--r--drivers/isdn/hardware/mISDN/netjet.c2
-rw-r--r--drivers/lightnvm/core.c25
-rw-r--r--drivers/lightnvm/rrpc.c4
-rw-r--r--drivers/lightnvm/rrpc.h5
-rw-r--r--drivers/mailbox/Kconfig1
-rw-r--r--drivers/mailbox/pcc.c8
-rw-r--r--drivers/md/bitmap.c4
-rw-r--r--drivers/md/dm.c2
-rw-r--r--drivers/md/faulty.c18
-rw-r--r--drivers/md/md-cluster.c6
-rw-r--r--drivers/md/raid1.c21
-rw-r--r--drivers/md/raid10.c20
-rw-r--r--drivers/md/raid5.c45
-rw-r--r--drivers/mfd/db8500-prcmu.c3
-rw-r--r--drivers/misc/cxl/pci.c2
-rw-r--r--drivers/misc/mei/main.c6
-rw-r--r--drivers/mmc/card/block.c7
-rw-r--r--drivers/mmc/host/mmc_spi.c15
-rw-r--r--drivers/mmc/host/omap_hsmmc.c2
-rw-r--r--drivers/mmc/host/pxamci.c37
-rw-r--r--drivers/mmc/host/sdhci-acpi.c30
-rw-r--r--drivers/mmc/host/sdhci-of-at91.c1
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c31
-rw-r--r--drivers/mmc/host/sdhci.c5
-rw-r--r--drivers/mmc/host/sdhci.h1
-rw-r--r--drivers/mmc/host/sh_mmcif.c2
-rw-r--r--drivers/mtd/ubi/upd.c2
-rw-r--r--drivers/net/bonding/bond_main.c40
-rw-r--r--drivers/net/can/spi/mcp251x.c2
-rw-r--r--drivers/net/can/usb/ems_usb.c14
-rw-r--r--drivers/net/can/usb/gs_usb.c24
-rw-r--r--drivers/net/dsa/mv88e6352.c1
-rw-r--r--drivers/net/dsa/mv88e6xxx.c32
-rw-r--r--drivers/net/ethernet/3com/3c59x.c2
-rw-r--r--drivers/net/ethernet/8390/pcnet_cs.c1
-rw-r--r--drivers/net/ethernet/agere/et131x.c2
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c1
-rw-r--r--drivers/net/ethernet/amd/am79c961a.c64
-rw-r--r--drivers/net/ethernet/amd/lance.c4
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c12
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.h1
-rw-r--r--drivers/net/ethernet/arc/emac_main.c74
-rw-r--r--drivers/net/ethernet/aurora/nb8800.c18
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig5
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h36
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c299
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c22
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c12
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c81
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h15
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c46
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c3
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c2
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c27
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_tx_rx.c2
-rw-r--r--drivers/net/ethernet/cadence/macb.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c8
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_droq.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic.h9
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_main.c6
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_reg.h2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c18
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c8
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/t3_hw.c34
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h1
-rw-r--r--drivers/net/ethernet/cisco/enic/enic.h2
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_dev.c19
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c34
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h1
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h9
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c20
-rw-r--r--drivers/net/ethernet/ethoc.c1
-rw-r--r--drivers/net/ethernet/ezchip/Kconfig1
-rw-r--r--drivers/net/ethernet/freescale/Makefile3
-rw-r--r--drivers/net/ethernet/freescale/fec.h44
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c133
-rw-r--r--drivers/net/ethernet/freescale/fman/fman.c4
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-fcc.c2
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c4
-rw-r--r--drivers/net/ethernet/fujitsu/fmvj18x_cs.c4
-rw-r--r--drivers/net/ethernet/hisilicon/Kconfig1
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.c17
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.h7
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c12
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c47
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h5
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c15
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c15
-rw-r--r--drivers/net/ethernet/hp/hp100.c18
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c5
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c62
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c15
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c4
-rw-r--r--drivers/net/ethernet/jme.c26
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c4
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c231
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/catas.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cq.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_clock.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_resources.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c70
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/pd.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_clock.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c126
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/port.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h152
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c169
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c154
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c50
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.h4
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c31
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169.c32
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c20
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c10
-rw-r--r--drivers/net/ethernet/rocker/rocker.c2
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c9
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c16
-rw-r--r--drivers/net/ethernet/synopsys/dwc_eth_qos.c45
-rw-r--r--drivers/net/ethernet/ti/cpsw-phy-sel.c12
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c12
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c105
-rw-r--r--drivers/net/fddi/defxx.c8
-rw-r--r--drivers/net/geneve.c56
-rw-r--r--drivers/net/hyperv/hyperv_net.h1
-rw-r--r--drivers/net/hyperv/netvsc.c33
-rw-r--r--drivers/net/hyperv/netvsc_drv.c70
-rw-r--r--drivers/net/irda/bfin_sir.h3
-rw-r--r--drivers/net/macvlan.c2
-rw-r--r--drivers/net/phy/Kconfig1
-rw-r--r--drivers/net/phy/bcm7xxx.c43
-rw-r--r--drivers/net/phy/dp83640.c17
-rw-r--r--drivers/net/phy/marvell.c15
-rw-r--r--drivers/net/phy/micrel.c28
-rw-r--r--drivers/net/phy/phy.c46
-rw-r--r--drivers/net/phy/phy_device.c2
-rw-r--r--drivers/net/phy/smsc.c54
-rw-r--r--drivers/net/ppp/ppp_generic.c11
-rw-r--r--drivers/net/ppp/pppoe.c2
-rw-r--r--drivers/net/ppp/pptp.c34
-rw-r--r--drivers/net/usb/Kconfig10
-rw-r--r--drivers/net/usb/Makefile2
-rw-r--r--drivers/net/usb/ax88172a.c1
-rw-r--r--drivers/net/usb/cdc_ncm.c26
-rw-r--r--drivers/net/usb/lan78xx.c132
-rw-r--r--drivers/net/usb/qmi_wwan.c8
-rw-r--r--drivers/net/usb/usbnet.c7
-rw-r--r--drivers/net/vmxnet3/vmxnet3_defs.h2
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c73
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h4
-rw-r--r--drivers/net/vrf.c13
-rw-r--r--drivers/net/vxlan.c89
-rw-r--r--drivers/net/wan/dscc4.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.c12
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c40
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Kconfig1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-7000.c23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-8000.c42
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c74
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c188
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c5
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2400pci.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2500pci.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2500usb.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c3
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00.h1
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00config.c5
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00mac.c5
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt61pci.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt73usb.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rc.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/regd.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/io.c8
-rw-r--r--drivers/net/wireless/ti/wlcore/io.h4
-rw-r--r--drivers/net/xen-netfront.c15
-rw-r--r--drivers/nvdimm/bus.c20
-rw-r--r--drivers/nvdimm/namespace_devs.c8
-rw-r--r--drivers/nvdimm/pfn_devs.c4
-rw-r--r--drivers/nvdimm/pmem.c2
-rw-r--r--drivers/nvme/host/Kconfig5
-rw-r--r--drivers/nvme/host/core.c112
-rw-r--r--drivers/nvme/host/lightnvm.c12
-rw-r--r--drivers/nvme/host/nvme.h12
-rw-r--r--drivers/nvme/host/pci.c160
-rw-r--r--drivers/nvmem/core.c6
-rw-r--r--drivers/nvmem/qfprom.c1
-rw-r--r--drivers/of/irq.c27
-rw-r--r--drivers/of/of_mdio.c39
-rw-r--r--drivers/pci/host/Kconfig1
-rw-r--r--drivers/pci/host/pci-keystone-dw.c11
-rw-r--r--drivers/pci/host/pci-layerscape.c21
-rw-r--r--drivers/pci/host/pcie-iproc.c29
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c4
-rw-r--r--drivers/pci/pci.c4
-rw-r--r--drivers/pci/pcie/aer/aerdrv.c4
-rw-r--r--drivers/pci/pcie/aer/aerdrv.h1
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c2
-rw-r--r--drivers/pci/xen-pcifront.c10
-rw-r--r--drivers/phy/Kconfig1
-rw-r--r--drivers/phy/phy-core.c16
-rw-r--r--drivers/phy/phy-twl4030-usb.c14
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.c2
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-mvebu.c9
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-abx500.c5
-rw-r--r--drivers/pinctrl/pxa/pinctrl-pxa2xx.c1
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c48
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c1
-rw-r--r--drivers/platform/x86/intel-hid.c3
-rw-r--r--drivers/platform/x86/intel_scu_ipcutil.c2
-rw-r--r--drivers/pnp/quirks.c1
-rw-r--r--drivers/power/bq27xxx_battery_i2c.c37
-rw-r--r--drivers/ptp/ptp_ixp46x.c7
-rw-r--r--drivers/s390/block/dasd.c1
-rw-r--r--drivers/s390/block/dasd_alias.c23
-rw-r--r--drivers/s390/block/dasd_diag.c9
-rw-r--r--drivers/s390/cio/chp.c21
-rw-r--r--drivers/s390/cio/chp.h2
-rw-r--r--drivers/s390/cio/chsc.c43
-rw-r--r--drivers/s390/crypto/zcrypt_error.h15
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype50.c9
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.c20
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c4
-rw-r--r--drivers/scsi/hisi_sas/Kconfig2
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c9
-rw-r--r--drivers/scsi/ipr.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c10
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c68
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h59
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.c16
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c76
-rw-r--r--drivers/scsi/scsi_devinfo.c2
-rw-r--r--drivers/scsi/scsi_lib.c1
-rw-r--r--drivers/scsi/scsi_sysfs.c6
-rw-r--r--drivers/scsi/sd.c13
-rw-r--r--drivers/scsi/sg.c2
-rw-r--r--drivers/scsi/sr.c4
-rw-r--r--drivers/scsi/storvsc_drv.c16
-rw-r--r--drivers/sh/pm_runtime.c2
-rw-r--r--drivers/spi/spi-atmel.c1
-rw-r--r--drivers/spi/spi-bcm2835aux.c4
-rw-r--r--drivers/spi/spi-fsl-espi.c4
-rw-r--r--drivers/spi/spi-imx.c25
-rw-r--r--drivers/spi/spi-loopback-test.c1
-rw-r--r--drivers/spi/spi-omap2-mcspi.c3
-rw-r--r--drivers/spi/spi-rockchip.c3
-rw-r--r--drivers/ssb/Kconfig1
-rw-r--r--drivers/ssb/main.c7
-rw-r--r--drivers/staging/iio/adc/Kconfig1
-rw-r--r--drivers/staging/iio/meter/ade7753.c12
-rw-r--r--drivers/staging/panel/panel.c34
-rw-r--r--drivers/staging/rdma/Kconfig6
-rw-r--r--drivers/staging/rdma/Makefile3
-rw-r--r--drivers/staging/rdma/amso1100/Kbuild6
-rw-r--r--drivers/staging/rdma/amso1100/Kconfig15
-rw-r--r--drivers/staging/rdma/amso1100/TODO4
-rw-r--r--drivers/staging/rdma/amso1100/c2.c1240
-rw-r--r--drivers/staging/rdma/amso1100/c2.h547
-rw-r--r--drivers/staging/rdma/amso1100/c2_ae.c327
-rw-r--r--drivers/staging/rdma/amso1100/c2_ae.h108
-rw-r--r--drivers/staging/rdma/amso1100/c2_alloc.c142
-rw-r--r--drivers/staging/rdma/amso1100/c2_cm.c458
-rw-r--r--drivers/staging/rdma/amso1100/c2_cq.c437
-rw-r--r--drivers/staging/rdma/amso1100/c2_intr.c219
-rw-r--r--drivers/staging/rdma/amso1100/c2_mm.c377
-rw-r--r--drivers/staging/rdma/amso1100/c2_mq.c175
-rw-r--r--drivers/staging/rdma/amso1100/c2_mq.h106
-rw-r--r--drivers/staging/rdma/amso1100/c2_pd.c90
-rw-r--r--drivers/staging/rdma/amso1100/c2_provider.c862
-rw-r--r--drivers/staging/rdma/amso1100/c2_provider.h182
-rw-r--r--drivers/staging/rdma/amso1100/c2_qp.c1024
-rw-r--r--drivers/staging/rdma/amso1100/c2_rnic.c652
-rw-r--r--drivers/staging/rdma/amso1100/c2_status.h158
-rw-r--r--drivers/staging/rdma/amso1100/c2_user.h82
-rw-r--r--drivers/staging/rdma/amso1100/c2_vq.c260
-rw-r--r--drivers/staging/rdma/amso1100/c2_vq.h63
-rw-r--r--drivers/staging/rdma/amso1100/c2_wr.h1520
-rw-r--r--drivers/staging/rdma/ehca/Kconfig10
-rw-r--r--drivers/staging/rdma/ehca/Makefile16
-rw-r--r--drivers/staging/rdma/ehca/TODO4
-rw-r--r--drivers/staging/rdma/ehca/ehca_av.c279
-rw-r--r--drivers/staging/rdma/ehca/ehca_classes.h481
-rw-r--r--drivers/staging/rdma/ehca/ehca_classes_pSeries.h208
-rw-r--r--drivers/staging/rdma/ehca/ehca_cq.c397
-rw-r--r--drivers/staging/rdma/ehca/ehca_eq.c189
-rw-r--r--drivers/staging/rdma/ehca/ehca_hca.c414
-rw-r--r--drivers/staging/rdma/ehca/ehca_irq.c870
-rw-r--r--drivers/staging/rdma/ehca/ehca_irq.h77
-rw-r--r--drivers/staging/rdma/ehca/ehca_iverbs.h202
-rw-r--r--drivers/staging/rdma/ehca/ehca_main.c1118
-rw-r--r--drivers/staging/rdma/ehca/ehca_mcast.c131
-rw-r--r--drivers/staging/rdma/ehca/ehca_mrmw.c2202
-rw-r--r--drivers/staging/rdma/ehca/ehca_mrmw.h127
-rw-r--r--drivers/staging/rdma/ehca/ehca_pd.c123
-rw-r--r--drivers/staging/rdma/ehca/ehca_qes.h260
-rw-r--r--drivers/staging/rdma/ehca/ehca_qp.c2256
-rw-r--r--drivers/staging/rdma/ehca/ehca_reqs.c953
-rw-r--r--drivers/staging/rdma/ehca/ehca_sqp.c245
-rw-r--r--drivers/staging/rdma/ehca/ehca_tools.h155
-rw-r--r--drivers/staging/rdma/ehca/ehca_uverbs.c309
-rw-r--r--drivers/staging/rdma/ehca/hcp_if.c949
-rw-r--r--drivers/staging/rdma/ehca/hcp_if.h265
-rw-r--r--drivers/staging/rdma/ehca/hcp_phyp.c82
-rw-r--r--drivers/staging/rdma/ehca/hcp_phyp.h90
-rw-r--r--drivers/staging/rdma/ehca/hipz_fns.h68
-rw-r--r--drivers/staging/rdma/ehca/hipz_fns_core.h100
-rw-r--r--drivers/staging/rdma/ehca/hipz_hw.h414
-rw-r--r--drivers/staging/rdma/ehca/ipz_pt_fn.c289
-rw-r--r--drivers/staging/rdma/ehca/ipz_pt_fn.h289
-rw-r--r--drivers/staging/rdma/ipath/Kconfig16
-rw-r--r--drivers/staging/rdma/ipath/Makefile37
-rw-r--r--drivers/staging/rdma/ipath/TODO5
-rw-r--r--drivers/staging/rdma/ipath/ipath_common.h851
-rw-r--r--drivers/staging/rdma/ipath/ipath_cq.c483
-rw-r--r--drivers/staging/rdma/ipath/ipath_debug.h99
-rw-r--r--drivers/staging/rdma/ipath/ipath_diag.c551
-rw-r--r--drivers/staging/rdma/ipath/ipath_dma.c179
-rw-r--r--drivers/staging/rdma/ipath/ipath_driver.c2784
-rw-r--r--drivers/staging/rdma/ipath/ipath_eeprom.c1183
-rw-r--r--drivers/staging/rdma/ipath/ipath_file_ops.c2619
-rw-r--r--drivers/staging/rdma/ipath/ipath_fs.c415
-rw-r--r--drivers/staging/rdma/ipath/ipath_iba6110.c1939
-rw-r--r--drivers/staging/rdma/ipath/ipath_init_chip.c1062
-rw-r--r--drivers/staging/rdma/ipath/ipath_intr.c1271
-rw-r--r--drivers/staging/rdma/ipath/ipath_kernel.h1374
-rw-r--r--drivers/staging/rdma/ipath/ipath_keys.c270
-rw-r--r--drivers/staging/rdma/ipath/ipath_mad.c1521
-rw-r--r--drivers/staging/rdma/ipath/ipath_mmap.c174
-rw-r--r--drivers/staging/rdma/ipath/ipath_mr.c370
-rw-r--r--drivers/staging/rdma/ipath/ipath_qp.c1079
-rw-r--r--drivers/staging/rdma/ipath/ipath_rc.c1969
-rw-r--r--drivers/staging/rdma/ipath/ipath_registers.h512
-rw-r--r--drivers/staging/rdma/ipath/ipath_ruc.c733
-rw-r--r--drivers/staging/rdma/ipath/ipath_sdma.c818
-rw-r--r--drivers/staging/rdma/ipath/ipath_srq.c380
-rw-r--r--drivers/staging/rdma/ipath/ipath_stats.c347
-rw-r--r--drivers/staging/rdma/ipath/ipath_sysfs.c1237
-rw-r--r--drivers/staging/rdma/ipath/ipath_uc.c547
-rw-r--r--drivers/staging/rdma/ipath/ipath_ud.c579
-rw-r--r--drivers/staging/rdma/ipath/ipath_user_pages.c228
-rw-r--r--drivers/staging/rdma/ipath/ipath_user_sdma.c874
-rw-r--r--drivers/staging/rdma/ipath/ipath_user_sdma.h52
-rw-r--r--drivers/staging/rdma/ipath/ipath_verbs.c2376
-rw-r--r--drivers/staging/rdma/ipath/ipath_verbs.h941
-rw-r--r--drivers/staging/rdma/ipath/ipath_verbs_mcast.c363
-rw-r--r--drivers/staging/rdma/ipath/ipath_wc_ppc64.c49
-rw-r--r--drivers/staging/rdma/ipath/ipath_wc_x86_64.c144
-rw-r--r--drivers/staging/speakup/Kconfig2
-rw-r--r--drivers/staging/speakup/main.c21
-rw-r--r--drivers/staging/speakup/selection.c5
-rw-r--r--drivers/staging/speakup/serialio.c13
-rw-r--r--drivers/target/target_core_configfs.c2
-rw-r--r--drivers/target/target_core_device.c44
-rw-r--r--drivers/target/target_core_file.c29
-rw-r--r--drivers/target/target_core_iblock.c58
-rw-r--r--drivers/target/target_core_internal.h1
-rw-r--r--drivers/target/target_core_tmr.c139
-rw-r--r--drivers/target/target_core_transport.c327
-rw-r--r--drivers/target/target_core_user.c2
-rw-r--r--drivers/thermal/Kconfig6
-rw-r--r--drivers/thermal/cpu_cooling.c14
-rw-r--r--drivers/thermal/of-thermal.c18
-rw-r--r--drivers/thermal/rcar_thermal.c45
-rw-r--r--drivers/thermal/spear_thermal.c6
-rw-r--r--drivers/tty/n_tty.c7
-rw-r--r--drivers/tty/pty.c21
-rw-r--r--drivers/tty/serial/8250/8250_pci.c50
-rw-r--r--drivers/tty/serial/omap-serial.c10
-rw-r--r--drivers/tty/tty_io.c47
-rw-r--r--drivers/tty/tty_mutex.c13
-rw-r--r--drivers/tty/vt/vt.c1
-rw-r--r--drivers/usb/chipidea/ci_hdrc_pci.c4
-rw-r--r--drivers/usb/chipidea/debug.c3
-rw-r--r--drivers/usb/chipidea/otg.c2
-rw-r--r--drivers/usb/class/cdc-acm.c14
-rw-r--r--drivers/usb/class/cdc-acm.h1
-rw-r--r--drivers/usb/dwc2/Kconfig1
-rw-r--r--drivers/usb/dwc2/core.c20
-rw-r--r--drivers/usb/dwc2/hcd_ddma.c23
-rw-r--r--drivers/usb/dwc2/hcd_intr.c8
-rw-r--r--drivers/usb/dwc2/platform.c8
-rw-r--r--drivers/usb/dwc3/core.h1
-rw-r--r--drivers/usb/dwc3/ep0.c5
-rw-r--r--drivers/usb/dwc3/gadget.c71
-rw-r--r--drivers/usb/gadget/legacy/inode.c7
-rw-r--r--drivers/usb/gadget/udc/fsl_qe_udc.c2
-rw-r--r--drivers/usb/gadget/udc/net2280.h15
-rw-r--r--drivers/usb/gadget/udc/udc-core.c3
-rw-r--r--drivers/usb/host/xhci-ext-caps.h4
-rw-r--r--drivers/usb/host/xhci-mtk-sch.c16
-rw-r--r--drivers/usb/host/xhci-mtk.c23
-rw-r--r--drivers/usb/host/xhci-pci.c56
-rw-r--r--drivers/usb/host/xhci-plat.c3
-rw-r--r--drivers/usb/host/xhci-ring.c10
-rw-r--r--drivers/usb/host/xhci.c4
-rw-r--r--drivers/usb/host/xhci.h1
-rw-r--r--drivers/usb/musb/musb_host.c8
-rw-r--r--drivers/usb/musb/ux500.c7
-rw-r--r--drivers/usb/phy/phy-msm-usb.c57
-rw-r--r--drivers/usb/phy/phy-mxs-usb.c2
-rw-r--r--drivers/usb/serial/Kconfig16
-rw-r--r--drivers/usb/serial/Makefile1
-rw-r--r--drivers/usb/serial/cp210x.c4
-rw-r--r--drivers/usb/serial/ftdi_sio.c1
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h1
-rw-r--r--drivers/usb/serial/mxu11x0.c986
-rw-r--r--drivers/usb/serial/option.c32
-rw-r--r--drivers/usb/serial/qcserial.c7
-rw-r--r--drivers/usb/serial/visor.c11
-rw-r--r--drivers/vfio/pci/vfio_pci.c9
-rw-r--r--drivers/vfio/platform/vfio_platform_common.c9
-rw-r--r--drivers/vfio/vfio.c24
-rw-r--r--drivers/vfio/vfio_iommu_type1.c6
-rw-r--r--drivers/vhost/vhost.c15
-rw-r--r--drivers/video/console/fbcon.c2
-rw-r--r--drivers/video/fbdev/da8xx-fb.c6
-rw-r--r--drivers/video/fbdev/exynos/s6e8ax0.c13
-rw-r--r--drivers/video/fbdev/imxfb.c15
-rw-r--r--drivers/video/fbdev/mmp/hw/mmp_ctrl.c3
-rw-r--r--drivers/video/fbdev/ocfb.c4
-rw-r--r--drivers/virtio/virtio_pci_common.c2
-rw-r--r--drivers/virtio/virtio_pci_modern.c2
-rw-r--r--drivers/watchdog/Kconfig17
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/max63xx_wdt.c2
-rw-r--r--drivers/watchdog/pcwd_usb.c3
-rw-r--r--drivers/watchdog/sp805_wdt.c5
-rw-r--r--drivers/watchdog/sun4v_wdt.c191
-rw-r--r--drivers/xen/tmem.c2
-rw-r--r--drivers/xen/xen-pciback/pciback_ops.c9
-rw-r--r--drivers/xen/xen-scsiback.c80
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c2
772 files changed, 8691 insertions, 58563 deletions
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index c570b1d9f094..0872d5fecb82 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -880,7 +880,7 @@ static int acpi_lpss_platform_notify(struct notifier_block *nb,
 		break;
 	case BUS_NOTIFY_DRIVER_NOT_BOUND:
 	case BUS_NOTIFY_UNBOUND_DRIVER:
-		pdev->dev.pm_domain = NULL;
+		dev_pm_domain_set(&pdev->dev, NULL);
 		break;
 	case BUS_NOTIFY_ADD_DEVICE:
 		dev_pm_domain_set(&pdev->dev, &acpi_lpss_pm_domain);
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index 305218539df2..d48cbed342c1 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -269,8 +269,7 @@ acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state,
 	 */
 	if (ACPI_SUCCESS(status) &&
 	    possible_method_call && (node->type == ACPI_TYPE_METHOD)) {
-		if (GET_CURRENT_ARG_TYPE(walk_state->arg_types) ==
-		    ARGP_SUPERNAME) {
+		if (walk_state->opcode == AML_UNLOAD_OP) {
 			/*
 			 * acpi_ps_get_next_namestring has increased the AML pointer,
 			 * so we need to restore the saved AML pointer for method call.
@@ -697,7 +696,7 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
  *
  * PARAMETERS:  walk_state          - Current state
  *              parser_state        - Current parser state object
- *              arg_type            - The parser argument type (ARGP_*)
+ *              arg_type            - The argument type (AML_*_ARG)
  *              return_arg          - Where the next arg is returned
  *
  * RETURN:      Status, and an op object containing the next argument.
@@ -817,9 +816,9 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
 			return_ACPI_STATUS(AE_NO_MEMORY);
 		}
 
-		/* super_name allows argument to be a method call */
+		/* To support super_name arg of Unload */
 
-		if (arg_type == ARGP_SUPERNAME) {
+		if (walk_state->opcode == AML_UNLOAD_OP) {
 			status =
 			    acpi_ps_get_next_namepath(walk_state,
 						      parser_state, arg,
diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
index ad6d8c6b777e..35947ac87644 100644
--- a/drivers/acpi/nfit.c
+++ b/drivers/acpi/nfit.c
@@ -469,37 +469,16 @@ static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
 	nfit_mem->bdw = NULL;
 }
 
-static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
+static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
 		struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
 {
 	u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
 	struct nfit_memdev *nfit_memdev;
 	struct nfit_flush *nfit_flush;
-	struct nfit_dcr *nfit_dcr;
 	struct nfit_bdw *nfit_bdw;
 	struct nfit_idt *nfit_idt;
 	u16 idt_idx, range_index;
 
-	list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
-		if (nfit_dcr->dcr->region_index != dcr)
-			continue;
-		nfit_mem->dcr = nfit_dcr->dcr;
-		break;
-	}
-
-	if (!nfit_mem->dcr) {
-		dev_dbg(acpi_desc->dev, "SPA %d missing:%s%s\n",
-				spa->range_index, __to_nfit_memdev(nfit_mem)
-				? "" : " MEMDEV", nfit_mem->dcr ? "" : " DCR");
-		return -ENODEV;
-	}
-
-	/*
-	 * We've found enough to create an nvdimm, optionally
-	 * find an associated BDW
-	 */
-	list_add(&nfit_mem->list, &acpi_desc->dimms);
-
 	list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
 		if (nfit_bdw->bdw->region_index != dcr)
 			continue;
@@ -508,12 +487,12 @@ static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
 	}
 
 	if (!nfit_mem->bdw)
-		return 0;
+		return;
 
 	nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);
 
 	if (!nfit_mem->spa_bdw)
-		return 0;
+		return;
 
 	range_index = nfit_mem->spa_bdw->range_index;
 	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
@@ -538,8 +517,6 @@ static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
 		}
 		break;
 	}
-
-	return 0;
 }
 
 static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
@@ -548,7 +525,6 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
 	struct nfit_mem *nfit_mem, *found;
 	struct nfit_memdev *nfit_memdev;
 	int type = nfit_spa_type(spa);
-	u16 dcr;
 
 	switch (type) {
 	case NFIT_SPA_DCR:
@@ -559,14 +535,18 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
 	}
 
 	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
-		int rc;
+		struct nfit_dcr *nfit_dcr;
+		u32 device_handle;
+		u16 dcr;
 
 		if (nfit_memdev->memdev->range_index != spa->range_index)
 			continue;
 		found = NULL;
 		dcr = nfit_memdev->memdev->region_index;
+		device_handle = nfit_memdev->memdev->device_handle;
 		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
-			if (__to_nfit_memdev(nfit_mem)->region_index == dcr) {
+			if (__to_nfit_memdev(nfit_mem)->device_handle
+					== device_handle) {
 				found = nfit_mem;
 				break;
 			}
@@ -579,6 +559,31 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
 			if (!nfit_mem)
 				return -ENOMEM;
 			INIT_LIST_HEAD(&nfit_mem->list);
+			list_add(&nfit_mem->list, &acpi_desc->dimms);
+		}
+
+		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
+			if (nfit_dcr->dcr->region_index != dcr)
+				continue;
+			/*
+			 * Record the control region for the dimm.  For
+			 * the ACPI 6.1 case, where there are separate
+			 * control regions for the pmem vs blk
+			 * interfaces, be sure to record the extended
+			 * blk details.
+			 */
+			if (!nfit_mem->dcr)
+				nfit_mem->dcr = nfit_dcr->dcr;
+			else if (nfit_mem->dcr->windows == 0
+					&& nfit_dcr->dcr->windows)
+				nfit_mem->dcr = nfit_dcr->dcr;
+			break;
+		}
+
+		if (dcr && !nfit_mem->dcr) {
+			dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
+					spa->range_index, dcr);
+			return -ENODEV;
 		}
 
 		if (type == NFIT_SPA_DCR) {
@@ -595,6 +600,7 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
 				nfit_mem->idt_dcr = nfit_idt->idt;
 				break;
 			}
+			nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
 		} else {
 			/*
 			 * A single dimm may belong to multiple SPA-PM
@@ -603,13 +609,6 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
 			 */
 			nfit_mem->memdev_pmem = nfit_memdev->memdev;
 		}
-
-		if (found)
-			continue;
-
-		rc = nfit_mem_add(acpi_desc, nfit_mem, spa);
-		if (rc)
-			return rc;
 	}
 
 	return 0;
@@ -1504,9 +1503,7 @@ static int ars_do_start(struct nvdimm_bus_descriptor *nd_desc,
 	case 1:
 		/* ARS unsupported, but we should never get here */
 		return 0;
-	case 2:
-		return -EINVAL;
-	case 3:
+	case 6:
 		/* ARS is in progress */
 		msleep(1000);
 		break;
@@ -1517,13 +1514,13 @@ static int ars_do_start(struct nvdimm_bus_descriptor *nd_desc,
 }
 
 static int ars_get_status(struct nvdimm_bus_descriptor *nd_desc,
-		struct nd_cmd_ars_status *cmd)
+		struct nd_cmd_ars_status *cmd, u32 size)
 {
 	int rc;
 
 	while (1) {
 		rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, cmd,
-				sizeof(*cmd));
+				size);
 		if (rc || cmd->status & 0xffff)
 			return -ENXIO;
 
@@ -1538,6 +1535,8 @@ static int ars_get_status(struct nvdimm_bus_descriptor *nd_desc,
 	case 2:
 		/* No ARS performed for the current boot */
 		return 0;
+	case 3:
+		/* TODO: error list overflow support */
 	default:
 		return -ENXIO;
 	}
@@ -1581,6 +1580,7 @@ static int acpi_nfit_find_poison(struct acpi_nfit_desc *acpi_desc,
 	struct nd_cmd_ars_start *ars_start = NULL;
 	struct nd_cmd_ars_cap *ars_cap = NULL;
 	u64 start, len, cur, remaining;
+	u32 ars_status_size;
 	int rc;
 
 	ars_cap = kzalloc(sizeof(*ars_cap), GFP_KERNEL);
@@ -1590,14 +1590,21 @@ static int acpi_nfit_find_poison(struct acpi_nfit_desc *acpi_desc,
 	start = ndr_desc->res->start;
 	len = ndr_desc->res->end - ndr_desc->res->start + 1;
 
+	/*
+	 * If ARS is unimplemented, unsupported, or if the 'Persistent Memory
+	 * Scrub' flag in extended status is not set, skip this but continue
+	 * initialization
+	 */
 	rc = ars_get_cap(nd_desc, ars_cap, start, len);
+	if (rc == -ENOTTY) {
+		dev_dbg(acpi_desc->dev,
+			"Address Range Scrub is not implemented, won't create an error list\n");
+		rc = 0;
+		goto out;
+	}
 	if (rc)
 		goto out;
 
-	/*
-	 * If ARS is unsupported, or if the 'Persistent Memory Scrub' flag in
-	 * extended status is not set, skip this but continue initialization
-	 */
 	if ((ars_cap->status & 0xffff) ||
 	    !(ars_cap->status >> 16 & ND_ARS_PERSISTENT)) {
 		dev_warn(acpi_desc->dev,
@@ -1610,14 +1617,14 @@ static int acpi_nfit_find_poison(struct acpi_nfit_desc *acpi_desc,
 	 * Check if a full-range ARS has been run. If so, use those results
 	 * without having to start a new ARS.
 	 */
-	ars_status = kzalloc(ars_cap->max_ars_out + sizeof(*ars_status),
-			GFP_KERNEL);
+	ars_status_size = ars_cap->max_ars_out;
+	ars_status = kzalloc(ars_status_size, GFP_KERNEL);
 	if (!ars_status) {
 		rc = -ENOMEM;
 		goto out;
 	}
 
-	rc = ars_get_status(nd_desc, ars_status);
+	rc = ars_get_status(nd_desc, ars_status, ars_status_size);
 	if (rc)
 		goto out;
 
@@ -1647,7 +1654,7 @@ static int acpi_nfit_find_poison(struct acpi_nfit_desc *acpi_desc,
 	if (rc)
 		goto out;
 
-	rc = ars_get_status(nd_desc, ars_status);
+	rc = ars_get_status(nd_desc, ars_status, ars_status_size);
 	if (rc)
 		goto out;
 
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index d30184c7f3bc..c8e169e46673 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -406,7 +406,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
 		return 0;
 	}
 
-	if (pci_has_managed_irq(dev))
+	if (dev->irq_managed && dev->irq > 0)
 		return 0;
 
 	entry = acpi_pci_irq_lookup(dev, pin);
@@ -451,7 +451,8 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
 		kfree(entry);
 		return rc;
 	}
-	pci_set_managed_irq(dev, rc);
+	dev->irq = rc;
+	dev->irq_managed = 1;
 
 	if (link)
 		snprintf(link_desc, sizeof(link_desc), " -> Link[%s]", link);
@@ -474,9 +475,17 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
 	u8 pin;
 
 	pin = dev->pin;
-	if (!pin || !pci_has_managed_irq(dev))
+	if (!pin || !dev->irq_managed || dev->irq <= 0)
 		return;
 
+	/* Keep IOAPIC pin configuration when suspending */
+	if (dev->dev.power.is_prepared)
+		return;
+#ifdef CONFIG_PM
+	if (dev->dev.power.runtime_status == RPM_SUSPENDING)
+		return;
+#endif
+
 	entry = acpi_pci_irq_lookup(dev, pin);
 	if (!entry)
 		return;
@@ -496,6 +505,6 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
 	dev_dbg(&dev->dev, "PCI INT %c disabled\n", pin_name(pin));
 	if (gsi >= 0) {
 		acpi_unregister_gsi(gsi);
-		pci_reset_managed_irq(dev);
+		dev->irq_managed = 0;
 	}
 }
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index fa2863567eed..ededa909df2f 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -4,7 +4,6 @@
  * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
  * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
  * Copyright (C) 2002       Dominik Brodowski <devel@brodo.de>
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
  *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  *
@@ -438,6 +437,7 @@ static int acpi_pci_link_set(struct acpi_pci_link *link, int irq)
  * enabled system.
  */
 
+#define ACPI_MAX_IRQS			256
 #define ACPI_MAX_ISA_IRQ		16
 
 #define PIRQ_PENALTY_PCI_AVAILABLE	(0)
@@ -447,7 +447,7 @@ static int acpi_pci_link_set(struct acpi_pci_link *link, int irq)
 #define PIRQ_PENALTY_ISA_USED		(16*16*16*16*16)
 #define PIRQ_PENALTY_ISA_ALWAYS		(16*16*16*16*16*16)
 
-static int acpi_irq_isa_penalty[ACPI_MAX_ISA_IRQ] = {
+static int acpi_irq_penalty[ACPI_MAX_IRQS] = {
 	PIRQ_PENALTY_ISA_ALWAYS,	/* IRQ0 timer */
 	PIRQ_PENALTY_ISA_ALWAYS,	/* IRQ1 keyboard */
 	PIRQ_PENALTY_ISA_ALWAYS,	/* IRQ2 cascade */
@@ -464,68 +464,9 @@ static int acpi_irq_isa_penalty[ACPI_MAX_ISA_IRQ] = {
 	PIRQ_PENALTY_ISA_USED,		/* IRQ13 fpe, sometimes */
 	PIRQ_PENALTY_ISA_USED,		/* IRQ14 ide0 */
 	PIRQ_PENALTY_ISA_USED,		/* IRQ15 ide1 */
+	/* >IRQ15 */
 };
 
-struct irq_penalty_info {
-	int irq;
-	int penalty;
-	struct list_head node;
-};
-
-static LIST_HEAD(acpi_irq_penalty_list);
-
-static int acpi_irq_get_penalty(int irq)
-{
-	struct irq_penalty_info *irq_info;
-
-	if (irq < ACPI_MAX_ISA_IRQ)
-		return acpi_irq_isa_penalty[irq];
-
-	list_for_each_entry(irq_info, &acpi_irq_penalty_list, node) {
-		if (irq_info->irq == irq)
-			return irq_info->penalty;
-	}
-
-	return 0;
-}
-
-static int acpi_irq_set_penalty(int irq, int new_penalty)
-{
-	struct irq_penalty_info *irq_info;
-
-	/* see if this is a ISA IRQ */
-	if (irq < ACPI_MAX_ISA_IRQ) {
-		acpi_irq_isa_penalty[irq] = new_penalty;
-		return 0;
-	}
-
-	/* next, try to locate from the dynamic list */
-	list_for_each_entry(irq_info, &acpi_irq_penalty_list, node) {
-		if (irq_info->irq == irq) {
-			irq_info->penalty = new_penalty;
-			return 0;
-		}
-	}
-
-	/* nope, let's allocate a slot for this IRQ */
-	irq_info = kzalloc(sizeof(*irq_info), GFP_KERNEL);
-	if (!irq_info)
-		return -ENOMEM;
-
-	irq_info->irq = irq;
-	irq_info->penalty = new_penalty;
-	list_add_tail(&irq_info->node, &acpi_irq_penalty_list);
-
-	return 0;
-}
-
-static void acpi_irq_add_penalty(int irq, int penalty)
-{
-	int curpen = acpi_irq_get_penalty(irq);
-
-	acpi_irq_set_penalty(irq, curpen + penalty);
-}
-
 int __init acpi_irq_penalty_init(void)
 {
 	struct acpi_pci_link *link;
@@ -546,16 +487,15 @@ int __init acpi_irq_penalty_init(void)
 				link->irq.possible_count;
 
 			for (i = 0; i < link->irq.possible_count; i++) {
-				if (link->irq.possible[i] < ACPI_MAX_ISA_IRQ) {
-					int irqpos = link->irq.possible[i];
-
-					acpi_irq_add_penalty(irqpos, penalty);
-				}
+				if (link->irq.possible[i] < ACPI_MAX_ISA_IRQ)
+					acpi_irq_penalty[link->irq.
+							 possible[i]] +=
+					    penalty;
 			}
 
 		} else if (link->irq.active) {
-			acpi_irq_add_penalty(link->irq.active,
-					     PIRQ_PENALTY_PCI_POSSIBLE);
+			acpi_irq_penalty[link->irq.active] +=
+			    PIRQ_PENALTY_PCI_POSSIBLE;
 		}
 	}
 
@@ -607,12 +547,12 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
 		 * the use of IRQs 9, 10, 11, and >15.
 		 */
 		for (i = (link->irq.possible_count - 1); i >= 0; i--) {
-			if (acpi_irq_get_penalty(irq) >
-			    acpi_irq_get_penalty(link->irq.possible[i]))
+			if (acpi_irq_penalty[irq] >
+			    acpi_irq_penalty[link->irq.possible[i]])
 				irq = link->irq.possible[i];
 		}
 	}
-	if (acpi_irq_get_penalty(irq) >= PIRQ_PENALTY_ISA_ALWAYS) {
+	if (acpi_irq_penalty[irq] >= PIRQ_PENALTY_ISA_ALWAYS) {
 		printk(KERN_ERR PREFIX "No IRQ available for %s [%s]. "
 			    "Try pci=noacpi or acpi=off\n",
 			    acpi_device_name(link->device),
@@ -628,8 +568,7 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
 			    acpi_device_bid(link->device));
 		return -ENODEV;
 	} else {
-		acpi_irq_add_penalty(link->irq.active, PIRQ_PENALTY_PCI_USING);
-
+		acpi_irq_penalty[link->irq.active] += PIRQ_PENALTY_PCI_USING;
 		printk(KERN_WARNING PREFIX "%s [%s] enabled at IRQ %d\n",
 		       acpi_device_name(link->device),
 		       acpi_device_bid(link->device), link->irq.active);
@@ -839,7 +778,7 @@ static void acpi_pci_link_remove(struct acpi_device *device)
839} 778}
840 779
841/* 780/*
842 * modify penalty from cmdline 781 * modify acpi_irq_penalty[] from cmdline
843 */ 782 */
844static int __init acpi_irq_penalty_update(char *str, int used) 783static int __init acpi_irq_penalty_update(char *str, int used)
845{ 784{
@@ -857,10 +796,13 @@ static int __init acpi_irq_penalty_update(char *str, int used)
857 if (irq < 0) 796 if (irq < 0)
858 continue; 797 continue;
859 798
799 if (irq >= ARRAY_SIZE(acpi_irq_penalty))
800 continue;
801
860 if (used) 802 if (used)
861 acpi_irq_add_penalty(irq, PIRQ_PENALTY_ISA_USED); 803 acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_USED;
862 else 804 else
863 acpi_irq_set_penalty(irq, PIRQ_PENALTY_PCI_AVAILABLE); 805 acpi_irq_penalty[irq] = PIRQ_PENALTY_PCI_AVAILABLE;
864 806
865 if (retval != 2) /* no next number */ 807 if (retval != 2) /* no next number */
866 break; 808 break;
@@ -877,15 +819,18 @@ static int __init acpi_irq_penalty_update(char *str, int used)
877 */ 819 */
878void acpi_penalize_isa_irq(int irq, int active) 820void acpi_penalize_isa_irq(int irq, int active)
879{ 821{
880 if (irq >= 0) 822 if (irq >= 0 && irq < ARRAY_SIZE(acpi_irq_penalty)) {
881 acpi_irq_add_penalty(irq, active ? 823 if (active)
882 PIRQ_PENALTY_ISA_USED : PIRQ_PENALTY_PCI_USING); 824 acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_USED;
825 else
826 acpi_irq_penalty[irq] += PIRQ_PENALTY_PCI_USING;
827 }
883} 828}
884 829
885bool acpi_isa_irq_available(int irq) 830bool acpi_isa_irq_available(int irq)
886{ 831{
887 return irq >= 0 && 832 return irq >= 0 && (irq >= ARRAY_SIZE(acpi_irq_penalty) ||
888 (acpi_irq_get_penalty(irq) < PIRQ_PENALTY_ISA_ALWAYS); 833 acpi_irq_penalty[irq] < PIRQ_PENALTY_ISA_ALWAYS);
889} 834}
890 835
891/* 836/*
@@ -895,18 +840,13 @@ bool acpi_isa_irq_available(int irq)
895 */ 840 */
896void acpi_penalize_sci_irq(int irq, int trigger, int polarity) 841void acpi_penalize_sci_irq(int irq, int trigger, int polarity)
897{ 842{
898 int penalty; 843 if (irq >= 0 && irq < ARRAY_SIZE(acpi_irq_penalty)) {
899 844 if (trigger != ACPI_MADT_TRIGGER_LEVEL ||
900 if (irq < 0) 845 polarity != ACPI_MADT_POLARITY_ACTIVE_LOW)
901 return; 846 acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_ALWAYS;
902 847 else
903 if (trigger != ACPI_MADT_TRIGGER_LEVEL || 848 acpi_irq_penalty[irq] += PIRQ_PENALTY_PCI_USING;
904 polarity != ACPI_MADT_POLARITY_ACTIVE_LOW) 849 }
905 penalty = PIRQ_PENALTY_ISA_ALWAYS;
906 else
907 penalty = PIRQ_PENALTY_PCI_USING;
908
909 acpi_irq_add_penalty(irq, penalty);
910} 850}
911 851
912/* 852/*
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 90e2d54be526..1316ddd92fac 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -135,14 +135,6 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
135 DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"), 135 DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"),
136 }, 136 },
137 }, 137 },
138 {
139 .callback = video_detect_force_vendor,
140 .ident = "Dell Inspiron 5737",
141 .matches = {
142 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
143 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5737"),
144 },
145 },
146 138
147 /* 139 /*
148 * These models have a working acpi_video backlight control, and using 140 * These models have a working acpi_video backlight control, and using
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index a39e85f9efa9..7d00b7a015ea 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2074,7 +2074,7 @@ static int binder_thread_write(struct binder_proc *proc,
2074 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 2074 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
2075 return -EFAULT; 2075 return -EFAULT;
2076 2076
2077 ptr += sizeof(void *); 2077 ptr += sizeof(cookie);
2078 list_for_each_entry(w, &proc->delivered_death, entry) { 2078 list_for_each_entry(w, &proc->delivered_death, entry) {
2079 struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work); 2079 struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
2080 2080
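The binder change advances the user-space buffer pointer by sizeof(cookie), a fixed-width binder_uintptr_t, instead of sizeof(void *), which varies with the kernel's word size. A small stand-alone illustration of why the two strides can differ; the typedef here is a local stand-in, not binder's header.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t binder_uintptr_t;   /* fixed-width handle, as in the binder ABI */

int main(void)
{
        /* On a 32-bit kernel sizeof(void *) is 4, but the cookie written by the
         * userspace protocol is always 8 bytes wide; advancing the parse
         * pointer by sizeof(void *) there would leave it misaligned. */
        printf("sizeof(void *)           = %zu\n", sizeof(void *));
        printf("sizeof(binder_uintptr_t) = %zu\n", sizeof(binder_uintptr_t));
        return 0;
}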
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 594fcabd22cd..146dc0b8ec61 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -264,6 +264,26 @@ static const struct pci_device_id ahci_pci_tbl[] = {
264 { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */ 264 { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
265 { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */ 265 { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
266 { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */ 266 { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
267 { PCI_VDEVICE(INTEL, 0x19b0), board_ahci }, /* DNV AHCI */
268 { PCI_VDEVICE(INTEL, 0x19b1), board_ahci }, /* DNV AHCI */
269 { PCI_VDEVICE(INTEL, 0x19b2), board_ahci }, /* DNV AHCI */
270 { PCI_VDEVICE(INTEL, 0x19b3), board_ahci }, /* DNV AHCI */
271 { PCI_VDEVICE(INTEL, 0x19b4), board_ahci }, /* DNV AHCI */
272 { PCI_VDEVICE(INTEL, 0x19b5), board_ahci }, /* DNV AHCI */
273 { PCI_VDEVICE(INTEL, 0x19b6), board_ahci }, /* DNV AHCI */
274 { PCI_VDEVICE(INTEL, 0x19b7), board_ahci }, /* DNV AHCI */
275 { PCI_VDEVICE(INTEL, 0x19bE), board_ahci }, /* DNV AHCI */
276 { PCI_VDEVICE(INTEL, 0x19bF), board_ahci }, /* DNV AHCI */
277 { PCI_VDEVICE(INTEL, 0x19c0), board_ahci }, /* DNV AHCI */
278 { PCI_VDEVICE(INTEL, 0x19c1), board_ahci }, /* DNV AHCI */
279 { PCI_VDEVICE(INTEL, 0x19c2), board_ahci }, /* DNV AHCI */
280 { PCI_VDEVICE(INTEL, 0x19c3), board_ahci }, /* DNV AHCI */
281 { PCI_VDEVICE(INTEL, 0x19c4), board_ahci }, /* DNV AHCI */
282 { PCI_VDEVICE(INTEL, 0x19c5), board_ahci }, /* DNV AHCI */
283 { PCI_VDEVICE(INTEL, 0x19c6), board_ahci }, /* DNV AHCI */
284 { PCI_VDEVICE(INTEL, 0x19c7), board_ahci }, /* DNV AHCI */
285 { PCI_VDEVICE(INTEL, 0x19cE), board_ahci }, /* DNV AHCI */
286 { PCI_VDEVICE(INTEL, 0x19cF), board_ahci }, /* DNV AHCI */
267 { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */ 287 { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
268 { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */ 288 { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */
269 { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */ 289 { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
@@ -347,15 +367,21 @@ static const struct pci_device_id ahci_pci_tbl[] = {
347 { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */ 367 { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
348 { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */ 368 { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
349 { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/ 369 { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
370 { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Lewisburg AHCI*/
350 { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/ 371 { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/
372 { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Lewisburg RAID*/
351 { PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/ 373 { PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
352 { PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/ 374 { PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/
353 { PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/ 375 { PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
354 { PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/ 376 { PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/
377 { PCI_VDEVICE(INTEL, 0xa1d2), board_ahci }, /* Lewisburg RAID*/
378 { PCI_VDEVICE(INTEL, 0xa1d6), board_ahci }, /* Lewisburg RAID*/
355 { PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/ 379 { PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/
356 { PCI_VDEVICE(INTEL, 0xa204), board_ahci }, /* Lewisburg RAID*/ 380 { PCI_VDEVICE(INTEL, 0xa204), board_ahci }, /* Lewisburg RAID*/
357 { PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/ 381 { PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
358 { PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/ 382 { PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/
383 { PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID*/
384 { PCI_VDEVICE(INTEL, 0xa256), board_ahci }, /* Lewisburg RAID*/
359 385
360 /* JMicron 360/1/3/5/6, match class to avoid IDE function */ 386 /* JMicron 360/1/3/5/6, match class to avoid IDE function */
361 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 387 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -1305,6 +1331,44 @@ static inline void ahci_gtf_filter_workaround(struct ata_host *host)
1305{} 1331{}
1306#endif 1332#endif
1307 1333
1334#ifdef CONFIG_ARM64
1335/*
1336 * Due to ERRATA#22536, ThunderX needs to handle HOST_IRQ_STAT differently.
1337 * The workaround is to make sure all pending IRQs are serviced before
1338 * leaving the handler.
1339 */
1340static irqreturn_t ahci_thunderx_irq_handler(int irq, void *dev_instance)
1341{
1342 struct ata_host *host = dev_instance;
1343 struct ahci_host_priv *hpriv;
1344 unsigned int rc = 0;
1345 void __iomem *mmio;
1346 u32 irq_stat, irq_masked;
1347 unsigned int handled = 1;
1348
1349 VPRINTK("ENTER\n");
1350 hpriv = host->private_data;
1351 mmio = hpriv->mmio;
1352 irq_stat = readl(mmio + HOST_IRQ_STAT);
1353 if (!irq_stat)
1354 return IRQ_NONE;
1355
1356 do {
1357 irq_masked = irq_stat & hpriv->port_map;
1358 spin_lock(&host->lock);
1359 rc = ahci_handle_port_intr(host, irq_masked);
1360 if (!rc)
1361 handled = 0;
1362 writel(irq_stat, mmio + HOST_IRQ_STAT);
1363 irq_stat = readl(mmio + HOST_IRQ_STAT);
1364 spin_unlock(&host->lock);
1365 } while (irq_stat);
1366 VPRINTK("EXIT\n");
1367
1368 return IRQ_RETVAL(handled);
1369}
1370#endif
1371
1308/* 1372/*
1309 * ahci_init_msix() - optionally enable per-port MSI-X otherwise defer 1373 * ahci_init_msix() - optionally enable per-port MSI-X otherwise defer
1310 * to single msi. 1374 * to single msi.
@@ -1540,6 +1604,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1540 if (ahci_broken_devslp(pdev)) 1604 if (ahci_broken_devslp(pdev))
1541 hpriv->flags |= AHCI_HFLAG_NO_DEVSLP; 1605 hpriv->flags |= AHCI_HFLAG_NO_DEVSLP;
1542 1606
1607#ifdef CONFIG_ARM64
1608 if (pdev->vendor == 0x177d && pdev->device == 0xa01c)
1609 hpriv->irq_handler = ahci_thunderx_irq_handler;
1610#endif
1611
1543 /* save initial config */ 1612 /* save initial config */
1544 ahci_pci_save_initial_config(pdev, hpriv); 1613 ahci_pci_save_initial_config(pdev, hpriv);
1545 1614
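The ThunderX handler added above keeps re-reading HOST_IRQ_STAT and re-dispatching until it reads back zero, so an edge that arrives while the previous status is being acknowledged is not lost (ERRATA#22536). A compact sketch of that drain loop against a simulated status register; the accessors are stand-ins for readl()/writel(), not real MMIO.

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_irq_stat = 0x5;     /* pretend ports 0 and 2 are pending */

static uint32_t read_stat(void)      { return fake_irq_stat; }
static void     ack_stat(uint32_t v) { fake_irq_stat &= ~v; }

static unsigned handle_ports(uint32_t masked)
{
        printf("servicing port mask 0x%x\n", masked);
        return masked != 0;
}

int main(void)
{
        uint32_t port_map = 0xf;         /* ports implemented on the host */
        uint32_t stat = read_stat();
        unsigned handled = 1;

        if (!stat)
                return 0;                /* nothing pending: IRQ_NONE */

        /* Keep acknowledging and re-reading until the status stays clear,
         * mirroring the do/while loop in ahci_thunderx_irq_handler(). */
        do {
                if (!handle_ports(stat & port_map))
                        handled = 0;
                ack_stat(stat);
                stat = read_stat();
        } while (stat);

        return handled ? 0 : 1;
}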
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index a4faa438889c..167ba7e3b92e 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -240,8 +240,7 @@ enum {
240 error-handling stage) */ 240 error-handling stage) */
241 AHCI_HFLAG_NO_DEVSLP = (1 << 17), /* no device sleep */ 241 AHCI_HFLAG_NO_DEVSLP = (1 << 17), /* no device sleep */
242 AHCI_HFLAG_NO_FBS = (1 << 18), /* no FBS */ 242 AHCI_HFLAG_NO_FBS = (1 << 18), /* no FBS */
243 AHCI_HFLAG_EDGE_IRQ = (1 << 19), /* HOST_IRQ_STAT behaves as 243
244 Edge Triggered */
245#ifdef CONFIG_PCI_MSI 244#ifdef CONFIG_PCI_MSI
246 AHCI_HFLAG_MULTI_MSI = (1 << 20), /* multiple PCI MSIs */ 245 AHCI_HFLAG_MULTI_MSI = (1 << 20), /* multiple PCI MSIs */
247 AHCI_HFLAG_MULTI_MSIX = (1 << 21), /* per-port MSI-X */ 246 AHCI_HFLAG_MULTI_MSIX = (1 << 21), /* per-port MSI-X */
@@ -250,6 +249,7 @@ enum {
250 AHCI_HFLAG_MULTI_MSI = 0, 249 AHCI_HFLAG_MULTI_MSI = 0,
251 AHCI_HFLAG_MULTI_MSIX = 0, 250 AHCI_HFLAG_MULTI_MSIX = 0,
252#endif 251#endif
252 AHCI_HFLAG_WAKE_BEFORE_STOP = (1 << 22), /* wake before DMA stop */
253 253
254 /* ap->flags bits */ 254 /* ap->flags bits */
255 255
@@ -360,6 +360,7 @@ struct ahci_host_priv {
360 * be overridden anytime before the host is activated. 360 * be overridden anytime before the host is activated.
361 */ 361 */
362 void (*start_engine)(struct ata_port *ap); 362 void (*start_engine)(struct ata_port *ap);
363 irqreturn_t (*irq_handler)(int irq, void *dev_instance);
363}; 364};
364 365
365#ifdef CONFIG_PCI_MSI 366#ifdef CONFIG_PCI_MSI
@@ -423,6 +424,7 @@ int ahci_reset_em(struct ata_host *host);
423void ahci_print_info(struct ata_host *host, const char *scc_s); 424void ahci_print_info(struct ata_host *host, const char *scc_s);
424int ahci_host_activate(struct ata_host *host, struct scsi_host_template *sht); 425int ahci_host_activate(struct ata_host *host, struct scsi_host_template *sht);
425void ahci_error_handler(struct ata_port *ap); 426void ahci_error_handler(struct ata_port *ap);
427u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked);
426 428
427static inline void __iomem *__ahci_port_base(struct ata_host *host, 429static inline void __iomem *__ahci_port_base(struct ata_host *host,
428 unsigned int port_no) 430 unsigned int port_no)
diff --git a/drivers/ata/ahci_brcmstb.c b/drivers/ata/ahci_brcmstb.c
index b36cae2fd04b..e87bcec0fd7c 100644
--- a/drivers/ata/ahci_brcmstb.c
+++ b/drivers/ata/ahci_brcmstb.c
@@ -317,6 +317,7 @@ static int brcm_ahci_probe(struct platform_device *pdev)
317 if (IS_ERR(hpriv)) 317 if (IS_ERR(hpriv))
318 return PTR_ERR(hpriv); 318 return PTR_ERR(hpriv);
319 hpriv->plat_data = priv; 319 hpriv->plat_data = priv;
320 hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP;
320 321
321 brcm_sata_alpm_init(hpriv); 322 brcm_sata_alpm_init(hpriv);
322 323
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
index e2c6d9e0c5ac..8e3f7faf00d3 100644
--- a/drivers/ata/ahci_xgene.c
+++ b/drivers/ata/ahci_xgene.c
@@ -548,6 +548,88 @@ softreset_retry:
548 return rc; 548 return rc;
549} 549}
550 550
551/**
552 * xgene_ahci_handle_broken_edge_irq - Handle the broken irq.
553 * @ata_host: Host that received the irq
554 * @irq_masked: HOST_IRQ_STAT value
555 *
556 * For hardware with a broken edge-trigger latch,
557 * the HOST_IRQ_STAT register misses an edge interrupt
558 * when HOST_IRQ_STAT is cleared in the same clock
559 * cycle in which the hardware reports an event in a
560 * port's PORT_IRQ_STAT register.
561 * As such, the algorithm below outlines the workaround.
562 *
563 * 1. Read HOST_IRQ_STAT register and save the state.
564 * 2. Clear the HOST_IRQ_STAT register.
565 * 3. Read back the HOST_IRQ_STAT register.
566 * 4. If HOST_IRQ_STAT now reads zero, traverse the
567 * PORT_IRQ_STAT register of each remaining port to
568 * check whether an interrupt triggered in the meantime;
569 * otherwise go to step 6.
570 * 5. If any of those PORT_IRQ_STAT registers is non-zero,
571 * update the HOST_IRQ_STAT state saved in step 1.
572 * 6. Handle port interrupts.
573 * 7. Exit
574 */
575static int xgene_ahci_handle_broken_edge_irq(struct ata_host *host,
576 u32 irq_masked)
577{
578 struct ahci_host_priv *hpriv = host->private_data;
579 void __iomem *port_mmio;
580 int i;
581
582 if (!readl(hpriv->mmio + HOST_IRQ_STAT)) {
583 for (i = 0; i < host->n_ports; i++) {
584 if (irq_masked & (1 << i))
585 continue;
586
587 port_mmio = ahci_port_base(host->ports[i]);
588 if (readl(port_mmio + PORT_IRQ_STAT))
589 irq_masked |= (1 << i);
590 }
591 }
592
593 return ahci_handle_port_intr(host, irq_masked);
594}
595
596static irqreturn_t xgene_ahci_irq_intr(int irq, void *dev_instance)
597{
598 struct ata_host *host = dev_instance;
599 struct ahci_host_priv *hpriv;
600 unsigned int rc = 0;
601 void __iomem *mmio;
602 u32 irq_stat, irq_masked;
603
604 VPRINTK("ENTER\n");
605
606 hpriv = host->private_data;
607 mmio = hpriv->mmio;
608
609 /* sigh. 0xffffffff is a valid return from h/w */
610 irq_stat = readl(mmio + HOST_IRQ_STAT);
611 if (!irq_stat)
612 return IRQ_NONE;
613
614 irq_masked = irq_stat & hpriv->port_map;
615
616 spin_lock(&host->lock);
617
618 /*
619 * HOST_IRQ_STAT behaves as edge triggered latch meaning that
620 * it should be cleared before all the port events are cleared.
621 */
622 writel(irq_stat, mmio + HOST_IRQ_STAT);
623
624 rc = xgene_ahci_handle_broken_edge_irq(host, irq_masked);
625
626 spin_unlock(&host->lock);
627
628 VPRINTK("EXIT\n");
629
630 return IRQ_RETVAL(rc);
631}
632
551static struct ata_port_operations xgene_ahci_v1_ops = { 633static struct ata_port_operations xgene_ahci_v1_ops = {
552 .inherits = &ahci_ops, 634 .inherits = &ahci_ops,
553 .host_stop = xgene_ahci_host_stop, 635 .host_stop = xgene_ahci_host_stop,
@@ -779,7 +861,8 @@ skip_clk_phy:
779 hpriv->flags = AHCI_HFLAG_NO_NCQ; 861 hpriv->flags = AHCI_HFLAG_NO_NCQ;
780 break; 862 break;
781 case XGENE_AHCI_V2: 863 case XGENE_AHCI_V2:
782 hpriv->flags |= AHCI_HFLAG_YES_FBS | AHCI_HFLAG_EDGE_IRQ; 864 hpriv->flags |= AHCI_HFLAG_YES_FBS;
865 hpriv->irq_handler = xgene_ahci_irq_intr;
783 break; 866 break;
784 default: 867 default:
785 break; 868 break;
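In words, the X-Gene workaround clears HOST_IRQ_STAT first (because the latch behaves edge-like), then, if the register reads back zero, polls the PORT_IRQ_STAT of every port that was not already flagged and folds any newly pending port into the dispatch mask. A user-space model of that recovery scan; the port state is simulated rather than read from hardware.

#include <stdint.h>
#include <stdio.h>

#define NPORTS 4

static uint32_t host_irq_stat;                            /* already cleared by the handler */
static uint32_t port_irq_stat[NPORTS] = { 0, 0x1, 0, 0 }; /* port 1 raced in after the clear */

static uint32_t recover_missed_edges(uint32_t irq_masked)
{
        int i;

        /* The re-scan is only needed when the cleared host status reads zero. */
        if (host_irq_stat)
                return irq_masked;

        for (i = 0; i < NPORTS; i++) {
                if (irq_masked & (1u << i))
                        continue;                /* already accounted for */
                if (port_irq_stat[i])
                        irq_masked |= 1u << i;   /* fold in the missed port */
        }
        return irq_masked;
}

int main(void)
{
        uint32_t masked = 0x1;                   /* only port 0 was latched */
        printf("dispatch mask 0x%x\n", recover_missed_edges(masked));
        return 0;
}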
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index d61740e78d6d..85ea5142a095 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -113,6 +113,7 @@ static ssize_t ahci_store_em_buffer(struct device *dev,
113 const char *buf, size_t size); 113 const char *buf, size_t size);
114static ssize_t ahci_show_em_supported(struct device *dev, 114static ssize_t ahci_show_em_supported(struct device *dev,
115 struct device_attribute *attr, char *buf); 115 struct device_attribute *attr, char *buf);
116static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance);
116 117
117static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL); 118static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
118static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL); 119static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL);
@@ -496,8 +497,8 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
496 } 497 }
497 } 498 }
498 499
499 /* fabricate port_map from cap.nr_ports */ 500 /* fabricate port_map from cap.nr_ports for < AHCI 1.3 */
500 if (!port_map) { 501 if (!port_map && vers < 0x10300) {
501 port_map = (1 << ahci_nr_ports(cap)) - 1; 502 port_map = (1 << ahci_nr_ports(cap)) - 1;
502 dev_warn(dev, "forcing PORTS_IMPL to 0x%x\n", port_map); 503 dev_warn(dev, "forcing PORTS_IMPL to 0x%x\n", port_map);
503 504
@@ -512,6 +513,9 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
512 513
513 if (!hpriv->start_engine) 514 if (!hpriv->start_engine)
514 hpriv->start_engine = ahci_start_engine; 515 hpriv->start_engine = ahci_start_engine;
516
517 if (!hpriv->irq_handler)
518 hpriv->irq_handler = ahci_single_level_irq_intr;
515} 519}
516EXPORT_SYMBOL_GPL(ahci_save_initial_config); 520EXPORT_SYMBOL_GPL(ahci_save_initial_config);
517 521
@@ -593,8 +597,22 @@ EXPORT_SYMBOL_GPL(ahci_start_engine);
593int ahci_stop_engine(struct ata_port *ap) 597int ahci_stop_engine(struct ata_port *ap)
594{ 598{
595 void __iomem *port_mmio = ahci_port_base(ap); 599 void __iomem *port_mmio = ahci_port_base(ap);
600 struct ahci_host_priv *hpriv = ap->host->private_data;
596 u32 tmp; 601 u32 tmp;
597 602
603 /*
604 * On some controllers, stopping a port's DMA engine while the port
605 * is in ALPM state (partial or slumber) results in failures on
606 * subsequent DMA engine starts. For those controllers, put the
607 * port back in active state before stopping its DMA engine.
608 */
609 if ((hpriv->flags & AHCI_HFLAG_WAKE_BEFORE_STOP) &&
610 (ap->link.lpm_policy > ATA_LPM_MAX_POWER) &&
611 ahci_set_lpm(&ap->link, ATA_LPM_MAX_POWER, ATA_LPM_WAKE_ONLY)) {
612 dev_err(ap->host->dev, "Failed to wake up port before engine stop\n");
613 return -EIO;
614 }
615
598 tmp = readl(port_mmio + PORT_CMD); 616 tmp = readl(port_mmio + PORT_CMD);
599 617
600 /* check if the HBA is idle */ 618 /* check if the HBA is idle */
@@ -689,6 +707,9 @@ static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
689 void __iomem *port_mmio = ahci_port_base(ap); 707 void __iomem *port_mmio = ahci_port_base(ap);
690 708
691 if (policy != ATA_LPM_MAX_POWER) { 709 if (policy != ATA_LPM_MAX_POWER) {
710 /* wakeup flag only applies to the max power policy */
711 hints &= ~ATA_LPM_WAKE_ONLY;
712
692 /* 713 /*
693 * Disable interrupts on Phy Ready. This keeps us from 714 * Disable interrupts on Phy Ready. This keeps us from
694 * getting woken up due to spurious phy ready 715 * getting woken up due to spurious phy ready
@@ -704,7 +725,8 @@ static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
704 u32 cmd = readl(port_mmio + PORT_CMD); 725 u32 cmd = readl(port_mmio + PORT_CMD);
705 726
706 if (policy == ATA_LPM_MAX_POWER || !(hints & ATA_LPM_HIPM)) { 727 if (policy == ATA_LPM_MAX_POWER || !(hints & ATA_LPM_HIPM)) {
707 cmd &= ~(PORT_CMD_ASP | PORT_CMD_ALPE); 728 if (!(hints & ATA_LPM_WAKE_ONLY))
729 cmd &= ~(PORT_CMD_ASP | PORT_CMD_ALPE);
708 cmd |= PORT_CMD_ICC_ACTIVE; 730 cmd |= PORT_CMD_ICC_ACTIVE;
709 731
710 writel(cmd, port_mmio + PORT_CMD); 732 writel(cmd, port_mmio + PORT_CMD);
@@ -712,6 +734,9 @@ static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
712 734
713 /* wait 10ms to be sure we've come out of LPM state */ 735 /* wait 10ms to be sure we've come out of LPM state */
714 ata_msleep(ap, 10); 736 ata_msleep(ap, 10);
737
738 if (hints & ATA_LPM_WAKE_ONLY)
739 return 0;
715 } else { 740 } else {
716 cmd |= PORT_CMD_ALPE; 741 cmd |= PORT_CMD_ALPE;
717 if (policy == ATA_LPM_MIN_POWER) 742 if (policy == ATA_LPM_MIN_POWER)
@@ -1143,8 +1168,7 @@ static void ahci_port_init(struct device *dev, struct ata_port *ap,
1143 1168
1144 /* mark esata ports */ 1169 /* mark esata ports */
1145 tmp = readl(port_mmio + PORT_CMD); 1170 tmp = readl(port_mmio + PORT_CMD);
1146 if ((tmp & PORT_CMD_HPCP) || 1171 if ((tmp & PORT_CMD_ESP) && (hpriv->cap & HOST_CAP_SXS))
1147 ((tmp & PORT_CMD_ESP) && (hpriv->cap & HOST_CAP_SXS)))
1148 ap->pflags |= ATA_PFLAG_EXTERNAL; 1172 ap->pflags |= ATA_PFLAG_EXTERNAL;
1149} 1173}
1150 1174
@@ -1825,7 +1849,7 @@ static irqreturn_t ahci_multi_irqs_intr_hard(int irq, void *dev_instance)
1825 return IRQ_HANDLED; 1849 return IRQ_HANDLED;
1826} 1850}
1827 1851
1828static u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked) 1852u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked)
1829{ 1853{
1830 unsigned int i, handled = 0; 1854 unsigned int i, handled = 0;
1831 1855
@@ -1851,43 +1875,7 @@ static u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked)
1851 1875
1852 return handled; 1876 return handled;
1853} 1877}
1854 1878EXPORT_SYMBOL_GPL(ahci_handle_port_intr);
1855static irqreturn_t ahci_single_edge_irq_intr(int irq, void *dev_instance)
1856{
1857 struct ata_host *host = dev_instance;
1858 struct ahci_host_priv *hpriv;
1859 unsigned int rc = 0;
1860 void __iomem *mmio;
1861 u32 irq_stat, irq_masked;
1862
1863 VPRINTK("ENTER\n");
1864
1865 hpriv = host->private_data;
1866 mmio = hpriv->mmio;
1867
1868 /* sigh. 0xffffffff is a valid return from h/w */
1869 irq_stat = readl(mmio + HOST_IRQ_STAT);
1870 if (!irq_stat)
1871 return IRQ_NONE;
1872
1873 irq_masked = irq_stat & hpriv->port_map;
1874
1875 spin_lock(&host->lock);
1876
1877 /*
1878 * HOST_IRQ_STAT behaves as edge triggered latch meaning that
1879 * it should be cleared before all the port events are cleared.
1880 */
1881 writel(irq_stat, mmio + HOST_IRQ_STAT);
1882
1883 rc = ahci_handle_port_intr(host, irq_masked);
1884
1885 spin_unlock(&host->lock);
1886
1887 VPRINTK("EXIT\n");
1888
1889 return IRQ_RETVAL(rc);
1890}
1891 1879
1892static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance) 1880static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance)
1893{ 1881{
@@ -2514,14 +2502,18 @@ int ahci_host_activate(struct ata_host *host, struct scsi_host_template *sht)
2514 int irq = hpriv->irq; 2502 int irq = hpriv->irq;
2515 int rc; 2503 int rc;
2516 2504
2517 if (hpriv->flags & (AHCI_HFLAG_MULTI_MSI | AHCI_HFLAG_MULTI_MSIX)) 2505 if (hpriv->flags & (AHCI_HFLAG_MULTI_MSI | AHCI_HFLAG_MULTI_MSIX)) {
2506 if (hpriv->irq_handler)
2507 dev_warn(host->dev, "both AHCI_HFLAG_MULTI_MSI flag set "
2508 "and custom irq handler implemented\n");
2509
2518 rc = ahci_host_activate_multi_irqs(host, sht); 2510 rc = ahci_host_activate_multi_irqs(host, sht);
2519 else if (hpriv->flags & AHCI_HFLAG_EDGE_IRQ) 2511 } else {
2520 rc = ata_host_activate(host, irq, ahci_single_edge_irq_intr, 2512 rc = ata_host_activate(host, irq, hpriv->irq_handler,
2521 IRQF_SHARED, sht);
2522 else
2523 rc = ata_host_activate(host, irq, ahci_single_level_irq_intr,
2524 IRQF_SHARED, sht); 2513 IRQF_SHARED, sht);
2514 }
2515
2516
2525 return rc; 2517 return rc;
2526} 2518}
2527EXPORT_SYMBOL_GPL(ahci_host_activate); 2519EXPORT_SYMBOL_GPL(ahci_host_activate);
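Two related mechanisms appear in the libahci hunks: a per-host irq_handler pointer that defaults to the level-triggered handler when the platform driver does not install its own, and AHCI_HFLAG_WAKE_BEFORE_STOP, which makes ahci_stop_engine() first bring the link back to ATA_LPM_MAX_POWER with the ATA_LPM_WAKE_ONLY hint so the DMA engine is never stopped while the port sleeps. A minimal sketch of the wake-before-stop decision with the helpers reduced to stubs; it is an illustration of the control flow, not the driver code.

#include <stdio.h>

enum lpm_policy { LPM_MAX_POWER, LPM_MED_POWER, LPM_MIN_POWER };

#define HFLAG_WAKE_BEFORE_STOP (1u << 22)

/* Stub: pretend waking the port back to the active state always succeeds. */
static int wake_port(void)
{
        printf("waking port to active state before stopping DMA\n");
        return 0;
}

static int stop_engine(unsigned int hflags, enum lpm_policy policy)
{
        /* Mirror of the new check in ahci_stop_engine(): if the controller
         * needs it and the link may be sleeping, wake it first and bail out
         * on failure rather than stopping the engine in a bad state. */
        if ((hflags & HFLAG_WAKE_BEFORE_STOP) &&
            policy > LPM_MAX_POWER && wake_port())
                return -1;

        printf("clearing PORT_CMD_START and waiting for engine idle\n");
        return 0;
}

int main(void)
{
        return stop_engine(HFLAG_WAKE_BEFORE_STOP, LPM_MIN_POWER);
}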
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index cbb74719d2c1..55e257c268dd 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4125,6 +4125,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4125 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA }, 4125 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
4126 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA }, 4126 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
4127 { " 2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA }, 4127 { " 2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA },
4128 { "VRFDFC22048UCHC-TE*", NULL, ATA_HORKAGE_NODMA },
4128 /* Odd clown on sil3726/4726 PMPs */ 4129 /* Odd clown on sil3726/4726 PMPs */
4129 { "Config Disk", NULL, ATA_HORKAGE_DISABLE }, 4130 { "Config Disk", NULL, ATA_HORKAGE_DISABLE },
4130 4131
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 7e959f90c020..e417e1a1d02c 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -675,19 +675,18 @@ static int ata_ioc32(struct ata_port *ap)
675int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev, 675int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev,
676 int cmd, void __user *arg) 676 int cmd, void __user *arg)
677{ 677{
678 int val = -EINVAL, rc = -EINVAL; 678 unsigned long val;
679 int rc = -EINVAL;
679 unsigned long flags; 680 unsigned long flags;
680 681
681 switch (cmd) { 682 switch (cmd) {
682 case ATA_IOC_GET_IO32: 683 case HDIO_GET_32BIT:
683 spin_lock_irqsave(ap->lock, flags); 684 spin_lock_irqsave(ap->lock, flags);
684 val = ata_ioc32(ap); 685 val = ata_ioc32(ap);
685 spin_unlock_irqrestore(ap->lock, flags); 686 spin_unlock_irqrestore(ap->lock, flags);
686 if (copy_to_user(arg, &val, 1)) 687 return put_user(val, (unsigned long __user *)arg);
687 return -EFAULT;
688 return 0;
689 688
690 case ATA_IOC_SET_IO32: 689 case HDIO_SET_32BIT:
691 val = (unsigned long) arg; 690 val = (unsigned long) arg;
692 rc = 0; 691 rc = 0;
693 spin_lock_irqsave(ap->lock, flags); 692 spin_lock_irqsave(ap->lock, flags);
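The libata-scsi change makes the SAS ioctl path honour the standard HDIO_GET_32BIT/HDIO_SET_32BIT numbers and return the value with put_user() as a full long, which is the size userspace (e.g. hdparm -c) actually reads, instead of copying a single byte of an int. A short user-space caller for reference; the device path is only an example.

#include <fcntl.h>
#include <linux/hdreg.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
        long io32 = 0;
        int fd = open("/dev/sda", O_RDONLY | O_NONBLOCK);   /* example device */

        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* The kernel must fill the whole long, hence put_user() on its side. */
        if (ioctl(fd, HDIO_GET_32BIT, &io32) == 0)
                printf("32-bit IO setting: %ld\n", io32);
        else
                perror("HDIO_GET_32BIT");
        close(fd);
        return 0;
}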
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index cdf6215a9a22..051b6158d1b7 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -997,12 +997,9 @@ static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
997static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) 997static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
998{ 998{
999 struct ata_port *ap = qc->ap; 999 struct ata_port *ap = qc->ap;
1000 unsigned long flags;
1001 1000
1002 if (ap->ops->error_handler) { 1001 if (ap->ops->error_handler) {
1003 if (in_wq) { 1002 if (in_wq) {
1004 spin_lock_irqsave(ap->lock, flags);
1005
1006 /* EH might have kicked in while host lock is 1003 /* EH might have kicked in while host lock is
1007 * released. 1004 * released.
1008 */ 1005 */
@@ -1014,8 +1011,6 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
1014 } else 1011 } else
1015 ata_port_freeze(ap); 1012 ata_port_freeze(ap);
1016 } 1013 }
1017
1018 spin_unlock_irqrestore(ap->lock, flags);
1019 } else { 1014 } else {
1020 if (likely(!(qc->err_mask & AC_ERR_HSM))) 1015 if (likely(!(qc->err_mask & AC_ERR_HSM)))
1021 ata_qc_complete(qc); 1016 ata_qc_complete(qc);
@@ -1024,10 +1019,8 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
1024 } 1019 }
1025 } else { 1020 } else {
1026 if (in_wq) { 1021 if (in_wq) {
1027 spin_lock_irqsave(ap->lock, flags);
1028 ata_sff_irq_on(ap); 1022 ata_sff_irq_on(ap);
1029 ata_qc_complete(qc); 1023 ata_qc_complete(qc);
1030 spin_unlock_irqrestore(ap->lock, flags);
1031 } else 1024 } else
1032 ata_qc_complete(qc); 1025 ata_qc_complete(qc);
1033 } 1026 }
@@ -1048,9 +1041,10 @@ int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
1048{ 1041{
1049 struct ata_link *link = qc->dev->link; 1042 struct ata_link *link = qc->dev->link;
1050 struct ata_eh_info *ehi = &link->eh_info; 1043 struct ata_eh_info *ehi = &link->eh_info;
1051 unsigned long flags = 0;
1052 int poll_next; 1044 int poll_next;
1053 1045
1046 lockdep_assert_held(ap->lock);
1047
1054 WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0); 1048 WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
1055 1049
1056 /* Make sure ata_sff_qc_issue() does not throw things 1050 /* Make sure ata_sff_qc_issue() does not throw things
@@ -1112,14 +1106,6 @@ fsm_start:
1112 } 1106 }
1113 } 1107 }
1114 1108
1115 /* Send the CDB (atapi) or the first data block (ata pio out).
1116 * During the state transition, interrupt handler shouldn't
1117 * be invoked before the data transfer is complete and
1118 * hsm_task_state is changed. Hence, the following locking.
1119 */
1120 if (in_wq)
1121 spin_lock_irqsave(ap->lock, flags);
1122
1123 if (qc->tf.protocol == ATA_PROT_PIO) { 1109 if (qc->tf.protocol == ATA_PROT_PIO) {
1124 /* PIO data out protocol. 1110 /* PIO data out protocol.
1125 * send first data block. 1111 * send first data block.
@@ -1135,9 +1121,6 @@ fsm_start:
1135 /* send CDB */ 1121 /* send CDB */
1136 atapi_send_cdb(ap, qc); 1122 atapi_send_cdb(ap, qc);
1137 1123
1138 if (in_wq)
1139 spin_unlock_irqrestore(ap->lock, flags);
1140
1141 /* if polling, ata_sff_pio_task() handles the rest. 1124 /* if polling, ata_sff_pio_task() handles the rest.
1142 * otherwise, interrupt handler takes over from here. 1125 * otherwise, interrupt handler takes over from here.
1143 */ 1126 */
@@ -1296,7 +1279,8 @@ fsm_start:
1296 break; 1279 break;
1297 default: 1280 default:
1298 poll_next = 0; 1281 poll_next = 0;
1299 BUG(); 1282 WARN(true, "ata%d: SFF host state machine in invalid state %d",
1283 ap->print_id, ap->hsm_task_state);
1300 } 1284 }
1301 1285
1302 return poll_next; 1286 return poll_next;
@@ -1361,12 +1345,14 @@ static void ata_sff_pio_task(struct work_struct *work)
1361 u8 status; 1345 u8 status;
1362 int poll_next; 1346 int poll_next;
1363 1347
1348 spin_lock_irq(ap->lock);
1349
1364 BUG_ON(ap->sff_pio_task_link == NULL); 1350 BUG_ON(ap->sff_pio_task_link == NULL);
1365 /* qc can be NULL if timeout occurred */ 1351 /* qc can be NULL if timeout occurred */
1366 qc = ata_qc_from_tag(ap, link->active_tag); 1352 qc = ata_qc_from_tag(ap, link->active_tag);
1367 if (!qc) { 1353 if (!qc) {
1368 ap->sff_pio_task_link = NULL; 1354 ap->sff_pio_task_link = NULL;
1369 return; 1355 goto out_unlock;
1370 } 1356 }
1371 1357
1372fsm_start: 1358fsm_start:
@@ -1381,11 +1367,14 @@ fsm_start:
1381 */ 1367 */
1382 status = ata_sff_busy_wait(ap, ATA_BUSY, 5); 1368 status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
1383 if (status & ATA_BUSY) { 1369 if (status & ATA_BUSY) {
1370 spin_unlock_irq(ap->lock);
1384 ata_msleep(ap, 2); 1371 ata_msleep(ap, 2);
1372 spin_lock_irq(ap->lock);
1373
1385 status = ata_sff_busy_wait(ap, ATA_BUSY, 10); 1374 status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
1386 if (status & ATA_BUSY) { 1375 if (status & ATA_BUSY) {
1387 ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE); 1376 ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
1388 return; 1377 goto out_unlock;
1389 } 1378 }
1390 } 1379 }
1391 1380
@@ -1402,6 +1391,8 @@ fsm_start:
1402 */ 1391 */
1403 if (poll_next) 1392 if (poll_next)
1404 goto fsm_start; 1393 goto fsm_start;
1394out_unlock:
1395 spin_unlock_irq(ap->lock);
1405} 1396}
1406 1397
1407/** 1398/**
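The SFF rework hoists ap->lock out of the state-machine helpers and into ata_sff_pio_task(), which now takes the lock once, drops it only around ata_msleep(), and asserts via lockdep_assert_held() that ata_sff_hsm_move() runs locked. A generic pthread sketch of that "hold the lock, release around the sleep, re-check after reacquiring" shape; the names are illustrative only and this is not libata code.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t port_lock = PTHREAD_MUTEX_INITIALIZER;
static int device_busy = 1;

static void pio_task(void)
{
        pthread_mutex_lock(&port_lock);

        if (device_busy) {
                /* Never sleep with the lock held: drop it, wait, retake it,
                 * then re-read the state that may have changed meanwhile. */
                pthread_mutex_unlock(&port_lock);
                usleep(2000);
                pthread_mutex_lock(&port_lock);

                device_busy = 0;    /* pretend the device became ready */
        }

        printf("running the state machine with the lock held\n");
        pthread_mutex_unlock(&port_lock);
}

int main(void)
{
        pio_task();
        return 0;
}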
diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
index 12fe0f3bb7e9..c8b6a780a290 100644
--- a/drivers/ata/pata_rb532_cf.c
+++ b/drivers/ata/pata_rb532_cf.c
@@ -32,6 +32,8 @@
32#include <linux/libata.h> 32#include <linux/libata.h>
33#include <scsi/scsi_host.h> 33#include <scsi/scsi_host.h>
34 34
35#include <asm/mach-rc32434/rb.h>
36
35#define DRV_NAME "pata-rb532-cf" 37#define DRV_NAME "pata-rb532-cf"
36#define DRV_VERSION "0.1.0" 38#define DRV_VERSION "0.1.0"
37#define DRV_DESC "PATA driver for RouterBOARD 532 Compact Flash" 39#define DRV_DESC "PATA driver for RouterBOARD 532 Compact Flash"
@@ -107,6 +109,7 @@ static int rb532_pata_driver_probe(struct platform_device *pdev)
107 int gpio; 109 int gpio;
108 struct resource *res; 110 struct resource *res;
109 struct ata_host *ah; 111 struct ata_host *ah;
112 struct cf_device *pdata;
110 struct rb532_cf_info *info; 113 struct rb532_cf_info *info;
111 int ret; 114 int ret;
112 115
@@ -122,7 +125,13 @@ static int rb532_pata_driver_probe(struct platform_device *pdev)
122 return -ENOENT; 125 return -ENOENT;
123 } 126 }
124 127
125 gpio = irq_to_gpio(irq); 128 pdata = dev_get_platdata(&pdev->dev);
129 if (!pdata) {
130 dev_err(&pdev->dev, "no platform data specified\n");
131 return -EINVAL;
132 }
133
134 gpio = pdata->gpio_pin;
126 if (gpio < 0) { 135 if (gpio < 0) {
127 dev_err(&pdev->dev, "no GPIO found for irq%d\n", irq); 136 dev_err(&pdev->dev, "no GPIO found for irq%d\n", irq);
128 return -ENOENT; 137 return -ENOENT;
diff --git a/drivers/base/component.c b/drivers/base/component.c
index 89f5cf68d80a..04a1582e80bb 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -206,6 +206,8 @@ static void component_match_release(struct device *master,
206 if (mc->release) 206 if (mc->release)
207 mc->release(master, mc->data); 207 mc->release(master, mc->data);
208 } 208 }
209
210 kfree(match->compare);
209} 211}
210 212
211static void devm_component_match_release(struct device *dev, void *res) 213static void devm_component_match_release(struct device *dev, void *res)
@@ -221,14 +223,14 @@ static int component_match_realloc(struct device *dev,
221 if (match->alloc == num) 223 if (match->alloc == num)
222 return 0; 224 return 0;
223 225
224 new = devm_kmalloc_array(dev, num, sizeof(*new), GFP_KERNEL); 226 new = kmalloc_array(num, sizeof(*new), GFP_KERNEL);
225 if (!new) 227 if (!new)
226 return -ENOMEM; 228 return -ENOMEM;
227 229
228 if (match->compare) { 230 if (match->compare) {
229 memcpy(new, match->compare, sizeof(*new) * 231 memcpy(new, match->compare, sizeof(*new) *
230 min(match->num, num)); 232 min(match->num, num));
231 devm_kfree(dev, match->compare); 233 kfree(match->compare);
232 } 234 }
233 match->compare = new; 235 match->compare = new;
234 match->alloc = num; 236 match->alloc = num;
@@ -283,6 +285,24 @@ void component_match_add_release(struct device *master,
283} 285}
284EXPORT_SYMBOL(component_match_add_release); 286EXPORT_SYMBOL(component_match_add_release);
285 287
288static void free_master(struct master *master)
289{
290 struct component_match *match = master->match;
291 int i;
292
293 list_del(&master->node);
294
295 if (match) {
296 for (i = 0; i < match->num; i++) {
297 struct component *c = match->compare[i].component;
298 if (c)
299 c->master = NULL;
300 }
301 }
302
303 kfree(master);
304}
305
286int component_master_add_with_match(struct device *dev, 306int component_master_add_with_match(struct device *dev,
287 const struct component_master_ops *ops, 307 const struct component_master_ops *ops,
288 struct component_match *match) 308 struct component_match *match)
@@ -309,11 +329,9 @@ int component_master_add_with_match(struct device *dev,
309 329
310 ret = try_to_bring_up_master(master, NULL); 330 ret = try_to_bring_up_master(master, NULL);
311 331
312 if (ret < 0) { 332 if (ret < 0)
313 /* Delete off the list if we weren't successful */ 333 free_master(master);
314 list_del(&master->node); 334
315 kfree(master);
316 }
317 mutex_unlock(&component_mutex); 335 mutex_unlock(&component_mutex);
318 336
319 return ret < 0 ? ret : 0; 337 return ret < 0 ? ret : 0;
@@ -324,25 +342,12 @@ void component_master_del(struct device *dev,
324 const struct component_master_ops *ops) 342 const struct component_master_ops *ops)
325{ 343{
326 struct master *master; 344 struct master *master;
327 int i;
328 345
329 mutex_lock(&component_mutex); 346 mutex_lock(&component_mutex);
330 master = __master_find(dev, ops); 347 master = __master_find(dev, ops);
331 if (master) { 348 if (master) {
332 struct component_match *match = master->match;
333
334 take_down_master(master); 349 take_down_master(master);
335 350 free_master(master);
336 list_del(&master->node);
337
338 if (match) {
339 for (i = 0; i < match->num; i++) {
340 struct component *c = match->compare[i].component;
341 if (c)
342 c->master = NULL;
343 }
344 }
345 kfree(master);
346 } 351 }
347 mutex_unlock(&component_mutex); 352 mutex_unlock(&component_mutex);
348} 353}
@@ -486,6 +491,8 @@ int component_add(struct device *dev, const struct component_ops *ops)
486 491
487 ret = try_to_bring_up_masters(component); 492 ret = try_to_bring_up_masters(component);
488 if (ret < 0) { 493 if (ret < 0) {
494 if (component->master)
495 remove_component(component->master, component);
489 list_del(&component->node); 496 list_del(&component->node);
490 497
491 kfree(component); 498 kfree(component);
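component.c now allocates match->compare with plain kmalloc_array() and frees it in component_match_release(), and the duplicated teardown in component_master_add_with_match() and component_master_del() is folded into one free_master() helper that unlinks the master, clears each matched component's back-pointer, and frees the struct. A simplified user-space model of the grow/teardown pattern; the structures are stand-ins, not the driver-core types.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct match {
        size_t num, alloc;
        void **compare;                 /* grown on demand, freed in teardown */
};

static int match_realloc(struct match *m, size_t num)
{
        void **new;

        if (m->alloc == num)
                return 0;
        new = calloc(num, sizeof(*new));
        if (!new)
                return -1;
        if (m->compare) {
                memcpy(new, m->compare,
                       sizeof(*new) * (m->num < num ? m->num : num));
                free(m->compare);       /* old array released here, not leaked */
        }
        m->compare = new;
        m->alloc = num;
        return 0;
}

static void match_release(struct match *m)
{
        free(m->compare);               /* single teardown path, as in free_master() */
        m->compare = NULL;
}

int main(void)
{
        struct match m = { 0 };

        match_realloc(&m, 4);
        m.num = 2;
        match_realloc(&m, 8);
        printf("alloc=%zu num=%zu\n", m.alloc, m.num);
        match_release(&m);
        return 0;
}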
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
index 47c43386786b..279e53989374 100644
--- a/drivers/base/platform-msi.c
+++ b/drivers/base/platform-msi.c
@@ -284,6 +284,7 @@ out_free_priv_data:
284 284
285 return err; 285 return err;
286} 286}
287EXPORT_SYMBOL_GPL(platform_msi_domain_alloc_irqs);
287 288
288/** 289/**
289 * platform_msi_domain_free_irqs - Free MSI interrupts for @dev 290 * platform_msi_domain_free_irqs - Free MSI interrupts for @dev
@@ -301,6 +302,7 @@ void platform_msi_domain_free_irqs(struct device *dev)
301 msi_domain_free_irqs(dev->msi_domain, dev); 302 msi_domain_free_irqs(dev->msi_domain, dev);
302 platform_msi_free_descs(dev, 0, MAX_DEV_MSIS); 303 platform_msi_free_descs(dev, 0, MAX_DEV_MSIS);
303} 304}
305EXPORT_SYMBOL_GPL(platform_msi_domain_free_irqs);
304 306
305/** 307/**
306 * platform_msi_get_host_data - Query the private data associated with 308 * platform_msi_get_host_data - Query the private data associated with
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 73d6e5d39e33..f437afa17f2b 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -558,10 +558,15 @@ static int platform_drv_probe(struct device *_dev)
558 return ret; 558 return ret;
559 559
560 ret = dev_pm_domain_attach(_dev, true); 560 ret = dev_pm_domain_attach(_dev, true);
561 if (ret != -EPROBE_DEFER && drv->probe) { 561 if (ret != -EPROBE_DEFER) {
562 ret = drv->probe(dev); 562 if (drv->probe) {
563 if (ret) 563 ret = drv->probe(dev);
564 dev_pm_domain_detach(_dev, true); 564 if (ret)
565 dev_pm_domain_detach(_dev, true);
566 } else {
567 /* don't fail if just dev_pm_domain_attach failed */
568 ret = 0;
569 }
565 } 570 }
566 571
567 if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) { 572 if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
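The platform_drv_probe() change separates the outcomes of dev_pm_domain_attach(): on -EPROBE_DEFER the probe is postponed as before, any other attach failure is ignored when the driver has no probe callback, and the domain is detached again only if the driver's own probe fails. A condensed sketch of that control flow with stub callbacks; the error numbers are the kernel's, the helpers are not.

#include <errno.h>
#include <stdio.h>

#define EPROBE_DEFER 517   /* kernel-internal errno, defined here only for the sketch */

static int attach_pm_domain(void)  { return 0; }        /* stub: attach succeeds */
static void detach_pm_domain(void) { printf("detaching PM domain\n"); }
static int driver_probe(void)      { return -ENODEV; }  /* stub: probe fails */

static int platform_probe(int have_probe_cb)
{
        int ret = attach_pm_domain();

        if (ret != -EPROBE_DEFER) {
                if (have_probe_cb) {
                        ret = driver_probe();
                        if (ret)
                                detach_pm_domain();     /* undo attach when probe fails */
                } else {
                        ret = 0;  /* an attach failure alone must not fail the probe */
                }
        }
        return ret;
}

int main(void)
{
        printf("probe returned %d\n", platform_probe(1));
        return 0;
}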
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index 93ed14cc2252..f6a9ad52cbbf 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -146,7 +146,7 @@ void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd)
146 if (dev->pm_domain == pd) 146 if (dev->pm_domain == pd)
147 return; 147 return;
148 148
149 WARN(device_is_bound(dev), 149 WARN(pd && device_is_bound(dev),
150 "PM domains can only be changed for unbound devices\n"); 150 "PM domains can only be changed for unbound devices\n");
151 dev->pm_domain = pd; 151 dev->pm_domain = pd;
152 device_pm_check_callbacks(dev); 152 device_pm_check_callbacks(dev);
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 6ac9a7f33b64..301b785f9f56 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -162,7 +162,7 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
162 162
163/** 163/**
164 * genpd_queue_power_off_work - Queue up the execution of genpd_poweroff(). 164 * genpd_queue_power_off_work - Queue up the execution of genpd_poweroff().
165 * @genpd: PM domait to power off. 165 * @genpd: PM domain to power off.
166 * 166 *
167 * Queue up the execution of genpd_poweroff() unless it's already been done 167 * Queue up the execution of genpd_poweroff() unless it's already been done
168 * before. 168 * before.
@@ -172,16 +172,15 @@ static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
172 queue_work(pm_wq, &genpd->power_off_work); 172 queue_work(pm_wq, &genpd->power_off_work);
173} 173}
174 174
175static int genpd_poweron(struct generic_pm_domain *genpd);
176
177/** 175/**
178 * __genpd_poweron - Restore power to a given PM domain and its masters. 176 * genpd_poweron - Restore power to a given PM domain and its masters.
179 * @genpd: PM domain to power up. 177 * @genpd: PM domain to power up.
178 * @depth: nesting count for lockdep.
180 * 179 *
181 * Restore power to @genpd and all of its masters so that it is possible to 180 * Restore power to @genpd and all of its masters so that it is possible to
182 * resume a device belonging to it. 181 * resume a device belonging to it.
183 */ 182 */
184static int __genpd_poweron(struct generic_pm_domain *genpd) 183static int genpd_poweron(struct generic_pm_domain *genpd, unsigned int depth)
185{ 184{
186 struct gpd_link *link; 185 struct gpd_link *link;
187 int ret = 0; 186 int ret = 0;
@@ -196,11 +195,16 @@ static int __genpd_poweron(struct generic_pm_domain *genpd)
196 * with it. 195 * with it.
197 */ 196 */
198 list_for_each_entry(link, &genpd->slave_links, slave_node) { 197 list_for_each_entry(link, &genpd->slave_links, slave_node) {
199 genpd_sd_counter_inc(link->master); 198 struct generic_pm_domain *master = link->master;
199
200 genpd_sd_counter_inc(master);
201
202 mutex_lock_nested(&master->lock, depth + 1);
203 ret = genpd_poweron(master, depth + 1);
204 mutex_unlock(&master->lock);
200 205
201 ret = genpd_poweron(link->master);
202 if (ret) { 206 if (ret) {
203 genpd_sd_counter_dec(link->master); 207 genpd_sd_counter_dec(master);
204 goto err; 208 goto err;
205 } 209 }
206 } 210 }
@@ -223,20 +227,6 @@ static int __genpd_poweron(struct generic_pm_domain *genpd)
223 return ret; 227 return ret;
224} 228}
225 229
226/**
227 * genpd_poweron - Restore power to a given PM domain and its masters.
228 * @genpd: PM domain to power up.
229 */
230static int genpd_poweron(struct generic_pm_domain *genpd)
231{
232 int ret;
233
234 mutex_lock(&genpd->lock);
235 ret = __genpd_poweron(genpd);
236 mutex_unlock(&genpd->lock);
237 return ret;
238}
239
240static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev) 230static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
241{ 231{
242 return GENPD_DEV_CALLBACK(genpd, int, save_state, dev); 232 return GENPD_DEV_CALLBACK(genpd, int, save_state, dev);
@@ -484,7 +474,7 @@ static int pm_genpd_runtime_resume(struct device *dev)
484 } 474 }
485 475
486 mutex_lock(&genpd->lock); 476 mutex_lock(&genpd->lock);
487 ret = __genpd_poweron(genpd); 477 ret = genpd_poweron(genpd, 0);
488 mutex_unlock(&genpd->lock); 478 mutex_unlock(&genpd->lock);
489 479
490 if (ret) 480 if (ret)
@@ -1339,8 +1329,8 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1339 if (!link) 1329 if (!link)
1340 return -ENOMEM; 1330 return -ENOMEM;
1341 1331
1342 mutex_lock(&genpd->lock); 1332 mutex_lock(&subdomain->lock);
1343 mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); 1333 mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
1344 1334
1345 if (genpd->status == GPD_STATE_POWER_OFF 1335 if (genpd->status == GPD_STATE_POWER_OFF
1346 && subdomain->status != GPD_STATE_POWER_OFF) { 1336 && subdomain->status != GPD_STATE_POWER_OFF) {
@@ -1363,8 +1353,8 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1363 genpd_sd_counter_inc(genpd); 1353 genpd_sd_counter_inc(genpd);
1364 1354
1365 out: 1355 out:
1366 mutex_unlock(&subdomain->lock);
1367 mutex_unlock(&genpd->lock); 1356 mutex_unlock(&genpd->lock);
1357 mutex_unlock(&subdomain->lock);
1368 if (ret) 1358 if (ret)
1369 kfree(link); 1359 kfree(link);
1370 return ret; 1360 return ret;
@@ -1385,7 +1375,8 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1385 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)) 1375 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1386 return -EINVAL; 1376 return -EINVAL;
1387 1377
1388 mutex_lock(&genpd->lock); 1378 mutex_lock(&subdomain->lock);
1379 mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
1389 1380
1390 if (!list_empty(&subdomain->slave_links) || subdomain->device_count) { 1381 if (!list_empty(&subdomain->slave_links) || subdomain->device_count) {
1391 pr_warn("%s: unable to remove subdomain %s\n", genpd->name, 1382 pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
@@ -1398,22 +1389,19 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1398 if (link->slave != subdomain) 1389 if (link->slave != subdomain)
1399 continue; 1390 continue;
1400 1391
1401 mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
1402
1403 list_del(&link->master_node); 1392 list_del(&link->master_node);
1404 list_del(&link->slave_node); 1393 list_del(&link->slave_node);
1405 kfree(link); 1394 kfree(link);
1406 if (subdomain->status != GPD_STATE_POWER_OFF) 1395 if (subdomain->status != GPD_STATE_POWER_OFF)
1407 genpd_sd_counter_dec(genpd); 1396 genpd_sd_counter_dec(genpd);
1408 1397
1409 mutex_unlock(&subdomain->lock);
1410
1411 ret = 0; 1398 ret = 0;
1412 break; 1399 break;
1413 } 1400 }
1414 1401
1415out: 1402out:
1416 mutex_unlock(&genpd->lock); 1403 mutex_unlock(&genpd->lock);
1404 mutex_unlock(&subdomain->lock);
1417 1405
1418 return ret; 1406 return ret;
1419} 1407}
@@ -1818,8 +1806,10 @@ int genpd_dev_pm_attach(struct device *dev)
1818 1806
1819 dev->pm_domain->detach = genpd_dev_pm_detach; 1807 dev->pm_domain->detach = genpd_dev_pm_detach;
1820 dev->pm_domain->sync = genpd_dev_pm_sync; 1808 dev->pm_domain->sync = genpd_dev_pm_sync;
1821 ret = genpd_poweron(pd);
1822 1809
1810 mutex_lock(&pd->lock);
1811 ret = genpd_poweron(pd, 0);
1812 mutex_unlock(&pd->lock);
1823out: 1813out:
1824 return ret ? -EPROBE_DEFER : 0; 1814 return ret ? -EPROBE_DEFER : 0;
1825} 1815}
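The genpd rework drops the outer genpd_poweron() wrapper: callers lock the domain themselves, and the function walks up its master links recursively, taking each master's mutex with mutex_lock_nested(..., depth + 1) so lockdep sees a distinct nesting level per layer; subdomain add/remove now locks the subdomain before the parent for the same ordering reason. A toy recursion showing only the depth bookkeeping; there is no real locking or power control here.

#include <stdio.h>

struct domain {
        const char *name;
        struct domain *master;   /* a single parent is enough for the sketch */
        int powered;
};

/* The caller is assumed to hold d's lock; each recursion level would use
 * mutex_lock_nested(&master->lock, depth + 1) in the kernel. */
static int poweron(struct domain *d, unsigned int depth)
{
        if (d->powered)
                return 0;
        if (d->master) {
                printf("lock %s at nesting depth %u\n", d->master->name, depth + 1);
                if (poweron(d->master, depth + 1))
                        return -1;
        }
        printf("powering on %s\n", d->name);
        d->powered = 1;
        return 0;
}

int main(void)
{
        struct domain top  = { "top",  NULL, 0 };
        struct domain mid  = { "mid",  &top, 0 };
        struct domain leaf = { "leaf", &mid, 0 };

        return poweron(&leaf, 0);
}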
diff --git a/drivers/base/property.c b/drivers/base/property.c
index c359351d50f1..a163f2c59aa3 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -218,7 +218,7 @@ bool fwnode_property_present(struct fwnode_handle *fwnode, const char *propname)
218 bool ret; 218 bool ret;
219 219
220 ret = __fwnode_property_present(fwnode, propname); 220 ret = __fwnode_property_present(fwnode, propname);
221 if (ret == false && fwnode && fwnode->secondary) 221 if (ret == false && fwnode && !IS_ERR_OR_NULL(fwnode->secondary))
222 ret = __fwnode_property_present(fwnode->secondary, propname); 222 ret = __fwnode_property_present(fwnode->secondary, propname);
223 return ret; 223 return ret;
224} 224}
@@ -423,7 +423,7 @@ EXPORT_SYMBOL_GPL(device_property_match_string);
423 int _ret_; \ 423 int _ret_; \
424 _ret_ = FWNODE_PROP_READ(_fwnode_, _propname_, _type_, _proptype_, \ 424 _ret_ = FWNODE_PROP_READ(_fwnode_, _propname_, _type_, _proptype_, \
425 _val_, _nval_); \ 425 _val_, _nval_); \
426 if (_ret_ == -EINVAL && _fwnode_ && _fwnode_->secondary) \ 426 if (_ret_ == -EINVAL && _fwnode_ && !IS_ERR_OR_NULL(_fwnode_->secondary)) \
427 _ret_ = FWNODE_PROP_READ(_fwnode_->secondary, _propname_, _type_, \ 427 _ret_ = FWNODE_PROP_READ(_fwnode_->secondary, _propname_, _type_, \
428 _proptype_, _val_, _nval_); \ 428 _proptype_, _val_, _nval_); \
429 _ret_; \ 429 _ret_; \
@@ -593,7 +593,7 @@ int fwnode_property_read_string_array(struct fwnode_handle *fwnode,
593 int ret; 593 int ret;
594 594
595 ret = __fwnode_property_read_string_array(fwnode, propname, val, nval); 595 ret = __fwnode_property_read_string_array(fwnode, propname, val, nval);
596 if (ret == -EINVAL && fwnode && fwnode->secondary) 596 if (ret == -EINVAL && fwnode && !IS_ERR_OR_NULL(fwnode->secondary))
597 ret = __fwnode_property_read_string_array(fwnode->secondary, 597 ret = __fwnode_property_read_string_array(fwnode->secondary,
598 propname, val, nval); 598 propname, val, nval);
599 return ret; 599 return ret;
@@ -621,7 +621,7 @@ int fwnode_property_read_string(struct fwnode_handle *fwnode,
621 int ret; 621 int ret;
622 622
623 ret = __fwnode_property_read_string(fwnode, propname, val); 623 ret = __fwnode_property_read_string(fwnode, propname, val);
624 if (ret == -EINVAL && fwnode && fwnode->secondary) 624 if (ret == -EINVAL && fwnode && !IS_ERR_OR_NULL(fwnode->secondary))
625 ret = __fwnode_property_read_string(fwnode->secondary, 625 ret = __fwnode_property_read_string(fwnode->secondary,
626 propname, val); 626 propname, val);
627 return ret; 627 return ret;
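The property.c hunks tighten the fallback to the secondary fwnode: fwnode->secondary can legitimately hold an ERR_PTR, so it is only consulted when it passes IS_ERR_OR_NULL(). A stand-alone imitation of that guard using a local re-definition of the ERR_PTR test; the lookup itself is faked.

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define MAX_ERRNO 4095
/* Local imitation of the kernel's IS_ERR_OR_NULL() for this sketch. */
static int is_err_or_null(const void *p)
{
        return !p || (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

struct fwnode {
        const char *prop;          /* pretend the node carries one property */
        struct fwnode *secondary;  /* may be NULL or an ERR_PTR-style value */
};

static int read_prop(const struct fwnode *fw, const char *name)
{
        return (fw->prop && strcmp(fw->prop, name) == 0) ? 0 : -EINVAL;
}

/* Try the primary node first; fall back to the secondary node only when it
 * is a real pointer, exactly the guard added in the diff. */
static int property_read(const struct fwnode *fw, const char *name)
{
        int ret = read_prop(fw, name);

        if (ret == -EINVAL && !is_err_or_null(fw->secondary))
                ret = read_prop(fw->secondary, name);
        return ret;
}

int main(void)
{
        struct fwnode second  = { "clock-frequency", NULL };
        struct fwnode primary = { NULL, &second };

        printf("lookup -> %d\n", property_read(&primary, "clock-frequency"));
        return 0;
}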
diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c
index 8812bfb9e3b8..eea51569f0eb 100644
--- a/drivers/base/regmap/regmap-mmio.c
+++ b/drivers/base/regmap/regmap-mmio.c
@@ -133,17 +133,17 @@ static int regmap_mmio_gather_write(void *context,
133 while (val_size) { 133 while (val_size) {
134 switch (ctx->val_bytes) { 134 switch (ctx->val_bytes) {
135 case 1: 135 case 1:
136 __raw_writeb(*(u8 *)val, ctx->regs + offset); 136 writeb(*(u8 *)val, ctx->regs + offset);
137 break; 137 break;
138 case 2: 138 case 2:
139 __raw_writew(*(u16 *)val, ctx->regs + offset); 139 writew(*(u16 *)val, ctx->regs + offset);
140 break; 140 break;
141 case 4: 141 case 4:
142 __raw_writel(*(u32 *)val, ctx->regs + offset); 142 writel(*(u32 *)val, ctx->regs + offset);
143 break; 143 break;
144#ifdef CONFIG_64BIT 144#ifdef CONFIG_64BIT
145 case 8: 145 case 8:
146 __raw_writeq(*(u64 *)val, ctx->regs + offset); 146 writeq(*(u64 *)val, ctx->regs + offset);
147 break; 147 break;
148#endif 148#endif
149 default: 149 default:
@@ -193,17 +193,17 @@ static int regmap_mmio_read(void *context,
193 while (val_size) { 193 while (val_size) {
194 switch (ctx->val_bytes) { 194 switch (ctx->val_bytes) {
195 case 1: 195 case 1:
196 *(u8 *)val = __raw_readb(ctx->regs + offset); 196 *(u8 *)val = readb(ctx->regs + offset);
197 break; 197 break;
198 case 2: 198 case 2:
199 *(u16 *)val = __raw_readw(ctx->regs + offset); 199 *(u16 *)val = readw(ctx->regs + offset);
200 break; 200 break;
201 case 4: 201 case 4:
202 *(u32 *)val = __raw_readl(ctx->regs + offset); 202 *(u32 *)val = readl(ctx->regs + offset);
203 break; 203 break;
204#ifdef CONFIG_64BIT 204#ifdef CONFIG_64BIT
205 case 8: 205 case 8:
206 *(u64 *)val = __raw_readq(ctx->regs + offset); 206 *(u64 *)val = readq(ctx->regs + offset);
207 break; 207 break;
208#endif 208#endif
209 default: 209 default:
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 9e251201dd48..84708a5f8c52 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -866,7 +866,7 @@ static void set_fdc(int drive)
866} 866}
867 867
868/* locks the driver */ 868/* locks the driver */
869static int lock_fdc(int drive, bool interruptible) 869static int lock_fdc(int drive)
870{ 870{
871 if (WARN(atomic_read(&usage_count) == 0, 871 if (WARN(atomic_read(&usage_count) == 0,
872 "Trying to lock fdc while usage count=0\n")) 872 "Trying to lock fdc while usage count=0\n"))
@@ -2173,7 +2173,7 @@ static int do_format(int drive, struct format_descr *tmp_format_req)
2173{ 2173{
2174 int ret; 2174 int ret;
2175 2175
2176 if (lock_fdc(drive, true)) 2176 if (lock_fdc(drive))
2177 return -EINTR; 2177 return -EINTR;
2178 2178
2179 set_floppy(drive); 2179 set_floppy(drive);
@@ -2960,7 +2960,7 @@ static int user_reset_fdc(int drive, int arg, bool interruptible)
2960{ 2960{
2961 int ret; 2961 int ret;
2962 2962
2963 if (lock_fdc(drive, interruptible)) 2963 if (lock_fdc(drive))
2964 return -EINTR; 2964 return -EINTR;
2965 2965
2966 if (arg == FD_RESET_ALWAYS) 2966 if (arg == FD_RESET_ALWAYS)
@@ -3243,7 +3243,7 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g,
3243 if (!capable(CAP_SYS_ADMIN)) 3243 if (!capable(CAP_SYS_ADMIN))
3244 return -EPERM; 3244 return -EPERM;
3245 mutex_lock(&open_lock); 3245 mutex_lock(&open_lock);
3246 if (lock_fdc(drive, true)) { 3246 if (lock_fdc(drive)) {
3247 mutex_unlock(&open_lock); 3247 mutex_unlock(&open_lock);
3248 return -EINTR; 3248 return -EINTR;
3249 } 3249 }
@@ -3263,7 +3263,7 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g,
3263 } else { 3263 } else {
3264 int oldStretch; 3264 int oldStretch;
3265 3265
3266 if (lock_fdc(drive, true)) 3266 if (lock_fdc(drive))
3267 return -EINTR; 3267 return -EINTR;
3268 if (cmd != FDDEFPRM) { 3268 if (cmd != FDDEFPRM) {
3269 /* notice a disk change immediately, else 3269 /* notice a disk change immediately, else
@@ -3349,7 +3349,7 @@ static int get_floppy_geometry(int drive, int type, struct floppy_struct **g)
3349 if (type) 3349 if (type)
3350 *g = &floppy_type[type]; 3350 *g = &floppy_type[type];
3351 else { 3351 else {
3352 if (lock_fdc(drive, false)) 3352 if (lock_fdc(drive))
3353 return -EINTR; 3353 return -EINTR;
3354 if (poll_drive(false, 0) == -EINTR) 3354 if (poll_drive(false, 0) == -EINTR)
3355 return -EINTR; 3355 return -EINTR;
@@ -3433,7 +3433,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
3433 if (UDRS->fd_ref != 1) 3433 if (UDRS->fd_ref != 1)
3434 /* somebody else has this drive open */ 3434 /* somebody else has this drive open */
3435 return -EBUSY; 3435 return -EBUSY;
3436 if (lock_fdc(drive, true)) 3436 if (lock_fdc(drive))
3437 return -EINTR; 3437 return -EINTR;
3438 3438
3439 /* do the actual eject. Fails on 3439 /* do the actual eject. Fails on
@@ -3445,7 +3445,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
3445 process_fd_request(); 3445 process_fd_request();
3446 return ret; 3446 return ret;
3447 case FDCLRPRM: 3447 case FDCLRPRM:
3448 if (lock_fdc(drive, true)) 3448 if (lock_fdc(drive))
3449 return -EINTR; 3449 return -EINTR;
3450 current_type[drive] = NULL; 3450 current_type[drive] = NULL;
3451 floppy_sizes[drive] = MAX_DISK_SIZE << 1; 3451 floppy_sizes[drive] = MAX_DISK_SIZE << 1;
@@ -3467,7 +3467,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
3467 UDP->flags &= ~FTD_MSG; 3467 UDP->flags &= ~FTD_MSG;
3468 return 0; 3468 return 0;
3469 case FDFMTBEG: 3469 case FDFMTBEG:
3470 if (lock_fdc(drive, true)) 3470 if (lock_fdc(drive))
3471 return -EINTR; 3471 return -EINTR;
3472 if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR) 3472 if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR)
3473 return -EINTR; 3473 return -EINTR;
@@ -3484,7 +3484,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
3484 return do_format(drive, &inparam.f); 3484 return do_format(drive, &inparam.f);
3485 case FDFMTEND: 3485 case FDFMTEND:
3486 case FDFLUSH: 3486 case FDFLUSH:
3487 if (lock_fdc(drive, true)) 3487 if (lock_fdc(drive))
3488 return -EINTR; 3488 return -EINTR;
3489 return invalidate_drive(bdev); 3489 return invalidate_drive(bdev);
3490 case FDSETEMSGTRESH: 3490 case FDSETEMSGTRESH:
@@ -3507,7 +3507,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
3507 outparam = UDP; 3507 outparam = UDP;
3508 break; 3508 break;
3509 case FDPOLLDRVSTAT: 3509 case FDPOLLDRVSTAT:
3510 if (lock_fdc(drive, true)) 3510 if (lock_fdc(drive))
3511 return -EINTR; 3511 return -EINTR;
3512 if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR) 3512 if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR)
3513 return -EINTR; 3513 return -EINTR;
@@ -3530,7 +3530,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
3530 case FDRAWCMD: 3530 case FDRAWCMD:
3531 if (type) 3531 if (type)
3532 return -EINVAL; 3532 return -EINVAL;
3533 if (lock_fdc(drive, true)) 3533 if (lock_fdc(drive))
3534 return -EINTR; 3534 return -EINTR;
3535 set_floppy(drive); 3535 set_floppy(drive);
3536 i = raw_cmd_ioctl(cmd, (void __user *)param); 3536 i = raw_cmd_ioctl(cmd, (void __user *)param);
@@ -3539,7 +3539,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
3539 process_fd_request(); 3539 process_fd_request();
3540 return i; 3540 return i;
3541 case FDTWADDLE: 3541 case FDTWADDLE:
3542 if (lock_fdc(drive, true)) 3542 if (lock_fdc(drive))
3543 return -EINTR; 3543 return -EINTR;
3544 twaddle(); 3544 twaddle();
3545 process_fd_request(); 3545 process_fd_request();
@@ -3663,6 +3663,11 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
3663 3663
3664 opened_bdev[drive] = bdev; 3664 opened_bdev[drive] = bdev;
3665 3665
3666 if (!(mode & (FMODE_READ|FMODE_WRITE))) {
3667 res = -EINVAL;
3668 goto out;
3669 }
3670
3666 res = -ENXIO; 3671 res = -ENXIO;
3667 3672
3668 if (!floppy_track_buffer) { 3673 if (!floppy_track_buffer) {
@@ -3706,21 +3711,20 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
3706 if (UFDCS->rawcmd == 1) 3711 if (UFDCS->rawcmd == 1)
3707 UFDCS->rawcmd = 2; 3712 UFDCS->rawcmd = 2;
3708 3713
3709 if (!(mode & FMODE_NDELAY)) { 3714 UDRS->last_checked = 0;
3710 if (mode & (FMODE_READ|FMODE_WRITE)) { 3715 clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
3711 UDRS->last_checked = 0; 3716 check_disk_change(bdev);
3712 clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags); 3717 if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
3713 check_disk_change(bdev); 3718 goto out;
3714 if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags)) 3719 if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
3715 goto out; 3720 goto out;
3716 if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags)) 3721
3717 goto out; 3722 res = -EROFS;
3718 } 3723
3719 res = -EROFS; 3724 if ((mode & FMODE_WRITE) &&
3720 if ((mode & FMODE_WRITE) && 3725 !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags))
3721 !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags)) 3726 goto out;
3722 goto out; 3727
3723 }
3724 mutex_unlock(&open_lock); 3728 mutex_unlock(&open_lock);
3725 mutex_unlock(&floppy_mutex); 3729 mutex_unlock(&floppy_mutex);
3726 return 0; 3730 return 0;
@@ -3748,7 +3752,8 @@ static unsigned int floppy_check_events(struct gendisk *disk,
3748 return DISK_EVENT_MEDIA_CHANGE; 3752 return DISK_EVENT_MEDIA_CHANGE;
3749 3753
3750 if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) { 3754 if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) {
3751 lock_fdc(drive, false); 3755 if (lock_fdc(drive))
3756 return -EINTR;
3752 poll_drive(false, 0); 3757 poll_drive(false, 0);
3753 process_fd_request(); 3758 process_fd_request();
3754 } 3759 }
@@ -3847,7 +3852,9 @@ static int floppy_revalidate(struct gendisk *disk)
3847 "VFS: revalidate called on non-open device.\n")) 3852 "VFS: revalidate called on non-open device.\n"))
3848 return -EFAULT; 3853 return -EFAULT;
3849 3854
3850 lock_fdc(drive, false); 3855 res = lock_fdc(drive);
3856 if (res)
3857 return res;
3851 cf = (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) || 3858 cf = (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) ||
3852 test_bit(FD_VERIFY_BIT, &UDRS->flags)); 3859 test_bit(FD_VERIFY_BIT, &UDRS->flags));
3853 if (!(cf || test_bit(drive, &fake_change) || drive_no_geom(drive))) { 3860 if (!(cf || test_bit(drive, &fake_change) || drive_no_geom(drive))) {
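The floppy hunks above change lock_fdc() from taking an "interruptible" flag to always being interruptible and reporting that through its return value, so every call site now checks for -EINTR instead of passing true/false. A minimal sketch of the resulting caller pattern, assuming the helper names visible in the hunks (lock_fdc, poll_drive, process_fd_request); example_locked_op() itself is illustrative, not driver code:

/* Sketch of the new calling convention; the helpers are the ones shown
 * in the hunks above, the wrapper function is made up. */
static int example_locked_op(int drive)
{
	if (lock_fdc(drive))		/* interruptible; nonzero means -EINTR */
		return -EINTR;
	if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR)
		return -EINTR;
	/* ... do the actual work on the drive here ... */
	process_fd_request();		/* release the controller */
	return 0;
}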
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 8ba1e97d573c..64a7b5971b57 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -478,7 +478,7 @@ static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
478 id->ver_id = 0x1; 478 id->ver_id = 0x1;
479 id->vmnt = 0; 479 id->vmnt = 0;
480 id->cgrps = 1; 480 id->cgrps = 1;
481 id->cap = 0x3; 481 id->cap = 0x2;
482 id->dom = 0x1; 482 id->dom = 0x1;
483 483
484 id->ppaf.blk_offset = 0; 484 id->ppaf.blk_offset = 0;
@@ -707,9 +707,7 @@ static int null_add_dev(void)
707 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q); 707 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
708 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q); 708 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);
709 709
710
711 mutex_lock(&lock); 710 mutex_lock(&lock);
712 list_add_tail(&nullb->list, &nullb_list);
713 nullb->index = nullb_indexes++; 711 nullb->index = nullb_indexes++;
714 mutex_unlock(&lock); 712 mutex_unlock(&lock);
715 713
@@ -743,6 +741,10 @@ static int null_add_dev(void)
743 strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN); 741 strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
744 742
745 add_disk(disk); 743 add_disk(disk);
744
745 mutex_lock(&lock);
746 list_add_tail(&nullb->list, &nullb_list);
747 mutex_unlock(&lock);
746done: 748done:
747 return 0; 749 return 0;
748 750
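The null_blk hunks move the list_add_tail() of the new device from early setup to after add_disk(), presumably so that code walking nullb_list never sees a device whose disk is not yet registered, while the index is still reserved early under the same lock. A condensed sketch of the resulting ordering, using the names from the hunks:

/* Ordering sketch (names from the hunks above): reserve an index early,
 * but only publish the device on the shared list once the disk exists. */
mutex_lock(&lock);
nullb->index = nullb_indexes++;
mutex_unlock(&lock);

/* ... queue and disk setup ... */
add_disk(disk);

mutex_lock(&lock);
list_add_tail(&nullb->list, &nullb_list);
mutex_unlock(&lock);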
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 8a8dc91c39f7..83eb9e6bf8b0 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1873,6 +1873,43 @@ again:
1873 return err; 1873 return err;
1874} 1874}
1875 1875
1876static int negotiate_mq(struct blkfront_info *info)
1877{
1878 unsigned int backend_max_queues = 0;
1879 int err;
1880 unsigned int i;
1881
1882 BUG_ON(info->nr_rings);
1883
1884 /* Check if backend supports multiple queues. */
1885 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
1886 "multi-queue-max-queues", "%u", &backend_max_queues);
1887 if (err < 0)
1888 backend_max_queues = 1;
1889
1890 info->nr_rings = min(backend_max_queues, xen_blkif_max_queues);
1891 /* We need at least one ring. */
1892 if (!info->nr_rings)
1893 info->nr_rings = 1;
1894
1895 info->rinfo = kzalloc(sizeof(struct blkfront_ring_info) * info->nr_rings, GFP_KERNEL);
1896 if (!info->rinfo) {
1897 xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure");
1898 return -ENOMEM;
1899 }
1900
1901 for (i = 0; i < info->nr_rings; i++) {
1902 struct blkfront_ring_info *rinfo;
1903
1904 rinfo = &info->rinfo[i];
1905 INIT_LIST_HEAD(&rinfo->indirect_pages);
1906 INIT_LIST_HEAD(&rinfo->grants);
1907 rinfo->dev_info = info;
1908 INIT_WORK(&rinfo->work, blkif_restart_queue);
1909 spin_lock_init(&rinfo->ring_lock);
1910 }
1911 return 0;
1912}
1876/** 1913/**
1877 * Entry point to this code when a new device is created. Allocate the basic 1914 * Entry point to this code when a new device is created. Allocate the basic
1878 * structures and the ring buffer for communication with the backend, and 1915 * structures and the ring buffer for communication with the backend, and
@@ -1883,9 +1920,7 @@ static int blkfront_probe(struct xenbus_device *dev,
1883 const struct xenbus_device_id *id) 1920 const struct xenbus_device_id *id)
1884{ 1921{
1885 int err, vdevice; 1922 int err, vdevice;
1886 unsigned int r_index;
1887 struct blkfront_info *info; 1923 struct blkfront_info *info;
1888 unsigned int backend_max_queues = 0;
1889 1924
1890 /* FIXME: Use dynamic device id if this is not set. */ 1925 /* FIXME: Use dynamic device id if this is not set. */
1891 err = xenbus_scanf(XBT_NIL, dev->nodename, 1926 err = xenbus_scanf(XBT_NIL, dev->nodename,
@@ -1936,33 +1971,10 @@ static int blkfront_probe(struct xenbus_device *dev,
1936 } 1971 }
1937 1972
1938 info->xbdev = dev; 1973 info->xbdev = dev;
1939 /* Check if backend supports multiple queues. */ 1974 err = negotiate_mq(info);
1940 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, 1975 if (err) {
1941 "multi-queue-max-queues", "%u", &backend_max_queues);
1942 if (err < 0)
1943 backend_max_queues = 1;
1944
1945 info->nr_rings = min(backend_max_queues, xen_blkif_max_queues);
1946 /* We need at least one ring. */
1947 if (!info->nr_rings)
1948 info->nr_rings = 1;
1949
1950 info->rinfo = kzalloc(sizeof(struct blkfront_ring_info) * info->nr_rings, GFP_KERNEL);
1951 if (!info->rinfo) {
1952 xenbus_dev_fatal(dev, -ENOMEM, "allocating ring_info structure");
1953 kfree(info); 1976 kfree(info);
1954 return -ENOMEM; 1977 return err;
1955 }
1956
1957 for (r_index = 0; r_index < info->nr_rings; r_index++) {
1958 struct blkfront_ring_info *rinfo;
1959
1960 rinfo = &info->rinfo[r_index];
1961 INIT_LIST_HEAD(&rinfo->indirect_pages);
1962 INIT_LIST_HEAD(&rinfo->grants);
1963 rinfo->dev_info = info;
1964 INIT_WORK(&rinfo->work, blkif_restart_queue);
1965 spin_lock_init(&rinfo->ring_lock);
1966 } 1978 }
1967 1979
1968 mutex_init(&info->mutex); 1980 mutex_init(&info->mutex);
@@ -2123,12 +2135,16 @@ static int blkif_recover(struct blkfront_info *info)
2123static int blkfront_resume(struct xenbus_device *dev) 2135static int blkfront_resume(struct xenbus_device *dev)
2124{ 2136{
2125 struct blkfront_info *info = dev_get_drvdata(&dev->dev); 2137 struct blkfront_info *info = dev_get_drvdata(&dev->dev);
2126 int err; 2138 int err = 0;
2127 2139
2128 dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename); 2140 dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);
2129 2141
2130 blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); 2142 blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
2131 2143
2144 err = negotiate_mq(info);
2145 if (err)
2146 return err;
2147
2132 err = talk_to_blkback(dev, info); 2148 err = talk_to_blkback(dev, info);
2133 2149
2134 /* 2150 /*
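The xen-blkfront hunks factor the multi-queue negotiation out of blkfront_probe() into negotiate_mq() so blkfront_resume() can redo it after blkif_free() tears the rings down. The core of the negotiation is a clamp with a fallback; a standalone sketch of just that selection logic, where pick_nr_rings() is an illustrative name and not part of the driver:

/* Illustrative helper, not driver code: choose the ring count from the
 * backend's advertised "multi-queue-max-queues" and the frontend limit
 * (xen_blkif_max_queues in the hunks), defaulting to a single ring. */
static unsigned int pick_nr_rings(unsigned int backend_max,
				  unsigned int frontend_max)
{
	unsigned int nr = min(backend_max, frontend_max);

	return nr ? nr : 1;	/* we need at least one ring */
}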
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 129d47bcc5fc..9a92c072a485 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -132,7 +132,7 @@ config SUNXI_RSB
132 and AC100/AC200 ICs. 132 and AC100/AC200 ICs.
133 133
134config UNIPHIER_SYSTEM_BUS 134config UNIPHIER_SYSTEM_BUS
135 bool "UniPhier System Bus driver" 135 tristate "UniPhier System Bus driver"
136 depends on ARCH_UNIPHIER && OF 136 depends on ARCH_UNIPHIER && OF
137 default y 137 default y
138 help 138 help
diff --git a/drivers/bus/vexpress-config.c b/drivers/bus/vexpress-config.c
index 6575c0fe6a4e..c3cb76b363c6 100644
--- a/drivers/bus/vexpress-config.c
+++ b/drivers/bus/vexpress-config.c
@@ -192,8 +192,10 @@ static int __init vexpress_config_init(void)
192 /* Need the config devices early, before the "normal" devices... */ 192 /* Need the config devices early, before the "normal" devices... */
193 for_each_compatible_node(node, NULL, "arm,vexpress,config-bus") { 193 for_each_compatible_node(node, NULL, "arm,vexpress,config-bus") {
194 err = vexpress_config_populate(node); 194 err = vexpress_config_populate(node);
195 if (err) 195 if (err) {
196 of_node_put(node);
196 break; 197 break;
198 }
197 } 199 }
198 200
199 return err; 201 return err;
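The vexpress-config hunk (and the similar clk-emc hunk further down) fixes a device-node refcount leak: for_each_compatible_node() holds a reference on each node it hands out and only drops it when advancing to the next one, so breaking out of the loop early must drop the reference explicitly. A generic sketch of the pattern, with a hypothetical compatible string and callback:

/* Generic shape of the fix; "vendor,example" and do_setup() are
 * placeholders.  of_node_put() balances the reference held by the
 * iterator when the loop is left early. */
struct device_node *node;
int err = 0;

for_each_compatible_node(node, NULL, "vendor,example") {
	err = do_setup(node);
	if (err) {
		of_node_put(node);
		break;
	}
}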
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 240b6cf1d97c..be54e5331a45 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -42,7 +42,7 @@
42/* 42/*
43 * The High Precision Event Timer driver. 43 * The High Precision Event Timer driver.
44 * This driver is closely modelled after the rtc.c driver. 44 * This driver is closely modelled after the rtc.c driver.
45 * http://www.intel.com/hardwaredesign/hpetspec_1.pdf 45 * See HPET spec revision 1.
46 */ 46 */
47#define HPET_USER_FREQ (64) 47#define HPET_USER_FREQ (64)
48#define HPET_DRIFT (500) 48#define HPET_DRIFT (500)
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index dbf22719462f..ff00331bff49 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -372,6 +372,7 @@ config HW_RANDOM_XGENE
372config HW_RANDOM_STM32 372config HW_RANDOM_STM32
373 tristate "STMicroelectronics STM32 random number generator" 373 tristate "STMicroelectronics STM32 random number generator"
374 depends on HW_RANDOM && (ARCH_STM32 || COMPILE_TEST) 374 depends on HW_RANDOM && (ARCH_STM32 || COMPILE_TEST)
375 depends on HAS_IOMEM
375 help 376 help
376 This driver provides kernel-side support for the Random Number 377 This driver provides kernel-side support for the Random Number
377 Generator hardware found on STM32 microcontrollers. 378 Generator hardware found on STM32 microcontrollers.
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 9fda22e3387e..7fddd8696211 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -68,6 +68,7 @@
68#include <linux/of_platform.h> 68#include <linux/of_platform.h>
69#include <linux/of_address.h> 69#include <linux/of_address.h>
70#include <linux/of_irq.h> 70#include <linux/of_irq.h>
71#include <linux/acpi.h>
71 72
72#ifdef CONFIG_PARISC 73#ifdef CONFIG_PARISC
73#include <asm/hardware.h> /* for register_parisc_driver() stuff */ 74#include <asm/hardware.h> /* for register_parisc_driver() stuff */
@@ -2054,8 +2055,6 @@ static int hardcode_find_bmc(void)
2054 2055
2055#ifdef CONFIG_ACPI 2056#ifdef CONFIG_ACPI
2056 2057
2057#include <linux/acpi.h>
2058
2059/* 2058/*
2060 * Once we get an ACPI failure, we don't try any more, because we go 2059 * Once we get an ACPI failure, we don't try any more, because we go
2061 * through the tables sequentially. Once we don't find a table, there 2060 * through the tables sequentially. Once we don't find a table, there
diff --git a/drivers/char/random.c b/drivers/char/random.c
index d0da5d852d41..b583e5336630 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1819,6 +1819,28 @@ unsigned int get_random_int(void)
1819EXPORT_SYMBOL(get_random_int); 1819EXPORT_SYMBOL(get_random_int);
1820 1820
1821/* 1821/*
1822 * Same as get_random_int(), but returns unsigned long.
1823 */
1824unsigned long get_random_long(void)
1825{
1826 __u32 *hash;
1827 unsigned long ret;
1828
1829 if (arch_get_random_long(&ret))
1830 return ret;
1831
1832 hash = get_cpu_var(get_random_int_hash);
1833
1834 hash[0] += current->pid + jiffies + random_get_entropy();
1835 md5_transform(hash, random_int_secret);
1836 ret = *(unsigned long *)hash;
1837 put_cpu_var(get_random_int_hash);
1838
1839 return ret;
1840}
1841EXPORT_SYMBOL(get_random_long);
1842
1843/*
1822 * randomize_range() returns a start address such that 1844 * randomize_range() returns a start address such that
1823 * 1845 *
1824 * [...... <range> .....] 1846 * [...... <range> .....]
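The random.c hunk adds get_random_long(), the unsigned-long counterpart of get_random_int(): it prefers arch_get_random_long() and otherwise reuses the per-CPU MD5 hash state. A hypothetical caller sketch of the kind of full-word randomization (such as address-space layout offsets) the helper suits; example_random_offset() is illustrative, not kernel code:

/* Hypothetical use: pick a page-aligned offset inside a region of
 * `len` bytes using the full unsigned long of randomness. */
static unsigned long example_random_offset(unsigned long len)
{
	if (len < PAGE_SIZE)
		return 0;

	return (get_random_long() % (len >> PAGE_SHIFT)) << PAGE_SHIFT;
}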
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index b038e3666058..bae4be6501df 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -43,7 +43,7 @@ obj-$(CONFIG_COMMON_CLK_SI514) += clk-si514.o
43obj-$(CONFIG_COMMON_CLK_SI570) += clk-si570.o 43obj-$(CONFIG_COMMON_CLK_SI570) += clk-si570.o
44obj-$(CONFIG_COMMON_CLK_CDCE925) += clk-cdce925.o 44obj-$(CONFIG_COMMON_CLK_CDCE925) += clk-cdce925.o
45obj-$(CONFIG_ARCH_STM32) += clk-stm32f4.o 45obj-$(CONFIG_ARCH_STM32) += clk-stm32f4.o
46obj-$(CONFIG_ARCH_TANGOX) += clk-tango4.o 46obj-$(CONFIG_ARCH_TANGO) += clk-tango4.o
47obj-$(CONFIG_CLK_TWL6040) += clk-twl6040.o 47obj-$(CONFIG_CLK_TWL6040) += clk-twl6040.o
48obj-$(CONFIG_ARCH_U300) += clk-u300.o 48obj-$(CONFIG_ARCH_U300) += clk-u300.o
49obj-$(CONFIG_ARCH_VT8500) += clk-vt8500.o 49obj-$(CONFIG_ARCH_VT8500) += clk-vt8500.o
diff --git a/drivers/clk/clk-gpio.c b/drivers/clk/clk-gpio.c
index 19fed65587e8..7b09a265d79f 100644
--- a/drivers/clk/clk-gpio.c
+++ b/drivers/clk/clk-gpio.c
@@ -289,7 +289,7 @@ static void __init of_gpio_clk_setup(struct device_node *node,
289 289
290 num_parents = of_clk_get_parent_count(node); 290 num_parents = of_clk_get_parent_count(node);
291 if (num_parents < 0) 291 if (num_parents < 0)
292 return; 292 num_parents = 0;
293 293
294 data = kzalloc(sizeof(*data), GFP_KERNEL); 294 data = kzalloc(sizeof(*data), GFP_KERNEL);
295 if (!data) 295 if (!data)
diff --git a/drivers/clk/clk-scpi.c b/drivers/clk/clk-scpi.c
index cd0f2726f5e0..89e9ca78bb94 100644
--- a/drivers/clk/clk-scpi.c
+++ b/drivers/clk/clk-scpi.c
@@ -299,7 +299,7 @@ static int scpi_clocks_probe(struct platform_device *pdev)
299 /* Add the virtual cpufreq device */ 299 /* Add the virtual cpufreq device */
300 cpufreq_dev = platform_device_register_simple("scpi-cpufreq", 300 cpufreq_dev = platform_device_register_simple("scpi-cpufreq",
301 -1, NULL, 0); 301 -1, NULL, 0);
302 if (!cpufreq_dev) 302 if (IS_ERR(cpufreq_dev))
303 pr_warn("unable to register cpufreq device"); 303 pr_warn("unable to register cpufreq device");
304 304
305 return 0; 305 return 0;
diff --git a/drivers/clk/mvebu/dove-divider.c b/drivers/clk/mvebu/dove-divider.c
index d5c5bfa35a5a..3e0b52daa35f 100644
--- a/drivers/clk/mvebu/dove-divider.c
+++ b/drivers/clk/mvebu/dove-divider.c
@@ -247,7 +247,7 @@ static struct clk_onecell_data dove_divider_data = {
247 247
248void __init dove_divider_clk_init(struct device_node *np) 248void __init dove_divider_clk_init(struct device_node *np)
249{ 249{
250 void *base; 250 void __iomem *base;
251 251
252 base = of_iomap(np, 0); 252 base = of_iomap(np, 0);
253 if (WARN_ON(!base)) 253 if (WARN_ON(!base))
diff --git a/drivers/clk/qcom/gcc-apq8084.c b/drivers/clk/qcom/gcc-apq8084.c
index cf73e539e9f6..070037a29ea5 100644
--- a/drivers/clk/qcom/gcc-apq8084.c
+++ b/drivers/clk/qcom/gcc-apq8084.c
@@ -3587,7 +3587,6 @@ static const struct regmap_config gcc_apq8084_regmap_config = {
3587 .val_bits = 32, 3587 .val_bits = 32,
3588 .max_register = 0x1fc0, 3588 .max_register = 0x1fc0,
3589 .fast_io = true, 3589 .fast_io = true,
3590 .val_format_endian = REGMAP_ENDIAN_LITTLE,
3591}; 3590};
3592 3591
3593static const struct qcom_cc_desc gcc_apq8084_desc = { 3592static const struct qcom_cc_desc gcc_apq8084_desc = {
diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c
index b692ae881d6a..dd5402bac620 100644
--- a/drivers/clk/qcom/gcc-ipq806x.c
+++ b/drivers/clk/qcom/gcc-ipq806x.c
@@ -3005,7 +3005,6 @@ static const struct regmap_config gcc_ipq806x_regmap_config = {
3005 .val_bits = 32, 3005 .val_bits = 32,
3006 .max_register = 0x3e40, 3006 .max_register = 0x3e40,
3007 .fast_io = true, 3007 .fast_io = true,
3008 .val_format_endian = REGMAP_ENDIAN_LITTLE,
3009}; 3008};
3010 3009
3011static const struct qcom_cc_desc gcc_ipq806x_desc = { 3010static const struct qcom_cc_desc gcc_ipq806x_desc = {
diff --git a/drivers/clk/qcom/gcc-msm8660.c b/drivers/clk/qcom/gcc-msm8660.c
index f6a2b14dfec4..ad413036f7c7 100644
--- a/drivers/clk/qcom/gcc-msm8660.c
+++ b/drivers/clk/qcom/gcc-msm8660.c
@@ -2702,7 +2702,6 @@ static const struct regmap_config gcc_msm8660_regmap_config = {
2702 .val_bits = 32, 2702 .val_bits = 32,
2703 .max_register = 0x363c, 2703 .max_register = 0x363c,
2704 .fast_io = true, 2704 .fast_io = true,
2705 .val_format_endian = REGMAP_ENDIAN_LITTLE,
2706}; 2705};
2707 2706
2708static const struct qcom_cc_desc gcc_msm8660_desc = { 2707static const struct qcom_cc_desc gcc_msm8660_desc = {
diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c
index e3bf09d7d0ef..8cc9b2868b41 100644
--- a/drivers/clk/qcom/gcc-msm8916.c
+++ b/drivers/clk/qcom/gcc-msm8916.c
@@ -3336,7 +3336,6 @@ static const struct regmap_config gcc_msm8916_regmap_config = {
3336 .val_bits = 32, 3336 .val_bits = 32,
3337 .max_register = 0x80000, 3337 .max_register = 0x80000,
3338 .fast_io = true, 3338 .fast_io = true,
3339 .val_format_endian = REGMAP_ENDIAN_LITTLE,
3340}; 3339};
3341 3340
3342static const struct qcom_cc_desc gcc_msm8916_desc = { 3341static const struct qcom_cc_desc gcc_msm8916_desc = {
diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c
index f31111e32d44..983dd7dc89a7 100644
--- a/drivers/clk/qcom/gcc-msm8960.c
+++ b/drivers/clk/qcom/gcc-msm8960.c
@@ -3468,7 +3468,6 @@ static const struct regmap_config gcc_msm8960_regmap_config = {
3468 .val_bits = 32, 3468 .val_bits = 32,
3469 .max_register = 0x3660, 3469 .max_register = 0x3660,
3470 .fast_io = true, 3470 .fast_io = true,
3471 .val_format_endian = REGMAP_ENDIAN_LITTLE,
3472}; 3471};
3473 3472
3474static const struct regmap_config gcc_apq8064_regmap_config = { 3473static const struct regmap_config gcc_apq8064_regmap_config = {
@@ -3477,7 +3476,6 @@ static const struct regmap_config gcc_apq8064_regmap_config = {
3477 .val_bits = 32, 3476 .val_bits = 32,
3478 .max_register = 0x3880, 3477 .max_register = 0x3880,
3479 .fast_io = true, 3478 .fast_io = true,
3480 .val_format_endian = REGMAP_ENDIAN_LITTLE,
3481}; 3479};
3482 3480
3483static const struct qcom_cc_desc gcc_msm8960_desc = { 3481static const struct qcom_cc_desc gcc_msm8960_desc = {
diff --git a/drivers/clk/qcom/gcc-msm8974.c b/drivers/clk/qcom/gcc-msm8974.c
index df164d618e34..335952db309b 100644
--- a/drivers/clk/qcom/gcc-msm8974.c
+++ b/drivers/clk/qcom/gcc-msm8974.c
@@ -2680,7 +2680,6 @@ static const struct regmap_config gcc_msm8974_regmap_config = {
2680 .val_bits = 32, 2680 .val_bits = 32,
2681 .max_register = 0x1fc0, 2681 .max_register = 0x1fc0,
2682 .fast_io = true, 2682 .fast_io = true,
2683 .val_format_endian = REGMAP_ENDIAN_LITTLE,
2684}; 2683};
2685 2684
2686static const struct qcom_cc_desc gcc_msm8974_desc = { 2685static const struct qcom_cc_desc gcc_msm8974_desc = {
diff --git a/drivers/clk/qcom/lcc-ipq806x.c b/drivers/clk/qcom/lcc-ipq806x.c
index 62e79fadd5f7..db3998e5e2d8 100644
--- a/drivers/clk/qcom/lcc-ipq806x.c
+++ b/drivers/clk/qcom/lcc-ipq806x.c
@@ -419,7 +419,6 @@ static const struct regmap_config lcc_ipq806x_regmap_config = {
419 .val_bits = 32, 419 .val_bits = 32,
420 .max_register = 0xfc, 420 .max_register = 0xfc,
421 .fast_io = true, 421 .fast_io = true,
422 .val_format_endian = REGMAP_ENDIAN_LITTLE,
423}; 422};
424 423
425static const struct qcom_cc_desc lcc_ipq806x_desc = { 424static const struct qcom_cc_desc lcc_ipq806x_desc = {
diff --git a/drivers/clk/qcom/lcc-msm8960.c b/drivers/clk/qcom/lcc-msm8960.c
index bf95bb0ea1b8..4fcf9d1d233c 100644
--- a/drivers/clk/qcom/lcc-msm8960.c
+++ b/drivers/clk/qcom/lcc-msm8960.c
@@ -524,7 +524,6 @@ static const struct regmap_config lcc_msm8960_regmap_config = {
524 .val_bits = 32, 524 .val_bits = 32,
525 .max_register = 0xfc, 525 .max_register = 0xfc,
526 .fast_io = true, 526 .fast_io = true,
527 .val_format_endian = REGMAP_ENDIAN_LITTLE,
528}; 527};
529 528
530static const struct qcom_cc_desc lcc_msm8960_desc = { 529static const struct qcom_cc_desc lcc_msm8960_desc = {
diff --git a/drivers/clk/qcom/mmcc-apq8084.c b/drivers/clk/qcom/mmcc-apq8084.c
index 1e703fda8a0f..30777f9f1a43 100644
--- a/drivers/clk/qcom/mmcc-apq8084.c
+++ b/drivers/clk/qcom/mmcc-apq8084.c
@@ -3368,7 +3368,6 @@ static const struct regmap_config mmcc_apq8084_regmap_config = {
3368 .val_bits = 32, 3368 .val_bits = 32,
3369 .max_register = 0x5104, 3369 .max_register = 0x5104,
3370 .fast_io = true, 3370 .fast_io = true,
3371 .val_format_endian = REGMAP_ENDIAN_LITTLE,
3372}; 3371};
3373 3372
3374static const struct qcom_cc_desc mmcc_apq8084_desc = { 3373static const struct qcom_cc_desc mmcc_apq8084_desc = {
diff --git a/drivers/clk/qcom/mmcc-msm8960.c b/drivers/clk/qcom/mmcc-msm8960.c
index d73a048d3b9d..00e36192a1de 100644
--- a/drivers/clk/qcom/mmcc-msm8960.c
+++ b/drivers/clk/qcom/mmcc-msm8960.c
@@ -3029,7 +3029,6 @@ static const struct regmap_config mmcc_msm8960_regmap_config = {
3029 .val_bits = 32, 3029 .val_bits = 32,
3030 .max_register = 0x334, 3030 .max_register = 0x334,
3031 .fast_io = true, 3031 .fast_io = true,
3032 .val_format_endian = REGMAP_ENDIAN_LITTLE,
3033}; 3032};
3034 3033
3035static const struct regmap_config mmcc_apq8064_regmap_config = { 3034static const struct regmap_config mmcc_apq8064_regmap_config = {
@@ -3038,7 +3037,6 @@ static const struct regmap_config mmcc_apq8064_regmap_config = {
3038 .val_bits = 32, 3037 .val_bits = 32,
3039 .max_register = 0x350, 3038 .max_register = 0x350,
3040 .fast_io = true, 3039 .fast_io = true,
3041 .val_format_endian = REGMAP_ENDIAN_LITTLE,
3042}; 3040};
3043 3041
3044static const struct qcom_cc_desc mmcc_msm8960_desc = { 3042static const struct qcom_cc_desc mmcc_msm8960_desc = {
diff --git a/drivers/clk/qcom/mmcc-msm8974.c b/drivers/clk/qcom/mmcc-msm8974.c
index bbe28ed93669..9d790bcadf25 100644
--- a/drivers/clk/qcom/mmcc-msm8974.c
+++ b/drivers/clk/qcom/mmcc-msm8974.c
@@ -2594,7 +2594,6 @@ static const struct regmap_config mmcc_msm8974_regmap_config = {
2594 .val_bits = 32, 2594 .val_bits = 32,
2595 .max_register = 0x5104, 2595 .max_register = 0x5104,
2596 .fast_io = true, 2596 .fast_io = true,
2597 .val_format_endian = REGMAP_ENDIAN_LITTLE,
2598}; 2597};
2599 2598
2600static const struct qcom_cc_desc mmcc_msm8974_desc = { 2599static const struct qcom_cc_desc mmcc_msm8974_desc = {
diff --git a/drivers/clk/rockchip/clk-rk3036.c b/drivers/clk/rockchip/clk-rk3036.c
index ebce98033fbb..bc7fbac83ab7 100644
--- a/drivers/clk/rockchip/clk-rk3036.c
+++ b/drivers/clk/rockchip/clk-rk3036.c
@@ -133,7 +133,7 @@ PNAME(mux_spdif_p) = { "spdif_src", "spdif_frac", "xin12m" };
133PNAME(mux_uart0_p) = { "uart0_src", "uart0_frac", "xin24m" }; 133PNAME(mux_uart0_p) = { "uart0_src", "uart0_frac", "xin24m" };
134PNAME(mux_uart1_p) = { "uart1_src", "uart1_frac", "xin24m" }; 134PNAME(mux_uart1_p) = { "uart1_src", "uart1_frac", "xin24m" };
135PNAME(mux_uart2_p) = { "uart2_src", "uart2_frac", "xin24m" }; 135PNAME(mux_uart2_p) = { "uart2_src", "uart2_frac", "xin24m" };
136PNAME(mux_mac_p) = { "mac_pll_src", "ext_gmac" }; 136PNAME(mux_mac_p) = { "mac_pll_src", "rmii_clkin" };
137PNAME(mux_dclk_p) = { "dclk_lcdc", "dclk_cru" }; 137PNAME(mux_dclk_p) = { "dclk_lcdc", "dclk_cru" };
138 138
139static struct rockchip_pll_clock rk3036_pll_clks[] __initdata = { 139static struct rockchip_pll_clock rk3036_pll_clks[] __initdata = {
@@ -224,16 +224,16 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = {
224 RK2928_CLKGATE_CON(2), 2, GFLAGS), 224 RK2928_CLKGATE_CON(2), 2, GFLAGS),
225 225
226 COMPOSITE_NODIV(SCLK_TIMER0, "sclk_timer0", mux_timer_p, CLK_IGNORE_UNUSED, 226 COMPOSITE_NODIV(SCLK_TIMER0, "sclk_timer0", mux_timer_p, CLK_IGNORE_UNUSED,
227 RK2928_CLKSEL_CON(2), 4, 1, DFLAGS, 227 RK2928_CLKSEL_CON(2), 4, 1, MFLAGS,
228 RK2928_CLKGATE_CON(1), 0, GFLAGS), 228 RK2928_CLKGATE_CON(1), 0, GFLAGS),
229 COMPOSITE_NODIV(SCLK_TIMER1, "sclk_timer1", mux_timer_p, CLK_IGNORE_UNUSED, 229 COMPOSITE_NODIV(SCLK_TIMER1, "sclk_timer1", mux_timer_p, CLK_IGNORE_UNUSED,
230 RK2928_CLKSEL_CON(2), 5, 1, DFLAGS, 230 RK2928_CLKSEL_CON(2), 5, 1, MFLAGS,
231 RK2928_CLKGATE_CON(1), 1, GFLAGS), 231 RK2928_CLKGATE_CON(1), 1, GFLAGS),
232 COMPOSITE_NODIV(SCLK_TIMER2, "sclk_timer2", mux_timer_p, CLK_IGNORE_UNUSED, 232 COMPOSITE_NODIV(SCLK_TIMER2, "sclk_timer2", mux_timer_p, CLK_IGNORE_UNUSED,
233 RK2928_CLKSEL_CON(2), 6, 1, DFLAGS, 233 RK2928_CLKSEL_CON(2), 6, 1, MFLAGS,
234 RK2928_CLKGATE_CON(2), 4, GFLAGS), 234 RK2928_CLKGATE_CON(2), 4, GFLAGS),
235 COMPOSITE_NODIV(SCLK_TIMER3, "sclk_timer3", mux_timer_p, CLK_IGNORE_UNUSED, 235 COMPOSITE_NODIV(SCLK_TIMER3, "sclk_timer3", mux_timer_p, CLK_IGNORE_UNUSED,
236 RK2928_CLKSEL_CON(2), 7, 1, DFLAGS, 236 RK2928_CLKSEL_CON(2), 7, 1, MFLAGS,
237 RK2928_CLKGATE_CON(2), 5, GFLAGS), 237 RK2928_CLKGATE_CON(2), 5, GFLAGS),
238 238
239 MUX(0, "uart_pll_clk", mux_pll_src_apll_dpll_gpll_usb480m_p, 0, 239 MUX(0, "uart_pll_clk", mux_pll_src_apll_dpll_gpll_usb480m_p, 0,
@@ -242,11 +242,11 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = {
242 RK2928_CLKSEL_CON(13), 0, 7, DFLAGS, 242 RK2928_CLKSEL_CON(13), 0, 7, DFLAGS,
243 RK2928_CLKGATE_CON(1), 8, GFLAGS), 243 RK2928_CLKGATE_CON(1), 8, GFLAGS),
244 COMPOSITE_NOMUX(0, "uart1_src", "uart_pll_clk", 0, 244 COMPOSITE_NOMUX(0, "uart1_src", "uart_pll_clk", 0,
245 RK2928_CLKSEL_CON(13), 0, 7, DFLAGS, 245 RK2928_CLKSEL_CON(14), 0, 7, DFLAGS,
246 RK2928_CLKGATE_CON(1), 8, GFLAGS), 246 RK2928_CLKGATE_CON(1), 10, GFLAGS),
247 COMPOSITE_NOMUX(0, "uart2_src", "uart_pll_clk", 0, 247 COMPOSITE_NOMUX(0, "uart2_src", "uart_pll_clk", 0,
248 RK2928_CLKSEL_CON(13), 0, 7, DFLAGS, 248 RK2928_CLKSEL_CON(15), 0, 7, DFLAGS,
249 RK2928_CLKGATE_CON(1), 8, GFLAGS), 249 RK2928_CLKGATE_CON(1), 12, GFLAGS),
250 COMPOSITE_FRACMUX(0, "uart0_frac", "uart0_src", CLK_SET_RATE_PARENT, 250 COMPOSITE_FRACMUX(0, "uart0_frac", "uart0_src", CLK_SET_RATE_PARENT,
251 RK2928_CLKSEL_CON(17), 0, 251 RK2928_CLKSEL_CON(17), 0,
252 RK2928_CLKGATE_CON(1), 9, GFLAGS, 252 RK2928_CLKGATE_CON(1), 9, GFLAGS,
@@ -279,13 +279,13 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = {
279 RK2928_CLKGATE_CON(3), 2, GFLAGS), 279 RK2928_CLKGATE_CON(3), 2, GFLAGS),
280 280
281 COMPOSITE_NODIV(0, "sclk_sdmmc_src", mux_mmc_src_p, 0, 281 COMPOSITE_NODIV(0, "sclk_sdmmc_src", mux_mmc_src_p, 0,
282 RK2928_CLKSEL_CON(12), 8, 2, DFLAGS, 282 RK2928_CLKSEL_CON(12), 8, 2, MFLAGS,
283 RK2928_CLKGATE_CON(2), 11, GFLAGS), 283 RK2928_CLKGATE_CON(2), 11, GFLAGS),
284 DIV(SCLK_SDMMC, "sclk_sdmmc", "sclk_sdmmc_src", 0, 284 DIV(SCLK_SDMMC, "sclk_sdmmc", "sclk_sdmmc_src", 0,
285 RK2928_CLKSEL_CON(11), 0, 7, DFLAGS), 285 RK2928_CLKSEL_CON(11), 0, 7, DFLAGS),
286 286
287 COMPOSITE_NODIV(0, "sclk_sdio_src", mux_mmc_src_p, 0, 287 COMPOSITE_NODIV(0, "sclk_sdio_src", mux_mmc_src_p, 0,
288 RK2928_CLKSEL_CON(12), 10, 2, DFLAGS, 288 RK2928_CLKSEL_CON(12), 10, 2, MFLAGS,
289 RK2928_CLKGATE_CON(2), 13, GFLAGS), 289 RK2928_CLKGATE_CON(2), 13, GFLAGS),
290 DIV(SCLK_SDIO, "sclk_sdio", "sclk_sdio_src", 0, 290 DIV(SCLK_SDIO, "sclk_sdio", "sclk_sdio_src", 0,
291 RK2928_CLKSEL_CON(11), 8, 7, DFLAGS), 291 RK2928_CLKSEL_CON(11), 8, 7, DFLAGS),
@@ -344,12 +344,12 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = {
344 RK2928_CLKGATE_CON(10), 5, GFLAGS), 344 RK2928_CLKGATE_CON(10), 5, GFLAGS),
345 345
346 COMPOSITE_NOGATE(0, "mac_pll_src", mux_pll_src_3plls_p, 0, 346 COMPOSITE_NOGATE(0, "mac_pll_src", mux_pll_src_3plls_p, 0,
347 RK2928_CLKSEL_CON(21), 0, 2, MFLAGS, 4, 5, DFLAGS), 347 RK2928_CLKSEL_CON(21), 0, 2, MFLAGS, 9, 5, DFLAGS),
348 MUX(SCLK_MACREF, "mac_clk_ref", mux_mac_p, CLK_SET_RATE_PARENT, 348 MUX(SCLK_MACREF, "mac_clk_ref", mux_mac_p, CLK_SET_RATE_PARENT,
349 RK2928_CLKSEL_CON(21), 3, 1, MFLAGS), 349 RK2928_CLKSEL_CON(21), 3, 1, MFLAGS),
350 350
351 COMPOSITE_NOMUX(SCLK_MAC, "mac_clk", "mac_clk_ref", 0, 351 COMPOSITE_NOMUX(SCLK_MAC, "mac_clk", "mac_clk_ref", 0,
352 RK2928_CLKSEL_CON(21), 9, 5, DFLAGS, 352 RK2928_CLKSEL_CON(21), 4, 5, DFLAGS,
353 RK2928_CLKGATE_CON(2), 6, GFLAGS), 353 RK2928_CLKGATE_CON(2), 6, GFLAGS),
354 354
355 MUX(SCLK_HDMI, "dclk_hdmi", mux_dclk_p, 0, 355 MUX(SCLK_HDMI, "dclk_hdmi", mux_dclk_p, 0,
diff --git a/drivers/clk/rockchip/clk-rk3368.c b/drivers/clk/rockchip/clk-rk3368.c
index be0ede522269..21f3ea909fab 100644
--- a/drivers/clk/rockchip/clk-rk3368.c
+++ b/drivers/clk/rockchip/clk-rk3368.c
@@ -780,13 +780,13 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
780 GATE(PCLK_TSADC, "pclk_tsadc", "pclk_peri", 0, RK3368_CLKGATE_CON(20), 0, GFLAGS), 780 GATE(PCLK_TSADC, "pclk_tsadc", "pclk_peri", 0, RK3368_CLKGATE_CON(20), 0, GFLAGS),
781 781
782 /* pclk_pd_alive gates */ 782 /* pclk_pd_alive gates */
783 GATE(PCLK_TIMER1, "pclk_timer1", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 8, GFLAGS), 783 GATE(PCLK_TIMER1, "pclk_timer1", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(22), 13, GFLAGS),
784 GATE(PCLK_TIMER0, "pclk_timer0", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 7, GFLAGS), 784 GATE(PCLK_TIMER0, "pclk_timer0", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(22), 12, GFLAGS),
785 GATE(0, "pclk_alive_niu", "pclk_pd_alive", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(14), 12, GFLAGS), 785 GATE(0, "pclk_alive_niu", "pclk_pd_alive", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(22), 9, GFLAGS),
786 GATE(PCLK_GRF, "pclk_grf", "pclk_pd_alive", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(14), 11, GFLAGS), 786 GATE(PCLK_GRF, "pclk_grf", "pclk_pd_alive", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(22), 8, GFLAGS),
787 GATE(PCLK_GPIO3, "pclk_gpio3", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 3, GFLAGS), 787 GATE(PCLK_GPIO3, "pclk_gpio3", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(22), 3, GFLAGS),
788 GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 2, GFLAGS), 788 GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(22), 2, GFLAGS),
789 GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 1, GFLAGS), 789 GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(22), 1, GFLAGS),
790 790
791 /* 791 /*
792 * pclk_vio gates 792 * pclk_vio gates
@@ -796,12 +796,12 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
796 GATE(0, "pclk_dphytx", "hclk_vio", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(14), 8, GFLAGS), 796 GATE(0, "pclk_dphytx", "hclk_vio", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(14), 8, GFLAGS),
797 797
798 /* pclk_pd_pmu gates */ 798 /* pclk_pd_pmu gates */
799 GATE(PCLK_PMUGRF, "pclk_pmugrf", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 0, GFLAGS), 799 GATE(PCLK_PMUGRF, "pclk_pmugrf", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(23), 5, GFLAGS),
800 GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_pd_pmu", 0, RK3368_CLKGATE_CON(17), 4, GFLAGS), 800 GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_pd_pmu", 0, RK3368_CLKGATE_CON(23), 4, GFLAGS),
801 GATE(PCLK_SGRF, "pclk_sgrf", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 3, GFLAGS), 801 GATE(PCLK_SGRF, "pclk_sgrf", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(23), 3, GFLAGS),
802 GATE(0, "pclk_pmu_noc", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 2, GFLAGS), 802 GATE(0, "pclk_pmu_noc", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(23), 2, GFLAGS),
803 GATE(0, "pclk_intmem1", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 1, GFLAGS), 803 GATE(0, "pclk_intmem1", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(23), 1, GFLAGS),
804 GATE(PCLK_PMU, "pclk_pmu", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 2, GFLAGS), 804 GATE(PCLK_PMU, "pclk_pmu", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(23), 0, GFLAGS),
805 805
806 /* timer gates */ 806 /* timer gates */
807 GATE(0, "sclk_timer15", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 11, GFLAGS), 807 GATE(0, "sclk_timer15", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 11, GFLAGS),
diff --git a/drivers/clk/tegra/clk-emc.c b/drivers/clk/tegra/clk-emc.c
index e1fe8f35d45c..74e7544f861b 100644
--- a/drivers/clk/tegra/clk-emc.c
+++ b/drivers/clk/tegra/clk-emc.c
@@ -450,8 +450,10 @@ static int load_timings_from_dt(struct tegra_clk_emc *tegra,
450 struct emc_timing *timing = tegra->timings + (i++); 450 struct emc_timing *timing = tegra->timings + (i++);
451 451
452 err = load_one_timing_from_dt(tegra, timing, child); 452 err = load_one_timing_from_dt(tegra, timing, child);
453 if (err) 453 if (err) {
454 of_node_put(child);
454 return err; 455 return err;
456 }
455 457
456 timing->ram_code = ram_code; 458 timing->ram_code = ram_code;
457 } 459 }
@@ -499,9 +501,9 @@ struct clk *tegra_clk_register_emc(void __iomem *base, struct device_node *np,
499 * fuses until the apbmisc driver is loaded. 501 * fuses until the apbmisc driver is loaded.
500 */ 502 */
501 err = load_timings_from_dt(tegra, node, node_ram_code); 503 err = load_timings_from_dt(tegra, node, node_ram_code);
504 of_node_put(node);
502 if (err) 505 if (err)
503 return ERR_PTR(err); 506 return ERR_PTR(err);
504 of_node_put(node);
505 break; 507 break;
506 } 508 }
507 509
diff --git a/drivers/clk/tegra/clk-id.h b/drivers/clk/tegra/clk-id.h
index 19ce0738ee76..62ea38187b71 100644
--- a/drivers/clk/tegra/clk-id.h
+++ b/drivers/clk/tegra/clk-id.h
@@ -11,6 +11,7 @@ enum clk_id {
11 tegra_clk_afi, 11 tegra_clk_afi,
12 tegra_clk_amx, 12 tegra_clk_amx,
13 tegra_clk_amx1, 13 tegra_clk_amx1,
14 tegra_clk_apb2ape,
14 tegra_clk_apbdma, 15 tegra_clk_apbdma,
15 tegra_clk_apbif, 16 tegra_clk_apbif,
16 tegra_clk_ape, 17 tegra_clk_ape,
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
index a534bfab30b3..6ac3f843e7ca 100644
--- a/drivers/clk/tegra/clk-pll.c
+++ b/drivers/clk/tegra/clk-pll.c
@@ -86,15 +86,21 @@
86#define PLLE_SS_DISABLE (PLLE_SS_CNTL_BYPASS_SS | PLLE_SS_CNTL_INTERP_RESET |\ 86#define PLLE_SS_DISABLE (PLLE_SS_CNTL_BYPASS_SS | PLLE_SS_CNTL_INTERP_RESET |\
87 PLLE_SS_CNTL_SSC_BYP) 87 PLLE_SS_CNTL_SSC_BYP)
88#define PLLE_SS_MAX_MASK 0x1ff 88#define PLLE_SS_MAX_MASK 0x1ff
89#define PLLE_SS_MAX_VAL 0x25 89#define PLLE_SS_MAX_VAL_TEGRA114 0x25
90#define PLLE_SS_MAX_VAL_TEGRA210 0x21
90#define PLLE_SS_INC_MASK (0xff << 16) 91#define PLLE_SS_INC_MASK (0xff << 16)
91#define PLLE_SS_INC_VAL (0x1 << 16) 92#define PLLE_SS_INC_VAL (0x1 << 16)
92#define PLLE_SS_INCINTRV_MASK (0x3f << 24) 93#define PLLE_SS_INCINTRV_MASK (0x3f << 24)
93#define PLLE_SS_INCINTRV_VAL (0x20 << 24) 94#define PLLE_SS_INCINTRV_VAL_TEGRA114 (0x20 << 24)
95#define PLLE_SS_INCINTRV_VAL_TEGRA210 (0x23 << 24)
94#define PLLE_SS_COEFFICIENTS_MASK \ 96#define PLLE_SS_COEFFICIENTS_MASK \
95 (PLLE_SS_MAX_MASK | PLLE_SS_INC_MASK | PLLE_SS_INCINTRV_MASK) 97 (PLLE_SS_MAX_MASK | PLLE_SS_INC_MASK | PLLE_SS_INCINTRV_MASK)
96#define PLLE_SS_COEFFICIENTS_VAL \ 98#define PLLE_SS_COEFFICIENTS_VAL_TEGRA114 \
97 (PLLE_SS_MAX_VAL | PLLE_SS_INC_VAL | PLLE_SS_INCINTRV_VAL) 99 (PLLE_SS_MAX_VAL_TEGRA114 | PLLE_SS_INC_VAL |\
100 PLLE_SS_INCINTRV_VAL_TEGRA114)
101#define PLLE_SS_COEFFICIENTS_VAL_TEGRA210 \
102 (PLLE_SS_MAX_VAL_TEGRA210 | PLLE_SS_INC_VAL |\
103 PLLE_SS_INCINTRV_VAL_TEGRA210)
98 104
99#define PLLE_AUX_PLLP_SEL BIT(2) 105#define PLLE_AUX_PLLP_SEL BIT(2)
100#define PLLE_AUX_USE_LOCKDET BIT(3) 106#define PLLE_AUX_USE_LOCKDET BIT(3)
@@ -880,7 +886,7 @@ static int clk_plle_training(struct tegra_clk_pll *pll)
880static int clk_plle_enable(struct clk_hw *hw) 886static int clk_plle_enable(struct clk_hw *hw)
881{ 887{
882 struct tegra_clk_pll *pll = to_clk_pll(hw); 888 struct tegra_clk_pll *pll = to_clk_pll(hw);
883 unsigned long input_rate = clk_get_rate(clk_get_parent(hw->clk)); 889 unsigned long input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
884 struct tegra_clk_pll_freq_table sel; 890 struct tegra_clk_pll_freq_table sel;
885 u32 val; 891 u32 val;
886 int err; 892 int err;
@@ -1378,7 +1384,7 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw)
1378 u32 val; 1384 u32 val;
1379 int ret; 1385 int ret;
1380 unsigned long flags = 0; 1386 unsigned long flags = 0;
1381 unsigned long input_rate = clk_get_rate(clk_get_parent(hw->clk)); 1387 unsigned long input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
1382 1388
1383 if (_get_table_rate(hw, &sel, pll->params->fixed_rate, input_rate)) 1389 if (_get_table_rate(hw, &sel, pll->params->fixed_rate, input_rate))
1384 return -EINVAL; 1390 return -EINVAL;
@@ -1401,7 +1407,7 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw)
1401 val |= PLLE_MISC_IDDQ_SW_CTRL; 1407 val |= PLLE_MISC_IDDQ_SW_CTRL;
1402 val &= ~PLLE_MISC_IDDQ_SW_VALUE; 1408 val &= ~PLLE_MISC_IDDQ_SW_VALUE;
1403 val |= PLLE_MISC_PLLE_PTS; 1409 val |= PLLE_MISC_PLLE_PTS;
1404 val |= PLLE_MISC_VREG_BG_CTRL_MASK | PLLE_MISC_VREG_CTRL_MASK; 1410 val &= ~(PLLE_MISC_VREG_BG_CTRL_MASK | PLLE_MISC_VREG_CTRL_MASK);
1405 pll_writel_misc(val, pll); 1411 pll_writel_misc(val, pll);
1406 udelay(5); 1412 udelay(5);
1407 1413
@@ -1428,7 +1434,7 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw)
1428 val = pll_readl(PLLE_SS_CTRL, pll); 1434 val = pll_readl(PLLE_SS_CTRL, pll);
1429 val &= ~(PLLE_SS_CNTL_CENTER | PLLE_SS_CNTL_INVERT); 1435 val &= ~(PLLE_SS_CNTL_CENTER | PLLE_SS_CNTL_INVERT);
1430 val &= ~PLLE_SS_COEFFICIENTS_MASK; 1436 val &= ~PLLE_SS_COEFFICIENTS_MASK;
1431 val |= PLLE_SS_COEFFICIENTS_VAL; 1437 val |= PLLE_SS_COEFFICIENTS_VAL_TEGRA114;
1432 pll_writel(val, PLLE_SS_CTRL, pll); 1438 pll_writel(val, PLLE_SS_CTRL, pll);
1433 val &= ~(PLLE_SS_CNTL_SSC_BYP | PLLE_SS_CNTL_BYPASS_SS); 1439 val &= ~(PLLE_SS_CNTL_SSC_BYP | PLLE_SS_CNTL_BYPASS_SS);
1434 pll_writel(val, PLLE_SS_CTRL, pll); 1440 pll_writel(val, PLLE_SS_CTRL, pll);
@@ -2012,9 +2018,9 @@ static int clk_plle_tegra210_enable(struct clk_hw *hw)
2012 struct tegra_clk_pll *pll = to_clk_pll(hw); 2018 struct tegra_clk_pll *pll = to_clk_pll(hw);
2013 struct tegra_clk_pll_freq_table sel; 2019 struct tegra_clk_pll_freq_table sel;
2014 u32 val; 2020 u32 val;
2015 int ret; 2021 int ret = 0;
2016 unsigned long flags = 0; 2022 unsigned long flags = 0;
2017 unsigned long input_rate = clk_get_rate(clk_get_parent(hw->clk)); 2023 unsigned long input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
2018 2024
2019 if (_get_table_rate(hw, &sel, pll->params->fixed_rate, input_rate)) 2025 if (_get_table_rate(hw, &sel, pll->params->fixed_rate, input_rate))
2020 return -EINVAL; 2026 return -EINVAL;
@@ -2022,22 +2028,20 @@ static int clk_plle_tegra210_enable(struct clk_hw *hw)
2022 if (pll->lock) 2028 if (pll->lock)
2023 spin_lock_irqsave(pll->lock, flags); 2029 spin_lock_irqsave(pll->lock, flags);
2024 2030
2031 val = pll_readl(pll->params->aux_reg, pll);
2032 if (val & PLLE_AUX_SEQ_ENABLE)
2033 goto out;
2034
2025 val = pll_readl_base(pll); 2035 val = pll_readl_base(pll);
2026 val &= ~BIT(30); /* Disable lock override */ 2036 val &= ~BIT(30); /* Disable lock override */
2027 pll_writel_base(val, pll); 2037 pll_writel_base(val, pll);
2028 2038
2029 val = pll_readl(pll->params->aux_reg, pll);
2030 val |= PLLE_AUX_ENABLE_SWCTL;
2031 val &= ~PLLE_AUX_SEQ_ENABLE;
2032 pll_writel(val, pll->params->aux_reg, pll);
2033 udelay(1);
2034
2035 val = pll_readl_misc(pll); 2039 val = pll_readl_misc(pll);
2036 val |= PLLE_MISC_LOCK_ENABLE; 2040 val |= PLLE_MISC_LOCK_ENABLE;
2037 val |= PLLE_MISC_IDDQ_SW_CTRL; 2041 val |= PLLE_MISC_IDDQ_SW_CTRL;
2038 val &= ~PLLE_MISC_IDDQ_SW_VALUE; 2042 val &= ~PLLE_MISC_IDDQ_SW_VALUE;
2039 val |= PLLE_MISC_PLLE_PTS; 2043 val |= PLLE_MISC_PLLE_PTS;
2040 val |= PLLE_MISC_VREG_BG_CTRL_MASK | PLLE_MISC_VREG_CTRL_MASK; 2044 val &= ~(PLLE_MISC_VREG_BG_CTRL_MASK | PLLE_MISC_VREG_CTRL_MASK);
2041 pll_writel_misc(val, pll); 2045 pll_writel_misc(val, pll);
2042 udelay(5); 2046 udelay(5);
2043 2047
@@ -2067,7 +2071,7 @@ static int clk_plle_tegra210_enable(struct clk_hw *hw)
2067 val = pll_readl(PLLE_SS_CTRL, pll); 2071 val = pll_readl(PLLE_SS_CTRL, pll);
2068 val &= ~(PLLE_SS_CNTL_CENTER | PLLE_SS_CNTL_INVERT); 2072 val &= ~(PLLE_SS_CNTL_CENTER | PLLE_SS_CNTL_INVERT);
2069 val &= ~PLLE_SS_COEFFICIENTS_MASK; 2073 val &= ~PLLE_SS_COEFFICIENTS_MASK;
2070 val |= PLLE_SS_COEFFICIENTS_VAL; 2074 val |= PLLE_SS_COEFFICIENTS_VAL_TEGRA210;
2071 pll_writel(val, PLLE_SS_CTRL, pll); 2075 pll_writel(val, PLLE_SS_CTRL, pll);
2072 val &= ~(PLLE_SS_CNTL_SSC_BYP | PLLE_SS_CNTL_BYPASS_SS); 2076 val &= ~(PLLE_SS_CNTL_SSC_BYP | PLLE_SS_CNTL_BYPASS_SS);
2073 pll_writel(val, PLLE_SS_CTRL, pll); 2077 pll_writel(val, PLLE_SS_CTRL, pll);
@@ -2104,15 +2108,25 @@ static void clk_plle_tegra210_disable(struct clk_hw *hw)
2104 if (pll->lock) 2108 if (pll->lock)
2105 spin_lock_irqsave(pll->lock, flags); 2109 spin_lock_irqsave(pll->lock, flags);
2106 2110
2111 /* If PLLE HW sequencer is enabled, SW should not disable PLLE */
2112 val = pll_readl(pll->params->aux_reg, pll);
2113 if (val & PLLE_AUX_SEQ_ENABLE)
2114 goto out;
2115
2107 val = pll_readl_base(pll); 2116 val = pll_readl_base(pll);
2108 val &= ~PLLE_BASE_ENABLE; 2117 val &= ~PLLE_BASE_ENABLE;
2109 pll_writel_base(val, pll); 2118 pll_writel_base(val, pll);
2110 2119
2120 val = pll_readl(pll->params->aux_reg, pll);
2121 val |= PLLE_AUX_ENABLE_SWCTL | PLLE_AUX_SS_SWCTL;
2122 pll_writel(val, pll->params->aux_reg, pll);
2123
2111 val = pll_readl_misc(pll); 2124 val = pll_readl_misc(pll);
2112 val |= PLLE_MISC_IDDQ_SW_CTRL | PLLE_MISC_IDDQ_SW_VALUE; 2125 val |= PLLE_MISC_IDDQ_SW_CTRL | PLLE_MISC_IDDQ_SW_VALUE;
2113 pll_writel_misc(val, pll); 2126 pll_writel_misc(val, pll);
2114 udelay(1); 2127 udelay(1);
2115 2128
2129out:
2116 if (pll->lock) 2130 if (pll->lock)
2117 spin_unlock_irqrestore(pll->lock, flags); 2131 spin_unlock_irqrestore(pll->lock, flags);
2118} 2132}
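The clk-pll.c hunks make the Tegra210 PLLE enable and disable paths defer to the hardware power sequencer: once PLLE_AUX_SEQ_ENABLE is set in the aux register, software skips its own enable/IDDQ manipulation. The guard has the same shape in both paths; a fragment-level sketch using the symbols visible in the hunks (`out` is the existing unlock label in those functions):

/* Shape of the new guard inside clk_plle_tegra210_enable()/_disable():
 * if the HW sequencer already owns the PLL, leave it alone. */
val = pll_readl(pll->params->aux_reg, pll);
if (val & PLLE_AUX_SEQ_ENABLE)	/* HW sequencer owns the PLL */
	goto out;		/* don't touch enable/IDDQ bits from SW */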
diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c
index 6ad381a888a6..ea2b9cbf9e70 100644
--- a/drivers/clk/tegra/clk-tegra-periph.c
+++ b/drivers/clk/tegra/clk-tegra-periph.c
@@ -773,7 +773,7 @@ static struct tegra_periph_init_data periph_clks[] = {
773 XUSB("xusb_dev_src", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_DEV_SRC, 95, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_dev_src), 773 XUSB("xusb_dev_src", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_DEV_SRC, 95, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_dev_src),
774 XUSB("xusb_dev_src", mux_clkm_pllp_pllre, CLK_SOURCE_XUSB_DEV_SRC, 95, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_dev_src_8), 774 XUSB("xusb_dev_src", mux_clkm_pllp_pllre, CLK_SOURCE_XUSB_DEV_SRC, 95, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_dev_src_8),
775 MUX8("dbgapb", mux_pllp_clkm_2, CLK_SOURCE_DBGAPB, 185, TEGRA_PERIPH_NO_RESET, tegra_clk_dbgapb), 775 MUX8("dbgapb", mux_pllp_clkm_2, CLK_SOURCE_DBGAPB, 185, TEGRA_PERIPH_NO_RESET, tegra_clk_dbgapb),
776 MUX8("msenc", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVENC, 219, 0, tegra_clk_nvenc), 776 MUX8("nvenc", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVENC, 219, 0, tegra_clk_nvenc),
777 MUX8("nvdec", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVDEC, 194, 0, tegra_clk_nvdec), 777 MUX8("nvdec", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVDEC, 194, 0, tegra_clk_nvdec),
778 MUX8("nvjpg", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVJPG, 195, 0, tegra_clk_nvjpg), 778 MUX8("nvjpg", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVJPG, 195, 0, tegra_clk_nvjpg),
779 MUX8("ape", mux_plla_pllc4_out0_pllc_pllc4_out1_pllp_pllc4_out2_clkm, CLK_SOURCE_APE, 198, TEGRA_PERIPH_ON_APB, tegra_clk_ape), 779 MUX8("ape", mux_plla_pllc4_out0_pllc_pllc4_out1_pllp_pllc4_out2_clkm, CLK_SOURCE_APE, 198, TEGRA_PERIPH_ON_APB, tegra_clk_ape),
@@ -782,7 +782,7 @@ static struct tegra_periph_init_data periph_clks[] = {
782 NODIV("sor1", mux_clkm_sor1_brick_sor1_src, CLK_SOURCE_SOR1, 15, MASK(1), 183, 0, tegra_clk_sor1, &sor1_lock), 782 NODIV("sor1", mux_clkm_sor1_brick_sor1_src, CLK_SOURCE_SOR1, 15, MASK(1), 183, 0, tegra_clk_sor1, &sor1_lock),
783 MUX8("sdmmc_legacy", mux_pllp_out3_clkm_pllp_pllc4, CLK_SOURCE_SDMMC_LEGACY, 193, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_sdmmc_legacy), 783 MUX8("sdmmc_legacy", mux_pllp_out3_clkm_pllp_pllc4, CLK_SOURCE_SDMMC_LEGACY, 193, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_sdmmc_legacy),
784 MUX8("qspi", mux_pllp_pllc_pllc_out1_pllc4_out2_pllc4_out1_clkm_pllc4_out0, CLK_SOURCE_QSPI, 211, TEGRA_PERIPH_ON_APB, tegra_clk_qspi), 784 MUX8("qspi", mux_pllp_pllc_pllc_out1_pllc4_out2_pllc4_out1_clkm_pllc4_out0, CLK_SOURCE_QSPI, 211, TEGRA_PERIPH_ON_APB, tegra_clk_qspi),
785 MUX("vii2c", mux_pllp_pllc_clkm, CLK_SOURCE_VI_I2C, 208, TEGRA_PERIPH_ON_APB, tegra_clk_vi_i2c), 785 I2C("vii2c", mux_pllp_pllc_clkm, CLK_SOURCE_VI_I2C, 208, tegra_clk_vi_i2c),
786 MUX("mipibif", mux_pllp_clkm, CLK_SOURCE_MIPIBIF, 173, TEGRA_PERIPH_ON_APB, tegra_clk_mipibif), 786 MUX("mipibif", mux_pllp_clkm, CLK_SOURCE_MIPIBIF, 173, TEGRA_PERIPH_ON_APB, tegra_clk_mipibif),
787 MUX("uartape", mux_pllp_pllc_clkm, CLK_SOURCE_UARTAPE, 212, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_uartape), 787 MUX("uartape", mux_pllp_pllc_clkm, CLK_SOURCE_UARTAPE, 212, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_uartape),
788 MUX8("tsecb", mux_pllp_pllc2_c_c3_clkm, CLK_SOURCE_TSECB, 206, 0, tegra_clk_tsecb), 788 MUX8("tsecb", mux_pllp_pllc2_c_c3_clkm, CLK_SOURCE_TSECB, 206, 0, tegra_clk_tsecb),
@@ -829,6 +829,7 @@ static struct tegra_periph_init_data gate_clks[] = {
829 GATE("xusb_gate", "osc", 143, 0, tegra_clk_xusb_gate, 0), 829 GATE("xusb_gate", "osc", 143, 0, tegra_clk_xusb_gate, 0),
830 GATE("pll_p_out_cpu", "pll_p", 223, 0, tegra_clk_pll_p_out_cpu, 0), 830 GATE("pll_p_out_cpu", "pll_p", 223, 0, tegra_clk_pll_p_out_cpu, 0),
831 GATE("pll_p_out_adsp", "pll_p", 187, 0, tegra_clk_pll_p_out_adsp, 0), 831 GATE("pll_p_out_adsp", "pll_p", 187, 0, tegra_clk_pll_p_out_adsp, 0),
832 GATE("apb2ape", "clk_m", 107, 0, tegra_clk_apb2ape, 0),
832}; 833};
833 834
834static struct tegra_periph_init_data div_clks[] = { 835static struct tegra_periph_init_data div_clks[] = {
diff --git a/drivers/clk/tegra/clk-tegra-super-gen4.c b/drivers/clk/tegra/clk-tegra-super-gen4.c
index 4559a20e3af6..474de0f0c26d 100644
--- a/drivers/clk/tegra/clk-tegra-super-gen4.c
+++ b/drivers/clk/tegra/clk-tegra-super-gen4.c
@@ -67,7 +67,7 @@ static const char *cclk_lp_parents[] = { "clk_m", "pll_c", "clk_32k", "pll_m",
67 "pll_p", "pll_p_out4", "unused", 67 "pll_p", "pll_p_out4", "unused",
68 "unused", "pll_x", "pll_x_out0" }; 68 "unused", "pll_x", "pll_x_out0" };
69 69
70const struct tegra_super_gen_info tegra_super_gen_info_gen4 = { 70static const struct tegra_super_gen_info tegra_super_gen_info_gen4 = {
71 .gen = gen4, 71 .gen = gen4,
72 .sclk_parents = sclk_parents, 72 .sclk_parents = sclk_parents,
73 .cclk_g_parents = cclk_g_parents, 73 .cclk_g_parents = cclk_g_parents,
@@ -93,7 +93,7 @@ static const char *cclk_lp_parents_gen5[] = { "clk_m", "unused", "clk_32k", "unu
93 "unused", "unused", "unused", "unused", 93 "unused", "unused", "unused", "unused",
94 "dfllCPU_out" }; 94 "dfllCPU_out" };
95 95
96const struct tegra_super_gen_info tegra_super_gen_info_gen5 = { 96static const struct tegra_super_gen_info tegra_super_gen_info_gen5 = {
97 .gen = gen5, 97 .gen = gen5,
98 .sclk_parents = sclk_parents_gen5, 98 .sclk_parents = sclk_parents_gen5,
99 .cclk_g_parents = cclk_g_parents_gen5, 99 .cclk_g_parents = cclk_g_parents_gen5,
@@ -171,7 +171,7 @@ static void __init tegra_sclk_init(void __iomem *clk_base,
171 *dt_clk = clk; 171 *dt_clk = clk;
172} 172}
173 173
174void __init tegra_super_clk_init(void __iomem *clk_base, 174static void __init tegra_super_clk_init(void __iomem *clk_base,
175 void __iomem *pmc_base, 175 void __iomem *pmc_base,
176 struct tegra_clk *tegra_clks, 176 struct tegra_clk *tegra_clks,
177 struct tegra_clk_pll_params *params, 177 struct tegra_clk_pll_params *params,
diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
index 58514c44ea83..637041fd53ad 100644
--- a/drivers/clk/tegra/clk-tegra210.c
+++ b/drivers/clk/tegra/clk-tegra210.c
@@ -59,8 +59,8 @@
59#define PLLC3_MISC3 0x50c 59#define PLLC3_MISC3 0x50c
60 60
61#define PLLM_BASE 0x90 61#define PLLM_BASE 0x90
62#define PLLM_MISC0 0x9c
63#define PLLM_MISC1 0x98 62#define PLLM_MISC1 0x98
63#define PLLM_MISC2 0x9c
64#define PLLP_BASE 0xa0 64#define PLLP_BASE 0xa0
65#define PLLP_MISC0 0xac 65#define PLLP_MISC0 0xac
66#define PLLP_MISC1 0x680 66#define PLLP_MISC1 0x680
@@ -99,7 +99,7 @@
99#define PLLC4_MISC0 0x5a8 99#define PLLC4_MISC0 0x5a8
100#define PLLC4_OUT 0x5e4 100#define PLLC4_OUT 0x5e4
101#define PLLMB_BASE 0x5e8 101#define PLLMB_BASE 0x5e8
102#define PLLMB_MISC0 0x5ec 102#define PLLMB_MISC1 0x5ec
103#define PLLA1_BASE 0x6a4 103#define PLLA1_BASE 0x6a4
104#define PLLA1_MISC0 0x6a8 104#define PLLA1_MISC0 0x6a8
105#define PLLA1_MISC1 0x6ac 105#define PLLA1_MISC1 0x6ac
@@ -243,7 +243,8 @@ static unsigned long tegra210_input_freq[] = {
243}; 243};
244 244
245static const char *mux_pllmcp_clkm[] = { 245static const char *mux_pllmcp_clkm[] = {
246 "pll_m", "pll_c", "pll_p", "clk_m", "pll_m_ud", "pll_c2", "pll_c3", 246 "pll_m", "pll_c", "pll_p", "clk_m", "pll_m_ud", "pll_mb", "pll_mb",
247 "pll_p",
247}; 248};
248#define mux_pllmcp_clkm_idx NULL 249#define mux_pllmcp_clkm_idx NULL
249 250
@@ -367,12 +368,12 @@ static const char *mux_pllmcp_clkm[] = {
367/* PLLMB */ 368/* PLLMB */
368#define PLLMB_BASE_LOCK (1 << 27) 369#define PLLMB_BASE_LOCK (1 << 27)
369 370
370#define PLLMB_MISC0_LOCK_OVERRIDE (1 << 18) 371#define PLLMB_MISC1_LOCK_OVERRIDE (1 << 18)
371#define PLLMB_MISC0_IDDQ (1 << 17) 372#define PLLMB_MISC1_IDDQ (1 << 17)
372#define PLLMB_MISC0_LOCK_ENABLE (1 << 16) 373#define PLLMB_MISC1_LOCK_ENABLE (1 << 16)
373 374
374#define PLLMB_MISC0_DEFAULT_VALUE 0x00030000 375#define PLLMB_MISC1_DEFAULT_VALUE 0x00030000
375#define PLLMB_MISC0_WRITE_MASK 0x0007ffff 376#define PLLMB_MISC1_WRITE_MASK 0x0007ffff
376 377
377/* PLLP */ 378/* PLLP */
378#define PLLP_BASE_OVERRIDE (1 << 28) 379#define PLLP_BASE_OVERRIDE (1 << 28)
@@ -457,7 +458,8 @@ static void pllcx_check_defaults(struct tegra_clk_pll_params *params)
457 PLLCX_MISC3_WRITE_MASK); 458 PLLCX_MISC3_WRITE_MASK);
458} 459}
459 460
460void tegra210_pllcx_set_defaults(const char *name, struct tegra_clk_pll *pllcx) 461static void tegra210_pllcx_set_defaults(const char *name,
462 struct tegra_clk_pll *pllcx)
461{ 463{
462 pllcx->params->defaults_set = true; 464 pllcx->params->defaults_set = true;
463 465
@@ -482,22 +484,22 @@ void tegra210_pllcx_set_defaults(const char *name, struct tegra_clk_pll *pllcx)
482 udelay(1); 484 udelay(1);
483} 485}
484 486
485void _pllc_set_defaults(struct tegra_clk_pll *pllcx) 487static void _pllc_set_defaults(struct tegra_clk_pll *pllcx)
486{ 488{
487 tegra210_pllcx_set_defaults("PLL_C", pllcx); 489 tegra210_pllcx_set_defaults("PLL_C", pllcx);
488} 490}
489 491
490void _pllc2_set_defaults(struct tegra_clk_pll *pllcx) 492static void _pllc2_set_defaults(struct tegra_clk_pll *pllcx)
491{ 493{
492 tegra210_pllcx_set_defaults("PLL_C2", pllcx); 494 tegra210_pllcx_set_defaults("PLL_C2", pllcx);
493} 495}
494 496
495void _pllc3_set_defaults(struct tegra_clk_pll *pllcx) 497static void _pllc3_set_defaults(struct tegra_clk_pll *pllcx)
496{ 498{
497 tegra210_pllcx_set_defaults("PLL_C3", pllcx); 499 tegra210_pllcx_set_defaults("PLL_C3", pllcx);
498} 500}
499 501
500void _plla1_set_defaults(struct tegra_clk_pll *pllcx) 502static void _plla1_set_defaults(struct tegra_clk_pll *pllcx)
501{ 503{
502 tegra210_pllcx_set_defaults("PLL_A1", pllcx); 504 tegra210_pllcx_set_defaults("PLL_A1", pllcx);
503} 505}
@@ -507,7 +509,7 @@ void _plla1_set_defaults(struct tegra_clk_pll *pllcx)
507 * PLL with dynamic ramp and fractional SDM. Dynamic ramp is not used. 509 * PLL with dynamic ramp and fractional SDM. Dynamic ramp is not used.
508 * Fractional SDM is allowed to provide exact audio rates. 510 * Fractional SDM is allowed to provide exact audio rates.
509 */ 511 */
510void tegra210_plla_set_defaults(struct tegra_clk_pll *plla) 512static void tegra210_plla_set_defaults(struct tegra_clk_pll *plla)
511{ 513{
512 u32 mask; 514 u32 mask;
513 u32 val = readl_relaxed(clk_base + plla->params->base_reg); 515 u32 val = readl_relaxed(clk_base + plla->params->base_reg);
@@ -559,7 +561,7 @@ void tegra210_plla_set_defaults(struct tegra_clk_pll *plla)
559 * PLLD 561 * PLLD
560 * PLL with fractional SDM. 562 * PLL with fractional SDM.
561 */ 563 */
562void tegra210_plld_set_defaults(struct tegra_clk_pll *plld) 564static void tegra210_plld_set_defaults(struct tegra_clk_pll *plld)
563{ 565{
564 u32 val; 566 u32 val;
565 u32 mask = 0xffff; 567 u32 mask = 0xffff;
@@ -698,7 +700,7 @@ static void plldss_defaults(const char *pll_name, struct tegra_clk_pll *plldss,
698 udelay(1); 700 udelay(1);
699} 701}
700 702
701void tegra210_plld2_set_defaults(struct tegra_clk_pll *plld2) 703static void tegra210_plld2_set_defaults(struct tegra_clk_pll *plld2)
702{ 704{
703 plldss_defaults("PLL_D2", plld2, PLLD2_MISC0_DEFAULT_VALUE, 705 plldss_defaults("PLL_D2", plld2, PLLD2_MISC0_DEFAULT_VALUE,
704 PLLD2_MISC1_CFG_DEFAULT_VALUE, 706 PLLD2_MISC1_CFG_DEFAULT_VALUE,
@@ -706,7 +708,7 @@ void tegra210_plld2_set_defaults(struct tegra_clk_pll *plld2)
706 PLLD2_MISC3_CTRL2_DEFAULT_VALUE); 708 PLLD2_MISC3_CTRL2_DEFAULT_VALUE);
707} 709}
708 710
709void tegra210_plldp_set_defaults(struct tegra_clk_pll *plldp) 711static void tegra210_plldp_set_defaults(struct tegra_clk_pll *plldp)
710{ 712{
711 plldss_defaults("PLL_DP", plldp, PLLDP_MISC0_DEFAULT_VALUE, 713 plldss_defaults("PLL_DP", plldp, PLLDP_MISC0_DEFAULT_VALUE,
712 PLLDP_MISC1_CFG_DEFAULT_VALUE, 714 PLLDP_MISC1_CFG_DEFAULT_VALUE,
@@ -719,7 +721,7 @@ void tegra210_plldp_set_defaults(struct tegra_clk_pll *plldp)
719 * Base and misc0 layout is the same as PLLD2/PLLDP, but no SDM/SSC support. 721 * Base and misc0 layout is the same as PLLD2/PLLDP, but no SDM/SSC support.
720 * VCO is exposed to the clock tree via fixed 1/3 and 1/5 dividers. 722 * VCO is exposed to the clock tree via fixed 1/3 and 1/5 dividers.
721 */ 723 */
722void tegra210_pllc4_set_defaults(struct tegra_clk_pll *pllc4) 724static void tegra210_pllc4_set_defaults(struct tegra_clk_pll *pllc4)
723{ 725{
724 plldss_defaults("PLL_C4", pllc4, PLLC4_MISC0_DEFAULT_VALUE, 0, 0, 0); 726 plldss_defaults("PLL_C4", pllc4, PLLC4_MISC0_DEFAULT_VALUE, 0, 0, 0);
725} 727}
@@ -728,7 +730,7 @@ void tegra210_pllc4_set_defaults(struct tegra_clk_pll *pllc4)
728 * PLLRE 730 * PLLRE
729 * VCO is exposed to the clock tree directly along with post-divider output 731 * VCO is exposed to the clock tree directly along with post-divider output
730 */ 732 */
731void tegra210_pllre_set_defaults(struct tegra_clk_pll *pllre) 733static void tegra210_pllre_set_defaults(struct tegra_clk_pll *pllre)
732{ 734{
733 u32 mask; 735 u32 mask;
734 u32 val = readl_relaxed(clk_base + pllre->params->base_reg); 736 u32 val = readl_relaxed(clk_base + pllre->params->base_reg);
@@ -780,13 +782,13 @@ static void pllx_get_dyn_steps(struct clk_hw *hw, u32 *step_a, u32 *step_b)
780{ 782{
781 unsigned long input_rate; 783 unsigned long input_rate;
782 784
783 if (!IS_ERR_OR_NULL(hw->clk)) { 785 /* cf rate */
786 if (!IS_ERR_OR_NULL(hw->clk))
784 input_rate = clk_hw_get_rate(clk_hw_get_parent(hw)); 787 input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
785 /* cf rate */ 788 else
786 input_rate /= tegra_pll_get_fixed_mdiv(hw, input_rate);
787 } else {
788 input_rate = 38400000; 789 input_rate = 38400000;
789 } 790
791 input_rate /= tegra_pll_get_fixed_mdiv(hw, input_rate);
790 792
791 switch (input_rate) { 793 switch (input_rate) {
792 case 12000000: 794 case 12000000:
@@ -841,7 +843,7 @@ static void pllx_check_defaults(struct tegra_clk_pll *pll)
841 PLLX_MISC5_WRITE_MASK); 843 PLLX_MISC5_WRITE_MASK);
842} 844}
843 845
844void tegra210_pllx_set_defaults(struct tegra_clk_pll *pllx) 846static void tegra210_pllx_set_defaults(struct tegra_clk_pll *pllx)
845{ 847{
846 u32 val; 848 u32 val;
847 u32 step_a, step_b; 849 u32 step_a, step_b;
@@ -901,7 +903,7 @@ void tegra210_pllx_set_defaults(struct tegra_clk_pll *pllx)
901} 903}
902 904
903/* PLLMB */ 905/* PLLMB */
904void tegra210_pllmb_set_defaults(struct tegra_clk_pll *pllmb) 906static void tegra210_pllmb_set_defaults(struct tegra_clk_pll *pllmb)
905{ 907{
906 u32 mask, val = readl_relaxed(clk_base + pllmb->params->base_reg); 908 u32 mask, val = readl_relaxed(clk_base + pllmb->params->base_reg);
907 909
@@ -914,15 +916,15 @@ void tegra210_pllmb_set_defaults(struct tegra_clk_pll *pllmb)
914 * PLL is ON: check if defaults already set, then set those 916 * PLL is ON: check if defaults already set, then set those
915 * that can be updated in flight. 917 * that can be updated in flight.
916 */ 918 */
917 val = PLLMB_MISC0_DEFAULT_VALUE & (~PLLMB_MISC0_IDDQ); 919 val = PLLMB_MISC1_DEFAULT_VALUE & (~PLLMB_MISC1_IDDQ);
918 mask = PLLMB_MISC0_LOCK_ENABLE | PLLMB_MISC0_LOCK_OVERRIDE; 920 mask = PLLMB_MISC1_LOCK_ENABLE | PLLMB_MISC1_LOCK_OVERRIDE;
919 _pll_misc_chk_default(clk_base, pllmb->params, 0, val, 921 _pll_misc_chk_default(clk_base, pllmb->params, 0, val,
920 ~mask & PLLMB_MISC0_WRITE_MASK); 922 ~mask & PLLMB_MISC1_WRITE_MASK);
921 923
922 /* Enable lock detect */ 924 /* Enable lock detect */
923 val = readl_relaxed(clk_base + pllmb->params->ext_misc_reg[0]); 925 val = readl_relaxed(clk_base + pllmb->params->ext_misc_reg[0]);
924 val &= ~mask; 926 val &= ~mask;
925 val |= PLLMB_MISC0_DEFAULT_VALUE & mask; 927 val |= PLLMB_MISC1_DEFAULT_VALUE & mask;
926 writel_relaxed(val, clk_base + pllmb->params->ext_misc_reg[0]); 928 writel_relaxed(val, clk_base + pllmb->params->ext_misc_reg[0]);
927 udelay(1); 929 udelay(1);
928 930
@@ -930,7 +932,7 @@ void tegra210_pllmb_set_defaults(struct tegra_clk_pll *pllmb)
930 } 932 }
931 933
932 /* set IDDQ, enable lock detect */ 934 /* set IDDQ, enable lock detect */
933 writel_relaxed(PLLMB_MISC0_DEFAULT_VALUE, 935 writel_relaxed(PLLMB_MISC1_DEFAULT_VALUE,
934 clk_base + pllmb->params->ext_misc_reg[0]); 936 clk_base + pllmb->params->ext_misc_reg[0]);
935 udelay(1); 937 udelay(1);
936} 938}
@@ -960,7 +962,7 @@ static void pllp_check_defaults(struct tegra_clk_pll *pll, bool enabled)
960 ~mask & PLLP_MISC1_WRITE_MASK); 962 ~mask & PLLP_MISC1_WRITE_MASK);
961} 963}
962 964
963void tegra210_pllp_set_defaults(struct tegra_clk_pll *pllp) 965static void tegra210_pllp_set_defaults(struct tegra_clk_pll *pllp)
964{ 966{
965 u32 mask; 967 u32 mask;
966 u32 val = readl_relaxed(clk_base + pllp->params->base_reg); 968 u32 val = readl_relaxed(clk_base + pllp->params->base_reg);
@@ -1022,7 +1024,7 @@ static void pllu_check_defaults(struct tegra_clk_pll *pll, bool hw_control)
1022 ~mask & PLLU_MISC1_WRITE_MASK); 1024 ~mask & PLLU_MISC1_WRITE_MASK);
1023} 1025}
1024 1026
1025void tegra210_pllu_set_defaults(struct tegra_clk_pll *pllu) 1027static void tegra210_pllu_set_defaults(struct tegra_clk_pll *pllu)
1026{ 1028{
1027 u32 val = readl_relaxed(clk_base + pllu->params->base_reg); 1029 u32 val = readl_relaxed(clk_base + pllu->params->base_reg);
1028 1030
@@ -1212,8 +1214,9 @@ static void tegra210_clk_pll_set_gain(struct tegra_clk_pll_freq_table *cfg)
1212 cfg->m *= PLL_SDM_COEFF; 1214 cfg->m *= PLL_SDM_COEFF;
1213} 1215}
1214 1216
1215unsigned long tegra210_clk_adjust_vco_min(struct tegra_clk_pll_params *params, 1217static unsigned long
1216 unsigned long parent_rate) 1218tegra210_clk_adjust_vco_min(struct tegra_clk_pll_params *params,
1219 unsigned long parent_rate)
1217{ 1220{
1218 unsigned long vco_min = params->vco_min; 1221 unsigned long vco_min = params->vco_min;
1219 1222
@@ -1386,7 +1389,7 @@ static struct tegra_clk_pll_params pll_c_params = {
1386 .mdiv_default = 3, 1389 .mdiv_default = 3,
1387 .div_nmp = &pllc_nmp, 1390 .div_nmp = &pllc_nmp,
1388 .freq_table = pll_cx_freq_table, 1391 .freq_table = pll_cx_freq_table,
1389 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, 1392 .flags = TEGRA_PLL_USE_LOCK,
1390 .set_defaults = _pllc_set_defaults, 1393 .set_defaults = _pllc_set_defaults,
1391 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1394 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1392}; 1395};
@@ -1425,7 +1428,7 @@ static struct tegra_clk_pll_params pll_c2_params = {
1425 .ext_misc_reg[2] = PLLC2_MISC2, 1428 .ext_misc_reg[2] = PLLC2_MISC2,
1426 .ext_misc_reg[3] = PLLC2_MISC3, 1429 .ext_misc_reg[3] = PLLC2_MISC3,
1427 .freq_table = pll_cx_freq_table, 1430 .freq_table = pll_cx_freq_table,
1428 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, 1431 .flags = TEGRA_PLL_USE_LOCK,
1429 .set_defaults = _pllc2_set_defaults, 1432 .set_defaults = _pllc2_set_defaults,
1430 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1433 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1431}; 1434};
@@ -1455,7 +1458,7 @@ static struct tegra_clk_pll_params pll_c3_params = {
1455 .ext_misc_reg[2] = PLLC3_MISC2, 1458 .ext_misc_reg[2] = PLLC3_MISC2,
1456 .ext_misc_reg[3] = PLLC3_MISC3, 1459 .ext_misc_reg[3] = PLLC3_MISC3,
1457 .freq_table = pll_cx_freq_table, 1460 .freq_table = pll_cx_freq_table,
1458 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, 1461 .flags = TEGRA_PLL_USE_LOCK,
1459 .set_defaults = _pllc3_set_defaults, 1462 .set_defaults = _pllc3_set_defaults,
1460 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1463 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1461}; 1464};
@@ -1505,7 +1508,6 @@ static struct tegra_clk_pll_params pll_c4_vco_params = {
1505 .base_reg = PLLC4_BASE, 1508 .base_reg = PLLC4_BASE,
1506 .misc_reg = PLLC4_MISC0, 1509 .misc_reg = PLLC4_MISC0,
1507 .lock_mask = PLL_BASE_LOCK, 1510 .lock_mask = PLL_BASE_LOCK,
1508 .lock_enable_bit_idx = PLLSS_MISC_LOCK_ENABLE,
1509 .lock_delay = 300, 1511 .lock_delay = 300,
1510 .max_p = PLL_QLIN_PDIV_MAX, 1512 .max_p = PLL_QLIN_PDIV_MAX,
1511 .ext_misc_reg[0] = PLLC4_MISC0, 1513 .ext_misc_reg[0] = PLLC4_MISC0,
@@ -1517,8 +1519,7 @@ static struct tegra_clk_pll_params pll_c4_vco_params = {
1517 .div_nmp = &pllss_nmp, 1519 .div_nmp = &pllss_nmp,
1518 .freq_table = pll_c4_vco_freq_table, 1520 .freq_table = pll_c4_vco_freq_table,
1519 .set_defaults = tegra210_pllc4_set_defaults, 1521 .set_defaults = tegra210_pllc4_set_defaults,
1520 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE | 1522 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_VCO_OUT,
1521 TEGRA_PLL_VCO_OUT,
1522 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1523 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1523}; 1524};
1524 1525
@@ -1559,15 +1560,15 @@ static struct tegra_clk_pll_params pll_m_params = {
1559 .vco_min = 800000000, 1560 .vco_min = 800000000,
1560 .vco_max = 1866000000, 1561 .vco_max = 1866000000,
1561 .base_reg = PLLM_BASE, 1562 .base_reg = PLLM_BASE,
1562 .misc_reg = PLLM_MISC1, 1563 .misc_reg = PLLM_MISC2,
1563 .lock_mask = PLL_BASE_LOCK, 1564 .lock_mask = PLL_BASE_LOCK,
1564 .lock_enable_bit_idx = PLLM_MISC_LOCK_ENABLE, 1565 .lock_enable_bit_idx = PLLM_MISC_LOCK_ENABLE,
1565 .lock_delay = 300, 1566 .lock_delay = 300,
1566 .iddq_reg = PLLM_MISC0, 1567 .iddq_reg = PLLM_MISC2,
1567 .iddq_bit_idx = PLLM_IDDQ_BIT, 1568 .iddq_bit_idx = PLLM_IDDQ_BIT,
1568 .max_p = PLL_QLIN_PDIV_MAX, 1569 .max_p = PLL_QLIN_PDIV_MAX,
1569 .ext_misc_reg[0] = PLLM_MISC0, 1570 .ext_misc_reg[0] = PLLM_MISC2,
1570 .ext_misc_reg[0] = PLLM_MISC1, 1571 .ext_misc_reg[1] = PLLM_MISC1,
1571 .round_p_to_pdiv = pll_qlin_p_to_pdiv, 1572 .round_p_to_pdiv = pll_qlin_p_to_pdiv,
1572 .pdiv_tohw = pll_qlin_pdiv_to_hw, 1573 .pdiv_tohw = pll_qlin_pdiv_to_hw,
1573 .div_nmp = &pllm_nmp, 1574 .div_nmp = &pllm_nmp,
@@ -1586,19 +1587,18 @@ static struct tegra_clk_pll_params pll_mb_params = {
1586 .vco_min = 800000000, 1587 .vco_min = 800000000,
1587 .vco_max = 1866000000, 1588 .vco_max = 1866000000,
1588 .base_reg = PLLMB_BASE, 1589 .base_reg = PLLMB_BASE,
1589 .misc_reg = PLLMB_MISC0, 1590 .misc_reg = PLLMB_MISC1,
1590 .lock_mask = PLL_BASE_LOCK, 1591 .lock_mask = PLL_BASE_LOCK,
1591 .lock_enable_bit_idx = PLLMB_MISC_LOCK_ENABLE,
1592 .lock_delay = 300, 1592 .lock_delay = 300,
1593 .iddq_reg = PLLMB_MISC0, 1593 .iddq_reg = PLLMB_MISC1,
1594 .iddq_bit_idx = PLLMB_IDDQ_BIT, 1594 .iddq_bit_idx = PLLMB_IDDQ_BIT,
1595 .max_p = PLL_QLIN_PDIV_MAX, 1595 .max_p = PLL_QLIN_PDIV_MAX,
1596 .ext_misc_reg[0] = PLLMB_MISC0, 1596 .ext_misc_reg[0] = PLLMB_MISC1,
1597 .round_p_to_pdiv = pll_qlin_p_to_pdiv, 1597 .round_p_to_pdiv = pll_qlin_p_to_pdiv,
1598 .pdiv_tohw = pll_qlin_pdiv_to_hw, 1598 .pdiv_tohw = pll_qlin_pdiv_to_hw,
1599 .div_nmp = &pllm_nmp, 1599 .div_nmp = &pllm_nmp,
1600 .freq_table = pll_m_freq_table, 1600 .freq_table = pll_m_freq_table,
1601 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, 1601 .flags = TEGRA_PLL_USE_LOCK,
1602 .set_defaults = tegra210_pllmb_set_defaults, 1602 .set_defaults = tegra210_pllmb_set_defaults,
1603 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1603 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1604}; 1604};
@@ -1671,7 +1671,6 @@ static struct tegra_clk_pll_params pll_re_vco_params = {
1671 .base_reg = PLLRE_BASE, 1671 .base_reg = PLLRE_BASE,
1672 .misc_reg = PLLRE_MISC0, 1672 .misc_reg = PLLRE_MISC0,
1673 .lock_mask = PLLRE_MISC_LOCK, 1673 .lock_mask = PLLRE_MISC_LOCK,
1674 .lock_enable_bit_idx = PLLRE_MISC_LOCK_ENABLE,
1675 .lock_delay = 300, 1674 .lock_delay = 300,
1676 .max_p = PLL_QLIN_PDIV_MAX, 1675 .max_p = PLL_QLIN_PDIV_MAX,
1677 .ext_misc_reg[0] = PLLRE_MISC0, 1676 .ext_misc_reg[0] = PLLRE_MISC0,
@@ -1681,8 +1680,7 @@ static struct tegra_clk_pll_params pll_re_vco_params = {
1681 .pdiv_tohw = pll_qlin_pdiv_to_hw, 1680 .pdiv_tohw = pll_qlin_pdiv_to_hw,
1682 .div_nmp = &pllre_nmp, 1681 .div_nmp = &pllre_nmp,
1683 .freq_table = pll_re_vco_freq_table, 1682 .freq_table = pll_re_vco_freq_table,
1684 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_LOCK_MISC | 1683 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_LOCK_MISC | TEGRA_PLL_VCO_OUT,
1685 TEGRA_PLL_HAS_LOCK_ENABLE | TEGRA_PLL_VCO_OUT,
1686 .set_defaults = tegra210_pllre_set_defaults, 1684 .set_defaults = tegra210_pllre_set_defaults,
1687 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1685 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1688}; 1686};
@@ -1712,7 +1710,6 @@ static struct tegra_clk_pll_params pll_p_params = {
1712 .base_reg = PLLP_BASE, 1710 .base_reg = PLLP_BASE,
1713 .misc_reg = PLLP_MISC0, 1711 .misc_reg = PLLP_MISC0,
1714 .lock_mask = PLL_BASE_LOCK, 1712 .lock_mask = PLL_BASE_LOCK,
1715 .lock_enable_bit_idx = PLLP_MISC_LOCK_ENABLE,
1716 .lock_delay = 300, 1713 .lock_delay = 300,
1717 .iddq_reg = PLLP_MISC0, 1714 .iddq_reg = PLLP_MISC0,
1718 .iddq_bit_idx = PLLXP_IDDQ_BIT, 1715 .iddq_bit_idx = PLLXP_IDDQ_BIT,
@@ -1721,8 +1718,7 @@ static struct tegra_clk_pll_params pll_p_params = {
1721 .div_nmp = &pllp_nmp, 1718 .div_nmp = &pllp_nmp,
1722 .freq_table = pll_p_freq_table, 1719 .freq_table = pll_p_freq_table,
1723 .fixed_rate = 408000000, 1720 .fixed_rate = 408000000,
1724 .flags = TEGRA_PLL_FIXED | TEGRA_PLL_USE_LOCK | 1721 .flags = TEGRA_PLL_FIXED | TEGRA_PLL_USE_LOCK | TEGRA_PLL_VCO_OUT,
1725 TEGRA_PLL_HAS_LOCK_ENABLE | TEGRA_PLL_VCO_OUT,
1726 .set_defaults = tegra210_pllp_set_defaults, 1722 .set_defaults = tegra210_pllp_set_defaults,
1727 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1723 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1728}; 1724};
@@ -1750,7 +1746,7 @@ static struct tegra_clk_pll_params pll_a1_params = {
1750 .ext_misc_reg[2] = PLLA1_MISC2, 1746 .ext_misc_reg[2] = PLLA1_MISC2,
1751 .ext_misc_reg[3] = PLLA1_MISC3, 1747 .ext_misc_reg[3] = PLLA1_MISC3,
1752 .freq_table = pll_cx_freq_table, 1748 .freq_table = pll_cx_freq_table,
1753 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, 1749 .flags = TEGRA_PLL_USE_LOCK,
1754 .set_defaults = _plla1_set_defaults, 1750 .set_defaults = _plla1_set_defaults,
1755 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1751 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1756}; 1752};
@@ -1787,7 +1783,6 @@ static struct tegra_clk_pll_params pll_a_params = {
1787 .base_reg = PLLA_BASE, 1783 .base_reg = PLLA_BASE,
1788 .misc_reg = PLLA_MISC0, 1784 .misc_reg = PLLA_MISC0,
1789 .lock_mask = PLL_BASE_LOCK, 1785 .lock_mask = PLL_BASE_LOCK,
1790 .lock_enable_bit_idx = PLLA_MISC_LOCK_ENABLE,
1791 .lock_delay = 300, 1786 .lock_delay = 300,
1792 .round_p_to_pdiv = pll_qlin_p_to_pdiv, 1787 .round_p_to_pdiv = pll_qlin_p_to_pdiv,
1793 .pdiv_tohw = pll_qlin_pdiv_to_hw, 1788 .pdiv_tohw = pll_qlin_pdiv_to_hw,
@@ -1802,8 +1797,7 @@ static struct tegra_clk_pll_params pll_a_params = {
1802 .ext_misc_reg[1] = PLLA_MISC1, 1797 .ext_misc_reg[1] = PLLA_MISC1,
1803 .ext_misc_reg[2] = PLLA_MISC2, 1798 .ext_misc_reg[2] = PLLA_MISC2,
1804 .freq_table = pll_a_freq_table, 1799 .freq_table = pll_a_freq_table,
1805 .flags = TEGRA_PLL_USE_LOCK | TEGRA_MDIV_NEW | 1800 .flags = TEGRA_PLL_USE_LOCK | TEGRA_MDIV_NEW,
1806 TEGRA_PLL_HAS_LOCK_ENABLE,
1807 .set_defaults = tegra210_plla_set_defaults, 1801 .set_defaults = tegra210_plla_set_defaults,
1808 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1802 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1809 .set_gain = tegra210_clk_pll_set_gain, 1803 .set_gain = tegra210_clk_pll_set_gain,
@@ -1836,7 +1830,6 @@ static struct tegra_clk_pll_params pll_d_params = {
1836 .base_reg = PLLD_BASE, 1830 .base_reg = PLLD_BASE,
1837 .misc_reg = PLLD_MISC0, 1831 .misc_reg = PLLD_MISC0,
1838 .lock_mask = PLL_BASE_LOCK, 1832 .lock_mask = PLL_BASE_LOCK,
1839 .lock_enable_bit_idx = PLLD_MISC_LOCK_ENABLE,
1840 .lock_delay = 1000, 1833 .lock_delay = 1000,
1841 .iddq_reg = PLLD_MISC0, 1834 .iddq_reg = PLLD_MISC0,
1842 .iddq_bit_idx = PLLD_IDDQ_BIT, 1835 .iddq_bit_idx = PLLD_IDDQ_BIT,
@@ -1850,7 +1843,7 @@ static struct tegra_clk_pll_params pll_d_params = {
1850 .ext_misc_reg[0] = PLLD_MISC0, 1843 .ext_misc_reg[0] = PLLD_MISC0,
1851 .ext_misc_reg[1] = PLLD_MISC1, 1844 .ext_misc_reg[1] = PLLD_MISC1,
1852 .freq_table = pll_d_freq_table, 1845 .freq_table = pll_d_freq_table,
1853 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, 1846 .flags = TEGRA_PLL_USE_LOCK,
1854 .mdiv_default = 1, 1847 .mdiv_default = 1,
1855 .set_defaults = tegra210_plld_set_defaults, 1848 .set_defaults = tegra210_plld_set_defaults,
1856 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1849 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
@@ -1876,7 +1869,6 @@ static struct tegra_clk_pll_params pll_d2_params = {
1876 .base_reg = PLLD2_BASE, 1869 .base_reg = PLLD2_BASE,
1877 .misc_reg = PLLD2_MISC0, 1870 .misc_reg = PLLD2_MISC0,
1878 .lock_mask = PLL_BASE_LOCK, 1871 .lock_mask = PLL_BASE_LOCK,
1879 .lock_enable_bit_idx = PLLSS_MISC_LOCK_ENABLE,
1880 .lock_delay = 300, 1872 .lock_delay = 300,
1881 .iddq_reg = PLLD2_BASE, 1873 .iddq_reg = PLLD2_BASE,
1882 .iddq_bit_idx = PLLSS_IDDQ_BIT, 1874 .iddq_bit_idx = PLLSS_IDDQ_BIT,
@@ -1897,7 +1889,7 @@ static struct tegra_clk_pll_params pll_d2_params = {
1897 .mdiv_default = 1, 1889 .mdiv_default = 1,
1898 .freq_table = tegra210_pll_d2_freq_table, 1890 .freq_table = tegra210_pll_d2_freq_table,
1899 .set_defaults = tegra210_plld2_set_defaults, 1891 .set_defaults = tegra210_plld2_set_defaults,
1900 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, 1892 .flags = TEGRA_PLL_USE_LOCK,
1901 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1893 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1902 .set_gain = tegra210_clk_pll_set_gain, 1894 .set_gain = tegra210_clk_pll_set_gain,
1903 .adjust_vco = tegra210_clk_adjust_vco_min, 1895 .adjust_vco = tegra210_clk_adjust_vco_min,
@@ -1920,7 +1912,6 @@ static struct tegra_clk_pll_params pll_dp_params = {
1920 .base_reg = PLLDP_BASE, 1912 .base_reg = PLLDP_BASE,
1921 .misc_reg = PLLDP_MISC, 1913 .misc_reg = PLLDP_MISC,
1922 .lock_mask = PLL_BASE_LOCK, 1914 .lock_mask = PLL_BASE_LOCK,
1923 .lock_enable_bit_idx = PLLSS_MISC_LOCK_ENABLE,
1924 .lock_delay = 300, 1915 .lock_delay = 300,
1925 .iddq_reg = PLLDP_BASE, 1916 .iddq_reg = PLLDP_BASE,
1926 .iddq_bit_idx = PLLSS_IDDQ_BIT, 1917 .iddq_bit_idx = PLLSS_IDDQ_BIT,
@@ -1941,7 +1932,7 @@ static struct tegra_clk_pll_params pll_dp_params = {
1941 .mdiv_default = 1, 1932 .mdiv_default = 1,
1942 .freq_table = pll_dp_freq_table, 1933 .freq_table = pll_dp_freq_table,
1943 .set_defaults = tegra210_plldp_set_defaults, 1934 .set_defaults = tegra210_plldp_set_defaults,
1944 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, 1935 .flags = TEGRA_PLL_USE_LOCK,
1945 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1936 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1946 .set_gain = tegra210_clk_pll_set_gain, 1937 .set_gain = tegra210_clk_pll_set_gain,
1947 .adjust_vco = tegra210_clk_adjust_vco_min, 1938 .adjust_vco = tegra210_clk_adjust_vco_min,
@@ -1973,7 +1964,6 @@ static struct tegra_clk_pll_params pll_u_vco_params = {
1973 .base_reg = PLLU_BASE, 1964 .base_reg = PLLU_BASE,
1974 .misc_reg = PLLU_MISC0, 1965 .misc_reg = PLLU_MISC0,
1975 .lock_mask = PLL_BASE_LOCK, 1966 .lock_mask = PLL_BASE_LOCK,
1976 .lock_enable_bit_idx = PLLU_MISC_LOCK_ENABLE,
1977 .lock_delay = 1000, 1967 .lock_delay = 1000,
1978 .iddq_reg = PLLU_MISC0, 1968 .iddq_reg = PLLU_MISC0,
1979 .iddq_bit_idx = PLLU_IDDQ_BIT, 1969 .iddq_bit_idx = PLLU_IDDQ_BIT,
@@ -1983,8 +1973,7 @@ static struct tegra_clk_pll_params pll_u_vco_params = {
1983 .pdiv_tohw = pll_qlin_pdiv_to_hw, 1973 .pdiv_tohw = pll_qlin_pdiv_to_hw,
1984 .div_nmp = &pllu_nmp, 1974 .div_nmp = &pllu_nmp,
1985 .freq_table = pll_u_freq_table, 1975 .freq_table = pll_u_freq_table,
1986 .flags = TEGRA_PLLU | TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE | 1976 .flags = TEGRA_PLLU | TEGRA_PLL_USE_LOCK | TEGRA_PLL_VCO_OUT,
1987 TEGRA_PLL_VCO_OUT,
1988 .set_defaults = tegra210_pllu_set_defaults, 1977 .set_defaults = tegra210_pllu_set_defaults,
1989 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1978 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1990}; 1979};
@@ -2218,6 +2207,7 @@ static struct tegra_clk tegra210_clks[tegra_clk_max] __initdata = {
2218 [tegra_clk_pll_c4_out1] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT1, .present = true }, 2207 [tegra_clk_pll_c4_out1] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT1, .present = true },
2219 [tegra_clk_pll_c4_out2] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT2, .present = true }, 2208 [tegra_clk_pll_c4_out2] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT2, .present = true },
2220 [tegra_clk_pll_c4_out3] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT3, .present = true }, 2209 [tegra_clk_pll_c4_out3] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT3, .present = true },
2210 [tegra_clk_apb2ape] = { .dt_id = TEGRA210_CLK_APB2APE, .present = true },
2221}; 2211};
2222 2212
2223static struct tegra_devclk devclks[] __initdata = { 2213static struct tegra_devclk devclks[] __initdata = {
@@ -2519,7 +2509,7 @@ static void __init tegra210_pll_init(void __iomem *clk_base,
2519 2509
2520 /* PLLU_VCO */ 2510 /* PLLU_VCO */
2521 val = readl(clk_base + pll_u_vco_params.base_reg); 2511 val = readl(clk_base + pll_u_vco_params.base_reg);
2522 val &= ~BIT(24); /* disable PLLU_OVERRIDE */ 2512 val &= ~PLLU_BASE_OVERRIDE; /* disable PLLU_OVERRIDE */
2523 writel(val, clk_base + pll_u_vco_params.base_reg); 2513 writel(val, clk_base + pll_u_vco_params.base_reg);
2524 2514
2525 clk = tegra_clk_register_pllre("pll_u_vco", "pll_ref", clk_base, pmc, 2515 clk = tegra_clk_register_pllre("pll_u_vco", "pll_ref", clk_base, pmc,
@@ -2738,8 +2728,6 @@ static struct tegra_clk_init_table init_table[] __initdata = {
2738 { TEGRA210_CLK_DFLL_REF, TEGRA210_CLK_PLL_P, 51000000, 1 }, 2728 { TEGRA210_CLK_DFLL_REF, TEGRA210_CLK_PLL_P, 51000000, 1 },
2739 { TEGRA210_CLK_SBC4, TEGRA210_CLK_PLL_P, 12000000, 1 }, 2729 { TEGRA210_CLK_SBC4, TEGRA210_CLK_PLL_P, 12000000, 1 },
2740 { TEGRA210_CLK_PLL_RE_VCO, TEGRA210_CLK_CLK_MAX, 672000000, 1 }, 2730 { TEGRA210_CLK_PLL_RE_VCO, TEGRA210_CLK_CLK_MAX, 672000000, 1 },
2741 { TEGRA210_CLK_PLL_U_OUT1, TEGRA210_CLK_CLK_MAX, 48000000, 1 },
2742 { TEGRA210_CLK_PLL_U_OUT2, TEGRA210_CLK_CLK_MAX, 60000000, 1 },
2743 { TEGRA210_CLK_XUSB_GATE, TEGRA210_CLK_CLK_MAX, 0, 1 }, 2731 { TEGRA210_CLK_XUSB_GATE, TEGRA210_CLK_CLK_MAX, 0, 1 },
2744 { TEGRA210_CLK_XUSB_SS_SRC, TEGRA210_CLK_PLL_U_480M, 120000000, 0 }, 2732 { TEGRA210_CLK_XUSB_SS_SRC, TEGRA210_CLK_PLL_U_480M, 120000000, 0 },
2745 { TEGRA210_CLK_XUSB_FS_SRC, TEGRA210_CLK_PLL_U_48M, 48000000, 0 }, 2733 { TEGRA210_CLK_XUSB_FS_SRC, TEGRA210_CLK_PLL_U_48M, 48000000, 0 },
diff --git a/drivers/clk/ti/dpll3xxx.c b/drivers/clk/ti/dpll3xxx.c
index 1c300388782b..cc739291a3ce 100644
--- a/drivers/clk/ti/dpll3xxx.c
+++ b/drivers/clk/ti/dpll3xxx.c
@@ -460,7 +460,8 @@ int omap3_noncore_dpll_enable(struct clk_hw *hw)
460 460
461 parent = clk_hw_get_parent(hw); 461 parent = clk_hw_get_parent(hw);
462 462
463 if (clk_hw_get_rate(hw) == clk_get_rate(dd->clk_bypass)) { 463 if (clk_hw_get_rate(hw) ==
464 clk_hw_get_rate(__clk_get_hw(dd->clk_bypass))) {
464 WARN_ON(parent != __clk_get_hw(dd->clk_bypass)); 465 WARN_ON(parent != __clk_get_hw(dd->clk_bypass));
465 r = _omap3_noncore_dpll_bypass(clk); 466 r = _omap3_noncore_dpll_bypass(clk);
466 } else { 467 } else {
diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c
index e62f8cb2c9b5..3bca438ecd19 100644
--- a/drivers/clk/versatile/clk-icst.c
+++ b/drivers/clk/versatile/clk-icst.c
@@ -78,6 +78,9 @@ static int vco_set(struct clk_icst *icst, struct icst_vco vco)
78 ret = regmap_read(icst->map, icst->vcoreg_off, &val); 78 ret = regmap_read(icst->map, icst->vcoreg_off, &val);
79 if (ret) 79 if (ret)
80 return ret; 80 return ret;
81
 82	/* Mask the 19 bits used by the VCO */
83 val &= ~0x7ffff;
81 val |= vco.v | (vco.r << 9) | (vco.s << 16); 84 val |= vco.v | (vco.r << 9) | (vco.s << 16);
82 85
83 /* This magic unlocks the VCO so it can be controlled */ 86 /* This magic unlocks the VCO so it can be controlled */
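
For reference, a minimal sketch (not part of the patch) of the read-modify-write idiom the vco_set() fix applies: clear the bits owned by the field first, then OR in the new value, so stale bits from a previous setting cannot survive. Register and mask names below are illustrative only, not the ICST definitions.

#include <linux/bitops.h>
#include <linux/io.h>

#define EXAMPLE_FIELD_MASK	GENMASK(18, 0)	/* bits owned by the field */

static void example_update_field(void __iomem *reg, u32 new_bits)
{
	u32 val = readl(reg);			/* read current value */

	val &= ~EXAMPLE_FIELD_MASK;		/* clear the whole field */
	val |= new_bits & EXAMPLE_FIELD_MASK;	/* insert the new value */
	writel(val, reg);			/* write it back */
}
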
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 56777f04d2d9..33db7406c0e2 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -30,6 +30,8 @@ config CLKSRC_MMIO
30config DIGICOLOR_TIMER 30config DIGICOLOR_TIMER
31 bool "Digicolor timer driver" if COMPILE_TEST 31 bool "Digicolor timer driver" if COMPILE_TEST
32 depends on GENERIC_CLOCKEVENTS 32 depends on GENERIC_CLOCKEVENTS
33 select CLKSRC_MMIO
34 depends on HAS_IOMEM
33 help 35 help
34 Enables the support for the digicolor timer driver. 36 Enables the support for the digicolor timer driver.
35 37
@@ -55,6 +57,7 @@ config ARMADA_370_XP_TIMER
55 bool "Armada 370 and XP timer driver" if COMPILE_TEST 57 bool "Armada 370 and XP timer driver" if COMPILE_TEST
56 depends on ARM 58 depends on ARM
57 select CLKSRC_OF 59 select CLKSRC_OF
60 select CLKSRC_MMIO
58 help 61 help
59 Enables the support for the Armada 370 and XP timer driver. 62 Enables the support for the Armada 370 and XP timer driver.
60 63
@@ -76,6 +79,7 @@ config ORION_TIMER
76config SUN4I_TIMER 79config SUN4I_TIMER
77 bool "Sun4i timer driver" if COMPILE_TEST 80 bool "Sun4i timer driver" if COMPILE_TEST
78 depends on GENERIC_CLOCKEVENTS 81 depends on GENERIC_CLOCKEVENTS
82 depends on HAS_IOMEM
79 select CLKSRC_MMIO 83 select CLKSRC_MMIO
80 help 84 help
81 Enables support for the Sun4i timer. 85 Enables support for the Sun4i timer.
@@ -89,6 +93,7 @@ config SUN5I_HSTIMER
89 93
90config TEGRA_TIMER 94config TEGRA_TIMER
91 bool "Tegra timer driver" if COMPILE_TEST 95 bool "Tegra timer driver" if COMPILE_TEST
96 select CLKSRC_MMIO
92 depends on ARM 97 depends on ARM
93 help 98 help
94 Enables support for the Tegra driver. 99 Enables support for the Tegra driver.
@@ -96,6 +101,7 @@ config TEGRA_TIMER
96config VT8500_TIMER 101config VT8500_TIMER
97 bool "VT8500 timer driver" if COMPILE_TEST 102 bool "VT8500 timer driver" if COMPILE_TEST
98 depends on GENERIC_CLOCKEVENTS 103 depends on GENERIC_CLOCKEVENTS
104 depends on HAS_IOMEM
99 help 105 help
100 Enables support for the VT8500 driver. 106 Enables support for the VT8500 driver.
101 107
@@ -131,6 +137,7 @@ config CLKSRC_NOMADIK_MTU_SCHED_CLOCK
131config CLKSRC_DBX500_PRCMU 137config CLKSRC_DBX500_PRCMU
132 bool "Clocksource PRCMU Timer" if COMPILE_TEST 138 bool "Clocksource PRCMU Timer" if COMPILE_TEST
133 depends on GENERIC_CLOCKEVENTS 139 depends on GENERIC_CLOCKEVENTS
140 depends on HAS_IOMEM
134 help 141 help
135 Use the always on PRCMU Timer as clocksource 142 Use the always on PRCMU Timer as clocksource
136 143
@@ -248,6 +255,7 @@ config CLKSRC_EXYNOS_MCT
248config CLKSRC_SAMSUNG_PWM 255config CLKSRC_SAMSUNG_PWM
 249	bool "PWM timer driver for Samsung S3C, S5P" if COMPILE_TEST 256
250 depends on GENERIC_CLOCKEVENTS 257 depends on GENERIC_CLOCKEVENTS
258 depends on HAS_IOMEM
251 help 259 help
252 This is a new clocksource driver for the PWM timer found in 260 This is a new clocksource driver for the PWM timer found in
253 Samsung S3C, S5P and Exynos SoCs, replacing an earlier driver 261 Samsung S3C, S5P and Exynos SoCs, replacing an earlier driver
@@ -257,12 +265,14 @@ config CLKSRC_SAMSUNG_PWM
257config FSL_FTM_TIMER 265config FSL_FTM_TIMER
258 bool "Freescale FlexTimer Module driver" if COMPILE_TEST 266 bool "Freescale FlexTimer Module driver" if COMPILE_TEST
259 depends on GENERIC_CLOCKEVENTS 267 depends on GENERIC_CLOCKEVENTS
268 depends on HAS_IOMEM
260 select CLKSRC_MMIO 269 select CLKSRC_MMIO
261 help 270 help
262 Support for Freescale FlexTimer Module (FTM) timer. 271 Support for Freescale FlexTimer Module (FTM) timer.
263 272
264config VF_PIT_TIMER 273config VF_PIT_TIMER
265 bool 274 bool
275 select CLKSRC_MMIO
266 help 276 help
267 Support for Period Interrupt Timer on Freescale Vybrid Family SoCs. 277 Support for Period Interrupt Timer on Freescale Vybrid Family SoCs.
268 278
@@ -360,6 +370,7 @@ config CLKSRC_TANGO_XTAL
360config CLKSRC_PXA 370config CLKSRC_PXA
361 bool "Clocksource for PXA or SA-11x0 platform" if COMPILE_TEST 371 bool "Clocksource for PXA or SA-11x0 platform" if COMPILE_TEST
362 depends on GENERIC_CLOCKEVENTS 372 depends on GENERIC_CLOCKEVENTS
373 depends on HAS_IOMEM
363 select CLKSRC_MMIO 374 select CLKSRC_MMIO
364 help 375 help
365 This enables OST0 support available on PXA and SA-11x0 376 This enables OST0 support available on PXA and SA-11x0
@@ -394,6 +405,7 @@ config CLKSRC_ST_LPC
394 bool "Low power clocksource found in the LPC" if COMPILE_TEST 405 bool "Low power clocksource found in the LPC" if COMPILE_TEST
395 select CLKSRC_OF if OF 406 select CLKSRC_OF if OF
396 depends on HAS_IOMEM 407 depends on HAS_IOMEM
408 select CLKSRC_MMIO
397 help 409 help
398 Enable this option to use the Low Power controller timer 410 Enable this option to use the Low Power controller timer
399 as clocksource. 411 as clocksource.
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index 6ee91401918e..4da2af9694a2 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -98,7 +98,8 @@ static int tc_shutdown(struct clock_event_device *d)
98 98
99 __raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR)); 99 __raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR));
100 __raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR)); 100 __raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
101 clk_disable(tcd->clk); 101 if (!clockevent_state_detached(d))
102 clk_disable(tcd->clk);
102 103
103 return 0; 104 return 0;
104} 105}
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 659879a56dba..f93511031177 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -296,6 +296,7 @@ endif
296config QORIQ_CPUFREQ 296config QORIQ_CPUFREQ
297 tristate "CPU frequency scaling driver for Freescale QorIQ SoCs" 297 tristate "CPU frequency scaling driver for Freescale QorIQ SoCs"
298 depends on OF && COMMON_CLK && (PPC_E500MC || ARM) 298 depends on OF && COMMON_CLK && (PPC_E500MC || ARM)
299 depends on !CPU_THERMAL || THERMAL
299 select CLK_QORIQ 300 select CLK_QORIQ
300 help 301 help
301 This adds the CPUFreq driver support for Freescale QorIQ SoCs 302 This adds the CPUFreq driver support for Freescale QorIQ SoCs
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 0031069b64c9..14b1f9393b05 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -84,10 +84,10 @@ config ARM_KIRKWOOD_CPUFREQ
84 SoCs. 84 SoCs.
85 85
86config ARM_MT8173_CPUFREQ 86config ARM_MT8173_CPUFREQ
87 bool "Mediatek MT8173 CPUFreq support" 87 tristate "Mediatek MT8173 CPUFreq support"
88 depends on ARCH_MEDIATEK && REGULATOR 88 depends on ARCH_MEDIATEK && REGULATOR
89 depends on ARM64 || (ARM_CPU_TOPOLOGY && COMPILE_TEST) 89 depends on ARM64 || (ARM_CPU_TOPOLOGY && COMPILE_TEST)
90 depends on !CPU_THERMAL || THERMAL=y 90 depends on !CPU_THERMAL || THERMAL
91 select PM_OPP 91 select PM_OPP
92 help 92 help
93 This adds the CPUFreq driver support for Mediatek MT8173 SoC. 93 This adds the CPUFreq driver support for Mediatek MT8173 SoC.
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 9bc37c437874..0ca74d070058 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -142,15 +142,16 @@ static int allocate_resources(int cpu, struct device **cdev,
142 142
143try_again: 143try_again:
144 cpu_reg = regulator_get_optional(cpu_dev, reg); 144 cpu_reg = regulator_get_optional(cpu_dev, reg);
145 if (IS_ERR(cpu_reg)) { 145 ret = PTR_ERR_OR_ZERO(cpu_reg);
146 if (ret) {
146 /* 147 /*
147 * If cpu's regulator supply node is present, but regulator is 148 * If cpu's regulator supply node is present, but regulator is
 148	 * not yet registered, we should try deferring probe. 149
149 */ 150 */
150 if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) { 151 if (ret == -EPROBE_DEFER) {
151 dev_dbg(cpu_dev, "cpu%d regulator not ready, retry\n", 152 dev_dbg(cpu_dev, "cpu%d regulator not ready, retry\n",
152 cpu); 153 cpu);
153 return -EPROBE_DEFER; 154 return ret;
154 } 155 }
155 156
156 /* Try with "cpu-supply" */ 157 /* Try with "cpu-supply" */
@@ -159,18 +160,16 @@ try_again:
159 goto try_again; 160 goto try_again;
160 } 161 }
161 162
162 dev_dbg(cpu_dev, "no regulator for cpu%d: %ld\n", 163 dev_dbg(cpu_dev, "no regulator for cpu%d: %d\n", cpu, ret);
163 cpu, PTR_ERR(cpu_reg));
164 } 164 }
165 165
166 cpu_clk = clk_get(cpu_dev, NULL); 166 cpu_clk = clk_get(cpu_dev, NULL);
167 if (IS_ERR(cpu_clk)) { 167 ret = PTR_ERR_OR_ZERO(cpu_clk);
168 if (ret) {
168 /* put regulator */ 169 /* put regulator */
169 if (!IS_ERR(cpu_reg)) 170 if (!IS_ERR(cpu_reg))
170 regulator_put(cpu_reg); 171 regulator_put(cpu_reg);
171 172
172 ret = PTR_ERR(cpu_clk);
173
174 /* 173 /*
175 * If cpu's clk node is present, but clock is not yet 174 * If cpu's clk node is present, but clock is not yet
 176	 * registered, we should try deferring probe. 175
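
For reference, a minimal sketch (not part of the patch) of the PTR_ERR_OR_ZERO() idiom the allocate_resources() rework leans on: it folds the IS_ERR()/PTR_ERR() pair into a single value that is 0 for a valid pointer or a negative errno otherwise. The helper name below is invented; only the standard clk and err APIs are assumed.

#include <linux/clk.h>
#include <linux/err.h>

static int example_get_clock(struct device *dev, struct clk **out)
{
	struct clk *clk = clk_get(dev, NULL);
	int ret = PTR_ERR_OR_ZERO(clk);		/* 0 if clk is valid */

	if (ret)
		return ret;			/* e.g. -EPROBE_DEFER */

	*out = clk;
	return 0;
}
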
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index c35e7da1ed7a..e979ec78b695 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -48,11 +48,11 @@ static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy,
48 bool active) 48 bool active)
49{ 49{
50 do { 50 do {
51 policy = list_next_entry(policy, policy_list);
52
53 /* No more policies in the list */ 51 /* No more policies in the list */
54 if (&policy->policy_list == &cpufreq_policy_list) 52 if (list_is_last(&policy->policy_list, &cpufreq_policy_list))
55 return NULL; 53 return NULL;
54
55 policy = list_next_entry(policy, policy_list);
56 } while (!suitable_policy(policy, active)); 56 } while (!suitable_policy(policy, active));
57 57
58 return policy; 58 return policy;
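
The reordering in next_policy() matters because list_next_entry() on the last element hands back the container of the list head, which is not a real entry; checking list_is_last() first avoids ever touching it. A self-contained sketch of the same guard, with invented types rather than the cpufreq structures:

#include <linux/list.h>

struct example_item {
	struct list_head node;
	int value;
};

static struct example_item *example_next(struct example_item *item,
					 struct list_head *head)
{
	if (list_is_last(&item->node, head))
		return NULL;			/* no more entries */

	return list_next_entry(item, node);	/* safe: a real entry follows */
}

A caller can then walk the list with "while ((item = example_next(item, &head)) != NULL)".
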
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index bab3a514ec12..e0d111024d48 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -387,16 +387,18 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy,
387 if (!have_governor_per_policy()) 387 if (!have_governor_per_policy())
388 cdata->gdbs_data = dbs_data; 388 cdata->gdbs_data = dbs_data;
389 389
390 policy->governor_data = dbs_data;
391
390 ret = sysfs_create_group(get_governor_parent_kobj(policy), 392 ret = sysfs_create_group(get_governor_parent_kobj(policy),
391 get_sysfs_attr(dbs_data)); 393 get_sysfs_attr(dbs_data));
392 if (ret) 394 if (ret)
393 goto reset_gdbs_data; 395 goto reset_gdbs_data;
394 396
395 policy->governor_data = dbs_data;
396
397 return 0; 397 return 0;
398 398
399reset_gdbs_data: 399reset_gdbs_data:
400 policy->governor_data = NULL;
401
400 if (!have_governor_per_policy()) 402 if (!have_governor_per_policy())
401 cdata->gdbs_data = NULL; 403 cdata->gdbs_data = NULL;
402 cdata->exit(dbs_data, !policy->governor->initialized); 404 cdata->exit(dbs_data, !policy->governor->initialized);
@@ -417,16 +419,19 @@ static int cpufreq_governor_exit(struct cpufreq_policy *policy,
417 if (!cdbs->shared || cdbs->shared->policy) 419 if (!cdbs->shared || cdbs->shared->policy)
418 return -EBUSY; 420 return -EBUSY;
419 421
420 policy->governor_data = NULL;
421 if (!--dbs_data->usage_count) { 422 if (!--dbs_data->usage_count) {
422 sysfs_remove_group(get_governor_parent_kobj(policy), 423 sysfs_remove_group(get_governor_parent_kobj(policy),
423 get_sysfs_attr(dbs_data)); 424 get_sysfs_attr(dbs_data));
424 425
426 policy->governor_data = NULL;
427
425 if (!have_governor_per_policy()) 428 if (!have_governor_per_policy())
426 cdata->gdbs_data = NULL; 429 cdata->gdbs_data = NULL;
427 430
428 cdata->exit(dbs_data, policy->governor->initialized == 1); 431 cdata->exit(dbs_data, policy->governor->initialized == 1);
429 kfree(dbs_data); 432 kfree(dbs_data);
433 } else {
434 policy->governor_data = NULL;
430 } 435 }
431 436
432 free_common_dbs_info(policy, cdata); 437 free_common_dbs_info(policy, cdata);
diff --git a/drivers/cpufreq/mt8173-cpufreq.c b/drivers/cpufreq/mt8173-cpufreq.c
index 1efba340456d..2058e6d292ce 100644
--- a/drivers/cpufreq/mt8173-cpufreq.c
+++ b/drivers/cpufreq/mt8173-cpufreq.c
@@ -17,6 +17,7 @@
17#include <linux/cpu_cooling.h> 17#include <linux/cpu_cooling.h>
18#include <linux/cpufreq.h> 18#include <linux/cpufreq.h>
19#include <linux/cpumask.h> 19#include <linux/cpumask.h>
20#include <linux/module.h>
20#include <linux/of.h> 21#include <linux/of.h>
21#include <linux/platform_device.h> 22#include <linux/platform_device.h>
22#include <linux/pm_opp.h> 23#include <linux/pm_opp.h>
diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c
index 1d99c97defa9..096377232747 100644
--- a/drivers/cpufreq/pxa2xx-cpufreq.c
+++ b/drivers/cpufreq/pxa2xx-cpufreq.c
@@ -202,7 +202,7 @@ static void __init pxa_cpufreq_init_voltages(void)
202 } 202 }
203} 203}
204#else 204#else
205static int pxa_cpufreq_change_voltage(struct pxa_freqs *pxa_freq) 205static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq)
206{ 206{
207 return 0; 207 return 0;
208} 208}
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
index 344058f8501a..d5657d50ac40 100644
--- a/drivers/cpuidle/coupled.c
+++ b/drivers/cpuidle/coupled.c
@@ -119,7 +119,6 @@ struct cpuidle_coupled {
119 119
120#define CPUIDLE_COUPLED_NOT_IDLE (-1) 120#define CPUIDLE_COUPLED_NOT_IDLE (-1)
121 121
122static DEFINE_MUTEX(cpuidle_coupled_lock);
123static DEFINE_PER_CPU(struct call_single_data, cpuidle_coupled_poke_cb); 122static DEFINE_PER_CPU(struct call_single_data, cpuidle_coupled_poke_cb);
124 123
125/* 124/*
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 046423b0c5ca..f996efc56605 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -153,7 +153,7 @@ int cpuidle_enter_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev)
153 * be frozen safely. 153 * be frozen safely.
154 */ 154 */
155 index = find_deepest_state(drv, dev, UINT_MAX, 0, true); 155 index = find_deepest_state(drv, dev, UINT_MAX, 0, true);
156 if (index >= 0) 156 if (index > 0)
157 enter_freeze_proper(drv, dev, index); 157 enter_freeze_proper(drv, dev, index);
158 158
159 return index; 159 return index;
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 6dd3317ca365..3eb3f1279fb7 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -400,7 +400,7 @@ static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
400{ 400{
401 int err; 401 int err;
402 402
403 err = clk_prepare_enable(dd->iclk); 403 err = clk_enable(dd->iclk);
404 if (err) 404 if (err)
405 return err; 405 return err;
406 406
@@ -430,7 +430,7 @@ static int atmel_aes_hw_version_init(struct atmel_aes_dev *dd)
430 430
431 dev_info(dd->dev, "version: 0x%x\n", dd->hw_version); 431 dev_info(dd->dev, "version: 0x%x\n", dd->hw_version);
432 432
433 clk_disable_unprepare(dd->iclk); 433 clk_disable(dd->iclk);
434 return 0; 434 return 0;
435} 435}
436 436
@@ -448,7 +448,7 @@ static inline bool atmel_aes_is_encrypt(const struct atmel_aes_dev *dd)
448 448
449static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err) 449static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
450{ 450{
451 clk_disable_unprepare(dd->iclk); 451 clk_disable(dd->iclk);
452 dd->flags &= ~AES_FLAGS_BUSY; 452 dd->flags &= ~AES_FLAGS_BUSY;
453 453
454 if (dd->is_async) 454 if (dd->is_async)
@@ -2091,10 +2091,14 @@ static int atmel_aes_probe(struct platform_device *pdev)
2091 goto res_err; 2091 goto res_err;
2092 } 2092 }
2093 2093
2094 err = atmel_aes_hw_version_init(aes_dd); 2094 err = clk_prepare(aes_dd->iclk);
2095 if (err) 2095 if (err)
2096 goto res_err; 2096 goto res_err;
2097 2097
2098 err = atmel_aes_hw_version_init(aes_dd);
2099 if (err)
2100 goto iclk_unprepare;
2101
2098 atmel_aes_get_cap(aes_dd); 2102 atmel_aes_get_cap(aes_dd);
2099 2103
2100 err = atmel_aes_buff_init(aes_dd); 2104 err = atmel_aes_buff_init(aes_dd);
@@ -2127,6 +2131,8 @@ err_algs:
2127err_aes_dma: 2131err_aes_dma:
2128 atmel_aes_buff_cleanup(aes_dd); 2132 atmel_aes_buff_cleanup(aes_dd);
2129err_aes_buff: 2133err_aes_buff:
2134iclk_unprepare:
2135 clk_unprepare(aes_dd->iclk);
2130res_err: 2136res_err:
2131 tasklet_kill(&aes_dd->done_task); 2137 tasklet_kill(&aes_dd->done_task);
2132 tasklet_kill(&aes_dd->queue_task); 2138 tasklet_kill(&aes_dd->queue_task);
@@ -2155,6 +2161,8 @@ static int atmel_aes_remove(struct platform_device *pdev)
2155 atmel_aes_dma_cleanup(aes_dd); 2161 atmel_aes_dma_cleanup(aes_dd);
2156 atmel_aes_buff_cleanup(aes_dd); 2162 atmel_aes_buff_cleanup(aes_dd);
2157 2163
2164 clk_unprepare(aes_dd->iclk);
2165
2158 return 0; 2166 return 0;
2159} 2167}
2160 2168
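
The atmel-aes changes above split clk_prepare_enable() into a one-time clk_prepare() at probe and cheap clk_enable()/clk_disable() calls around each request, balanced by clk_unprepare() at remove; clk_prepare() may sleep while clk_enable() may not. A minimal sketch of that split, using an invented driver structure rather than the atmel one:

#include <linux/clk.h>

struct example_dev {
	struct clk *iclk;
};

static int example_probe_clock(struct example_dev *dd)
{
	return clk_prepare(dd->iclk);		/* once, in sleepable context */
}

static int example_do_request(struct example_dev *dd)
{
	int err = clk_enable(dd->iclk);		/* cheap, per request */

	if (err)
		return err;

	/* ... program the hardware here ... */

	clk_disable(dd->iclk);
	return 0;
}

static void example_remove_clock(struct example_dev *dd)
{
	clk_unprepare(dd->iclk);		/* balances the probe-time prepare */
}
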
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 20de861aa0ea..8bf9914d4d15 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -782,7 +782,7 @@ static void atmel_sha_finish_req(struct ahash_request *req, int err)
782 dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU | 782 dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU |
783 SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY); 783 SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY);
784 784
785 clk_disable_unprepare(dd->iclk); 785 clk_disable(dd->iclk);
786 786
787 if (req->base.complete) 787 if (req->base.complete)
788 req->base.complete(&req->base, err); 788 req->base.complete(&req->base, err);
@@ -795,7 +795,7 @@ static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
795{ 795{
796 int err; 796 int err;
797 797
798 err = clk_prepare_enable(dd->iclk); 798 err = clk_enable(dd->iclk);
799 if (err) 799 if (err)
800 return err; 800 return err;
801 801
@@ -822,7 +822,7 @@ static void atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
822 dev_info(dd->dev, 822 dev_info(dd->dev,
823 "version: 0x%x\n", dd->hw_version); 823 "version: 0x%x\n", dd->hw_version);
824 824
825 clk_disable_unprepare(dd->iclk); 825 clk_disable(dd->iclk);
826} 826}
827 827
828static int atmel_sha_handle_queue(struct atmel_sha_dev *dd, 828static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
@@ -1410,6 +1410,10 @@ static int atmel_sha_probe(struct platform_device *pdev)
1410 goto res_err; 1410 goto res_err;
1411 } 1411 }
1412 1412
1413 err = clk_prepare(sha_dd->iclk);
1414 if (err)
1415 goto res_err;
1416
1413 atmel_sha_hw_version_init(sha_dd); 1417 atmel_sha_hw_version_init(sha_dd);
1414 1418
1415 atmel_sha_get_cap(sha_dd); 1419 atmel_sha_get_cap(sha_dd);
@@ -1421,12 +1425,12 @@ static int atmel_sha_probe(struct platform_device *pdev)
1421 if (IS_ERR(pdata)) { 1425 if (IS_ERR(pdata)) {
1422 dev_err(&pdev->dev, "platform data not available\n"); 1426 dev_err(&pdev->dev, "platform data not available\n");
1423 err = PTR_ERR(pdata); 1427 err = PTR_ERR(pdata);
1424 goto res_err; 1428 goto iclk_unprepare;
1425 } 1429 }
1426 } 1430 }
1427 if (!pdata->dma_slave) { 1431 if (!pdata->dma_slave) {
1428 err = -ENXIO; 1432 err = -ENXIO;
1429 goto res_err; 1433 goto iclk_unprepare;
1430 } 1434 }
1431 err = atmel_sha_dma_init(sha_dd, pdata); 1435 err = atmel_sha_dma_init(sha_dd, pdata);
1432 if (err) 1436 if (err)
@@ -1457,6 +1461,8 @@ err_algs:
1457 if (sha_dd->caps.has_dma) 1461 if (sha_dd->caps.has_dma)
1458 atmel_sha_dma_cleanup(sha_dd); 1462 atmel_sha_dma_cleanup(sha_dd);
1459err_sha_dma: 1463err_sha_dma:
1464iclk_unprepare:
1465 clk_unprepare(sha_dd->iclk);
1460res_err: 1466res_err:
1461 tasklet_kill(&sha_dd->done_task); 1467 tasklet_kill(&sha_dd->done_task);
1462sha_dd_err: 1468sha_dd_err:
@@ -1483,12 +1489,7 @@ static int atmel_sha_remove(struct platform_device *pdev)
1483 if (sha_dd->caps.has_dma) 1489 if (sha_dd->caps.has_dma)
1484 atmel_sha_dma_cleanup(sha_dd); 1490 atmel_sha_dma_cleanup(sha_dd);
1485 1491
1486 iounmap(sha_dd->io_base); 1492 clk_unprepare(sha_dd->iclk);
1487
1488 clk_put(sha_dd->iclk);
1489
1490 if (sha_dd->irq >= 0)
1491 free_irq(sha_dd->irq, sha_dd);
1492 1493
1493 return 0; 1494 return 0;
1494} 1495}
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 8abb4bc548cc..69d4a1326fee 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -534,8 +534,8 @@ static int caam_probe(struct platform_device *pdev)
534 * long pointers in master configuration register 534 * long pointers in master configuration register
535 */ 535 */
536 clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK, MCFGR_AWCACHE_CACH | 536 clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK, MCFGR_AWCACHE_CACH |
537 MCFGR_WDENABLE | (sizeof(dma_addr_t) == sizeof(u64) ? 537 MCFGR_AWCACHE_BUFF | MCFGR_WDENABLE |
538 MCFGR_LONG_PTR : 0)); 538 (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
539 539
540 /* 540 /*
 541	 * Read the Compile Time parameters and SCFGR to determine 541
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index 0643e3366e33..c0656e7f37b5 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -306,7 +306,7 @@ static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa)
306 return -ENOMEM; 306 return -ENOMEM;
307 307
308 dma->padding_pool = dmam_pool_create("cesa_padding", dev, 72, 1, 0); 308 dma->padding_pool = dmam_pool_create("cesa_padding", dev, 72, 1, 0);
309 if (!dma->cache_pool) 309 if (!dma->padding_pool)
310 return -ENOMEM; 310 return -ENOMEM;
311 311
312 cesa->dma = dma; 312 cesa->dma = dma;
diff --git a/drivers/devfreq/tegra-devfreq.c b/drivers/devfreq/tegra-devfreq.c
index 848b93ee930f..fe9dce0245bf 100644
--- a/drivers/devfreq/tegra-devfreq.c
+++ b/drivers/devfreq/tegra-devfreq.c
@@ -500,6 +500,8 @@ static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
500 clk_set_min_rate(tegra->emc_clock, rate); 500 clk_set_min_rate(tegra->emc_clock, rate);
501 clk_set_rate(tegra->emc_clock, 0); 501 clk_set_rate(tegra->emc_clock, 0);
502 502
503 *freq = rate;
504
503 return 0; 505 return 0;
504} 506}
505 507
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 64f5d1bdbb48..8e304b1befc5 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -176,6 +176,7 @@
176#define AT_XDMAC_MAX_CHAN 0x20 176#define AT_XDMAC_MAX_CHAN 0x20
177#define AT_XDMAC_MAX_CSIZE 16 /* 16 data */ 177#define AT_XDMAC_MAX_CSIZE 16 /* 16 data */
178#define AT_XDMAC_MAX_DWIDTH 8 /* 64 bits */ 178#define AT_XDMAC_MAX_DWIDTH 8 /* 64 bits */
179#define AT_XDMAC_RESIDUE_MAX_RETRIES 5
179 180
180#define AT_XDMAC_DMA_BUSWIDTHS\ 181#define AT_XDMAC_DMA_BUSWIDTHS\
181 (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\ 182 (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
@@ -1395,8 +1396,8 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
1395 struct at_xdmac_desc *desc, *_desc; 1396 struct at_xdmac_desc *desc, *_desc;
1396 struct list_head *descs_list; 1397 struct list_head *descs_list;
1397 enum dma_status ret; 1398 enum dma_status ret;
1398 int residue; 1399 int residue, retry;
1399 u32 cur_nda, mask, value; 1400 u32 cur_nda, check_nda, cur_ubc, mask, value;
1400 u8 dwidth = 0; 1401 u8 dwidth = 0;
1401 unsigned long flags; 1402 unsigned long flags;
1402 1403
@@ -1433,7 +1434,42 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
1433 cpu_relax(); 1434 cpu_relax();
1434 } 1435 }
1435 1436
1437 /*
 1438	 * When processing the residue, we need to read two registers but we
 1439	 * can't do it atomically. AT_XDMAC_CNDA is used to find where we
 1440	 * stand in the descriptor list, and AT_XDMAC_CUBC tells us how much
 1441	 * data remains for the current descriptor.
 1442	 * Since the DMA channel is not paused (so as not to lose data), the
 1443	 * descriptor may change between the AT_XDMAC_CNDA and AT_XDMAC_CUBC
 1444	 * reads.
 1445	 * For that reason, after reading AT_XDMAC_CUBC, we check whether we
 1446	 * are still on the same descriptor by reading AT_XDMAC_CNDA a second
 1447	 * time. If AT_XDMAC_CNDA has changed, we have to read AT_XDMAC_CUBC
 1448	 * again.
 1449	 * Memory barriers are used to ensure the read order of the registers.
 1450	 * A maximum number of retries is set because, although unlikely, the
 1451	 * loop could otherwise never end when transferring lots of data with small buffers.
1452 */
1436 cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc; 1453 cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
1454 rmb();
1455 cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
1456 for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
1457 rmb();
1458 check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
1459
1460 if (likely(cur_nda == check_nda))
1461 break;
1462
1463 cur_nda = check_nda;
1464 rmb();
1465 cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
1466 }
1467
1468 if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) {
1469 ret = DMA_ERROR;
1470 goto spin_unlock;
1471 }
1472
1437 /* 1473 /*
1438 * Remove size of all microblocks already transferred and the current 1474 * Remove size of all microblocks already transferred and the current
1439 * one. Then add the remaining size to transfer of the current 1475 * one. Then add the remaining size to transfer of the current
@@ -1446,7 +1482,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
1446 if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda) 1482 if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
1447 break; 1483 break;
1448 } 1484 }
1449 residue += at_xdmac_chan_read(atchan, AT_XDMAC_CUBC) << dwidth; 1485 residue += cur_ubc << dwidth;
1450 1486
1451 dma_set_residue(txstate, residue); 1487 dma_set_residue(txstate, residue);
1452 1488
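
The comment added to at_xdmac_tx_status() describes a bounded "read, barrier, re-check" loop for sampling two registers that cannot be read atomically. A generic sketch of that scheme, using made-up register offsets and a hypothetical helper rather than the AT_XDMAC ones:

#include <linux/io.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/compiler.h>
#include <asm/barrier.h>

#define EXAMPLE_POS_REG		0x00	/* illustrative offsets */
#define EXAMPLE_COUNT_REG	0x04
#define EXAMPLE_MAX_RETRIES	5

static int example_read_pos_count(void __iomem *base, u32 *pos, u32 *count)
{
	u32 p, c, check;
	int retry;

	p = readl_relaxed(base + EXAMPLE_POS_REG);
	rmb();					/* position before count */
	c = readl_relaxed(base + EXAMPLE_COUNT_REG);

	for (retry = 0; retry < EXAMPLE_MAX_RETRIES; retry++) {
		rmb();				/* count before the re-check */
		check = readl_relaxed(base + EXAMPLE_POS_REG);
		if (likely(p == check))
			break;			/* stable pair, done */

		p = check;			/* position moved: start over */
		rmb();
		c = readl_relaxed(base + EXAMPLE_COUNT_REG);
	}

	if (retry >= EXAMPLE_MAX_RETRIES)
		return -EIO;			/* never stabilised */

	*pos = p;
	*count = c;
	return 0;
}
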
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index e893318560db..5ad0ec1f0e29 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -156,7 +156,6 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
156 156
157 /* Enable interrupts */ 157 /* Enable interrupts */
158 channel_set_bit(dw, MASK.XFER, dwc->mask); 158 channel_set_bit(dw, MASK.XFER, dwc->mask);
159 channel_set_bit(dw, MASK.BLOCK, dwc->mask);
160 channel_set_bit(dw, MASK.ERROR, dwc->mask); 159 channel_set_bit(dw, MASK.ERROR, dwc->mask);
161 160
162 dwc->initialized = true; 161 dwc->initialized = true;
@@ -588,6 +587,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
588 587
589 spin_unlock_irqrestore(&dwc->lock, flags); 588 spin_unlock_irqrestore(&dwc->lock, flags);
590 } 589 }
590
591 /* Re-enable interrupts */
592 channel_set_bit(dw, MASK.BLOCK, dwc->mask);
591} 593}
592 594
593/* ------------------------------------------------------------------------- */ 595/* ------------------------------------------------------------------------- */
@@ -618,11 +620,8 @@ static void dw_dma_tasklet(unsigned long data)
618 dwc_scan_descriptors(dw, dwc); 620 dwc_scan_descriptors(dw, dwc);
619 } 621 }
620 622
621 /* 623 /* Re-enable interrupts */
622 * Re-enable interrupts.
623 */
624 channel_set_bit(dw, MASK.XFER, dw->all_chan_mask); 624 channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
625 channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask);
626 channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask); 625 channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
627} 626}
628 627
@@ -1261,6 +1260,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
1261int dw_dma_cyclic_start(struct dma_chan *chan) 1260int dw_dma_cyclic_start(struct dma_chan *chan)
1262{ 1261{
1263 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1262 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1263 struct dw_dma *dw = to_dw_dma(chan->device);
1264 unsigned long flags; 1264 unsigned long flags;
1265 1265
1266 if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) { 1266 if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
@@ -1269,7 +1269,12 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
1269 } 1269 }
1270 1270
1271 spin_lock_irqsave(&dwc->lock, flags); 1271 spin_lock_irqsave(&dwc->lock, flags);
1272
1273 /* Enable interrupts to perform cyclic transfer */
1274 channel_set_bit(dw, MASK.BLOCK, dwc->mask);
1275
1272 dwc_dostart(dwc, dwc->cdesc->desc[0]); 1276 dwc_dostart(dwc, dwc->cdesc->desc[0]);
1277
1273 spin_unlock_irqrestore(&dwc->lock, flags); 1278 spin_unlock_irqrestore(&dwc->lock, flags);
1274 1279
1275 return 0; 1280 return 0;
diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c
index 4c30fdd092b3..358f9689a3f5 100644
--- a/drivers/dma/dw/pci.c
+++ b/drivers/dma/dw/pci.c
@@ -108,6 +108,10 @@ static const struct pci_device_id dw_pci_id_table[] = {
108 108
109 /* Haswell */ 109 /* Haswell */
110 { PCI_VDEVICE(INTEL, 0x9c60) }, 110 { PCI_VDEVICE(INTEL, 0x9c60) },
111
112 /* Broadwell */
113 { PCI_VDEVICE(INTEL, 0x9ce0) },
114
111 { } 115 { }
112}; 116};
113MODULE_DEVICE_TABLE(pci, dw_pci_id_table); 117MODULE_DEVICE_TABLE(pci, dw_pci_id_table);
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index d92d65549406..e3d7fcb69b4c 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -113,6 +113,9 @@
113#define GET_NUM_REGN(x) ((x & 0x300000) >> 20) /* bits 20-21 */ 113#define GET_NUM_REGN(x) ((x & 0x300000) >> 20) /* bits 20-21 */
114#define CHMAP_EXIST BIT(24) 114#define CHMAP_EXIST BIT(24)
115 115
116/* CCSTAT register */
117#define EDMA_CCSTAT_ACTV BIT(4)
118
116/* 119/*
117 * Max of 20 segments per channel to conserve PaRAM slots 120 * Max of 20 segments per channel to conserve PaRAM slots
 118 * Also note that MAX_NR_SG should be at least the number of periods 121
@@ -1680,9 +1683,20 @@ static void edma_issue_pending(struct dma_chan *chan)
1680 spin_unlock_irqrestore(&echan->vchan.lock, flags); 1683 spin_unlock_irqrestore(&echan->vchan.lock, flags);
1681} 1684}
1682 1685
1686/*
1687 * This limit exists to avoid a possible infinite loop when waiting for proof
1688 * that a particular transfer is completed. This limit can be hit if there
1689 * are large bursts to/from slow devices or the CPU is never able to catch
 1690 * the DMA hardware idle. On an AM335x transferring 48 bytes from the UART
1691 * RX-FIFO, as many as 55 loops have been seen.
1692 */
1693#define EDMA_MAX_TR_WAIT_LOOPS 1000
1694
1683static u32 edma_residue(struct edma_desc *edesc) 1695static u32 edma_residue(struct edma_desc *edesc)
1684{ 1696{
1685 bool dst = edesc->direction == DMA_DEV_TO_MEM; 1697 bool dst = edesc->direction == DMA_DEV_TO_MEM;
1698 int loop_count = EDMA_MAX_TR_WAIT_LOOPS;
1699 struct edma_chan *echan = edesc->echan;
1686 struct edma_pset *pset = edesc->pset; 1700 struct edma_pset *pset = edesc->pset;
1687 dma_addr_t done, pos; 1701 dma_addr_t done, pos;
1688 int i; 1702 int i;
@@ -1691,7 +1705,32 @@ static u32 edma_residue(struct edma_desc *edesc)
1691 * We always read the dst/src position from the first RamPar 1705 * We always read the dst/src position from the first RamPar
1692 * pset. That's the one which is active now. 1706 * pset. That's the one which is active now.
1693 */ 1707 */
1694 pos = edma_get_position(edesc->echan->ecc, edesc->echan->slot[0], dst); 1708 pos = edma_get_position(echan->ecc, echan->slot[0], dst);
1709
1710 /*
1711 * "pos" may represent a transfer request that is still being
1712 * processed by the EDMACC or EDMATC. We will busy wait until
1713 * any one of the situations occurs:
1714 * 1. the DMA hardware is idle
1715 * 2. a new transfer request is setup
1716 * 3. we hit the loop limit
1717 */
1718 while (edma_read(echan->ecc, EDMA_CCSTAT) & EDMA_CCSTAT_ACTV) {
1719 /* check if a new transfer request is setup */
1720 if (edma_get_position(echan->ecc,
1721 echan->slot[0], dst) != pos) {
1722 break;
1723 }
1724
1725 if (!--loop_count) {
1726 dev_dbg_ratelimited(echan->vchan.chan.device->dev,
1727 "%s: timeout waiting for PaRAM update\n",
1728 __func__);
1729 break;
1730 }
1731
1732 cpu_relax();
1733 }
1695 1734
1696 /* 1735 /*
1697 * Cyclic is simple. Just subtract pset[0].addr from pos. 1736 * Cyclic is simple. Just subtract pset[0].addr from pos.
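
The new edma_residue() code busy-waits, with a loop limit, until the controller reports idle or the position register moves on to a new transfer. A stand-alone sketch of that bounded poll, again with illustrative register offsets and bit names rather than the EDMA ones:

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/types.h>
#include <asm/processor.h>

#define EXAMPLE_STAT_REG	0x00		/* illustrative offsets/bits */
#define EXAMPLE_POS_REG		0x04
#define EXAMPLE_STAT_ACTIVE	BIT(4)
#define EXAMPLE_MAX_WAIT_LOOPS	1000

static u32 example_wait_for_stable_pos(void __iomem *base)
{
	u32 pos = readl(base + EXAMPLE_POS_REG);
	int loops = EXAMPLE_MAX_WAIT_LOOPS;

	while (readl(base + EXAMPLE_STAT_REG) & EXAMPLE_STAT_ACTIVE) {
		if (readl(base + EXAMPLE_POS_REG) != pos)
			break;			/* a new transfer was set up */

		if (!--loops)
			break;			/* give up after too many polls */

		cpu_relax();			/* polite busy-wait */
	}

	return pos;
}
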
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 2209f75fdf05..aac85c30c2cf 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -522,6 +522,8 @@ static dma_cookie_t fsldma_run_tx_complete_actions(struct fsldma_chan *chan,
522 chan_dbg(chan, "LD %p callback\n", desc); 522 chan_dbg(chan, "LD %p callback\n", desc);
523 txd->callback(txd->callback_param); 523 txd->callback(txd->callback_param);
524 } 524 }
525
526 dma_descriptor_unmap(txd);
525 } 527 }
526 528
527 /* Run any dependencies */ 529 /* Run any dependencies */
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 1d5df2ef148b..21539d5c54c3 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -861,32 +861,42 @@ void ioat_timer_event(unsigned long data)
861 return; 861 return;
862 } 862 }
863 863
864 spin_lock_bh(&ioat_chan->cleanup_lock);
865
866 /* handle the no-actives case */
867 if (!ioat_ring_active(ioat_chan)) {
868 spin_lock_bh(&ioat_chan->prep_lock);
869 check_active(ioat_chan);
870 spin_unlock_bh(&ioat_chan->prep_lock);
871 spin_unlock_bh(&ioat_chan->cleanup_lock);
872 return;
873 }
874
864 /* if we haven't made progress and we have already 875 /* if we haven't made progress and we have already
865 * acknowledged a pending completion once, then be more 876 * acknowledged a pending completion once, then be more
866 * forceful with a restart 877 * forceful with a restart
867 */ 878 */
868 spin_lock_bh(&ioat_chan->cleanup_lock);
869 if (ioat_cleanup_preamble(ioat_chan, &phys_complete)) 879 if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
870 __cleanup(ioat_chan, phys_complete); 880 __cleanup(ioat_chan, phys_complete);
871 else if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) { 881 else if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) {
882 u32 chanerr;
883
884 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
885 dev_warn(to_dev(ioat_chan), "Restarting channel...\n");
886 dev_warn(to_dev(ioat_chan), "CHANSTS: %#Lx CHANERR: %#x\n",
887 status, chanerr);
888 dev_warn(to_dev(ioat_chan), "Active descriptors: %d\n",
889 ioat_ring_active(ioat_chan));
890
872 spin_lock_bh(&ioat_chan->prep_lock); 891 spin_lock_bh(&ioat_chan->prep_lock);
873 ioat_restart_channel(ioat_chan); 892 ioat_restart_channel(ioat_chan);
874 spin_unlock_bh(&ioat_chan->prep_lock); 893 spin_unlock_bh(&ioat_chan->prep_lock);
875 spin_unlock_bh(&ioat_chan->cleanup_lock); 894 spin_unlock_bh(&ioat_chan->cleanup_lock);
876 return; 895 return;
877 } else { 896 } else
878 set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state); 897 set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
879 mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
880 }
881
882 898
883 if (ioat_ring_active(ioat_chan)) 899 mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
884 mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
885 else {
886 spin_lock_bh(&ioat_chan->prep_lock);
887 check_active(ioat_chan);
888 spin_unlock_bh(&ioat_chan->prep_lock);
889 }
890 spin_unlock_bh(&ioat_chan->cleanup_lock); 900 spin_unlock_bh(&ioat_chan->cleanup_lock);
891} 901}
892 902
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index f2a0310ae771..debca824bed6 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -583,6 +583,8 @@ static void set_updater_desc(struct pxad_desc_sw *sw_desc,
583 (PXA_DCMD_LENGTH & sizeof(u32)); 583 (PXA_DCMD_LENGTH & sizeof(u32));
584 if (flags & DMA_PREP_INTERRUPT) 584 if (flags & DMA_PREP_INTERRUPT)
585 updater->dcmd |= PXA_DCMD_ENDIRQEN; 585 updater->dcmd |= PXA_DCMD_ENDIRQEN;
586 if (sw_desc->cyclic)
587 sw_desc->hw_desc[sw_desc->nb_desc - 2]->ddadr = sw_desc->first;
586} 588}
587 589
588static bool is_desc_completed(struct virt_dma_desc *vd) 590static bool is_desc_completed(struct virt_dma_desc *vd)
@@ -673,6 +675,10 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
673 dev_dbg(&chan->vc.chan.dev->device, 675 dev_dbg(&chan->vc.chan.dev->device,
674 "%s(): checking txd %p[%x]: completed=%d\n", 676 "%s(): checking txd %p[%x]: completed=%d\n",
675 __func__, vd, vd->tx.cookie, is_desc_completed(vd)); 677 __func__, vd, vd->tx.cookie, is_desc_completed(vd));
678 if (to_pxad_sw_desc(vd)->cyclic) {
679 vchan_cyclic_callback(vd);
680 break;
681 }
676 if (is_desc_completed(vd)) { 682 if (is_desc_completed(vd)) {
677 list_del(&vd->node); 683 list_del(&vd->node);
678 vchan_cookie_complete(vd); 684 vchan_cookie_complete(vd);
@@ -1080,7 +1086,7 @@ pxad_prep_dma_cyclic(struct dma_chan *dchan,
1080 return NULL; 1086 return NULL;
1081 1087
1082 pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr); 1088 pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr);
1083 dcmd |= PXA_DCMD_ENDIRQEN | (PXA_DCMD_LENGTH | period_len); 1089 dcmd |= PXA_DCMD_ENDIRQEN | (PXA_DCMD_LENGTH & period_len);
1084 dev_dbg(&chan->vc.chan.dev->device, 1090 dev_dbg(&chan->vc.chan.dev->device,
1085 "%s(): buf_addr=0x%lx len=%zu period=%zu dir=%d flags=%lx\n", 1091 "%s(): buf_addr=0x%lx len=%zu period=%zu dir=%d flags=%lx\n",
1086 __func__, (unsigned long)buf_addr, len, period_len, dir, flags); 1092 __func__, (unsigned long)buf_addr, len, period_len, dir, flags);
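
The one-character pxa_dma fix in pxad_prep_dma_cyclic() matters because PXA_DCMD_LENGTH is the mask of the length bitfield in DCMD: OR-ing the mask in would set every length bit, while AND-ing clips the requested period length into the field. A standalone illustration of packing a value through a field mask (LEN_MASK is a made-up stand-in, not the real PXA_DCMD_LENGTH value):

#include <stdio.h>
#include <stdint.h>

#define LEN_MASK 0x1fffu	/* pretend length field occupying the low bits */

int main(void)
{
	uint32_t period_len = 0x2100;	/* deliberately wider than the field */

	uint32_t wrong = LEN_MASK | period_len;	/* sets *all* length bits */
	uint32_t right = LEN_MASK & period_len;	/* keeps only in-field bits */

	printf("mask|len = 0x%x, mask&len = 0x%x\n", wrong, right);
	return 0;
}
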
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index e438ee5b433f..f5c6b97c8958 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -1574,7 +1574,7 @@ static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes)
1574 for (cha = 0; cha < KNL_MAX_CHAS; cha++) { 1574 for (cha = 0; cha < KNL_MAX_CHAS; cha++) {
1575 if (knl_get_mc_route(target, 1575 if (knl_get_mc_route(target,
1576 mc_route_reg[cha]) == channel 1576 mc_route_reg[cha]) == channel
1577 && participants[channel]) { 1577 && !participants[channel]) {
1578 participant_count++; 1578 participant_count++;
1579 participants[channel] = 1; 1579 participants[channel] = 1;
1580 break; 1580 break;
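
The sb_edac one-liner inverts the guard so each channel is counted at most once: the loop only bumps participant_count when participants[channel] has not been set yet, and then marks it. The same pattern, reduced to a runnable toy (the route[] values are made up):

#include <stdio.h>
#include <stdbool.h>

int main(void)
{
	int route[] = { 2, 5, 2, 7, 5 };	/* sample per-CHA routing results */
	bool seen[8] = { false };
	int count = 0;

	for (unsigned int i = 0; i < sizeof(route) / sizeof(route[0]); i++) {
		if (!seen[route[i]]) {		/* count each channel once */
			seen[route[i]] = true;
			count++;
		}
	}
	printf("unique channels: %d\n", count);	/* prints 3 */
	return 0;
}
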
diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
index 756eca8c4cf8..10e6774ab2a2 100644
--- a/drivers/firmware/efi/efivars.c
+++ b/drivers/firmware/efi/efivars.c
@@ -221,7 +221,7 @@ sanity_check(struct efi_variable *var, efi_char16_t *name, efi_guid_t vendor,
221 } 221 }
222 222
223 if ((attributes & ~EFI_VARIABLE_MASK) != 0 || 223 if ((attributes & ~EFI_VARIABLE_MASK) != 0 ||
224 efivar_validate(name, data, size) == false) { 224 efivar_validate(vendor, name, data, size) == false) {
225 printk(KERN_ERR "efivars: Malformed variable content\n"); 225 printk(KERN_ERR "efivars: Malformed variable content\n");
226 return -EINVAL; 226 return -EINVAL;
227 } 227 }
@@ -447,7 +447,8 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
447 } 447 }
448 448
449 if ((attributes & ~EFI_VARIABLE_MASK) != 0 || 449 if ((attributes & ~EFI_VARIABLE_MASK) != 0 ||
450 efivar_validate(name, data, size) == false) { 450 efivar_validate(new_var->VendorGuid, name, data,
451 size) == false) {
451 printk(KERN_ERR "efivars: Malformed variable content\n"); 452 printk(KERN_ERR "efivars: Malformed variable content\n");
452 return -EINVAL; 453 return -EINVAL;
453 } 454 }
@@ -540,38 +541,30 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
540static int 541static int
541efivar_create_sysfs_entry(struct efivar_entry *new_var) 542efivar_create_sysfs_entry(struct efivar_entry *new_var)
542{ 543{
543 int i, short_name_size; 544 int short_name_size;
544 char *short_name; 545 char *short_name;
545 unsigned long variable_name_size; 546 unsigned long utf8_name_size;
546 efi_char16_t *variable_name; 547 efi_char16_t *variable_name = new_var->var.VariableName;
547 int ret; 548 int ret;
548 549
549 variable_name = new_var->var.VariableName;
550 variable_name_size = ucs2_strlen(variable_name) * sizeof(efi_char16_t);
551
552 /* 550 /*
553 * Length of the variable bytes in ASCII, plus the '-' separator, 551 * Length of the variable bytes in UTF8, plus the '-' separator,
554 * plus the GUID, plus trailing NUL 552 * plus the GUID, plus trailing NUL
555 */ 553 */
556 short_name_size = variable_name_size / sizeof(efi_char16_t) 554 utf8_name_size = ucs2_utf8size(variable_name);
557 + 1 + EFI_VARIABLE_GUID_LEN + 1; 555 short_name_size = utf8_name_size + 1 + EFI_VARIABLE_GUID_LEN + 1;
558
559 short_name = kzalloc(short_name_size, GFP_KERNEL);
560 556
557 short_name = kmalloc(short_name_size, GFP_KERNEL);
561 if (!short_name) 558 if (!short_name)
562 return -ENOMEM; 559 return -ENOMEM;
563 560
564 /* Convert Unicode to normal chars (assume top bits are 0), 561 ucs2_as_utf8(short_name, variable_name, short_name_size);
565 ala UTF-8 */ 562
566 for (i=0; i < (int)(variable_name_size / sizeof(efi_char16_t)); i++) {
567 short_name[i] = variable_name[i] & 0xFF;
568 }
569 /* This is ugly, but necessary to separate one vendor's 563 /* This is ugly, but necessary to separate one vendor's
570 private variables from another's. */ 564 private variables from another's. */
571 565 short_name[utf8_name_size] = '-';
572 *(short_name + strlen(short_name)) = '-';
573 efi_guid_to_str(&new_var->var.VendorGuid, 566 efi_guid_to_str(&new_var->var.VendorGuid,
574 short_name + strlen(short_name)); 567 short_name + utf8_name_size + 1);
575 568
576 new_var->kobj.kset = efivars_kset; 569 new_var->kobj.kset = efivars_kset;
577 570
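
The efivar_create_sysfs_entry() rework sizes the sysfs name with ucs2_utf8size(), converts with ucs2_as_utf8(), and writes the '-' separator and GUID at known offsets instead of relying on strlen() over a zeroed buffer. A hedged kernel-context sketch of just the conversion step (ucs2_name_to_utf8() is an invented helper; error handling beyond the allocation check is omitted):

#include <linux/efi.h>
#include <linux/slab.h>
#include <linux/ucs2_string.h>

static u8 *ucs2_name_to_utf8(const efi_char16_t *name)
{
	unsigned long utf8_size = ucs2_utf8size(name);
	u8 *buf = kmalloc(utf8_size + 1, GFP_KERNEL);

	if (!buf)
		return NULL;

	ucs2_as_utf8(buf, name, utf8_size);	/* writes at most utf8_size bytes */
	buf[utf8_size] = '\0';			/* the helper does not NUL-terminate */
	return buf;
}
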
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index 70a0fb10517f..7f2ea21c730d 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -165,67 +165,133 @@ validate_ascii_string(efi_char16_t *var_name, int match, u8 *buffer,
165} 165}
166 166
167struct variable_validate { 167struct variable_validate {
168 efi_guid_t vendor;
168 char *name; 169 char *name;
169 bool (*validate)(efi_char16_t *var_name, int match, u8 *data, 170 bool (*validate)(efi_char16_t *var_name, int match, u8 *data,
170 unsigned long len); 171 unsigned long len);
171}; 172};
172 173
174/*
175 * This is the list of variables we need to validate, as well as the
176 * whitelist for what we think is safe not to default to immutable.
177 *
178 * If it has a validate() method that's not NULL, it'll go into the
179 * validation routine. If not, it is assumed valid, but still used for
180 * whitelisting.
181 *
182 * Note that it's sorted by {vendor,name}, but globbed names must come after
183 * any other name with the same prefix.
184 */
173static const struct variable_validate variable_validate[] = { 185static const struct variable_validate variable_validate[] = {
174 { "BootNext", validate_uint16 }, 186 { EFI_GLOBAL_VARIABLE_GUID, "BootNext", validate_uint16 },
175 { "BootOrder", validate_boot_order }, 187 { EFI_GLOBAL_VARIABLE_GUID, "BootOrder", validate_boot_order },
176 { "DriverOrder", validate_boot_order }, 188 { EFI_GLOBAL_VARIABLE_GUID, "Boot*", validate_load_option },
177 { "Boot*", validate_load_option }, 189 { EFI_GLOBAL_VARIABLE_GUID, "DriverOrder", validate_boot_order },
178 { "Driver*", validate_load_option }, 190 { EFI_GLOBAL_VARIABLE_GUID, "Driver*", validate_load_option },
179 { "ConIn", validate_device_path }, 191 { EFI_GLOBAL_VARIABLE_GUID, "ConIn", validate_device_path },
180 { "ConInDev", validate_device_path }, 192 { EFI_GLOBAL_VARIABLE_GUID, "ConInDev", validate_device_path },
181 { "ConOut", validate_device_path }, 193 { EFI_GLOBAL_VARIABLE_GUID, "ConOut", validate_device_path },
182 { "ConOutDev", validate_device_path }, 194 { EFI_GLOBAL_VARIABLE_GUID, "ConOutDev", validate_device_path },
183 { "ErrOut", validate_device_path }, 195 { EFI_GLOBAL_VARIABLE_GUID, "ErrOut", validate_device_path },
184 { "ErrOutDev", validate_device_path }, 196 { EFI_GLOBAL_VARIABLE_GUID, "ErrOutDev", validate_device_path },
185 { "Timeout", validate_uint16 }, 197 { EFI_GLOBAL_VARIABLE_GUID, "Lang", validate_ascii_string },
186 { "Lang", validate_ascii_string }, 198 { EFI_GLOBAL_VARIABLE_GUID, "OsIndications", NULL },
187 { "PlatformLang", validate_ascii_string }, 199 { EFI_GLOBAL_VARIABLE_GUID, "PlatformLang", validate_ascii_string },
188 { "", NULL }, 200 { EFI_GLOBAL_VARIABLE_GUID, "Timeout", validate_uint16 },
201 { LINUX_EFI_CRASH_GUID, "*", NULL },
202 { NULL_GUID, "", NULL },
189}; 203};
190 204
205static bool
206variable_matches(const char *var_name, size_t len, const char *match_name,
207 int *match)
208{
209 for (*match = 0; ; (*match)++) {
210 char c = match_name[*match];
211 char u = var_name[*match];
212
213 /* Wildcard in the matching name means we've matched */
214 if (c == '*')
215 return true;
216
217 /* Case sensitive match */
218 if (!c && *match == len)
219 return true;
220
221 if (c != u)
222 return false;
223
224 if (!c)
225 return true;
226 }
227 return true;
228}
229
191bool 230bool
192efivar_validate(efi_char16_t *var_name, u8 *data, unsigned long len) 231efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data,
232 unsigned long data_size)
193{ 233{
194 int i; 234 int i;
195 u16 *unicode_name = var_name; 235 unsigned long utf8_size;
236 u8 *utf8_name;
196 237
197 for (i = 0; variable_validate[i].validate != NULL; i++) { 238 utf8_size = ucs2_utf8size(var_name);
198 const char *name = variable_validate[i].name; 239 utf8_name = kmalloc(utf8_size + 1, GFP_KERNEL);
199 int match; 240 if (!utf8_name)
241 return false;
200 242
201 for (match = 0; ; match++) { 243 ucs2_as_utf8(utf8_name, var_name, utf8_size);
202 char c = name[match]; 244 utf8_name[utf8_size] = '\0';
203 u16 u = unicode_name[match];
204 245
205 /* All special variables are plain ascii */ 246 for (i = 0; variable_validate[i].name[0] != '\0'; i++) {
206 if (u > 127) 247 const char *name = variable_validate[i].name;
207 return true; 248 int match = 0;
208 249
209 /* Wildcard in the matching name means we've matched */ 250 if (efi_guidcmp(vendor, variable_validate[i].vendor))
210 if (c == '*') 251 continue;
211 return variable_validate[i].validate(var_name,
212 match, data, len);
213 252
214 /* Case sensitive match */ 253 if (variable_matches(utf8_name, utf8_size+1, name, &match)) {
215 if (c != u) 254 if (variable_validate[i].validate == NULL)
216 break; 255 break;
217 256 kfree(utf8_name);
218 /* Reached the end of the string while matching */ 257 return variable_validate[i].validate(var_name, match,
219 if (!c) 258 data, data_size);
220 return variable_validate[i].validate(var_name,
221 match, data, len);
222 } 259 }
223 } 260 }
224 261 kfree(utf8_name);
225 return true; 262 return true;
226} 263}
227EXPORT_SYMBOL_GPL(efivar_validate); 264EXPORT_SYMBOL_GPL(efivar_validate);
228 265
266bool
267efivar_variable_is_removable(efi_guid_t vendor, const char *var_name,
268 size_t len)
269{
270 int i;
271 bool found = false;
272 int match = 0;
273
274 /*
275 * Check if our variable is in the validated variables list
276 */
277 for (i = 0; variable_validate[i].name[0] != '\0'; i++) {
278 if (efi_guidcmp(variable_validate[i].vendor, vendor))
279 continue;
280
281 if (variable_matches(var_name, len,
282 variable_validate[i].name, &match)) {
283 found = true;
284 break;
285 }
286 }
287
288 /*
289 * If it's in our list, it is removable.
290 */
291 return found;
292}
293EXPORT_SYMBOL_GPL(efivar_variable_is_removable);
294
229static efi_status_t 295static efi_status_t
230check_var_size(u32 attributes, unsigned long size) 296check_var_size(u32 attributes, unsigned long size)
231{ 297{
@@ -852,7 +918,7 @@ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
852 918
853 *set = false; 919 *set = false;
854 920
855 if (efivar_validate(name, data, *size) == false) 921 if (efivar_validate(*vendor, name, data, *size) == false)
856 return -EINVAL; 922 return -EINVAL;
857 923
858 /* 924 /*
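
variable_matches() above is a minimal prefix glob: a '*' in the table entry accepts any remaining suffix, otherwise the comparison is exact and case sensitive, which is what lets entries such as "Boot*" cover Boot0000, Boot0001 and so on. A standalone re-implementation of that logic with a few checks, purely for illustration (it drops the utf8-length argument the kernel version carries):

#include <stdbool.h>
#include <stdio.h>

static bool matches(const char *var, const char *pattern)
{
	for (size_t i = 0; ; i++) {
		char c = pattern[i];

		if (c == '*')		/* glob: the rest of the name is accepted */
			return true;
		if (c != var[i])	/* mismatch, or one string ended early */
			return false;
		if (c == '\0')		/* both ended together: exact match */
			return true;
	}
}

int main(void)
{
	printf("%d %d %d\n",
	       matches("Boot0001", "Boot*"),		/* 1 */
	       matches("BootOrder", "BootOrder"),	/* 1 */
	       matches("Lang", "Boot*"));		/* 0 */
	return 0;
}
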
diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c
index 2aeaebd1c6e7..3f87a03abc22 100644
--- a/drivers/gpio/gpio-altera.c
+++ b/drivers/gpio/gpio-altera.c
@@ -312,8 +312,8 @@ static int altera_gpio_probe(struct platform_device *pdev)
312 handle_simple_irq, IRQ_TYPE_NONE); 312 handle_simple_irq, IRQ_TYPE_NONE);
313 313
314 if (ret) { 314 if (ret) {
315 dev_info(&pdev->dev, "could not add irqchip\n"); 315 dev_err(&pdev->dev, "could not add irqchip\n");
316 return ret; 316 goto teardown;
317 } 317 }
318 318
319 gpiochip_set_chained_irqchip(&altera_gc->mmchip.gc, 319 gpiochip_set_chained_irqchip(&altera_gc->mmchip.gc,
@@ -326,6 +326,7 @@ static int altera_gpio_probe(struct platform_device *pdev)
326skip_irq: 326skip_irq:
327 return 0; 327 return 0;
328teardown: 328teardown:
329 of_mm_gpiochip_remove(&altera_gc->mmchip);
329 pr_err("%s: registration failed with status %d\n", 330 pr_err("%s: registration failed with status %d\n",
330 node->full_name, ret); 331 node->full_name, ret);
331 332
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index ec58f4288649..cd007a67b302 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -195,7 +195,7 @@ static int davinci_gpio_of_xlate(struct gpio_chip *gc,
195static int davinci_gpio_probe(struct platform_device *pdev) 195static int davinci_gpio_probe(struct platform_device *pdev)
196{ 196{
197 int i, base; 197 int i, base;
198 unsigned ngpio; 198 unsigned ngpio, nbank;
199 struct davinci_gpio_controller *chips; 199 struct davinci_gpio_controller *chips;
200 struct davinci_gpio_platform_data *pdata; 200 struct davinci_gpio_platform_data *pdata;
201 struct davinci_gpio_regs __iomem *regs; 201 struct davinci_gpio_regs __iomem *regs;
@@ -224,8 +224,9 @@ static int davinci_gpio_probe(struct platform_device *pdev)
224 if (WARN_ON(ARCH_NR_GPIOS < ngpio)) 224 if (WARN_ON(ARCH_NR_GPIOS < ngpio))
225 ngpio = ARCH_NR_GPIOS; 225 ngpio = ARCH_NR_GPIOS;
226 226
227 nbank = DIV_ROUND_UP(ngpio, 32);
227 chips = devm_kzalloc(dev, 228 chips = devm_kzalloc(dev,
228 ngpio * sizeof(struct davinci_gpio_controller), 229 nbank * sizeof(struct davinci_gpio_controller),
229 GFP_KERNEL); 230 GFP_KERNEL);
230 if (!chips) 231 if (!chips)
231 return -ENOMEM; 232 return -ENOMEM;
@@ -511,7 +512,7 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
511 return irq; 512 return irq;
512 } 513 }
513 514
514 irq_domain = irq_domain_add_legacy(NULL, ngpio, irq, 0, 515 irq_domain = irq_domain_add_legacy(dev->of_node, ngpio, irq, 0,
515 &davinci_gpio_irq_ops, 516 &davinci_gpio_irq_ops,
516 chips); 517 chips);
517 if (!irq_domain) { 518 if (!irq_domain) {
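
The davinci probe fix allocates one davinci_gpio_controller per 32-line bank rather than one per GPIO line, hence nbank = DIV_ROUND_UP(ngpio, 32). For reference, DIV_ROUND_UP is plain round-up integer division; a quick check of the arithmetic (the macro body below mirrors the kernel's definition):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	printf("%d %d %d\n",
	       DIV_ROUND_UP(32, 32),	/* 1 bank  */
	       DIV_ROUND_UP(33, 32),	/* 2 banks */
	       DIV_ROUND_UP(144, 32));	/* 5 banks */
	return 0;
}
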
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index cf41440aff91..d9ab0cd1d205 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -196,6 +196,44 @@ static int gpio_rcar_irq_set_wake(struct irq_data *d, unsigned int on)
196 return 0; 196 return 0;
197} 197}
198 198
199static void gpio_rcar_irq_bus_lock(struct irq_data *d)
200{
201 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
202 struct gpio_rcar_priv *p = gpiochip_get_data(gc);
203
204 pm_runtime_get_sync(&p->pdev->dev);
205}
206
207static void gpio_rcar_irq_bus_sync_unlock(struct irq_data *d)
208{
209 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
210 struct gpio_rcar_priv *p = gpiochip_get_data(gc);
211
212 pm_runtime_put(&p->pdev->dev);
213}
214
215
216static int gpio_rcar_irq_request_resources(struct irq_data *d)
217{
218 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
219 struct gpio_rcar_priv *p = gpiochip_get_data(gc);
220 int error;
221
222 error = pm_runtime_get_sync(&p->pdev->dev);
223 if (error < 0)
224 return error;
225
226 return 0;
227}
228
229static void gpio_rcar_irq_release_resources(struct irq_data *d)
230{
231 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
232 struct gpio_rcar_priv *p = gpiochip_get_data(gc);
233
234 pm_runtime_put(&p->pdev->dev);
235}
236
199static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id) 237static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id)
200{ 238{
201 struct gpio_rcar_priv *p = dev_id; 239 struct gpio_rcar_priv *p = dev_id;
@@ -450,6 +488,10 @@ static int gpio_rcar_probe(struct platform_device *pdev)
450 irq_chip->irq_unmask = gpio_rcar_irq_enable; 488 irq_chip->irq_unmask = gpio_rcar_irq_enable;
451 irq_chip->irq_set_type = gpio_rcar_irq_set_type; 489 irq_chip->irq_set_type = gpio_rcar_irq_set_type;
452 irq_chip->irq_set_wake = gpio_rcar_irq_set_wake; 490 irq_chip->irq_set_wake = gpio_rcar_irq_set_wake;
491 irq_chip->irq_bus_lock = gpio_rcar_irq_bus_lock;
492 irq_chip->irq_bus_sync_unlock = gpio_rcar_irq_bus_sync_unlock;
493 irq_chip->irq_request_resources = gpio_rcar_irq_request_resources;
494 irq_chip->irq_release_resources = gpio_rcar_irq_release_resources;
453 irq_chip->flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_MASK_ON_SUSPEND; 495 irq_chip->flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_MASK_ON_SUSPEND;
454 496
455 ret = gpiochip_add_data(gpio_chip, p); 497 ret = gpiochip_add_data(gpio_chip, p);
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 66f729eaf00b..20c9539abc36 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -25,7 +25,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
25 amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o 25 amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o
26 26
27# add asic specific block 27# add asic specific block
28amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o gmc_v7_0.o cik_ih.o kv_smc.o kv_dpm.o \ 28amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
29 ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o \ 29 ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o \
30 amdgpu_amdkfd_gfx_v7.o 30 amdgpu_amdkfd_gfx_v7.o
31 31
@@ -34,6 +34,7 @@ amdgpu-y += \
34 34
35# add GMC block 35# add GMC block
36amdgpu-y += \ 36amdgpu-y += \
37 gmc_v7_0.o \
37 gmc_v8_0.o 38 gmc_v8_0.o
38 39
39# add IH block 40# add IH block
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 313b0cc8d676..5e7770f9a415 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -87,6 +87,8 @@ extern int amdgpu_sched_jobs;
87extern int amdgpu_sched_hw_submission; 87extern int amdgpu_sched_hw_submission;
88extern int amdgpu_enable_semaphores; 88extern int amdgpu_enable_semaphores;
89extern int amdgpu_powerplay; 89extern int amdgpu_powerplay;
90extern unsigned amdgpu_pcie_gen_cap;
91extern unsigned amdgpu_pcie_lane_cap;
90 92
91#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000 93#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
92#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */ 94#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
@@ -132,47 +134,6 @@ extern int amdgpu_powerplay;
132#define AMDGPU_RESET_VCE (1 << 13) 134#define AMDGPU_RESET_VCE (1 << 13)
133#define AMDGPU_RESET_VCE1 (1 << 14) 135#define AMDGPU_RESET_VCE1 (1 << 14)
134 136
135/* CG block flags */
136#define AMDGPU_CG_BLOCK_GFX (1 << 0)
137#define AMDGPU_CG_BLOCK_MC (1 << 1)
138#define AMDGPU_CG_BLOCK_SDMA (1 << 2)
139#define AMDGPU_CG_BLOCK_UVD (1 << 3)
140#define AMDGPU_CG_BLOCK_VCE (1 << 4)
141#define AMDGPU_CG_BLOCK_HDP (1 << 5)
142#define AMDGPU_CG_BLOCK_BIF (1 << 6)
143
144/* CG flags */
145#define AMDGPU_CG_SUPPORT_GFX_MGCG (1 << 0)
146#define AMDGPU_CG_SUPPORT_GFX_MGLS (1 << 1)
147#define AMDGPU_CG_SUPPORT_GFX_CGCG (1 << 2)
148#define AMDGPU_CG_SUPPORT_GFX_CGLS (1 << 3)
149#define AMDGPU_CG_SUPPORT_GFX_CGTS (1 << 4)
150#define AMDGPU_CG_SUPPORT_GFX_CGTS_LS (1 << 5)
151#define AMDGPU_CG_SUPPORT_GFX_CP_LS (1 << 6)
152#define AMDGPU_CG_SUPPORT_GFX_RLC_LS (1 << 7)
153#define AMDGPU_CG_SUPPORT_MC_LS (1 << 8)
154#define AMDGPU_CG_SUPPORT_MC_MGCG (1 << 9)
155#define AMDGPU_CG_SUPPORT_SDMA_LS (1 << 10)
156#define AMDGPU_CG_SUPPORT_SDMA_MGCG (1 << 11)
157#define AMDGPU_CG_SUPPORT_BIF_LS (1 << 12)
158#define AMDGPU_CG_SUPPORT_UVD_MGCG (1 << 13)
159#define AMDGPU_CG_SUPPORT_VCE_MGCG (1 << 14)
160#define AMDGPU_CG_SUPPORT_HDP_LS (1 << 15)
161#define AMDGPU_CG_SUPPORT_HDP_MGCG (1 << 16)
162
163/* PG flags */
164#define AMDGPU_PG_SUPPORT_GFX_PG (1 << 0)
165#define AMDGPU_PG_SUPPORT_GFX_SMG (1 << 1)
166#define AMDGPU_PG_SUPPORT_GFX_DMG (1 << 2)
167#define AMDGPU_PG_SUPPORT_UVD (1 << 3)
168#define AMDGPU_PG_SUPPORT_VCE (1 << 4)
169#define AMDGPU_PG_SUPPORT_CP (1 << 5)
170#define AMDGPU_PG_SUPPORT_GDS (1 << 6)
171#define AMDGPU_PG_SUPPORT_RLC_SMU_HS (1 << 7)
172#define AMDGPU_PG_SUPPORT_SDMA (1 << 8)
173#define AMDGPU_PG_SUPPORT_ACP (1 << 9)
174#define AMDGPU_PG_SUPPORT_SAMU (1 << 10)
175
176/* GFX current status */ 137/* GFX current status */
177#define AMDGPU_GFX_NORMAL_MODE 0x00000000L 138#define AMDGPU_GFX_NORMAL_MODE 0x00000000L
178#define AMDGPU_GFX_SAFE_MODE 0x00000001L 139#define AMDGPU_GFX_SAFE_MODE 0x00000001L
@@ -606,8 +567,6 @@ struct amdgpu_sa_manager {
606 uint32_t align; 567 uint32_t align;
607}; 568};
608 569
609struct amdgpu_sa_bo;
610
611/* sub-allocation buffer */ 570/* sub-allocation buffer */
612struct amdgpu_sa_bo { 571struct amdgpu_sa_bo {
613 struct list_head olist; 572 struct list_head olist;
@@ -2278,60 +2237,60 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
2278#define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e)) 2237#define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
2279 2238
2280#define amdgpu_dpm_get_temperature(adev) \ 2239#define amdgpu_dpm_get_temperature(adev) \
2281 (adev)->pp_enabled ? \ 2240 ((adev)->pp_enabled ? \
2282 (adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \ 2241 (adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \
2283 (adev)->pm.funcs->get_temperature((adev)) 2242 (adev)->pm.funcs->get_temperature((adev)))
2284 2243
2285#define amdgpu_dpm_set_fan_control_mode(adev, m) \ 2244#define amdgpu_dpm_set_fan_control_mode(adev, m) \
2286 (adev)->pp_enabled ? \ 2245 ((adev)->pp_enabled ? \
2287 (adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)) : \ 2246 (adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)) : \
2288 (adev)->pm.funcs->set_fan_control_mode((adev), (m)) 2247 (adev)->pm.funcs->set_fan_control_mode((adev), (m)))
2289 2248
2290#define amdgpu_dpm_get_fan_control_mode(adev) \ 2249#define amdgpu_dpm_get_fan_control_mode(adev) \
2291 (adev)->pp_enabled ? \ 2250 ((adev)->pp_enabled ? \
2292 (adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle) : \ 2251 (adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle) : \
2293 (adev)->pm.funcs->get_fan_control_mode((adev)) 2252 (adev)->pm.funcs->get_fan_control_mode((adev)))
2294 2253
2295#define amdgpu_dpm_set_fan_speed_percent(adev, s) \ 2254#define amdgpu_dpm_set_fan_speed_percent(adev, s) \
2296 (adev)->pp_enabled ? \ 2255 ((adev)->pp_enabled ? \
2297 (adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \ 2256 (adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
2298 (adev)->pm.funcs->set_fan_speed_percent((adev), (s)) 2257 (adev)->pm.funcs->set_fan_speed_percent((adev), (s)))
2299 2258
2300#define amdgpu_dpm_get_fan_speed_percent(adev, s) \ 2259#define amdgpu_dpm_get_fan_speed_percent(adev, s) \
2301 (adev)->pp_enabled ? \ 2260 ((adev)->pp_enabled ? \
2302 (adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \ 2261 (adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
2303 (adev)->pm.funcs->get_fan_speed_percent((adev), (s)) 2262 (adev)->pm.funcs->get_fan_speed_percent((adev), (s)))
2304 2263
2305#define amdgpu_dpm_get_sclk(adev, l) \ 2264#define amdgpu_dpm_get_sclk(adev, l) \
2306 (adev)->pp_enabled ? \ 2265 ((adev)->pp_enabled ? \
2307 (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)) : \ 2266 (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)) : \
2308 (adev)->pm.funcs->get_sclk((adev), (l)) 2267 (adev)->pm.funcs->get_sclk((adev), (l)))
2309 2268
2310#define amdgpu_dpm_get_mclk(adev, l) \ 2269#define amdgpu_dpm_get_mclk(adev, l) \
2311 (adev)->pp_enabled ? \ 2270 ((adev)->pp_enabled ? \
2312 (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)) : \ 2271 (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)) : \
2313 (adev)->pm.funcs->get_mclk((adev), (l)) 2272 (adev)->pm.funcs->get_mclk((adev), (l)))
2314 2273
2315 2274
2316#define amdgpu_dpm_force_performance_level(adev, l) \ 2275#define amdgpu_dpm_force_performance_level(adev, l) \
2317 (adev)->pp_enabled ? \ 2276 ((adev)->pp_enabled ? \
2318 (adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)) : \ 2277 (adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)) : \
2319 (adev)->pm.funcs->force_performance_level((adev), (l)) 2278 (adev)->pm.funcs->force_performance_level((adev), (l)))
2320 2279
2321#define amdgpu_dpm_powergate_uvd(adev, g) \ 2280#define amdgpu_dpm_powergate_uvd(adev, g) \
2322 (adev)->pp_enabled ? \ 2281 ((adev)->pp_enabled ? \
2323 (adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)) : \ 2282 (adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)) : \
2324 (adev)->pm.funcs->powergate_uvd((adev), (g)) 2283 (adev)->pm.funcs->powergate_uvd((adev), (g)))
2325 2284
2326#define amdgpu_dpm_powergate_vce(adev, g) \ 2285#define amdgpu_dpm_powergate_vce(adev, g) \
2327 (adev)->pp_enabled ? \ 2286 ((adev)->pp_enabled ? \
2328 (adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \ 2287 (adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \
2329 (adev)->pm.funcs->powergate_vce((adev), (g)) 2288 (adev)->pm.funcs->powergate_vce((adev), (g)))
2330 2289
2331#define amdgpu_dpm_debugfs_print_current_performance_level(adev, m) \ 2290#define amdgpu_dpm_debugfs_print_current_performance_level(adev, m) \
2332 (adev)->pp_enabled ? \ 2291 ((adev)->pp_enabled ? \
2333 (adev)->powerplay.pp_funcs->print_current_performance_level((adev)->powerplay.pp_handle, (m)) : \ 2292 (adev)->powerplay.pp_funcs->print_current_performance_level((adev)->powerplay.pp_handle, (m)) : \
2334 (adev)->pm.funcs->debugfs_print_current_performance_level((adev), (m)) 2293 (adev)->pm.funcs->debugfs_print_current_performance_level((adev), (m)))
2335 2294
2336#define amdgpu_dpm_get_current_power_state(adev) \ 2295#define amdgpu_dpm_get_current_power_state(adev) \
2337 (adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle) 2296 (adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)
@@ -2360,6 +2319,8 @@ bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
2360int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, 2319int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
2361 uint32_t flags); 2320 uint32_t flags);
2362bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm); 2321bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
2322bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
2323 unsigned long end);
2363bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm); 2324bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
2364uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, 2325uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
2365 struct ttm_mem_reg *mem); 2326 struct ttm_mem_reg *mem);
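
The amdgpu_dpm_* macro hunk wraps each conditional expression in an outer pair of parentheses. That is not cosmetic: an unparenthesized ?: expansion can bind to operators at the call site. A self-contained example of the hazard (FLAG and the PICK_* names are made up for illustration):

#include <stdio.h>

#define FLAG 0
#define PICK_BAD  FLAG ? 10 : 20	/* no outer parentheses */
#define PICK_GOOD (FLAG ? 10 : 20)

int main(void)
{
	/* the '-' binds to FLAG before '?:' in the unparenthesized expansion,
	 * so this prints "bad: 20, good: -20" */
	printf("bad: %d, good: %d\n", -PICK_BAD, -PICK_GOOD);
	return 0;
}
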
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 0e1376317683..362bedc9e507 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -154,7 +154,7 @@ static const struct kfd2kgd_calls kfd2kgd = {
154 .get_fw_version = get_fw_version 154 .get_fw_version = get_fw_version
155}; 155};
156 156
157struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions() 157struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
158{ 158{
159 return (struct kfd2kgd_calls *)&kfd2kgd; 159 return (struct kfd2kgd_calls *)&kfd2kgd;
160} 160}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 79fa5c7de856..04b744d64b57 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -115,7 +115,7 @@ static const struct kfd2kgd_calls kfd2kgd = {
115 .get_fw_version = get_fw_version 115 .get_fw_version = get_fw_version
116}; 116};
117 117
118struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions() 118struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
119{ 119{
120 return (struct kfd2kgd_calls *)&kfd2kgd; 120 return (struct kfd2kgd_calls *)&kfd2kgd;
121} 121}
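
The two amdkfd signature tweaks come down to C declaration semantics: an empty parameter list leaves the argument types unspecified, while (void) declares a function that takes no arguments and lets the compiler reject callers that pass any (it also quiets -Wstrict-prototypes style warnings). A small runnable illustration:

#include <stdio.h>

static int answer(void)	/* '(void)': no arguments accepted */
{
	return 42;
}

int main(void)
{
	/* answer(1) is now a compile-time error; with 'int answer()' the
	 * compiler would have accepted the bogus call without complaint */
	printf("%d\n", answer());
	return 0;
}
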
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index a081dda9fa2f..7a4b101e10c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -795,6 +795,12 @@ static int amdgpu_cgs_query_system_info(void *cgs_device,
795 case CGS_SYSTEM_INFO_PCIE_MLW: 795 case CGS_SYSTEM_INFO_PCIE_MLW:
796 sys_info->value = adev->pm.pcie_mlw_mask; 796 sys_info->value = adev->pm.pcie_mlw_mask;
797 break; 797 break;
798 case CGS_SYSTEM_INFO_CG_FLAGS:
799 sys_info->value = adev->cg_flags;
800 break;
801 case CGS_SYSTEM_INFO_PG_FLAGS:
802 sys_info->value = adev->pg_flags;
803 break;
798 default: 804 default:
799 return -ENODEV; 805 return -ENODEV;
800 } 806 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 89c3dd62ba21..119cdc2c43e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -77,7 +77,7 @@ void amdgpu_connector_hotplug(struct drm_connector *connector)
77 } else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) { 77 } else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
78 /* Don't try to start link training before we 78 /* Don't try to start link training before we
79 * have the dpcd */ 79 * have the dpcd */
80 if (!amdgpu_atombios_dp_get_dpcd(amdgpu_connector)) 80 if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
81 return; 81 return;
82 82
83 /* set it to OFF so that drm_helper_connector_dpms() 83 /* set it to OFF so that drm_helper_connector_dpms()
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 6f89f8e034d0..b882e8175615 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -478,9 +478,9 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
478 struct amdgpu_fpriv *fpriv = parser->filp->driver_priv; 478 struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
479 unsigned i; 479 unsigned i;
480 480
481 amdgpu_vm_move_pt_bos_in_lru(parser->adev, &fpriv->vm);
482
483 if (!error) { 481 if (!error) {
482 amdgpu_vm_move_pt_bos_in_lru(parser->adev, &fpriv->vm);
483
484 /* Sort the buffer list from the smallest to largest buffer, 484 /* Sort the buffer list from the smallest to largest buffer,
485 * which affects the order of buffers in the LRU list. 485 * which affects the order of buffers in the LRU list.
486 * This assures that the smallest buffers are added first 486 * This assures that the smallest buffers are added first
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 65531463f88e..51bfc114584e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1795,15 +1795,20 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
1795 } 1795 }
1796 1796
1797 /* post card */ 1797 /* post card */
1798 amdgpu_atom_asic_init(adev->mode_info.atom_context); 1798 if (!amdgpu_card_posted(adev))
1799 amdgpu_atom_asic_init(adev->mode_info.atom_context);
1799 1800
1800 r = amdgpu_resume(adev); 1801 r = amdgpu_resume(adev);
1802 if (r)
1803 DRM_ERROR("amdgpu_resume failed (%d).\n", r);
1801 1804
1802 amdgpu_fence_driver_resume(adev); 1805 amdgpu_fence_driver_resume(adev);
1803 1806
1804 r = amdgpu_ib_ring_tests(adev); 1807 if (resume) {
1805 if (r) 1808 r = amdgpu_ib_ring_tests(adev);
1806 DRM_ERROR("ib ring test failed (%d).\n", r); 1809 if (r)
1810 DRM_ERROR("ib ring test failed (%d).\n", r);
1811 }
1807 1812
1808 r = amdgpu_late_init(adev); 1813 r = amdgpu_late_init(adev);
1809 if (r) 1814 if (r)
@@ -1933,80 +1938,97 @@ retry:
1933 return r; 1938 return r;
1934} 1939}
1935 1940
1941#define AMDGPU_DEFAULT_PCIE_GEN_MASK 0x30007 /* gen: chipset 1/2, asic 1/2/3 */
1942#define AMDGPU_DEFAULT_PCIE_MLW_MASK 0x2f0000 /* 1/2/4/8/16 lanes */
1943
1936void amdgpu_get_pcie_info(struct amdgpu_device *adev) 1944void amdgpu_get_pcie_info(struct amdgpu_device *adev)
1937{ 1945{
1938 u32 mask; 1946 u32 mask;
1939 int ret; 1947 int ret;
1940 1948
1941 if (pci_is_root_bus(adev->pdev->bus)) 1949 if (amdgpu_pcie_gen_cap)
1942 return; 1950 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
1943 1951
1944 if (amdgpu_pcie_gen2 == 0) 1952 if (amdgpu_pcie_lane_cap)
1945 return; 1953 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
1946 1954
1947 if (adev->flags & AMD_IS_APU) 1955 /* covers APUs as well */
1956 if (pci_is_root_bus(adev->pdev->bus)) {
1957 if (adev->pm.pcie_gen_mask == 0)
1958 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
1959 if (adev->pm.pcie_mlw_mask == 0)
1960 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
1948 return; 1961 return;
1962 }
1949 1963
1950 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask); 1964 if (adev->pm.pcie_gen_mask == 0) {
1951 if (!ret) { 1965 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
1952 adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | 1966 if (!ret) {
1953 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 | 1967 adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
1954 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3); 1968 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
1955 1969 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
1956 if (mask & DRM_PCIE_SPEED_25) 1970
1957 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1; 1971 if (mask & DRM_PCIE_SPEED_25)
1958 if (mask & DRM_PCIE_SPEED_50) 1972 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
1959 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2; 1973 if (mask & DRM_PCIE_SPEED_50)
1960 if (mask & DRM_PCIE_SPEED_80) 1974 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
1961 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3; 1975 if (mask & DRM_PCIE_SPEED_80)
1962 } 1976 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
1963 ret = drm_pcie_get_max_link_width(adev->ddev, &mask); 1977 } else {
1964 if (!ret) { 1978 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
1965 switch (mask) { 1979 }
1966 case 32: 1980 }
1967 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 | 1981 if (adev->pm.pcie_mlw_mask == 0) {
1968 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 | 1982 ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
1969 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | 1983 if (!ret) {
1970 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | 1984 switch (mask) {
1971 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | 1985 case 32:
1972 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 1986 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
1973 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 1987 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
1974 break; 1988 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
1975 case 16: 1989 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
1976 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 | 1990 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
1977 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | 1991 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
1978 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | 1992 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
1979 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | 1993 break;
1980 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 1994 case 16:
1981 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 1995 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
1982 break; 1996 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
1983 case 12: 1997 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
1984 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | 1998 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
1985 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | 1999 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
1986 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | 2000 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
1987 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 2001 break;
1988 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 2002 case 12:
1989 break; 2003 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
1990 case 8: 2004 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
1991 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | 2005 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
1992 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | 2006 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
1993 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 2007 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
1994 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 2008 break;
1995 break; 2009 case 8:
1996 case 4: 2010 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
1997 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | 2011 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
1998 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 2012 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
1999 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 2013 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2000 break; 2014 break;
2001 case 2: 2015 case 4:
2002 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 2016 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2003 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 2017 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2004 break; 2018 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2005 case 1: 2019 break;
2006 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1; 2020 case 2:
2007 break; 2021 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2008 default: 2022 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2009 break; 2023 break;
2024 case 1:
2025 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
2026 break;
2027 default:
2028 break;
2029 }
2030 } else {
2031 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
2010 } 2032 }
2011 } 2033 }
2012} 2034}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index acd066d0a805..1846d65b7285 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -72,8 +72,8 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
72 72
73 struct drm_crtc *crtc = &amdgpuCrtc->base; 73 struct drm_crtc *crtc = &amdgpuCrtc->base;
74 unsigned long flags; 74 unsigned long flags;
75 unsigned i; 75 unsigned i, repcnt = 4;
76 int vpos, hpos, stat, min_udelay; 76 int vpos, hpos, stat, min_udelay = 0;
77 struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id]; 77 struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
78 78
79 amdgpu_flip_wait_fence(adev, &work->excl); 79 amdgpu_flip_wait_fence(adev, &work->excl);
@@ -96,7 +96,7 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
96 * In practice this won't execute very often unless on very fast 96 * In practice this won't execute very often unless on very fast
97 * machines because the time window for this to happen is very small. 97 * machines because the time window for this to happen is very small.
98 */ 98 */
99 for (;;) { 99 while (amdgpuCrtc->enabled && --repcnt) {
100 /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank 100 /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
101 * start in hpos, and to the "fudged earlier" vblank start in 101 * start in hpos, and to the "fudged earlier" vblank start in
102 * vpos. 102 * vpos.
@@ -112,12 +112,24 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
112 break; 112 break;
113 113
114 /* Sleep at least until estimated real start of hw vblank */ 114 /* Sleep at least until estimated real start of hw vblank */
115 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
116 min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5); 115 min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
116 if (min_udelay > vblank->framedur_ns / 2000) {
117 /* Don't wait ridiculously long - something is wrong */
118 repcnt = 0;
119 break;
120 }
121 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
117 usleep_range(min_udelay, 2 * min_udelay); 122 usleep_range(min_udelay, 2 * min_udelay);
118 spin_lock_irqsave(&crtc->dev->event_lock, flags); 123 spin_lock_irqsave(&crtc->dev->event_lock, flags);
119 }; 124 };
120 125
126 if (!repcnt)
127 DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, "
128 "framedur %d, linedur %d, stat %d, vpos %d, "
129 "hpos %d\n", work->crtc_id, min_udelay,
130 vblank->framedur_ns / 1000,
131 vblank->linedur_ns / 1000, stat, vpos, hpos);
132
121 /* do the flip (mmio) */ 133 /* do the flip (mmio) */
122 adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base); 134 adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base);
123 /* set the flip status */ 135 /* set the flip status */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index b5dbbb573491..9ef1db87cf26 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -83,6 +83,8 @@ int amdgpu_sched_jobs = 32;
83int amdgpu_sched_hw_submission = 2; 83int amdgpu_sched_hw_submission = 2;
84int amdgpu_enable_semaphores = 0; 84int amdgpu_enable_semaphores = 0;
85int amdgpu_powerplay = -1; 85int amdgpu_powerplay = -1;
86unsigned amdgpu_pcie_gen_cap = 0;
87unsigned amdgpu_pcie_lane_cap = 0;
86 88
87MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes"); 89MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
88module_param_named(vramlimit, amdgpu_vram_limit, int, 0600); 90module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -170,6 +172,12 @@ MODULE_PARM_DESC(powerplay, "Powerplay component (1 = enable, 0 = disable, -1 =
170module_param_named(powerplay, amdgpu_powerplay, int, 0444); 172module_param_named(powerplay, amdgpu_powerplay, int, 0444);
171#endif 173#endif
172 174
175MODULE_PARM_DESC(pcie_gen_cap, "PCIE Gen Caps (0: autodetect (default))");
176module_param_named(pcie_gen_cap, amdgpu_pcie_gen_cap, uint, 0444);
177
178MODULE_PARM_DESC(pcie_lane_cap, "PCIE Lane Caps (0: autodetect (default))");
179module_param_named(pcie_lane_cap, amdgpu_pcie_lane_cap, uint, 0444);
180
173static struct pci_device_id pciidlist[] = { 181static struct pci_device_id pciidlist[] = {
174#ifdef CONFIG_DRM_AMDGPU_CIK 182#ifdef CONFIG_DRM_AMDGPU_CIK
175 /* Kaveri */ 183 /* Kaveri */
@@ -256,11 +264,11 @@ static struct pci_device_id pciidlist[] = {
256 {0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, 264 {0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
257#endif 265#endif
258 /* topaz */ 266 /* topaz */
259 {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, 267 {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
260 {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, 268 {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
261 {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, 269 {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
262 {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, 270 {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
263 {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, 271 {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
264 /* tonga */ 272 /* tonga */
265 {0x1002, 0x6920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA}, 273 {0x1002, 0x6920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
266 {0x1002, 0x6921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA}, 274 {0x1002, 0x6921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index cfb6caad2a73..919146780a15 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -333,6 +333,10 @@ int amdgpu_fbdev_init(struct amdgpu_device *adev)
333 if (!adev->mode_info.mode_config_initialized) 333 if (!adev->mode_info.mode_config_initialized)
334 return 0; 334 return 0;
335 335
336 /* don't init fbdev if there are no connectors */
337 if (list_empty(&adev->ddev->mode_config.connector_list))
338 return 0;
339
336 /* select 8 bpp console on low vram cards */ 340 /* select 8 bpp console on low vram cards */
337 if (adev->mc.real_vram_size <= (32*1024*1024)) 341 if (adev->mc.real_vram_size <= (32*1024*1024))
338 bpp_sel = 8; 342 bpp_sel = 8;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 7380f782cd14..d20c2a8929cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -596,7 +596,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
596 break; 596 break;
597 } 597 }
598 ttm_eu_backoff_reservation(&ticket, &list); 598 ttm_eu_backoff_reservation(&ticket, &list);
599 if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE)) 599 if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) &&
600 !amdgpu_vm_debug)
600 amdgpu_gem_va_update_vm(adev, bo_va, args->operation); 601 amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
601 602
602 drm_gem_object_unreference_unlocked(gobj); 603 drm_gem_object_unreference_unlocked(gobj);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index b1969f2b2038..d4e2780c0796 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -142,7 +142,8 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
142 142
143 list_for_each_entry(bo, &node->bos, mn_list) { 143 list_for_each_entry(bo, &node->bos, mn_list) {
144 144
145 if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound) 145 if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start,
146 end))
146 continue; 147 continue;
147 148
148 r = amdgpu_bo_reserve(bo, true); 149 r = amdgpu_bo_reserve(bo, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index c3ce103b6a33..b8fbbd7699e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -33,6 +33,7 @@
33#include <linux/slab.h> 33#include <linux/slab.h>
34#include <drm/drmP.h> 34#include <drm/drmP.h>
35#include <drm/amdgpu_drm.h> 35#include <drm/amdgpu_drm.h>
36#include <drm/drm_cache.h>
36#include "amdgpu.h" 37#include "amdgpu.h"
37#include "amdgpu_trace.h" 38#include "amdgpu_trace.h"
38 39
@@ -261,6 +262,13 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
261 AMDGPU_GEM_DOMAIN_OA); 262 AMDGPU_GEM_DOMAIN_OA);
262 263
263 bo->flags = flags; 264 bo->flags = flags;
265
266 /* For architectures that don't support WC memory,
267 * mask out the WC flag from the BO
268 */
269 if (!drm_arch_can_wc_memory())
270 bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
271
264 amdgpu_fill_placement_to_bo(bo, placement); 272 amdgpu_fill_placement_to_bo(bo, placement);
265 /* Kernel allocation are uninterruptible */ 273 /* Kernel allocation are uninterruptible */
266 r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type, 274 r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
@@ -399,7 +407,8 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
399 } 407 }
400 if (fpfn > bo->placements[i].fpfn) 408 if (fpfn > bo->placements[i].fpfn)
401 bo->placements[i].fpfn = fpfn; 409 bo->placements[i].fpfn = fpfn;
402 if (lpfn && lpfn < bo->placements[i].lpfn) 410 if (!bo->placements[i].lpfn ||
411 (lpfn && lpfn < bo->placements[i].lpfn))
403 bo->placements[i].lpfn = lpfn; 412 bo->placements[i].lpfn = lpfn;
404 bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; 413 bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
405 } 414 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 7d8d84eaea4a..95a4a25d8df9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -113,6 +113,10 @@ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
113 struct drm_device *ddev = dev_get_drvdata(dev); 113 struct drm_device *ddev = dev_get_drvdata(dev);
114 struct amdgpu_device *adev = ddev->dev_private; 114 struct amdgpu_device *adev = ddev->dev_private;
115 115
116 if ((adev->flags & AMD_IS_PX) &&
117 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
118 return snprintf(buf, PAGE_SIZE, "off\n");
119
116 if (adev->pp_enabled) { 120 if (adev->pp_enabled) {
117 enum amd_dpm_forced_level level; 121 enum amd_dpm_forced_level level;
118 122
@@ -140,6 +144,11 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
140 enum amdgpu_dpm_forced_level level; 144 enum amdgpu_dpm_forced_level level;
141 int ret = 0; 145 int ret = 0;
142 146
147 /* Can't force performance level when the card is off */
148 if ((adev->flags & AMD_IS_PX) &&
149 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
150 return -EINVAL;
151
143 if (strncmp("low", buf, strlen("low")) == 0) { 152 if (strncmp("low", buf, strlen("low")) == 0) {
144 level = AMDGPU_DPM_FORCED_LEVEL_LOW; 153 level = AMDGPU_DPM_FORCED_LEVEL_LOW;
145 } else if (strncmp("high", buf, strlen("high")) == 0) { 154 } else if (strncmp("high", buf, strlen("high")) == 0) {
@@ -157,6 +166,7 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
157 mutex_lock(&adev->pm.mutex); 166 mutex_lock(&adev->pm.mutex);
158 if (adev->pm.dpm.thermal_active) { 167 if (adev->pm.dpm.thermal_active) {
159 count = -EINVAL; 168 count = -EINVAL;
169 mutex_unlock(&adev->pm.mutex);
160 goto fail; 170 goto fail;
161 } 171 }
162 ret = amdgpu_dpm_force_performance_level(adev, level); 172 ret = amdgpu_dpm_force_performance_level(adev, level);
@@ -167,8 +177,6 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
167 mutex_unlock(&adev->pm.mutex); 177 mutex_unlock(&adev->pm.mutex);
168 } 178 }
169fail: 179fail:
170 mutex_unlock(&adev->pm.mutex);
171
172 return count; 180 return count;
173} 181}
174 182
@@ -182,8 +190,14 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
182 char *buf) 190 char *buf)
183{ 191{
184 struct amdgpu_device *adev = dev_get_drvdata(dev); 192 struct amdgpu_device *adev = dev_get_drvdata(dev);
193 struct drm_device *ddev = adev->ddev;
185 int temp; 194 int temp;
186 195
196 /* Can't get temperature when the card is off */
197 if ((adev->flags & AMD_IS_PX) &&
198 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
199 return -EINVAL;
200
187 if (!adev->pp_enabled && !adev->pm.funcs->get_temperature) 201 if (!adev->pp_enabled && !adev->pm.funcs->get_temperature)
188 temp = 0; 202 temp = 0;
189 else 203 else
@@ -634,11 +648,6 @@ force:
634 648
635 /* update display watermarks based on new power state */ 649 /* update display watermarks based on new power state */
636 amdgpu_display_bandwidth_update(adev); 650 amdgpu_display_bandwidth_update(adev);
637 /* update displays */
638 amdgpu_dpm_display_configuration_changed(adev);
639
640 adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
641 adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
642 651
643 /* wait for the rings to drain */ 652 /* wait for the rings to drain */
644 for (i = 0; i < AMDGPU_MAX_RINGS; i++) { 653 for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
@@ -655,6 +664,12 @@ force:
655 664
656 amdgpu_dpm_post_set_power_state(adev); 665 amdgpu_dpm_post_set_power_state(adev);
657 666
667 /* update displays */
668 amdgpu_dpm_display_configuration_changed(adev);
669
670 adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
671 adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
672
658 if (adev->pm.funcs->force_performance_level) { 673 if (adev->pm.funcs->force_performance_level) {
659 if (adev->pm.dpm.thermal_active) { 674 if (adev->pm.dpm.thermal_active) {
660 enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level; 675 enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level;
@@ -847,12 +862,16 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
847 struct drm_info_node *node = (struct drm_info_node *) m->private; 862 struct drm_info_node *node = (struct drm_info_node *) m->private;
848 struct drm_device *dev = node->minor->dev; 863 struct drm_device *dev = node->minor->dev;
849 struct amdgpu_device *adev = dev->dev_private; 864 struct amdgpu_device *adev = dev->dev_private;
865 struct drm_device *ddev = adev->ddev;
850 866
851 if (!adev->pm.dpm_enabled) { 867 if (!adev->pm.dpm_enabled) {
852 seq_printf(m, "dpm not enabled\n"); 868 seq_printf(m, "dpm not enabled\n");
853 return 0; 869 return 0;
854 } 870 }
855 if (adev->pp_enabled) { 871 if ((adev->flags & AMD_IS_PX) &&
872 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
873 seq_printf(m, "PX asic powered off\n");
874 } else if (adev->pp_enabled) {
856 amdgpu_dpm_debugfs_print_current_performance_level(adev, m); 875 amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
857 } else { 876 } else {
858 mutex_lock(&adev->pm.mutex); 877 mutex_lock(&adev->pm.mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index 5ee9a0690278..3cb6d6c413c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -99,13 +99,24 @@ static int amdgpu_pp_early_init(void *handle)
99 99
100#ifdef CONFIG_DRM_AMD_POWERPLAY 100#ifdef CONFIG_DRM_AMD_POWERPLAY
101 switch (adev->asic_type) { 101 switch (adev->asic_type) {
102 case CHIP_TONGA: 102 case CHIP_TONGA:
103 case CHIP_FIJI: 103 case CHIP_FIJI:
104 adev->pp_enabled = (amdgpu_powerplay > 0) ? true : false; 104 adev->pp_enabled = (amdgpu_powerplay == 0) ? false : true;
105 break; 105 break;
106 default: 106 case CHIP_CARRIZO:
107 adev->pp_enabled = (amdgpu_powerplay > 0) ? true : false; 107 case CHIP_STONEY:
108 break; 108 adev->pp_enabled = (amdgpu_powerplay > 0) ? true : false;
109 break;
110 /* These chips don't have powerplay implementations */
111 case CHIP_BONAIRE:
112 case CHIP_HAWAII:
113 case CHIP_KABINI:
114 case CHIP_MULLINS:
115 case CHIP_KAVERI:
116 case CHIP_TOPAZ:
117 default:
118 adev->pp_enabled = false;
119 break;
109 } 120 }
110#else 121#else
111 adev->pp_enabled = false; 122 adev->pp_enabled = false;
@@ -132,8 +143,10 @@ static int amdgpu_pp_late_init(void *handle)
132 adev->powerplay.pp_handle); 143 adev->powerplay.pp_handle);
133 144
134#ifdef CONFIG_DRM_AMD_POWERPLAY 145#ifdef CONFIG_DRM_AMD_POWERPLAY
135 if (adev->pp_enabled) 146 if (adev->pp_enabled) {
136 amdgpu_pm_sysfs_init(adev); 147 amdgpu_pm_sysfs_init(adev);
148 amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_COMPLETE_INIT, NULL, NULL);
149 }
137#endif 150#endif
138 return ret; 151 return ret;
139} 152}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 78e9b0f14661..d1f234dd2126 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -487,7 +487,7 @@ static int amdgpu_debugfs_ring_info(struct seq_file *m, void *data)
487 seq_printf(m, "rptr: 0x%08x [%5d]\n", 487 seq_printf(m, "rptr: 0x%08x [%5d]\n",
488 rptr, rptr); 488 rptr, rptr);
489 489
490 rptr_next = ~0; 490 rptr_next = le32_to_cpu(*ring->next_rptr_cpu_addr);
491 491
492 seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", 492 seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n",
493 ring->wptr, ring->wptr); 493 ring->wptr, ring->wptr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index 8b88edb0434b..ca72a2e487b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -354,12 +354,15 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
354 354
355 for (i = 0, count = 0; i < AMDGPU_MAX_RINGS; ++i) 355 for (i = 0, count = 0; i < AMDGPU_MAX_RINGS; ++i)
356 if (fences[i]) 356 if (fences[i])
357 fences[count++] = fences[i]; 357 fences[count++] = fence_get(fences[i]);
358 358
359 if (count) { 359 if (count) {
360 spin_unlock(&sa_manager->wq.lock); 360 spin_unlock(&sa_manager->wq.lock);
361 t = fence_wait_any_timeout(fences, count, false, 361 t = fence_wait_any_timeout(fences, count, false,
362 MAX_SCHEDULE_TIMEOUT); 362 MAX_SCHEDULE_TIMEOUT);
363 for (i = 0; i < count; ++i)
364 fence_put(fences[i]);
365
363 r = (t > 0) ? 0 : t; 366 r = (t > 0) ? 0 : t;
364 spin_lock(&sa_manager->wq.lock); 367 spin_lock(&sa_manager->wq.lock);
365 } else { 368 } else {
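The amdgpu_sa change above pins each fence with fence_get() before the spinlock is dropped for the blocking wait and releases the references afterwards; without that, a fence could be freed while fence_wait_any_timeout() still dereferences it. Here is a generic "grab references under the lock, wait outside it" sketch using pthreads and a toy refcount; the helper names are hypothetical, not the kernel API.

#include <pthread.h>
#include <stdlib.h>

struct obj {
        int refcount;              /* protected by lock in this toy model */
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static struct obj *obj_get(struct obj *o) { o->refcount++; return o; }
static void obj_put(struct obj *o) { if (--o->refcount == 0) free(o); }

static void wait_on(struct obj *o) { (void)o; /* stands in for the blocking wait */ }

void wait_for_objects(struct obj **table, int n)
{
        struct obj *held[16];
        int i, count = 0;

        pthread_mutex_lock(&lock);
        for (i = 0; i < n && count < 16; i++)
                if (table[i])
                        held[count++] = obj_get(table[i]); /* pin before unlocking */
        pthread_mutex_unlock(&lock);

        for (i = 0; i < count; i++)
                wait_on(held[i]);                          /* safe: objects pinned */

        pthread_mutex_lock(&lock);
        for (i = 0; i < count; i++)
                obj_put(held[i]);                          /* drop the pins        */
        pthread_mutex_unlock(&lock);
}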
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 8a1752ff3d8e..1cbb16e15307 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -712,7 +712,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
712 0, PAGE_SIZE, 712 0, PAGE_SIZE,
713 PCI_DMA_BIDIRECTIONAL); 713 PCI_DMA_BIDIRECTIONAL);
714 if (pci_dma_mapping_error(adev->pdev, gtt->ttm.dma_address[i])) { 714 if (pci_dma_mapping_error(adev->pdev, gtt->ttm.dma_address[i])) {
715 while (--i) { 715 while (i--) {
716 pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i], 716 pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i],
717 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 717 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
718 gtt->ttm.dma_address[i] = 0; 718 gtt->ttm.dma_address[i] = 0;
@@ -783,6 +783,25 @@ bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm)
783 return !!gtt->userptr; 783 return !!gtt->userptr;
784} 784}
785 785
786bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
787 unsigned long end)
788{
789 struct amdgpu_ttm_tt *gtt = (void *)ttm;
790 unsigned long size;
791
792 if (gtt == NULL)
793 return false;
794
795 if (gtt->ttm.ttm.state != tt_bound || !gtt->userptr)
796 return false;
797
798 size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
799 if (gtt->userptr > end || gtt->userptr + size <= start)
800 return false;
801
802 return true;
803}
804
786bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm) 805bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
787{ 806{
788 struct amdgpu_ttm_tt *gtt = (void *)ttm; 807 struct amdgpu_ttm_tt *gtt = (void *)ttm;
@@ -808,7 +827,7 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
808 flags |= AMDGPU_PTE_SNOOPED; 827 flags |= AMDGPU_PTE_SNOOPED;
809 } 828 }
810 829
811 if (adev->asic_type >= CHIP_TOPAZ) 830 if (adev->asic_type >= CHIP_TONGA)
812 flags |= AMDGPU_PTE_EXECUTABLE; 831 flags |= AMDGPU_PTE_EXECUTABLE;
813 832
814 flags |= AMDGPU_PTE_READABLE; 833 flags |= AMDGPU_PTE_READABLE;
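Two things happen in the amdgpu_ttm.c hunks above: the error-path unwind is changed from "while (--i)" to "while (i--)" so that index 0 is also unmapped, and a new helper reports whether a userptr mapping overlaps an invalidation range, treating the range end as inclusive. A minimal sketch of both, independent of the driver's types:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* True when [base, base + size) overlaps the invalidation range
 * [start, end] (end treated as inclusive, as in the hunk above). */
static bool range_affected(unsigned long base, unsigned long size,
                           unsigned long start, unsigned long end)
{
        return !(base > end || base + size <= start);
}

int main(void)
{
        assert(range_affected(0x1000, 0x1000, 0x1800, 0x1900));  /* inside   */
        assert(!range_affected(0x1000, 0x1000, 0x2000, 0x3000)); /* disjoint */

        /* Unwind pattern from the mapping-error path: "while (i--)" visits
         * indices i-1 .. 0, whereas the old "while (--i)" skipped index 0. */
        int i = 3;
        while (i--)
                printf("undo index %d\n", i);   /* prints 2, 1, 0 */
        return 0;
}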
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index aefc668e6b5d..9599f7559b3d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1282,7 +1282,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1282{ 1282{
1283 const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE, 1283 const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
1284 AMDGPU_VM_PTE_COUNT * 8); 1284 AMDGPU_VM_PTE_COUNT * 8);
1285 unsigned pd_size, pd_entries, pts_size; 1285 unsigned pd_size, pd_entries;
1286 int i, r; 1286 int i, r;
1287 1287
1288 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 1288 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
@@ -1300,8 +1300,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1300 pd_entries = amdgpu_vm_num_pdes(adev); 1300 pd_entries = amdgpu_vm_num_pdes(adev);
1301 1301
1302 /* allocate page table array */ 1302 /* allocate page table array */
1303 pts_size = pd_entries * sizeof(struct amdgpu_vm_pt); 1303 vm->page_tables = drm_calloc_large(pd_entries, sizeof(struct amdgpu_vm_pt));
1304 vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
1305 if (vm->page_tables == NULL) { 1304 if (vm->page_tables == NULL) {
1306 DRM_ERROR("Cannot allocate memory for page table array\n"); 1305 DRM_ERROR("Cannot allocate memory for page table array\n");
1307 return -ENOMEM; 1306 return -ENOMEM;
@@ -1361,7 +1360,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1361 1360
1362 for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) 1361 for (i = 0; i < amdgpu_vm_num_pdes(adev); i++)
1363 amdgpu_bo_unref(&vm->page_tables[i].entry.robj); 1362 amdgpu_bo_unref(&vm->page_tables[i].entry.robj);
1364 kfree(vm->page_tables); 1363 drm_free_large(vm->page_tables);
1365 1364
1366 amdgpu_bo_unref(&vm->page_directory); 1365 amdgpu_bo_unref(&vm->page_directory);
1367 fence_put(vm->page_directory_fence); 1366 fence_put(vm->page_directory_fence);
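The page-table array allocated in amdgpu_vm_init() has one entry per page-directory entry and can be large, so the patch above switches from kzalloc(), which needs physically contiguous memory, to drm_calloc_large(), which can fall back to a vmalloc-style allocation, pairing it with drm_free_large(). The sketch below only models the decision "use the cheap allocator for small requests, fall back for big ones" in plain userspace C; the threshold and helper name are illustrative, not the DRM ones.

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

#define CONTIG_LIMIT (128 * 1024)   /* illustrative threshold */

/* Toy model of a "large-aware" zeroing allocator. */
static void *calloc_large(size_t nmemb, size_t size)
{
        size_t bytes;

        if (size && nmemb > (size_t)-1 / size)
                return NULL;                /* overflow check, as calloc does */
        bytes = nmemb * size;

        if (bytes <= CONTIG_LIMIT)
                return calloc(nmemb, size); /* small: ordinary allocator      */

        /* stand-in for a vmalloc-style allocation of a big, zeroed array */
        void *p = malloc(bytes);
        if (p)
                memset(p, 0, bytes);
        return p;
}

int main(void)
{
        void *tables = calloc_large(1 << 16, 8);   /* 512 KiB of entries */
        printf("%s\n", tables ? "allocated" : "failed");
        free(tables);
        return 0;
}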
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
index 21aacc1f45c1..bf731e9f643e 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
@@ -265,15 +265,27 @@ static int amdgpu_atombios_dp_get_dp_link_config(struct drm_connector *connector
265 unsigned max_lane_num = drm_dp_max_lane_count(dpcd); 265 unsigned max_lane_num = drm_dp_max_lane_count(dpcd);
266 unsigned lane_num, i, max_pix_clock; 266 unsigned lane_num, i, max_pix_clock;
267 267
268 for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) { 268 if (amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector) ==
269 for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) { 269 ENCODER_OBJECT_ID_NUTMEG) {
270 max_pix_clock = (lane_num * link_rates[i] * 8) / bpp; 270 for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
271 max_pix_clock = (lane_num * 270000 * 8) / bpp;
271 if (max_pix_clock >= pix_clock) { 272 if (max_pix_clock >= pix_clock) {
272 *dp_lanes = lane_num; 273 *dp_lanes = lane_num;
273 *dp_rate = link_rates[i]; 274 *dp_rate = 270000;
274 return 0; 275 return 0;
275 } 276 }
276 } 277 }
278 } else {
279 for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
280 for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
281 max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
282 if (max_pix_clock >= pix_clock) {
283 *dp_lanes = lane_num;
284 *dp_rate = link_rates[i];
285 return 0;
286 }
287 }
288 }
277 } 289 }
278 290
279 return -EINVAL; 291 return -EINVAL;
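The rewritten DP helper above searches lane counts (1, 2, 4) and link rates for the first combination whose bandwidth covers the requested pixel clock, with a separate branch that pins DP-to-LVDS bridge encoders (NUTMEG) to the 270000 kHz rate. Below is a standalone sketch of the generic search using the same "lanes * rate * 8 / bpp >= pixel clock" test as the hunk; the rate table is illustrative.

#include <stdio.h>

static const unsigned link_rates[] = { 162000, 270000, 540000 };  /* kHz */
#define NUM_RATES (sizeof(link_rates) / sizeof(link_rates[0]))

/* Pick the first (lanes, rate) pair that can carry pix_clock at bpp bits
 * per pixel.  Returns 0 on success, -1 if even the maximum config fails. */
static int pick_dp_config(unsigned pix_clock, unsigned bpp,
                          unsigned max_lanes, unsigned max_rate,
                          unsigned *out_lanes, unsigned *out_rate)
{
        unsigned lanes, i;

        for (lanes = 1; lanes <= max_lanes; lanes <<= 1) {
                for (i = 0; i < NUM_RATES && link_rates[i] <= max_rate; i++) {
                        unsigned max_pix = (lanes * link_rates[i] * 8) / bpp;
                        if (max_pix >= pix_clock) {
                                *out_lanes = lanes;
                                *out_rate = link_rates[i];
                                return 0;
                        }
                }
        }
        return -1;
}

int main(void)
{
        unsigned lanes, rate;
        if (!pick_dp_config(148500, 24, 4, 540000, &lanes, &rate))
                printf("1080p60: %u lane(s) at %u kHz\n", lanes, rate);
        return 0;
}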
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 8b4731d4e10e..474ca02b0949 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -31,6 +31,7 @@
31#include "ci_dpm.h" 31#include "ci_dpm.h"
32#include "gfx_v7_0.h" 32#include "gfx_v7_0.h"
33#include "atom.h" 33#include "atom.h"
34#include "amd_pcie.h"
34#include <linux/seq_file.h> 35#include <linux/seq_file.h>
35 36
36#include "smu/smu_7_0_1_d.h" 37#include "smu/smu_7_0_1_d.h"
@@ -5835,18 +5836,16 @@ static int ci_dpm_init(struct amdgpu_device *adev)
5835 u8 frev, crev; 5836 u8 frev, crev;
5836 struct ci_power_info *pi; 5837 struct ci_power_info *pi;
5837 int ret; 5838 int ret;
5838 u32 mask;
5839 5839
5840 pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL); 5840 pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
5841 if (pi == NULL) 5841 if (pi == NULL)
5842 return -ENOMEM; 5842 return -ENOMEM;
5843 adev->pm.dpm.priv = pi; 5843 adev->pm.dpm.priv = pi;
5844 5844
5845 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask); 5845 pi->sys_pcie_mask =
5846 if (ret) 5846 (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >>
5847 pi->sys_pcie_mask = 0; 5847 CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;
5848 else 5848
5849 pi->sys_pcie_mask = mask;
5850 pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID; 5849 pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
5851 5850
5852 pi->pcie_gen_performance.max = AMDGPU_PCIE_GEN1; 5851 pi->pcie_gen_performance.max = AMDGPU_PCIE_GEN1;
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index fd9c9588ef46..155965ed14a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1762,6 +1762,9 @@ static void cik_program_aspm(struct amdgpu_device *adev)
1762 if (amdgpu_aspm == 0) 1762 if (amdgpu_aspm == 0)
1763 return; 1763 return;
1764 1764
1765 if (pci_is_root_bus(adev->pdev->bus))
1766 return;
1767
1765 /* XXX double check APUs */ 1768 /* XXX double check APUs */
1766 if (adev->flags & AMD_IS_APU) 1769 if (adev->flags & AMD_IS_APU)
1767 return; 1770 return;
@@ -2332,72 +2335,72 @@ static int cik_common_early_init(void *handle)
2332 switch (adev->asic_type) { 2335 switch (adev->asic_type) {
2333 case CHIP_BONAIRE: 2336 case CHIP_BONAIRE:
2334 adev->cg_flags = 2337 adev->cg_flags =
2335 AMDGPU_CG_SUPPORT_GFX_MGCG | 2338 AMD_CG_SUPPORT_GFX_MGCG |
2336 AMDGPU_CG_SUPPORT_GFX_MGLS | 2339 AMD_CG_SUPPORT_GFX_MGLS |
2337 /*AMDGPU_CG_SUPPORT_GFX_CGCG |*/ 2340 /*AMD_CG_SUPPORT_GFX_CGCG |*/
2338 AMDGPU_CG_SUPPORT_GFX_CGLS | 2341 AMD_CG_SUPPORT_GFX_CGLS |
2339 AMDGPU_CG_SUPPORT_GFX_CGTS | 2342 AMD_CG_SUPPORT_GFX_CGTS |
2340 AMDGPU_CG_SUPPORT_GFX_CGTS_LS | 2343 AMD_CG_SUPPORT_GFX_CGTS_LS |
2341 AMDGPU_CG_SUPPORT_GFX_CP_LS | 2344 AMD_CG_SUPPORT_GFX_CP_LS |
2342 AMDGPU_CG_SUPPORT_MC_LS | 2345 AMD_CG_SUPPORT_MC_LS |
2343 AMDGPU_CG_SUPPORT_MC_MGCG | 2346 AMD_CG_SUPPORT_MC_MGCG |
2344 AMDGPU_CG_SUPPORT_SDMA_MGCG | 2347 AMD_CG_SUPPORT_SDMA_MGCG |
2345 AMDGPU_CG_SUPPORT_SDMA_LS | 2348 AMD_CG_SUPPORT_SDMA_LS |
2346 AMDGPU_CG_SUPPORT_BIF_LS | 2349 AMD_CG_SUPPORT_BIF_LS |
2347 AMDGPU_CG_SUPPORT_VCE_MGCG | 2350 AMD_CG_SUPPORT_VCE_MGCG |
2348 AMDGPU_CG_SUPPORT_UVD_MGCG | 2351 AMD_CG_SUPPORT_UVD_MGCG |
2349 AMDGPU_CG_SUPPORT_HDP_LS | 2352 AMD_CG_SUPPORT_HDP_LS |
2350 AMDGPU_CG_SUPPORT_HDP_MGCG; 2353 AMD_CG_SUPPORT_HDP_MGCG;
2351 adev->pg_flags = 0; 2354 adev->pg_flags = 0;
2352 adev->external_rev_id = adev->rev_id + 0x14; 2355 adev->external_rev_id = adev->rev_id + 0x14;
2353 break; 2356 break;
2354 case CHIP_HAWAII: 2357 case CHIP_HAWAII:
2355 adev->cg_flags = 2358 adev->cg_flags =
2356 AMDGPU_CG_SUPPORT_GFX_MGCG | 2359 AMD_CG_SUPPORT_GFX_MGCG |
2357 AMDGPU_CG_SUPPORT_GFX_MGLS | 2360 AMD_CG_SUPPORT_GFX_MGLS |
2358 /*AMDGPU_CG_SUPPORT_GFX_CGCG |*/ 2361 /*AMD_CG_SUPPORT_GFX_CGCG |*/
2359 AMDGPU_CG_SUPPORT_GFX_CGLS | 2362 AMD_CG_SUPPORT_GFX_CGLS |
2360 AMDGPU_CG_SUPPORT_GFX_CGTS | 2363 AMD_CG_SUPPORT_GFX_CGTS |
2361 AMDGPU_CG_SUPPORT_GFX_CP_LS | 2364 AMD_CG_SUPPORT_GFX_CP_LS |
2362 AMDGPU_CG_SUPPORT_MC_LS | 2365 AMD_CG_SUPPORT_MC_LS |
2363 AMDGPU_CG_SUPPORT_MC_MGCG | 2366 AMD_CG_SUPPORT_MC_MGCG |
2364 AMDGPU_CG_SUPPORT_SDMA_MGCG | 2367 AMD_CG_SUPPORT_SDMA_MGCG |
2365 AMDGPU_CG_SUPPORT_SDMA_LS | 2368 AMD_CG_SUPPORT_SDMA_LS |
2366 AMDGPU_CG_SUPPORT_BIF_LS | 2369 AMD_CG_SUPPORT_BIF_LS |
2367 AMDGPU_CG_SUPPORT_VCE_MGCG | 2370 AMD_CG_SUPPORT_VCE_MGCG |
2368 AMDGPU_CG_SUPPORT_UVD_MGCG | 2371 AMD_CG_SUPPORT_UVD_MGCG |
2369 AMDGPU_CG_SUPPORT_HDP_LS | 2372 AMD_CG_SUPPORT_HDP_LS |
2370 AMDGPU_CG_SUPPORT_HDP_MGCG; 2373 AMD_CG_SUPPORT_HDP_MGCG;
2371 adev->pg_flags = 0; 2374 adev->pg_flags = 0;
2372 adev->external_rev_id = 0x28; 2375 adev->external_rev_id = 0x28;
2373 break; 2376 break;
2374 case CHIP_KAVERI: 2377 case CHIP_KAVERI:
2375 adev->cg_flags = 2378 adev->cg_flags =
2376 AMDGPU_CG_SUPPORT_GFX_MGCG | 2379 AMD_CG_SUPPORT_GFX_MGCG |
2377 AMDGPU_CG_SUPPORT_GFX_MGLS | 2380 AMD_CG_SUPPORT_GFX_MGLS |
2378 /*AMDGPU_CG_SUPPORT_GFX_CGCG |*/ 2381 /*AMD_CG_SUPPORT_GFX_CGCG |*/
2379 AMDGPU_CG_SUPPORT_GFX_CGLS | 2382 AMD_CG_SUPPORT_GFX_CGLS |
2380 AMDGPU_CG_SUPPORT_GFX_CGTS | 2383 AMD_CG_SUPPORT_GFX_CGTS |
2381 AMDGPU_CG_SUPPORT_GFX_CGTS_LS | 2384 AMD_CG_SUPPORT_GFX_CGTS_LS |
2382 AMDGPU_CG_SUPPORT_GFX_CP_LS | 2385 AMD_CG_SUPPORT_GFX_CP_LS |
2383 AMDGPU_CG_SUPPORT_SDMA_MGCG | 2386 AMD_CG_SUPPORT_SDMA_MGCG |
2384 AMDGPU_CG_SUPPORT_SDMA_LS | 2387 AMD_CG_SUPPORT_SDMA_LS |
2385 AMDGPU_CG_SUPPORT_BIF_LS | 2388 AMD_CG_SUPPORT_BIF_LS |
2386 AMDGPU_CG_SUPPORT_VCE_MGCG | 2389 AMD_CG_SUPPORT_VCE_MGCG |
2387 AMDGPU_CG_SUPPORT_UVD_MGCG | 2390 AMD_CG_SUPPORT_UVD_MGCG |
2388 AMDGPU_CG_SUPPORT_HDP_LS | 2391 AMD_CG_SUPPORT_HDP_LS |
2389 AMDGPU_CG_SUPPORT_HDP_MGCG; 2392 AMD_CG_SUPPORT_HDP_MGCG;
2390 adev->pg_flags = 2393 adev->pg_flags =
2391 /*AMDGPU_PG_SUPPORT_GFX_PG | 2394 /*AMD_PG_SUPPORT_GFX_PG |
2392 AMDGPU_PG_SUPPORT_GFX_SMG | 2395 AMD_PG_SUPPORT_GFX_SMG |
2393 AMDGPU_PG_SUPPORT_GFX_DMG |*/ 2396 AMD_PG_SUPPORT_GFX_DMG |*/
2394 AMDGPU_PG_SUPPORT_UVD | 2397 AMD_PG_SUPPORT_UVD |
2395 /*AMDGPU_PG_SUPPORT_VCE | 2398 /*AMD_PG_SUPPORT_VCE |
2396 AMDGPU_PG_SUPPORT_CP | 2399 AMD_PG_SUPPORT_CP |
2397 AMDGPU_PG_SUPPORT_GDS | 2400 AMD_PG_SUPPORT_GDS |
2398 AMDGPU_PG_SUPPORT_RLC_SMU_HS | 2401 AMD_PG_SUPPORT_RLC_SMU_HS |
2399 AMDGPU_PG_SUPPORT_ACP | 2402 AMD_PG_SUPPORT_ACP |
2400 AMDGPU_PG_SUPPORT_SAMU |*/ 2403 AMD_PG_SUPPORT_SAMU |*/
2401 0; 2404 0;
2402 if (adev->pdev->device == 0x1312 || 2405 if (adev->pdev->device == 0x1312 ||
2403 adev->pdev->device == 0x1316 || 2406 adev->pdev->device == 0x1316 ||
@@ -2409,29 +2412,29 @@ static int cik_common_early_init(void *handle)
2409 case CHIP_KABINI: 2412 case CHIP_KABINI:
2410 case CHIP_MULLINS: 2413 case CHIP_MULLINS:
2411 adev->cg_flags = 2414 adev->cg_flags =
2412 AMDGPU_CG_SUPPORT_GFX_MGCG | 2415 AMD_CG_SUPPORT_GFX_MGCG |
2413 AMDGPU_CG_SUPPORT_GFX_MGLS | 2416 AMD_CG_SUPPORT_GFX_MGLS |
2414 /*AMDGPU_CG_SUPPORT_GFX_CGCG |*/ 2417 /*AMD_CG_SUPPORT_GFX_CGCG |*/
2415 AMDGPU_CG_SUPPORT_GFX_CGLS | 2418 AMD_CG_SUPPORT_GFX_CGLS |
2416 AMDGPU_CG_SUPPORT_GFX_CGTS | 2419 AMD_CG_SUPPORT_GFX_CGTS |
2417 AMDGPU_CG_SUPPORT_GFX_CGTS_LS | 2420 AMD_CG_SUPPORT_GFX_CGTS_LS |
2418 AMDGPU_CG_SUPPORT_GFX_CP_LS | 2421 AMD_CG_SUPPORT_GFX_CP_LS |
2419 AMDGPU_CG_SUPPORT_SDMA_MGCG | 2422 AMD_CG_SUPPORT_SDMA_MGCG |
2420 AMDGPU_CG_SUPPORT_SDMA_LS | 2423 AMD_CG_SUPPORT_SDMA_LS |
2421 AMDGPU_CG_SUPPORT_BIF_LS | 2424 AMD_CG_SUPPORT_BIF_LS |
2422 AMDGPU_CG_SUPPORT_VCE_MGCG | 2425 AMD_CG_SUPPORT_VCE_MGCG |
2423 AMDGPU_CG_SUPPORT_UVD_MGCG | 2426 AMD_CG_SUPPORT_UVD_MGCG |
2424 AMDGPU_CG_SUPPORT_HDP_LS | 2427 AMD_CG_SUPPORT_HDP_LS |
2425 AMDGPU_CG_SUPPORT_HDP_MGCG; 2428 AMD_CG_SUPPORT_HDP_MGCG;
2426 adev->pg_flags = 2429 adev->pg_flags =
2427 /*AMDGPU_PG_SUPPORT_GFX_PG | 2430 /*AMD_PG_SUPPORT_GFX_PG |
2428 AMDGPU_PG_SUPPORT_GFX_SMG | */ 2431 AMD_PG_SUPPORT_GFX_SMG | */
2429 AMDGPU_PG_SUPPORT_UVD | 2432 AMD_PG_SUPPORT_UVD |
2430 /*AMDGPU_PG_SUPPORT_VCE | 2433 /*AMD_PG_SUPPORT_VCE |
2431 AMDGPU_PG_SUPPORT_CP | 2434 AMD_PG_SUPPORT_CP |
2432 AMDGPU_PG_SUPPORT_GDS | 2435 AMD_PG_SUPPORT_GDS |
2433 AMDGPU_PG_SUPPORT_RLC_SMU_HS | 2436 AMD_PG_SUPPORT_RLC_SMU_HS |
2434 AMDGPU_PG_SUPPORT_SAMU |*/ 2437 AMD_PG_SUPPORT_SAMU |*/
2435 0; 2438 0;
2436 if (adev->asic_type == CHIP_KABINI) { 2439 if (adev->asic_type == CHIP_KABINI) {
2437 if (adev->rev_id == 0) 2440 if (adev->rev_id == 0)
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 5f712ceddf08..c55ecf0ea845 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -885,7 +885,7 @@ static void cik_enable_sdma_mgcg(struct amdgpu_device *adev,
885{ 885{
886 u32 orig, data; 886 u32 orig, data;
887 887
888 if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_SDMA_MGCG)) { 888 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
889 WREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, 0x00000100); 889 WREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, 0x00000100);
890 WREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, 0x00000100); 890 WREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, 0x00000100);
891 } else { 891 } else {
@@ -906,7 +906,7 @@ static void cik_enable_sdma_mgls(struct amdgpu_device *adev,
906{ 906{
907 u32 orig, data; 907 u32 orig, data;
908 908
909 if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_SDMA_LS)) { 909 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
910 orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET); 910 orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
911 data |= 0x100; 911 data |= 0x100;
912 if (orig != data) 912 if (orig != data)
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index 4dd17f2dd905..e7ef2261ff4a 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -445,13 +445,13 @@ static int cz_dpm_init(struct amdgpu_device *adev)
445 pi->gfx_pg_threshold = 500; 445 pi->gfx_pg_threshold = 500;
446 pi->caps_fps = true; 446 pi->caps_fps = true;
447 /* uvd */ 447 /* uvd */
448 pi->caps_uvd_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_UVD) ? true : false; 448 pi->caps_uvd_pg = (adev->pg_flags & AMD_PG_SUPPORT_UVD) ? true : false;
449 pi->caps_uvd_dpm = true; 449 pi->caps_uvd_dpm = true;
450 /* vce */ 450 /* vce */
451 pi->caps_vce_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_VCE) ? true : false; 451 pi->caps_vce_pg = (adev->pg_flags & AMD_PG_SUPPORT_VCE) ? true : false;
452 pi->caps_vce_dpm = true; 452 pi->caps_vce_dpm = true;
453 /* acp */ 453 /* acp */
454 pi->caps_acp_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_ACP) ? true : false; 454 pi->caps_acp_pg = (adev->pg_flags & AMD_PG_SUPPORT_ACP) ? true : false;
455 pi->caps_acp_dpm = true; 455 pi->caps_acp_dpm = true;
456 456
457 pi->caps_stable_power_state = false; 457 pi->caps_stable_power_state = false;
@@ -2202,8 +2202,7 @@ static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
2202 AMD_PG_STATE_GATE); 2202 AMD_PG_STATE_GATE);
2203 2203
2204 cz_enable_vce_dpm(adev, false); 2204 cz_enable_vce_dpm(adev, false);
2205 /* TODO: to figure out why vce can't be poweroff. */ 2205 cz_send_msg_to_smc(adev, PPSMC_MSG_VCEPowerOFF);
2206 /* cz_send_msg_to_smc(adev, PPSMC_MSG_VCEPowerOFF); */
2207 pi->vce_power_gated = true; 2206 pi->vce_power_gated = true;
2208 } else { 2207 } else {
2209 cz_send_msg_to_smc(adev, PPSMC_MSG_VCEPowerON); 2208 cz_send_msg_to_smc(adev, PPSMC_MSG_VCEPowerON);
@@ -2226,10 +2225,8 @@ static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
2226 } 2225 }
2227 } else { /*pi->caps_vce_pg*/ 2226 } else { /*pi->caps_vce_pg*/
2228 cz_update_vce_dpm(adev); 2227 cz_update_vce_dpm(adev);
2229 cz_enable_vce_dpm(adev, true); 2228 cz_enable_vce_dpm(adev, !gate);
2230 } 2229 }
2231
2232 return;
2233} 2230}
2234 2231
2235const struct amd_ip_funcs cz_dpm_ip_funcs = { 2232const struct amd_ip_funcs cz_dpm_ip_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 72793f93e2fc..06602df707f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -3628,6 +3628,19 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
3628 unsigned vm_id, uint64_t pd_addr) 3628 unsigned vm_id, uint64_t pd_addr)
3629{ 3629{
3630 int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX); 3630 int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
3631 uint32_t seq = ring->fence_drv.sync_seq;
3632 uint64_t addr = ring->fence_drv.gpu_addr;
3633
3634 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
3635 amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
3636 WAIT_REG_MEM_FUNCTION(3) | /* equal */
3637 WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
3638 amdgpu_ring_write(ring, addr & 0xfffffffc);
3639 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
3640 amdgpu_ring_write(ring, seq);
3641 amdgpu_ring_write(ring, 0xffffffff);
3642 amdgpu_ring_write(ring, 4); /* poll interval */
3643
3631 if (usepfp) { 3644 if (usepfp) {
3632 /* sync CE with ME to prevent CE from fetching CEIB before the context switch is done */ 3645 /* sync CE with ME to prevent CE from fetching CEIB before the context switch is done */
3633 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); 3646 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
@@ -4109,7 +4122,7 @@ static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
4109 4122
4110 orig = data = RREG32(mmRLC_CGCG_CGLS_CTRL); 4123 orig = data = RREG32(mmRLC_CGCG_CGLS_CTRL);
4111 4124
4112 if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CGCG)) { 4125 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
4113 gfx_v7_0_enable_gui_idle_interrupt(adev, true); 4126 gfx_v7_0_enable_gui_idle_interrupt(adev, true);
4114 4127
4115 tmp = gfx_v7_0_halt_rlc(adev); 4128 tmp = gfx_v7_0_halt_rlc(adev);
@@ -4147,9 +4160,9 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
4147{ 4160{
4148 u32 data, orig, tmp = 0; 4161 u32 data, orig, tmp = 0;
4149 4162
4150 if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_MGCG)) { 4163 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
4151 if (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_MGLS) { 4164 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
4152 if (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CP_LS) { 4165 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
4153 orig = data = RREG32(mmCP_MEM_SLP_CNTL); 4166 orig = data = RREG32(mmCP_MEM_SLP_CNTL);
4154 data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK; 4167 data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
4155 if (orig != data) 4168 if (orig != data)
@@ -4176,14 +4189,14 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
4176 4189
4177 gfx_v7_0_update_rlc(adev, tmp); 4190 gfx_v7_0_update_rlc(adev, tmp);
4178 4191
4179 if (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CGTS) { 4192 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS) {
4180 orig = data = RREG32(mmCGTS_SM_CTRL_REG); 4193 orig = data = RREG32(mmCGTS_SM_CTRL_REG);
4181 data &= ~CGTS_SM_CTRL_REG__SM_MODE_MASK; 4194 data &= ~CGTS_SM_CTRL_REG__SM_MODE_MASK;
4182 data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT); 4195 data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT);
4183 data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK; 4196 data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK;
4184 data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK; 4197 data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK;
4185 if ((adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_MGLS) && 4198 if ((adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) &&
4186 (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CGTS_LS)) 4199 (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS_LS))
4187 data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK; 4200 data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
4188 data &= ~CGTS_SM_CTRL_REG__ON_MONITOR_ADD_MASK; 4201 data &= ~CGTS_SM_CTRL_REG__ON_MONITOR_ADD_MASK;
4189 data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK; 4202 data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK;
@@ -4249,7 +4262,7 @@ static void gfx_v7_0_enable_sclk_slowdown_on_pu(struct amdgpu_device *adev,
4249 u32 data, orig; 4262 u32 data, orig;
4250 4263
4251 orig = data = RREG32(mmRLC_PG_CNTL); 4264 orig = data = RREG32(mmRLC_PG_CNTL);
4252 if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_RLC_SMU_HS)) 4265 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS))
4253 data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK; 4266 data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
4254 else 4267 else
4255 data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK; 4268 data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
@@ -4263,7 +4276,7 @@ static void gfx_v7_0_enable_sclk_slowdown_on_pd(struct amdgpu_device *adev,
4263 u32 data, orig; 4276 u32 data, orig;
4264 4277
4265 orig = data = RREG32(mmRLC_PG_CNTL); 4278 orig = data = RREG32(mmRLC_PG_CNTL);
4266 if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_RLC_SMU_HS)) 4279 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS))
4267 data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK; 4280 data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
4268 else 4281 else
4269 data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK; 4282 data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
@@ -4276,7 +4289,7 @@ static void gfx_v7_0_enable_cp_pg(struct amdgpu_device *adev, bool enable)
4276 u32 data, orig; 4289 u32 data, orig;
4277 4290
4278 orig = data = RREG32(mmRLC_PG_CNTL); 4291 orig = data = RREG32(mmRLC_PG_CNTL);
4279 if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_CP)) 4292 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_CP))
4280 data &= ~0x8000; 4293 data &= ~0x8000;
4281 else 4294 else
4282 data |= 0x8000; 4295 data |= 0x8000;
@@ -4289,7 +4302,7 @@ static void gfx_v7_0_enable_gds_pg(struct amdgpu_device *adev, bool enable)
4289 u32 data, orig; 4302 u32 data, orig;
4290 4303
4291 orig = data = RREG32(mmRLC_PG_CNTL); 4304 orig = data = RREG32(mmRLC_PG_CNTL);
4292 if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GDS)) 4305 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GDS))
4293 data &= ~0x2000; 4306 data &= ~0x2000;
4294 else 4307 else
4295 data |= 0x2000; 4308 data |= 0x2000;
@@ -4370,7 +4383,7 @@ static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev,
4370{ 4383{
4371 u32 data, orig; 4384 u32 data, orig;
4372 4385
4373 if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG)) { 4386 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
4374 orig = data = RREG32(mmRLC_PG_CNTL); 4387 orig = data = RREG32(mmRLC_PG_CNTL);
4375 data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK; 4388 data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
4376 if (orig != data) 4389 if (orig != data)
@@ -4442,7 +4455,7 @@ static void gfx_v7_0_enable_gfx_static_mgpg(struct amdgpu_device *adev,
4442 u32 data, orig; 4455 u32 data, orig;
4443 4456
4444 orig = data = RREG32(mmRLC_PG_CNTL); 4457 orig = data = RREG32(mmRLC_PG_CNTL);
4445 if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_SMG)) 4458 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG))
4446 data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK; 4459 data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
4447 else 4460 else
4448 data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK; 4461 data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
@@ -4456,7 +4469,7 @@ static void gfx_v7_0_enable_gfx_dynamic_mgpg(struct amdgpu_device *adev,
4456 u32 data, orig; 4469 u32 data, orig;
4457 4470
4458 orig = data = RREG32(mmRLC_PG_CNTL); 4471 orig = data = RREG32(mmRLC_PG_CNTL);
4459 if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_DMG)) 4472 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG))
4460 data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK; 4473 data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
4461 else 4474 else
4462 data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK; 4475 data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
@@ -4623,15 +4636,15 @@ static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev,
4623 4636
4624static void gfx_v7_0_init_pg(struct amdgpu_device *adev) 4637static void gfx_v7_0_init_pg(struct amdgpu_device *adev)
4625{ 4638{
4626 if (adev->pg_flags & (AMDGPU_PG_SUPPORT_GFX_PG | 4639 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
4627 AMDGPU_PG_SUPPORT_GFX_SMG | 4640 AMD_PG_SUPPORT_GFX_SMG |
4628 AMDGPU_PG_SUPPORT_GFX_DMG | 4641 AMD_PG_SUPPORT_GFX_DMG |
4629 AMDGPU_PG_SUPPORT_CP | 4642 AMD_PG_SUPPORT_CP |
4630 AMDGPU_PG_SUPPORT_GDS | 4643 AMD_PG_SUPPORT_GDS |
4631 AMDGPU_PG_SUPPORT_RLC_SMU_HS)) { 4644 AMD_PG_SUPPORT_RLC_SMU_HS)) {
4632 gfx_v7_0_enable_sclk_slowdown_on_pu(adev, true); 4645 gfx_v7_0_enable_sclk_slowdown_on_pu(adev, true);
4633 gfx_v7_0_enable_sclk_slowdown_on_pd(adev, true); 4646 gfx_v7_0_enable_sclk_slowdown_on_pd(adev, true);
4634 if (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG) { 4647 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
4635 gfx_v7_0_init_gfx_cgpg(adev); 4648 gfx_v7_0_init_gfx_cgpg(adev);
4636 gfx_v7_0_enable_cp_pg(adev, true); 4649 gfx_v7_0_enable_cp_pg(adev, true);
4637 gfx_v7_0_enable_gds_pg(adev, true); 4650 gfx_v7_0_enable_gds_pg(adev, true);
@@ -4643,14 +4656,14 @@ static void gfx_v7_0_init_pg(struct amdgpu_device *adev)
4643 4656
4644static void gfx_v7_0_fini_pg(struct amdgpu_device *adev) 4657static void gfx_v7_0_fini_pg(struct amdgpu_device *adev)
4645{ 4658{
4646 if (adev->pg_flags & (AMDGPU_PG_SUPPORT_GFX_PG | 4659 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
4647 AMDGPU_PG_SUPPORT_GFX_SMG | 4660 AMD_PG_SUPPORT_GFX_SMG |
4648 AMDGPU_PG_SUPPORT_GFX_DMG | 4661 AMD_PG_SUPPORT_GFX_DMG |
4649 AMDGPU_PG_SUPPORT_CP | 4662 AMD_PG_SUPPORT_CP |
4650 AMDGPU_PG_SUPPORT_GDS | 4663 AMD_PG_SUPPORT_GDS |
4651 AMDGPU_PG_SUPPORT_RLC_SMU_HS)) { 4664 AMD_PG_SUPPORT_RLC_SMU_HS)) {
4652 gfx_v7_0_update_gfx_pg(adev, false); 4665 gfx_v7_0_update_gfx_pg(adev, false);
4653 if (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG) { 4666 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
4654 gfx_v7_0_enable_cp_pg(adev, false); 4667 gfx_v7_0_enable_cp_pg(adev, false);
4655 gfx_v7_0_enable_gds_pg(adev, false); 4668 gfx_v7_0_enable_gds_pg(adev, false);
4656 } 4669 }
@@ -4738,6 +4751,22 @@ static int gfx_v7_0_early_init(void *handle)
4738 return 0; 4751 return 0;
4739} 4752}
4740 4753
4754static int gfx_v7_0_late_init(void *handle)
4755{
4756 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4757 int r;
4758
4759 r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
4760 if (r)
4761 return r;
4762
4763 r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
4764 if (r)
4765 return r;
4766
4767 return 0;
4768}
4769
4741static int gfx_v7_0_sw_init(void *handle) 4770static int gfx_v7_0_sw_init(void *handle)
4742{ 4771{
4743 struct amdgpu_ring *ring; 4772 struct amdgpu_ring *ring;
@@ -4890,6 +4919,8 @@ static int gfx_v7_0_hw_fini(void *handle)
4890{ 4919{
4891 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4920 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4892 4921
4922 amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
4923 amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
4893 gfx_v7_0_cp_enable(adev, false); 4924 gfx_v7_0_cp_enable(adev, false);
4894 gfx_v7_0_rlc_stop(adev); 4925 gfx_v7_0_rlc_stop(adev);
4895 gfx_v7_0_fini_pg(adev); 4926 gfx_v7_0_fini_pg(adev);
@@ -5509,14 +5540,14 @@ static int gfx_v7_0_set_powergating_state(void *handle,
5509 if (state == AMD_PG_STATE_GATE) 5540 if (state == AMD_PG_STATE_GATE)
5510 gate = true; 5541 gate = true;
5511 5542
5512 if (adev->pg_flags & (AMDGPU_PG_SUPPORT_GFX_PG | 5543 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
5513 AMDGPU_PG_SUPPORT_GFX_SMG | 5544 AMD_PG_SUPPORT_GFX_SMG |
5514 AMDGPU_PG_SUPPORT_GFX_DMG | 5545 AMD_PG_SUPPORT_GFX_DMG |
5515 AMDGPU_PG_SUPPORT_CP | 5546 AMD_PG_SUPPORT_CP |
5516 AMDGPU_PG_SUPPORT_GDS | 5547 AMD_PG_SUPPORT_GDS |
5517 AMDGPU_PG_SUPPORT_RLC_SMU_HS)) { 5548 AMD_PG_SUPPORT_RLC_SMU_HS)) {
5518 gfx_v7_0_update_gfx_pg(adev, gate); 5549 gfx_v7_0_update_gfx_pg(adev, gate);
5519 if (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG) { 5550 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
5520 gfx_v7_0_enable_cp_pg(adev, gate); 5551 gfx_v7_0_enable_cp_pg(adev, gate);
5521 gfx_v7_0_enable_gds_pg(adev, gate); 5552 gfx_v7_0_enable_gds_pg(adev, gate);
5522 } 5553 }
@@ -5527,7 +5558,7 @@ static int gfx_v7_0_set_powergating_state(void *handle,
5527 5558
5528const struct amd_ip_funcs gfx_v7_0_ip_funcs = { 5559const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
5529 .early_init = gfx_v7_0_early_init, 5560 .early_init = gfx_v7_0_early_init,
5530 .late_init = NULL, 5561 .late_init = gfx_v7_0_late_init,
5531 .sw_init = gfx_v7_0_sw_init, 5562 .sw_init = gfx_v7_0_sw_init,
5532 .sw_fini = gfx_v7_0_sw_fini, 5563 .sw_fini = gfx_v7_0_sw_fini,
5533 .hw_init = gfx_v7_0_hw_init, 5564 .hw_init = gfx_v7_0_hw_init,
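gfx_v7_0 now enables the privileged-register and privileged-instruction interrupt sources in a new late_init hook and disables them again in hw_fini, mirroring the gfx_v8_0 hunks further down; interrupt sources in this driver are reference counted, so every get needs a balanced put. The toy refcount below illustrates the pairing; the helpers are hypothetical, not the driver's amdgpu_irq_* API.

#include <stdio.h>

struct irq_src {
        const char *name;
        int enable_count;       /* source is unmasked while > 0 */
};

static int irq_get(struct irq_src *src)
{
        if (src->enable_count++ == 0)
                printf("unmask %s\n", src->name);   /* first user enables */
        return 0;
}

static void irq_put(struct irq_src *src)
{
        if (--src->enable_count == 0)
                printf("mask %s\n", src->name);     /* last user disables */
}

int main(void)
{
        struct irq_src priv_reg = { "priv_reg", 0 };

        irq_get(&priv_reg);     /* late_init: turn the fault interrupt on  */
        irq_put(&priv_reg);     /* hw_fini:   balanced put turns it off    */
        return 0;
}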
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 13235d84e5a6..7086ac17abee 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -111,7 +111,6 @@ MODULE_FIRMWARE("amdgpu/topaz_ce.bin");
111MODULE_FIRMWARE("amdgpu/topaz_pfp.bin"); 111MODULE_FIRMWARE("amdgpu/topaz_pfp.bin");
112MODULE_FIRMWARE("amdgpu/topaz_me.bin"); 112MODULE_FIRMWARE("amdgpu/topaz_me.bin");
113MODULE_FIRMWARE("amdgpu/topaz_mec.bin"); 113MODULE_FIRMWARE("amdgpu/topaz_mec.bin");
114MODULE_FIRMWARE("amdgpu/topaz_mec2.bin");
115MODULE_FIRMWARE("amdgpu/topaz_rlc.bin"); 114MODULE_FIRMWARE("amdgpu/topaz_rlc.bin");
116 115
117MODULE_FIRMWARE("amdgpu/fiji_ce.bin"); 116MODULE_FIRMWARE("amdgpu/fiji_ce.bin");
@@ -828,7 +827,8 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
828 adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); 827 adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
829 adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); 828 adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
830 829
831 if (adev->asic_type != CHIP_STONEY) { 830 if ((adev->asic_type != CHIP_STONEY) &&
831 (adev->asic_type != CHIP_TOPAZ)) {
832 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name); 832 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
833 err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev); 833 err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
834 if (!err) { 834 if (!err) {
@@ -3851,10 +3851,16 @@ static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
3851 if (r) 3851 if (r)
3852 return -EINVAL; 3852 return -EINVAL;
3853 3853
3854 r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, 3854 if (adev->asic_type == CHIP_TOPAZ) {
3855 AMDGPU_UCODE_ID_CP_MEC1); 3855 r = gfx_v8_0_cp_compute_load_microcode(adev);
3856 if (r) 3856 if (r)
3857 return -EINVAL; 3857 return r;
3858 } else {
3859 r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
3860 AMDGPU_UCODE_ID_CP_MEC1);
3861 if (r)
3862 return -EINVAL;
3863 }
3858 } 3864 }
3859 } 3865 }
3860 3866
@@ -3901,6 +3907,8 @@ static int gfx_v8_0_hw_fini(void *handle)
3901{ 3907{
3902 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3908 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3903 3909
3910 amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
3911 amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
3904 gfx_v8_0_cp_enable(adev, false); 3912 gfx_v8_0_cp_enable(adev, false);
3905 gfx_v8_0_rlc_stop(adev); 3913 gfx_v8_0_rlc_stop(adev);
3906 gfx_v8_0_cp_compute_fini(adev); 3914 gfx_v8_0_cp_compute_fini(adev);
@@ -4186,7 +4194,18 @@ static int gfx_v8_0_soft_reset(void *handle)
4186 gfx_v8_0_cp_gfx_enable(adev, false); 4194 gfx_v8_0_cp_gfx_enable(adev, false);
4187 4195
4188 /* Disable MEC parsing/prefetching */ 4196 /* Disable MEC parsing/prefetching */
4189 /* XXX todo */ 4197 gfx_v8_0_cp_compute_enable(adev, false);
4198
4199 if (grbm_soft_reset || srbm_soft_reset) {
4200 tmp = RREG32(mmGMCON_DEBUG);
4201 tmp = REG_SET_FIELD(tmp,
4202 GMCON_DEBUG, GFX_STALL, 1);
4203 tmp = REG_SET_FIELD(tmp,
4204 GMCON_DEBUG, GFX_CLEAR, 1);
4205 WREG32(mmGMCON_DEBUG, tmp);
4206
4207 udelay(50);
4208 }
4190 4209
4191 if (grbm_soft_reset) { 4210 if (grbm_soft_reset) {
4192 tmp = RREG32(mmGRBM_SOFT_RESET); 4211 tmp = RREG32(mmGRBM_SOFT_RESET);
@@ -4215,6 +4234,16 @@ static int gfx_v8_0_soft_reset(void *handle)
4215 WREG32(mmSRBM_SOFT_RESET, tmp); 4234 WREG32(mmSRBM_SOFT_RESET, tmp);
4216 tmp = RREG32(mmSRBM_SOFT_RESET); 4235 tmp = RREG32(mmSRBM_SOFT_RESET);
4217 } 4236 }
4237
4238 if (grbm_soft_reset || srbm_soft_reset) {
4239 tmp = RREG32(mmGMCON_DEBUG);
4240 tmp = REG_SET_FIELD(tmp,
4241 GMCON_DEBUG, GFX_STALL, 0);
4242 tmp = REG_SET_FIELD(tmp,
4243 GMCON_DEBUG, GFX_CLEAR, 0);
4244 WREG32(mmGMCON_DEBUG, tmp);
4245 }
4246
4218 /* Wait a little for things to settle down */ 4247 /* Wait a little for things to settle down */
4219 udelay(50); 4248 udelay(50);
4220 gfx_v8_0_print_status((void *)adev); 4249 gfx_v8_0_print_status((void *)adev);
@@ -4308,6 +4337,14 @@ static int gfx_v8_0_late_init(void *handle)
4308 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4337 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4309 int r; 4338 int r;
4310 4339
4340 r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
4341 if (r)
4342 return r;
4343
4344 r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
4345 if (r)
4346 return r;
4347
4311 /* requires IBs so do in late init after IB pool is initialized */ 4348 /* requires IBs so do in late init after IB pool is initialized */
4312 r = gfx_v8_0_do_edc_gpr_workarounds(adev); 4349 r = gfx_v8_0_do_edc_gpr_workarounds(adev);
4313 if (r) 4350 if (r)
@@ -4772,7 +4809,8 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
4772 4809
4773 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); 4810 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
4774 amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */ 4811 amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
4775 WAIT_REG_MEM_FUNCTION(3))); /* equal */ 4812 WAIT_REG_MEM_FUNCTION(3) | /* equal */
4813 WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
4776 amdgpu_ring_write(ring, addr & 0xfffffffc); 4814 amdgpu_ring_write(ring, addr & 0xfffffffc);
4777 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff); 4815 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
4778 amdgpu_ring_write(ring, seq); 4816 amdgpu_ring_write(ring, seq);
@@ -4958,7 +4996,7 @@ static int gfx_v8_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
4958 case AMDGPU_IRQ_STATE_ENABLE: 4996 case AMDGPU_IRQ_STATE_ENABLE:
4959 cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0); 4997 cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4960 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 4998 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
4961 PRIV_REG_INT_ENABLE, 0); 4999 PRIV_REG_INT_ENABLE, 1);
4962 WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl); 5000 WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4963 break; 5001 break;
4964 default: 5002 default:
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 3f956065d069..b8060795b27b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -42,9 +42,39 @@ static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
42 42
43MODULE_FIRMWARE("radeon/bonaire_mc.bin"); 43MODULE_FIRMWARE("radeon/bonaire_mc.bin");
44MODULE_FIRMWARE("radeon/hawaii_mc.bin"); 44MODULE_FIRMWARE("radeon/hawaii_mc.bin");
45MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
46
47static const u32 golden_settings_iceland_a11[] =
48{
49 mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
50 mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
51 mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
52 mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
53};
54
55static const u32 iceland_mgcg_cgcg_init[] =
56{
57 mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
58};
59
60static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev)
61{
62 switch (adev->asic_type) {
63 case CHIP_TOPAZ:
64 amdgpu_program_register_sequence(adev,
65 iceland_mgcg_cgcg_init,
66 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
67 amdgpu_program_register_sequence(adev,
68 golden_settings_iceland_a11,
69 (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
70 break;
71 default:
72 break;
73 }
74}
45 75
46/** 76/**
47 * gmc8_mc_wait_for_idle - wait for MC idle callback. 77 * gmc7_mc_wait_for_idle - wait for MC idle callback.
48 * 78 *
49 * @adev: amdgpu_device pointer 79 * @adev: amdgpu_device pointer
50 * 80 *
@@ -132,13 +162,20 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
132 case CHIP_HAWAII: 162 case CHIP_HAWAII:
133 chip_name = "hawaii"; 163 chip_name = "hawaii";
134 break; 164 break;
165 case CHIP_TOPAZ:
166 chip_name = "topaz";
167 break;
135 case CHIP_KAVERI: 168 case CHIP_KAVERI:
136 case CHIP_KABINI: 169 case CHIP_KABINI:
137 return 0; 170 return 0;
138 default: BUG(); 171 default: BUG();
139 } 172 }
140 173
141 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); 174 if (adev->asic_type == CHIP_TOPAZ)
175 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
176 else
177 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
178
142 err = request_firmware(&adev->mc.fw, fw_name, adev->dev); 179 err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
143 if (err) 180 if (err)
144 goto out; 181 goto out;
@@ -755,7 +792,7 @@ static void gmc_v7_0_enable_mc_ls(struct amdgpu_device *adev,
755 792
756 for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) { 793 for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
757 orig = data = RREG32(mc_cg_registers[i]); 794 orig = data = RREG32(mc_cg_registers[i]);
758 if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_LS)) 795 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
759 data |= mc_cg_ls_en[i]; 796 data |= mc_cg_ls_en[i];
760 else 797 else
761 data &= ~mc_cg_ls_en[i]; 798 data &= ~mc_cg_ls_en[i];
@@ -772,7 +809,7 @@ static void gmc_v7_0_enable_mc_mgcg(struct amdgpu_device *adev,
772 809
773 for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) { 810 for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
774 orig = data = RREG32(mc_cg_registers[i]); 811 orig = data = RREG32(mc_cg_registers[i]);
775 if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_MGCG)) 812 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
776 data |= mc_cg_en[i]; 813 data |= mc_cg_en[i];
777 else 814 else
778 data &= ~mc_cg_en[i]; 815 data &= ~mc_cg_en[i];
@@ -788,7 +825,7 @@ static void gmc_v7_0_enable_bif_mgls(struct amdgpu_device *adev,
788 825
789 orig = data = RREG32_PCIE(ixPCIE_CNTL2); 826 orig = data = RREG32_PCIE(ixPCIE_CNTL2);
790 827
791 if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_BIF_LS)) { 828 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
792 data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1); 829 data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
793 data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1); 830 data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
794 data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1); 831 data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
@@ -811,7 +848,7 @@ static void gmc_v7_0_enable_hdp_mgcg(struct amdgpu_device *adev,
811 848
812 orig = data = RREG32(mmHDP_HOST_PATH_CNTL); 849 orig = data = RREG32(mmHDP_HOST_PATH_CNTL);
813 850
814 if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG)) 851 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
815 data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0); 852 data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
816 else 853 else
817 data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1); 854 data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);
@@ -827,7 +864,7 @@ static void gmc_v7_0_enable_hdp_ls(struct amdgpu_device *adev,
827 864
828 orig = data = RREG32(mmHDP_MEM_POWER_LS); 865 orig = data = RREG32(mmHDP_MEM_POWER_LS);
829 866
830 if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS)) 867 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
831 data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1); 868 data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
832 else 869 else
833 data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0); 870 data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);
@@ -984,6 +1021,8 @@ static int gmc_v7_0_hw_init(void *handle)
984 int r; 1021 int r;
985 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1022 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
986 1023
1024 gmc_v7_0_init_golden_registers(adev);
1025
987 gmc_v7_0_mc_program(adev); 1026 gmc_v7_0_mc_program(adev);
988 1027
989 if (!(adev->flags & AMD_IS_APU)) { 1028 if (!(adev->flags & AMD_IS_APU)) {
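Moving Topaz/Iceland support from gmc_v8_0 to gmc_v7_0 brings along "golden register" tables: flat arrays of (register, mask, value) triples applied once at hardware init. The sketch below applies such a table with read-modify-write semantics; the exact masking convention of amdgpu_program_register_sequence is an assumption here (bits in the mask are replaced by the new value), and the register access is stubbed out so the example is self-contained.

#include <stdint.h>
#include <stdio.h>

/* Stub register file so the example runs on its own. */
static uint32_t regs[0x100];
static uint32_t rreg32(uint32_t r)             { return regs[r]; }
static void     wreg32(uint32_t r, uint32_t v) { regs[r] = v; }

/* Table layout used by the golden-register arrays above:
 * { register, mask, value }, repeated. */
static const uint32_t golden_settings[] = {
        0x10, 0x0fffffff, 0x0fffffff,
        0x20, 0xffffffff, 0x00000104,
};

static void program_register_sequence(const uint32_t *tbl, unsigned len)
{
        unsigned i;

        for (i = 0; i + 2 < len; i += 3) {
                uint32_t reg = tbl[i], mask = tbl[i + 1], val = tbl[i + 2];
                uint32_t tmp = rreg32(reg);

                tmp &= ~mask;                   /* clear the masked bits   */
                tmp |= val & mask;              /* set the requested value */
                wreg32(reg, tmp);
        }
}

int main(void)
{
        program_register_sequence(golden_settings,
                                  sizeof(golden_settings) / sizeof(uint32_t));
        printf("reg 0x20 = 0x%08x\n", rreg32(0x20));
        return 0;
}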
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index c0c9a0101eb4..3efd45546241 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -42,9 +42,7 @@
42static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev); 42static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
43static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev); 43static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
44 44
45MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
46MODULE_FIRMWARE("amdgpu/tonga_mc.bin"); 45MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
47MODULE_FIRMWARE("amdgpu/fiji_mc.bin");
48 46
49static const u32 golden_settings_tonga_a11[] = 47static const u32 golden_settings_tonga_a11[] =
50{ 48{
@@ -75,19 +73,6 @@ static const u32 fiji_mgcg_cgcg_init[] =
75 mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 73 mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
76}; 74};
77 75
78static const u32 golden_settings_iceland_a11[] =
79{
80 mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
81 mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
82 mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
83 mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
84};
85
86static const u32 iceland_mgcg_cgcg_init[] =
87{
88 mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
89};
90
91static const u32 cz_mgcg_cgcg_init[] = 76static const u32 cz_mgcg_cgcg_init[] =
92{ 77{
93 mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 78 mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
@@ -102,14 +87,6 @@ static const u32 stoney_mgcg_cgcg_init[] =
102static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev) 87static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
103{ 88{
104 switch (adev->asic_type) { 89 switch (adev->asic_type) {
105 case CHIP_TOPAZ:
106 amdgpu_program_register_sequence(adev,
107 iceland_mgcg_cgcg_init,
108 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
109 amdgpu_program_register_sequence(adev,
110 golden_settings_iceland_a11,
111 (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
112 break;
113 case CHIP_FIJI: 90 case CHIP_FIJI:
114 amdgpu_program_register_sequence(adev, 91 amdgpu_program_register_sequence(adev,
115 fiji_mgcg_cgcg_init, 92 fiji_mgcg_cgcg_init,
@@ -229,15 +206,10 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
229 DRM_DEBUG("\n"); 206 DRM_DEBUG("\n");
230 207
231 switch (adev->asic_type) { 208 switch (adev->asic_type) {
232 case CHIP_TOPAZ:
233 chip_name = "topaz";
234 break;
235 case CHIP_TONGA: 209 case CHIP_TONGA:
236 chip_name = "tonga"; 210 chip_name = "tonga";
237 break; 211 break;
238 case CHIP_FIJI: 212 case CHIP_FIJI:
239 chip_name = "fiji";
240 break;
241 case CHIP_CARRIZO: 213 case CHIP_CARRIZO:
242 case CHIP_STONEY: 214 case CHIP_STONEY:
243 return 0; 215 return 0;
@@ -1007,7 +979,7 @@ static int gmc_v8_0_hw_init(void *handle)
1007 979
1008 gmc_v8_0_mc_program(adev); 980 gmc_v8_0_mc_program(adev);
1009 981
1010 if (!(adev->flags & AMD_IS_APU)) { 982 if (adev->asic_type == CHIP_TONGA) {
1011 r = gmc_v8_0_mc_load_microcode(adev); 983 r = gmc_v8_0_mc_load_microcode(adev);
1012 if (r) { 984 if (r) {
1013 DRM_ERROR("Failed to load MC firmware!\n"); 985 DRM_ERROR("Failed to load MC firmware!\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
index 966d4b2ed9da..090486c18249 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
@@ -432,7 +432,7 @@ static uint32_t iceland_smu_get_mask_for_fw_type(uint32_t fw_type)
432 case AMDGPU_UCODE_ID_CP_ME: 432 case AMDGPU_UCODE_ID_CP_ME:
433 return UCODE_ID_CP_ME_MASK; 433 return UCODE_ID_CP_ME_MASK;
434 case AMDGPU_UCODE_ID_CP_MEC1: 434 case AMDGPU_UCODE_ID_CP_MEC1:
435 return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK | UCODE_ID_CP_MEC_JT2_MASK; 435 return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK;
436 case AMDGPU_UCODE_ID_CP_MEC2: 436 case AMDGPU_UCODE_ID_CP_MEC2:
437 return UCODE_ID_CP_MEC_MASK; 437 return UCODE_ID_CP_MEC_MASK;
438 case AMDGPU_UCODE_ID_RLC_G: 438 case AMDGPU_UCODE_ID_RLC_G:
@@ -522,12 +522,6 @@ static int iceland_smu_request_load_fw(struct amdgpu_device *adev)
522 return -EINVAL; 522 return -EINVAL;
523 } 523 }
524 524
525 if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2,
526 &toc->entry[toc->num_entries++])) {
527 DRM_ERROR("Failed to get firmware entry for MEC_JT2\n");
528 return -EINVAL;
529 }
530
531 if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0, 525 if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
532 &toc->entry[toc->num_entries++])) { 526 &toc->entry[toc->num_entries++])) {
533 DRM_ERROR("Failed to get firmware entry for SDMA0\n"); 527 DRM_ERROR("Failed to get firmware entry for SDMA0\n");
@@ -550,8 +544,8 @@ static int iceland_smu_request_load_fw(struct amdgpu_device *adev)
550 UCODE_ID_CP_ME_MASK | 544 UCODE_ID_CP_ME_MASK |
551 UCODE_ID_CP_PFP_MASK | 545 UCODE_ID_CP_PFP_MASK |
552 UCODE_ID_CP_MEC_MASK | 546 UCODE_ID_CP_MEC_MASK |
553 UCODE_ID_CP_MEC_JT1_MASK | 547 UCODE_ID_CP_MEC_JT1_MASK;
554 UCODE_ID_CP_MEC_JT2_MASK; 548
555 549
556 if (iceland_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) { 550 if (iceland_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
557 DRM_ERROR("Fail to request SMU load ucode\n"); 551 DRM_ERROR("Fail to request SMU load ucode\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index 7e9154c7f1db..654d76723bc3 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -2859,11 +2859,11 @@ static int kv_dpm_init(struct amdgpu_device *adev)
2859 pi->voltage_drop_t = 0; 2859 pi->voltage_drop_t = 0;
2860 pi->caps_sclk_throttle_low_notification = false; 2860 pi->caps_sclk_throttle_low_notification = false;
2861 pi->caps_fps = false; /* true? */ 2861 pi->caps_fps = false; /* true? */
2862 pi->caps_uvd_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_UVD) ? true : false; 2862 pi->caps_uvd_pg = (adev->pg_flags & AMD_PG_SUPPORT_UVD) ? true : false;
2863 pi->caps_uvd_dpm = true; 2863 pi->caps_uvd_dpm = true;
2864 pi->caps_vce_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_VCE) ? true : false; 2864 pi->caps_vce_pg = (adev->pg_flags & AMD_PG_SUPPORT_VCE) ? true : false;
2865 pi->caps_samu_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_SAMU) ? true : false; 2865 pi->caps_samu_pg = (adev->pg_flags & AMD_PG_SUPPORT_SAMU) ? true : false;
2866 pi->caps_acp_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_ACP) ? true : false; 2866 pi->caps_acp_pg = (adev->pg_flags & AMD_PG_SUPPORT_ACP) ? true : false;
2867 pi->caps_stable_p_state = false; 2867 pi->caps_stable_p_state = false;
2868 2868
2869 ret = kv_parse_sys_info_table(adev); 2869 ret = kv_parse_sys_info_table(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
index f4a1346525fe..0497784b3652 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
@@ -122,25 +122,12 @@ static int tonga_dpm_hw_fini(void *handle)
122 122
123static int tonga_dpm_suspend(void *handle) 123static int tonga_dpm_suspend(void *handle)
124{ 124{
125 return 0; 125 return tonga_dpm_hw_fini(handle);
126} 126}
127 127
128static int tonga_dpm_resume(void *handle) 128static int tonga_dpm_resume(void *handle)
129{ 129{
130 int ret; 130 return tonga_dpm_hw_init(handle);
131 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
132
133 mutex_lock(&adev->pm.mutex);
134
135 ret = tonga_smu_start(adev);
136 if (ret) {
137 DRM_ERROR("SMU start failed\n");
138 goto fail;
139 }
140
141fail:
142 mutex_unlock(&adev->pm.mutex);
143 return ret;
144} 131}
145 132
146static int tonga_dpm_set_clockgating_state(void *handle, 133static int tonga_dpm_set_clockgating_state(void *handle,
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 5e9f73af83a8..fbd3767671bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -611,7 +611,7 @@ static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
611{ 611{
612 u32 orig, data; 612 u32 orig, data;
613 613
614 if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_UVD_MGCG)) { 614 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
615 data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL); 615 data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
616 data = 0xfff; 616 data = 0xfff;
617 WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data); 617 WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);
@@ -830,6 +830,9 @@ static int uvd_v4_2_set_clockgating_state(void *handle,
830 bool gate = false; 830 bool gate = false;
831 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 831 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
832 832
833 if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
834 return 0;
835
833 if (state == AMD_CG_STATE_GATE) 836 if (state == AMD_CG_STATE_GATE)
834 gate = true; 837 gate = true;
835 838
@@ -848,7 +851,10 @@ static int uvd_v4_2_set_powergating_state(void *handle,
848 * revisit this when there is a cleaner line between 851 * revisit this when there is a cleaner line between
849 * the smc and the hw blocks 852 * the smc and the hw blocks
850 */ 853 */
851 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 854 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
855
856 if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
857 return 0;
852 858
853 if (state == AMD_PG_STATE_GATE) { 859 if (state == AMD_PG_STATE_GATE) {
854 uvd_v4_2_stop(adev); 860 uvd_v4_2_stop(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 38864f562981..57f1c5bf3bf1 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -774,6 +774,11 @@ static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev,
774static int uvd_v5_0_set_clockgating_state(void *handle, 774static int uvd_v5_0_set_clockgating_state(void *handle,
775 enum amd_clockgating_state state) 775 enum amd_clockgating_state state)
776{ 776{
777 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
778
779 if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
780 return 0;
781
777 return 0; 782 return 0;
778} 783}
779 784
@@ -789,6 +794,9 @@ static int uvd_v5_0_set_powergating_state(void *handle,
789 */ 794 */
790 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 795 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
791 796
797 if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
798 return 0;
799
792 if (state == AMD_PG_STATE_GATE) { 800 if (state == AMD_PG_STATE_GATE) {
793 uvd_v5_0_stop(adev); 801 uvd_v5_0_stop(adev);
794 return 0; 802 return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 3d5913926436..0b365b7651ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -532,7 +532,7 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
532 uvd_v6_0_mc_resume(adev); 532 uvd_v6_0_mc_resume(adev);
533 533
534 /* Set dynamic clock gating in S/W control mode */ 534 /* Set dynamic clock gating in S/W control mode */
535 if (adev->cg_flags & AMDGPU_CG_SUPPORT_UVD_MGCG) { 535 if (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG) {
536 if (adev->flags & AMD_IS_APU) 536 if (adev->flags & AMD_IS_APU)
537 cz_set_uvd_clock_gating_branches(adev, false); 537 cz_set_uvd_clock_gating_branches(adev, false);
538 else 538 else
@@ -1000,7 +1000,7 @@ static int uvd_v6_0_set_clockgating_state(void *handle,
1000 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1000 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1001 bool enable = (state == AMD_CG_STATE_GATE) ? true : false; 1001 bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
1002 1002
1003 if (!(adev->cg_flags & AMDGPU_CG_SUPPORT_UVD_MGCG)) 1003 if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
1004 return 0; 1004 return 0;
1005 1005
1006 if (enable) { 1006 if (enable) {
@@ -1030,6 +1030,9 @@ static int uvd_v6_0_set_powergating_state(void *handle,
1030 */ 1030 */
1031 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1031 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1032 1032
1033 if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
1034 return 0;
1035
1033 if (state == AMD_PG_STATE_GATE) { 1036 if (state == AMD_PG_STATE_GATE) {
1034 uvd_v6_0_stop(adev); 1037 uvd_v6_0_stop(adev);
1035 return 0; 1038 return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index 52ac7a8f1e58..a822edacfa95 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -373,7 +373,7 @@ static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
373{ 373{
374 bool sw_cg = false; 374 bool sw_cg = false;
375 375
376 if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_VCE_MGCG)) { 376 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) {
377 if (sw_cg) 377 if (sw_cg)
378 vce_v2_0_set_sw_cg(adev, true); 378 vce_v2_0_set_sw_cg(adev, true);
379 else 379 else
@@ -608,6 +608,9 @@ static int vce_v2_0_set_powergating_state(void *handle,
608 */ 608 */
609 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 609 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
610 610
611 if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
612 return 0;
613
611 if (state == AMD_PG_STATE_GATE) 614 if (state == AMD_PG_STATE_GATE)
612 /* XXX do we need a vce_v2_0_stop()? */ 615 /* XXX do we need a vce_v2_0_stop()? */
613 return 0; 616 return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index e99af81e4aec..d662fa9f9091 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -277,7 +277,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
277 WREG32_P(mmVCE_STATUS, 0, ~1); 277 WREG32_P(mmVCE_STATUS, 0, ~1);
278 278
279 /* Set Clock-Gating off */ 279 /* Set Clock-Gating off */
280 if (adev->cg_flags & AMDGPU_CG_SUPPORT_VCE_MGCG) 280 if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
281 vce_v3_0_set_vce_sw_clock_gating(adev, false); 281 vce_v3_0_set_vce_sw_clock_gating(adev, false);
282 282
283 if (r) { 283 if (r) {
@@ -676,7 +676,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
676 bool enable = (state == AMD_CG_STATE_GATE) ? true : false; 676 bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
677 int i; 677 int i;
678 678
679 if (!(adev->cg_flags & AMDGPU_CG_SUPPORT_VCE_MGCG)) 679 if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
680 return 0; 680 return 0;
681 681
682 mutex_lock(&adev->grbm_idx_mutex); 682 mutex_lock(&adev->grbm_idx_mutex);
@@ -728,6 +728,9 @@ static int vce_v3_0_set_powergating_state(void *handle,
728 */ 728 */
729 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 729 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
730 730
731 if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
732 return 0;
733
731 if (state == AMD_PG_STATE_GATE) 734 if (state == AMD_PG_STATE_GATE)
732 /* XXX do we need a vce_v3_0_stop()? */ 735 /* XXX do we need a vce_v3_0_stop()? */
733 return 0; 736 return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 652e76644c31..0d14d108a6c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -61,6 +61,7 @@
61#include "vi.h" 61#include "vi.h"
62#include "vi_dpm.h" 62#include "vi_dpm.h"
63#include "gmc_v8_0.h" 63#include "gmc_v8_0.h"
64#include "gmc_v7_0.h"
64#include "gfx_v8_0.h" 65#include "gfx_v8_0.h"
65#include "sdma_v2_4.h" 66#include "sdma_v2_4.h"
66#include "sdma_v3_0.h" 67#include "sdma_v3_0.h"
@@ -1109,10 +1110,10 @@ static const struct amdgpu_ip_block_version topaz_ip_blocks[] =
1109 }, 1110 },
1110 { 1111 {
1111 .type = AMD_IP_BLOCK_TYPE_GMC, 1112 .type = AMD_IP_BLOCK_TYPE_GMC,
1112 .major = 8, 1113 .major = 7,
1113 .minor = 0, 1114 .minor = 4,
1114 .rev = 0, 1115 .rev = 0,
1115 .funcs = &gmc_v8_0_ip_funcs, 1116 .funcs = &gmc_v7_0_ip_funcs,
1116 }, 1117 },
1117 { 1118 {
1118 .type = AMD_IP_BLOCK_TYPE_IH, 1119 .type = AMD_IP_BLOCK_TYPE_IH,
@@ -1442,8 +1443,7 @@ static int vi_common_early_init(void *handle)
1442 break; 1443 break;
1443 case CHIP_FIJI: 1444 case CHIP_FIJI:
1444 adev->has_uvd = true; 1445 adev->has_uvd = true;
1445 adev->cg_flags = AMDGPU_CG_SUPPORT_UVD_MGCG | 1446 adev->cg_flags = 0;
1446 AMDGPU_CG_SUPPORT_VCE_MGCG;
1447 adev->pg_flags = 0; 1447 adev->pg_flags = 0;
1448 adev->external_rev_id = adev->rev_id + 0x3c; 1448 adev->external_rev_id = adev->rev_id + 0x3c;
1449 break; 1449 break;
@@ -1457,8 +1457,7 @@ static int vi_common_early_init(void *handle)
1457 case CHIP_STONEY: 1457 case CHIP_STONEY:
1458 adev->has_uvd = true; 1458 adev->has_uvd = true;
1459 adev->cg_flags = 0; 1459 adev->cg_flags = 0;
1460 /* Disable UVD pg */ 1460 adev->pg_flags = 0;
1461 adev->pg_flags = /* AMDGPU_PG_SUPPORT_UVD | */AMDGPU_PG_SUPPORT_VCE;
1462 adev->external_rev_id = adev->rev_id + 0x1; 1461 adev->external_rev_id = adev->rev_id + 0x1;
1463 break; 1462 break;
1464 default: 1463 default:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 9be007081b72..a902ae037398 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -194,7 +194,7 @@ static void kfd_process_wq_release(struct work_struct *work)
194 194
195 kfree(p); 195 kfree(p);
196 196
197 kfree((void *)work); 197 kfree(work);
198} 198}
199 199
200static void kfd_process_destroy_delayed(struct rcu_head *rcu) 200static void kfd_process_destroy_delayed(struct rcu_head *rcu)
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 1195d06f55bc..dbf7e6413cab 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -85,6 +85,38 @@ enum amd_powergating_state {
85 AMD_PG_STATE_UNGATE, 85 AMD_PG_STATE_UNGATE,
86}; 86};
87 87
88/* CG flags */
89#define AMD_CG_SUPPORT_GFX_MGCG (1 << 0)
90#define AMD_CG_SUPPORT_GFX_MGLS (1 << 1)
91#define AMD_CG_SUPPORT_GFX_CGCG (1 << 2)
92#define AMD_CG_SUPPORT_GFX_CGLS (1 << 3)
93#define AMD_CG_SUPPORT_GFX_CGTS (1 << 4)
94#define AMD_CG_SUPPORT_GFX_CGTS_LS (1 << 5)
95#define AMD_CG_SUPPORT_GFX_CP_LS (1 << 6)
96#define AMD_CG_SUPPORT_GFX_RLC_LS (1 << 7)
97#define AMD_CG_SUPPORT_MC_LS (1 << 8)
98#define AMD_CG_SUPPORT_MC_MGCG (1 << 9)
99#define AMD_CG_SUPPORT_SDMA_LS (1 << 10)
100#define AMD_CG_SUPPORT_SDMA_MGCG (1 << 11)
101#define AMD_CG_SUPPORT_BIF_LS (1 << 12)
102#define AMD_CG_SUPPORT_UVD_MGCG (1 << 13)
103#define AMD_CG_SUPPORT_VCE_MGCG (1 << 14)
104#define AMD_CG_SUPPORT_HDP_LS (1 << 15)
105#define AMD_CG_SUPPORT_HDP_MGCG (1 << 16)
106
107/* PG flags */
108#define AMD_PG_SUPPORT_GFX_PG (1 << 0)
109#define AMD_PG_SUPPORT_GFX_SMG (1 << 1)
110#define AMD_PG_SUPPORT_GFX_DMG (1 << 2)
111#define AMD_PG_SUPPORT_UVD (1 << 3)
112#define AMD_PG_SUPPORT_VCE (1 << 4)
113#define AMD_PG_SUPPORT_CP (1 << 5)
114#define AMD_PG_SUPPORT_GDS (1 << 6)
115#define AMD_PG_SUPPORT_RLC_SMU_HS (1 << 7)
116#define AMD_PG_SUPPORT_SDMA (1 << 8)
117#define AMD_PG_SUPPORT_ACP (1 << 9)
118#define AMD_PG_SUPPORT_SAMU (1 << 10)
119
88enum amd_pm_state_type { 120enum amd_pm_state_type {
89 /* not used for dpm */ 121 /* not used for dpm */
90 POWER_STATE_TYPE_DEFAULT, 122 POWER_STATE_TYPE_DEFAULT,
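
[Editor's note] The AMD_CG_SUPPORT_* / AMD_PG_SUPPORT_* families above replace the per-driver AMDGPU_CG_/AMDGPU_PG_ masks used elsewhere in this patch. A minimal sketch of how they are meant to be consumed — early init advertises support once, each IP block tests its own bit; adev field names are assumed from amdgpu.h and the illustrative function names are not part of the patch:

static void example_early_init(struct amdgpu_device *adev)
{
	/* chip advertises what it supports, once, at early init */
	adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG | AMD_CG_SUPPORT_VCE_MGCG;
	adev->pg_flags = AMD_PG_SUPPORT_UVD | AMD_PG_SUPPORT_VCE;
}

static int example_set_powergating_state(struct amdgpu_device *adev)
{
	/* every IP block bails out early when its bit is not set */
	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
		return 0;

	/* ... gate or ungate the block here ... */
	return 0;
}
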
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
index 713aec954692..aec38fc3834f 100644
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -109,6 +109,8 @@ enum cgs_system_info_id {
109 CGS_SYSTEM_INFO_ADAPTER_BDF_ID = 1, 109 CGS_SYSTEM_INFO_ADAPTER_BDF_ID = 1,
110 CGS_SYSTEM_INFO_PCIE_GEN_INFO, 110 CGS_SYSTEM_INFO_PCIE_GEN_INFO,
111 CGS_SYSTEM_INFO_PCIE_MLW, 111 CGS_SYSTEM_INFO_PCIE_MLW,
112 CGS_SYSTEM_INFO_CG_FLAGS,
113 CGS_SYSTEM_INFO_PG_FLAGS,
112 CGS_SYSTEM_INFO_ID_MAXIMUM, 114 CGS_SYSTEM_INFO_ID_MAXIMUM,
113}; 115};
114 116
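
[Editor's note] The two new ids let powerplay pull the gating masks out of amdgpu through the CGS layer instead of duplicating per-chip knowledge. A hedged sketch of the consumer-side query, mirroring the cz/tonga hunks later in this patch; struct layout is assumed from cgs_common.h and the helper name is illustrative:

static uint32_t example_query_pg_flags(void *cgs_device)
{
	struct cgs_system_info sys_info = {0};

	sys_info.size = sizeof(struct cgs_system_info);
	sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;

	if (cgs_query_system_info(cgs_device, &sys_info))
		return 0;	/* query failed: assume no powergating support */

	return (uint32_t)sys_info.value;	/* AMD_PG_SUPPORT_* mask */
}
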
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 8f5d5edcf193..589599f66fcc 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -64,6 +64,11 @@ static int pp_sw_init(void *handle)
64 if (ret == 0) 64 if (ret == 0)
65 ret = hwmgr->hwmgr_func->backend_init(hwmgr); 65 ret = hwmgr->hwmgr_func->backend_init(hwmgr);
66 66
67 if (ret)
68 printk("amdgpu: powerplay initialization failed\n");
69 else
70 printk("amdgpu: powerplay initialized\n");
71
67 return ret; 72 return ret;
68} 73}
69 74
@@ -397,8 +402,11 @@ int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, void *input,
397 402
398 data.requested_ui_label = power_state_convert(ps); 403 data.requested_ui_label = power_state_convert(ps);
399 ret = pem_handle_event(pp_handle->eventmgr, event_id, &data); 404 ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
405 break;
400 } 406 }
401 break; 407 case AMD_PP_EVENT_COMPLETE_INIT:
408 ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
409 break;
402 default: 410 default:
403 break; 411 break;
404 } 412 }
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
index 83be3cf210e0..6b52c78cb404 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
@@ -165,6 +165,7 @@ const struct action_chain resume_action_chain = {
165}; 165};
166 166
167static const pem_event_action *complete_init_event[] = { 167static const pem_event_action *complete_init_event[] = {
168 unblock_adjust_power_state_tasks,
168 adjust_power_state_tasks, 169 adjust_power_state_tasks,
169 enable_gfx_clock_gating_tasks, 170 enable_gfx_clock_gating_tasks,
170 enable_gfx_voltage_island_power_gating_tasks, 171 enable_gfx_voltage_island_power_gating_tasks,
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
index 52a3efc97f05..46410e3c7349 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
@@ -31,7 +31,7 @@
31static int pem_init(struct pp_eventmgr *eventmgr) 31static int pem_init(struct pp_eventmgr *eventmgr)
32{ 32{
33 int result = 0; 33 int result = 0;
34 struct pem_event_data event_data; 34 struct pem_event_data event_data = { {0} };
35 35
36 /* Initialize PowerPlay feature info */ 36 /* Initialize PowerPlay feature info */
37 pem_init_feature_info(eventmgr); 37 pem_init_feature_info(eventmgr);
@@ -52,7 +52,7 @@ static int pem_init(struct pp_eventmgr *eventmgr)
52 52
53static void pem_fini(struct pp_eventmgr *eventmgr) 53static void pem_fini(struct pp_eventmgr *eventmgr)
54{ 54{
55 struct pem_event_data event_data; 55 struct pem_event_data event_data = { {0} };
56 56
57 pem_uninit_featureInfo(eventmgr); 57 pem_uninit_featureInfo(eventmgr);
58 pem_unregister_interrupts(eventmgr); 58 pem_unregister_interrupts(eventmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
index ad7700822a1c..ff08ce41bde9 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
@@ -226,7 +226,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
226 } 226 }
227 } else { 227 } else {
228 cz_dpm_update_vce_dpm(hwmgr); 228 cz_dpm_update_vce_dpm(hwmgr);
229 cz_enable_disable_vce_dpm(hwmgr, true); 229 cz_enable_disable_vce_dpm(hwmgr, !bgate);
230 return 0; 230 return 0;
231 } 231 }
232 232
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index 0874ab42ee95..cf01177ca3b5 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -174,6 +174,8 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
174{ 174{
175 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 175 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
176 uint32_t i; 176 uint32_t i;
177 struct cgs_system_info sys_info = {0};
178 int result;
177 179
178 cz_hwmgr->gfx_ramp_step = 256*25/100; 180 cz_hwmgr->gfx_ramp_step = 256*25/100;
179 181
@@ -247,6 +249,22 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
247 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 249 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
248 PHM_PlatformCaps_DisableVoltageIsland); 250 PHM_PlatformCaps_DisableVoltageIsland);
249 251
252 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
253 PHM_PlatformCaps_UVDPowerGating);
254 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
255 PHM_PlatformCaps_VCEPowerGating);
256 sys_info.size = sizeof(struct cgs_system_info);
257 sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
258 result = cgs_query_system_info(hwmgr->device, &sys_info);
259 if (!result) {
260 if (sys_info.value & AMD_PG_SUPPORT_UVD)
261 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
262 PHM_PlatformCaps_UVDPowerGating);
263 if (sys_info.value & AMD_PG_SUPPORT_VCE)
264 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
265 PHM_PlatformCaps_VCEPowerGating);
266 }
267
250 return 0; 268 return 0;
251} 269}
252 270
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
index 44a925006479..980d3bf8ea76 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
@@ -4451,6 +4451,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
4451 pp_atomctrl_gpio_pin_assignment gpio_pin_assignment; 4451 pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
4452 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); 4452 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
4453 phw_tonga_ulv_parm *ulv; 4453 phw_tonga_ulv_parm *ulv;
4454 struct cgs_system_info sys_info = {0};
4454 4455
4455 PP_ASSERT_WITH_CODE((NULL != hwmgr), 4456 PP_ASSERT_WITH_CODE((NULL != hwmgr),
4456 "Invalid Parameter!", return -1;); 4457 "Invalid Parameter!", return -1;);
@@ -4615,9 +4616,23 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
4615 4616
4616 data->vddc_phase_shed_control = 0; 4617 data->vddc_phase_shed_control = 0;
4617 4618
4618 if (0 == result) { 4619 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
4619 struct cgs_system_info sys_info = {0}; 4620 PHM_PlatformCaps_UVDPowerGating);
4621 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
4622 PHM_PlatformCaps_VCEPowerGating);
4623 sys_info.size = sizeof(struct cgs_system_info);
4624 sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
4625 result = cgs_query_system_info(hwmgr->device, &sys_info);
4626 if (!result) {
4627 if (sys_info.value & AMD_PG_SUPPORT_UVD)
4628 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
4629 PHM_PlatformCaps_UVDPowerGating);
4630 if (sys_info.value & AMD_PG_SUPPORT_VCE)
4631 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
4632 PHM_PlatformCaps_VCEPowerGating);
4633 }
4620 4634
4635 if (0 == result) {
4621 data->is_tlu_enabled = 0; 4636 data->is_tlu_enabled = 0;
4622 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = 4637 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
4623 TONGA_MAX_HARDWARE_POWERLEVELS; 4638 TONGA_MAX_HARDWARE_POWERLEVELS;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
index 873a8d264d5c..ec222c665602 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
@@ -272,6 +272,9 @@ static int cz_start_smu(struct pp_smumgr *smumgr)
272 UCODE_ID_CP_MEC_JT1_MASK | 272 UCODE_ID_CP_MEC_JT1_MASK |
273 UCODE_ID_CP_MEC_JT2_MASK; 273 UCODE_ID_CP_MEC_JT2_MASK;
274 274
275 if (smumgr->chip_id == CHIP_STONEY)
276 fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);
277
275 cz_request_smu_load_fw(smumgr); 278 cz_request_smu_load_fw(smumgr);
276 cz_check_fw_load_finish(smumgr, fw_to_check); 279 cz_check_fw_load_finish(smumgr, fw_to_check);
277 280
@@ -282,7 +285,7 @@ static int cz_start_smu(struct pp_smumgr *smumgr)
282 return ret; 285 return ret;
283} 286}
284 287
285static uint8_t cz_translate_firmware_enum_to_arg( 288static uint8_t cz_translate_firmware_enum_to_arg(struct pp_smumgr *smumgr,
286 enum cz_scratch_entry firmware_enum) 289 enum cz_scratch_entry firmware_enum)
287{ 290{
288 uint8_t ret = 0; 291 uint8_t ret = 0;
@@ -292,7 +295,10 @@ static uint8_t cz_translate_firmware_enum_to_arg(
292 ret = UCODE_ID_SDMA0; 295 ret = UCODE_ID_SDMA0;
293 break; 296 break;
294 case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1: 297 case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1:
295 ret = UCODE_ID_SDMA1; 298 if (smumgr->chip_id == CHIP_STONEY)
299 ret = UCODE_ID_SDMA0;
300 else
301 ret = UCODE_ID_SDMA1;
296 break; 302 break;
297 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE: 303 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE:
298 ret = UCODE_ID_CP_CE; 304 ret = UCODE_ID_CP_CE;
@@ -307,7 +313,10 @@ static uint8_t cz_translate_firmware_enum_to_arg(
307 ret = UCODE_ID_CP_MEC_JT1; 313 ret = UCODE_ID_CP_MEC_JT1;
308 break; 314 break;
309 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2: 315 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2:
310 ret = UCODE_ID_CP_MEC_JT2; 316 if (smumgr->chip_id == CHIP_STONEY)
317 ret = UCODE_ID_CP_MEC_JT1;
318 else
319 ret = UCODE_ID_CP_MEC_JT2;
311 break; 320 break;
312 case CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG: 321 case CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG:
313 ret = UCODE_ID_GMCON_RENG; 322 ret = UCODE_ID_GMCON_RENG;
@@ -396,7 +405,7 @@ static int cz_smu_populate_single_scratch_task(
396 struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++]; 405 struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++];
397 406
398 task->type = type; 407 task->type = type;
399 task->arg = cz_translate_firmware_enum_to_arg(fw_enum); 408 task->arg = cz_translate_firmware_enum_to_arg(smumgr, fw_enum);
400 task->next = is_last ? END_OF_TASK_LIST : cz_smu->toc_entry_used_count; 409 task->next = is_last ? END_OF_TASK_LIST : cz_smu->toc_entry_used_count;
401 410
402 for (i = 0; i < cz_smu->scratch_buffer_length; i++) 411 for (i = 0; i < cz_smu->scratch_buffer_length; i++)
@@ -433,7 +442,7 @@ static int cz_smu_populate_single_ucode_load_task(
433 struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++]; 442 struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++];
434 443
435 task->type = TASK_TYPE_UCODE_LOAD; 444 task->type = TASK_TYPE_UCODE_LOAD;
436 task->arg = cz_translate_firmware_enum_to_arg(fw_enum); 445 task->arg = cz_translate_firmware_enum_to_arg(smumgr, fw_enum);
437 task->next = is_last ? END_OF_TASK_LIST : cz_smu->toc_entry_used_count; 446 task->next = is_last ? END_OF_TASK_LIST : cz_smu->toc_entry_used_count;
438 447
439 for (i = 0; i < cz_smu->driver_buffer_length; i++) 448 for (i = 0; i < cz_smu->driver_buffer_length; i++)
@@ -509,8 +518,14 @@ static int cz_smu_construct_toc_for_vddgfx_exit(struct pp_smumgr *smumgr)
509 CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false); 518 CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
510 cz_smu_populate_single_ucode_load_task(smumgr, 519 cz_smu_populate_single_ucode_load_task(smumgr,
511 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false); 520 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
512 cz_smu_populate_single_ucode_load_task(smumgr, 521
522 if (smumgr->chip_id == CHIP_STONEY)
523 cz_smu_populate_single_ucode_load_task(smumgr,
524 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
525 else
526 cz_smu_populate_single_ucode_load_task(smumgr,
513 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false); 527 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
528
514 cz_smu_populate_single_ucode_load_task(smumgr, 529 cz_smu_populate_single_ucode_load_task(smumgr,
515 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, false); 530 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, false);
516 531
@@ -551,7 +566,11 @@ static int cz_smu_construct_toc_for_bootup(struct pp_smumgr *smumgr)
551 566
552 cz_smu_populate_single_ucode_load_task(smumgr, 567 cz_smu_populate_single_ucode_load_task(smumgr,
553 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false); 568 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
554 cz_smu_populate_single_ucode_load_task(smumgr, 569 if (smumgr->chip_id == CHIP_STONEY)
570 cz_smu_populate_single_ucode_load_task(smumgr,
571 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
572 else
573 cz_smu_populate_single_ucode_load_task(smumgr,
555 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false); 574 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
556 cz_smu_populate_single_ucode_load_task(smumgr, 575 cz_smu_populate_single_ucode_load_task(smumgr,
557 CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false); 576 CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
@@ -561,7 +580,11 @@ static int cz_smu_construct_toc_for_bootup(struct pp_smumgr *smumgr)
561 CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false); 580 CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
562 cz_smu_populate_single_ucode_load_task(smumgr, 581 cz_smu_populate_single_ucode_load_task(smumgr,
563 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false); 582 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
564 cz_smu_populate_single_ucode_load_task(smumgr, 583 if (smumgr->chip_id == CHIP_STONEY)
584 cz_smu_populate_single_ucode_load_task(smumgr,
585 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
586 else
587 cz_smu_populate_single_ucode_load_task(smumgr,
565 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false); 588 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
566 cz_smu_populate_single_ucode_load_task(smumgr, 589 cz_smu_populate_single_ucode_load_task(smumgr,
567 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, true); 590 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, true);
@@ -618,7 +641,7 @@ static int cz_smu_populate_firmware_entries(struct pp_smumgr *smumgr)
618 641
619 for (i = 0; i < sizeof(firmware_list)/sizeof(*firmware_list); i++) { 642 for (i = 0; i < sizeof(firmware_list)/sizeof(*firmware_list); i++) {
620 643
621 firmware_type = cz_translate_firmware_enum_to_arg( 644 firmware_type = cz_translate_firmware_enum_to_arg(smumgr,
622 firmware_list[i]); 645 firmware_list[i]);
623 646
624 ucode_id = cz_convert_fw_type_to_cgs(firmware_type); 647 ucode_id = cz_convert_fw_type_to_cgs(firmware_type);
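
[Editor's note] Stoney carries a single SDMA engine and a single MEC jump table, so the hunks above consistently fold requests for the second instance back onto the first. A reduced illustration of that remapping, with enum values assumed from the SMU headers:

static uint8_t example_stoney_remap(const struct pp_smumgr *smumgr, uint8_t fw)
{
	if (smumgr->chip_id != CHIP_STONEY)
		return fw;

	if (fw == UCODE_ID_SDMA1)
		return UCODE_ID_SDMA0;		/* only one SDMA instance */
	if (fw == UCODE_ID_CP_MEC_JT2)
		return UCODE_ID_CP_MEC_JT1;	/* only one MEC jump table */
	return fw;
}
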
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 9759009d1da3..b1480acbb3c3 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -227,7 +227,7 @@ static int ast_get_dram_info(struct drm_device *dev)
227 } while (ast_read32(ast, 0x10000) != 0x01); 227 } while (ast_read32(ast, 0x10000) != 0x01);
228 data = ast_read32(ast, 0x10004); 228 data = ast_read32(ast, 0x10004);
229 229
230 if (data & 0x400) 230 if (data & 0x40)
231 ast->dram_bus_width = 16; 231 ast->dram_bus_width = 16;
232 else 232 else
233 ast->dram_bus_width = 32; 233 ast->dram_bus_width = 32;
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 3f74193885f1..9a7b44616b55 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -65,8 +65,6 @@ drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
65 */ 65 */
66 state->allow_modeset = true; 66 state->allow_modeset = true;
67 67
68 state->num_connector = ACCESS_ONCE(dev->mode_config.num_connector);
69
70 state->crtcs = kcalloc(dev->mode_config.num_crtc, 68 state->crtcs = kcalloc(dev->mode_config.num_crtc,
71 sizeof(*state->crtcs), GFP_KERNEL); 69 sizeof(*state->crtcs), GFP_KERNEL);
72 if (!state->crtcs) 70 if (!state->crtcs)
@@ -83,16 +81,6 @@ drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
83 sizeof(*state->plane_states), GFP_KERNEL); 81 sizeof(*state->plane_states), GFP_KERNEL);
84 if (!state->plane_states) 82 if (!state->plane_states)
85 goto fail; 83 goto fail;
86 state->connectors = kcalloc(state->num_connector,
87 sizeof(*state->connectors),
88 GFP_KERNEL);
89 if (!state->connectors)
90 goto fail;
91 state->connector_states = kcalloc(state->num_connector,
92 sizeof(*state->connector_states),
93 GFP_KERNEL);
94 if (!state->connector_states)
95 goto fail;
96 84
97 state->dev = dev; 85 state->dev = dev;
98 86
@@ -823,19 +811,27 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state,
823 811
824 index = drm_connector_index(connector); 812 index = drm_connector_index(connector);
825 813
826 /*
827 * Construction of atomic state updates can race with a connector
828 * hot-add which might overflow. In this case flip the table and just
829 * restart the entire ioctl - no one is fast enough to livelock a cpu
830 * with physical hotplug events anyway.
831 *
832 * Note that we only grab the indexes once we have the right lock to
833 * prevent hotplug/unplugging of connectors. So removal is no problem,
834 * at most the array is a bit too large.
835 */
836 if (index >= state->num_connector) { 814 if (index >= state->num_connector) {
837 DRM_DEBUG_ATOMIC("Hot-added connector would overflow state array, restarting\n"); 815 struct drm_connector **c;
838 return ERR_PTR(-EAGAIN); 816 struct drm_connector_state **cs;
817 int alloc = max(index + 1, config->num_connector);
818
819 c = krealloc(state->connectors, alloc * sizeof(*state->connectors), GFP_KERNEL);
820 if (!c)
821 return ERR_PTR(-ENOMEM);
822
823 state->connectors = c;
824 memset(&state->connectors[state->num_connector], 0,
825 sizeof(*state->connectors) * (alloc - state->num_connector));
826
827 cs = krealloc(state->connector_states, alloc * sizeof(*state->connector_states), GFP_KERNEL);
828 if (!cs)
829 return ERR_PTR(-ENOMEM);
830
831 state->connector_states = cs;
832 memset(&state->connector_states[state->num_connector], 0,
833 sizeof(*state->connector_states) * (alloc - state->num_connector));
834 state->num_connector = alloc;
839 } 835 }
840 836
841 if (state->connector_states[index]) 837 if (state->connector_states[index])
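
[Editor's note] The hunk above replaces the fixed, pre-sized connector arrays with on-demand growth when a hot-added connector's index falls outside the current allocation. The essential pattern, as a sketch with illustrative names (not the patch's own helper):

static int example_grow_ptr_array(void ***arr, int *len, int index, int floor)
{
	int alloc = max(index + 1, floor);
	void **n;

	n = krealloc(*arr, alloc * sizeof(*n), GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	/* zero only the newly appended tail so existing entries survive */
	memset(&n[*len], 0, (alloc - *len) * sizeof(*n));
	*arr = n;
	*len = alloc;
	return 0;
}
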
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 57cccd68ca52..4f2d3e161593 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -946,9 +946,23 @@ static void wait_for_fences(struct drm_device *dev,
946 } 946 }
947} 947}
948 948
949static bool framebuffer_changed(struct drm_device *dev, 949/**
950 struct drm_atomic_state *old_state, 950 * drm_atomic_helper_framebuffer_changed - check if framebuffer has changed
951 struct drm_crtc *crtc) 951 * @dev: DRM device
952 * @old_state: atomic state object with old state structures
953 * @crtc: DRM crtc
954 *
955 * Checks whether the framebuffer used for this CRTC changes as a result of
956 * the atomic update. This is useful for drivers which cannot use
957 * drm_atomic_helper_wait_for_vblanks() and need to reimplement its
958 * functionality.
959 *
960 * Returns:
961 * true if the framebuffer changed.
962 */
963bool drm_atomic_helper_framebuffer_changed(struct drm_device *dev,
964 struct drm_atomic_state *old_state,
965 struct drm_crtc *crtc)
952{ 966{
953 struct drm_plane *plane; 967 struct drm_plane *plane;
954 struct drm_plane_state *old_plane_state; 968 struct drm_plane_state *old_plane_state;
@@ -965,6 +979,7 @@ static bool framebuffer_changed(struct drm_device *dev,
965 979
966 return false; 980 return false;
967} 981}
982EXPORT_SYMBOL(drm_atomic_helper_framebuffer_changed);
968 983
969/** 984/**
970 * drm_atomic_helper_wait_for_vblanks - wait for vblank on crtcs 985 * drm_atomic_helper_wait_for_vblanks - wait for vblank on crtcs
@@ -999,7 +1014,8 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
999 if (old_state->legacy_cursor_update) 1014 if (old_state->legacy_cursor_update)
1000 continue; 1015 continue;
1001 1016
1002 if (!framebuffer_changed(dev, old_state, crtc)) 1017 if (!drm_atomic_helper_framebuffer_changed(dev,
1018 old_state, crtc))
1003 continue; 1019 continue;
1004 1020
1005 ret = drm_crtc_vblank_get(crtc); 1021 ret = drm_crtc_vblank_get(crtc);
@@ -1477,7 +1493,7 @@ void drm_atomic_helper_swap_state(struct drm_device *dev,
1477{ 1493{
1478 int i; 1494 int i;
1479 1495
1480 for (i = 0; i < dev->mode_config.num_connector; i++) { 1496 for (i = 0; i < state->num_connector; i++) {
1481 struct drm_connector *connector = state->connectors[i]; 1497 struct drm_connector *connector = state->connectors[i];
1482 1498
1483 if (!connector) 1499 if (!connector)
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index d40bab29747e..f6191215b2cb 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -918,12 +918,19 @@ int drm_connector_init(struct drm_device *dev,
918 connector->base.properties = &connector->properties; 918 connector->base.properties = &connector->properties;
919 connector->dev = dev; 919 connector->dev = dev;
920 connector->funcs = funcs; 920 connector->funcs = funcs;
921
922 connector->connector_id = ida_simple_get(&config->connector_ida, 0, 0, GFP_KERNEL);
923 if (connector->connector_id < 0) {
924 ret = connector->connector_id;
925 goto out_put;
926 }
927
921 connector->connector_type = connector_type; 928 connector->connector_type = connector_type;
922 connector->connector_type_id = 929 connector->connector_type_id =
923 ida_simple_get(connector_ida, 1, 0, GFP_KERNEL); 930 ida_simple_get(connector_ida, 1, 0, GFP_KERNEL);
924 if (connector->connector_type_id < 0) { 931 if (connector->connector_type_id < 0) {
925 ret = connector->connector_type_id; 932 ret = connector->connector_type_id;
926 goto out_put; 933 goto out_put_id;
927 } 934 }
928 connector->name = 935 connector->name =
929 kasprintf(GFP_KERNEL, "%s-%d", 936 kasprintf(GFP_KERNEL, "%s-%d",
@@ -931,7 +938,7 @@ int drm_connector_init(struct drm_device *dev,
931 connector->connector_type_id); 938 connector->connector_type_id);
932 if (!connector->name) { 939 if (!connector->name) {
933 ret = -ENOMEM; 940 ret = -ENOMEM;
934 goto out_put; 941 goto out_put_type_id;
935 } 942 }
936 943
937 INIT_LIST_HEAD(&connector->probed_modes); 944 INIT_LIST_HEAD(&connector->probed_modes);
@@ -959,7 +966,12 @@ int drm_connector_init(struct drm_device *dev,
959 } 966 }
960 967
961 connector->debugfs_entry = NULL; 968 connector->debugfs_entry = NULL;
962 969out_put_type_id:
970 if (ret)
971 ida_remove(connector_ida, connector->connector_type_id);
972out_put_id:
973 if (ret)
974 ida_remove(&config->connector_ida, connector->connector_id);
963out_put: 975out_put:
964 if (ret) 976 if (ret)
965 drm_mode_object_put(dev, &connector->base); 977 drm_mode_object_put(dev, &connector->base);
@@ -996,6 +1008,9 @@ void drm_connector_cleanup(struct drm_connector *connector)
996 ida_remove(&drm_connector_enum_list[connector->connector_type].ida, 1008 ida_remove(&drm_connector_enum_list[connector->connector_type].ida,
997 connector->connector_type_id); 1009 connector->connector_type_id);
998 1010
1011 ida_remove(&dev->mode_config.connector_ida,
1012 connector->connector_id);
1013
999 kfree(connector->display_info.bus_formats); 1014 kfree(connector->display_info.bus_formats);
1000 drm_mode_object_put(dev, &connector->base); 1015 drm_mode_object_put(dev, &connector->base);
1001 kfree(connector->name); 1016 kfree(connector->name);
@@ -1013,32 +1028,6 @@ void drm_connector_cleanup(struct drm_connector *connector)
1013EXPORT_SYMBOL(drm_connector_cleanup); 1028EXPORT_SYMBOL(drm_connector_cleanup);
1014 1029
1015/** 1030/**
1016 * drm_connector_index - find the index of a registered connector
1017 * @connector: connector to find index for
1018 *
1019 * Given a registered connector, return the index of that connector within a DRM
1020 * device's list of connectors.
1021 */
1022unsigned int drm_connector_index(struct drm_connector *connector)
1023{
1024 unsigned int index = 0;
1025 struct drm_connector *tmp;
1026 struct drm_mode_config *config = &connector->dev->mode_config;
1027
1028 WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
1029
1030 drm_for_each_connector(tmp, connector->dev) {
1031 if (tmp == connector)
1032 return index;
1033
1034 index++;
1035 }
1036
1037 BUG();
1038}
1039EXPORT_SYMBOL(drm_connector_index);
1040
1041/**
1042 * drm_connector_register - register a connector 1031 * drm_connector_register - register a connector
1043 * @connector: the connector to register 1032 * @connector: the connector to register
1044 * 1033 *
@@ -5789,6 +5778,7 @@ void drm_mode_config_init(struct drm_device *dev)
5789 INIT_LIST_HEAD(&dev->mode_config.plane_list); 5778 INIT_LIST_HEAD(&dev->mode_config.plane_list);
5790 idr_init(&dev->mode_config.crtc_idr); 5779 idr_init(&dev->mode_config.crtc_idr);
5791 idr_init(&dev->mode_config.tile_idr); 5780 idr_init(&dev->mode_config.tile_idr);
5781 ida_init(&dev->mode_config.connector_ida);
5792 5782
5793 drm_modeset_lock_all(dev); 5783 drm_modeset_lock_all(dev);
5794 drm_mode_create_standard_properties(dev); 5784 drm_mode_create_standard_properties(dev);
@@ -5869,6 +5859,7 @@ void drm_mode_config_cleanup(struct drm_device *dev)
5869 crtc->funcs->destroy(crtc); 5859 crtc->funcs->destroy(crtc);
5870 } 5860 }
5871 5861
5862 ida_destroy(&dev->mode_config.connector_ida);
5872 idr_destroy(&dev->mode_config.tile_idr); 5863 idr_destroy(&dev->mode_config.tile_idr);
5873 idr_destroy(&dev->mode_config.crtc_idr); 5864 idr_destroy(&dev->mode_config.crtc_idr);
5874 drm_modeset_lock_fini(&dev->mode_config.connection_mutex); 5865 drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
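
[Editor's note] drm_connector_init() now performs two ida allocations (per-device connector_id, then per-type id) and must release them in reverse order on failure, which is what the added out_put_type_id/out_put_id labels do. The unwind pattern in isolation, with illustrative names:

static int example_alloc_ids(struct ida *dev_ida, struct ida *type_ida,
			     int *id, int *type_id)
{
	int ret;

	*id = ida_simple_get(dev_ida, 0, 0, GFP_KERNEL);
	if (*id < 0)
		return *id;

	*type_id = ida_simple_get(type_ida, 1, 0, GFP_KERNEL);
	if (*type_id < 0) {
		ret = *type_id;
		goto err_free_id;
	}
	return 0;

err_free_id:
	ida_remove(dev_ida, *id);	/* undo in reverse order */
	return ret;
}
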
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 6ed90a2437e5..27fbd79d0daf 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -803,6 +803,18 @@ static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
803 return mstb; 803 return mstb;
804} 804}
805 805
806static void drm_dp_free_mst_port(struct kref *kref);
807
808static void drm_dp_free_mst_branch_device(struct kref *kref)
809{
810 struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
811 if (mstb->port_parent) {
812 if (list_empty(&mstb->port_parent->next))
813 kref_put(&mstb->port_parent->kref, drm_dp_free_mst_port);
814 }
815 kfree(mstb);
816}
817
806static void drm_dp_destroy_mst_branch_device(struct kref *kref) 818static void drm_dp_destroy_mst_branch_device(struct kref *kref)
807{ 819{
808 struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref); 820 struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
@@ -810,6 +822,15 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
810 bool wake_tx = false; 822 bool wake_tx = false;
811 823
812 /* 824 /*
825 * init kref again to be used by ports to remove mst branch when it is
826 * not needed anymore
827 */
828 kref_init(kref);
829
830 if (mstb->port_parent && list_empty(&mstb->port_parent->next))
831 kref_get(&mstb->port_parent->kref);
832
833 /*
813 * destroy all ports - don't need lock 834 * destroy all ports - don't need lock
814 * as there are no more references to the mst branch 835 * as there are no more references to the mst branch
815 * device at this point. 836 * device at this point.
@@ -835,7 +856,8 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
835 856
836 if (wake_tx) 857 if (wake_tx)
837 wake_up(&mstb->mgr->tx_waitq); 858 wake_up(&mstb->mgr->tx_waitq);
838 kfree(mstb); 859
860 kref_put(kref, drm_dp_free_mst_branch_device);
839} 861}
840 862
841static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb) 863static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
@@ -883,6 +905,7 @@ static void drm_dp_destroy_port(struct kref *kref)
883 * from an EDID retrieval */ 905 * from an EDID retrieval */
884 906
885 mutex_lock(&mgr->destroy_connector_lock); 907 mutex_lock(&mgr->destroy_connector_lock);
908 kref_get(&port->parent->kref);
886 list_add(&port->next, &mgr->destroy_connector_list); 909 list_add(&port->next, &mgr->destroy_connector_list);
887 mutex_unlock(&mgr->destroy_connector_lock); 910 mutex_unlock(&mgr->destroy_connector_lock);
888 schedule_work(&mgr->destroy_connector_work); 911 schedule_work(&mgr->destroy_connector_work);
@@ -1018,18 +1041,27 @@ static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
1018 return send_link; 1041 return send_link;
1019} 1042}
1020 1043
1021static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb, 1044static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
1022 struct drm_dp_mst_port *port)
1023{ 1045{
1024 int ret; 1046 int ret;
1025 if (port->dpcd_rev >= 0x12) { 1047
1026 port->guid_valid = drm_dp_validate_guid(mstb->mgr, port->guid); 1048 memcpy(mstb->guid, guid, 16);
1027 if (!port->guid_valid) { 1049
1028 ret = drm_dp_send_dpcd_write(mstb->mgr, 1050 if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
1029 port, 1051 if (mstb->port_parent) {
1030 DP_GUID, 1052 ret = drm_dp_send_dpcd_write(
1031 16, port->guid); 1053 mstb->mgr,
1032 port->guid_valid = true; 1054 mstb->port_parent,
1055 DP_GUID,
1056 16,
1057 mstb->guid);
1058 } else {
1059
1060 ret = drm_dp_dpcd_write(
1061 mstb->mgr->aux,
1062 DP_GUID,
1063 mstb->guid,
1064 16);
1033 } 1065 }
1034 } 1066 }
1035} 1067}
@@ -1086,7 +1118,6 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
1086 port->dpcd_rev = port_msg->dpcd_revision; 1118 port->dpcd_rev = port_msg->dpcd_revision;
1087 port->num_sdp_streams = port_msg->num_sdp_streams; 1119 port->num_sdp_streams = port_msg->num_sdp_streams;
1088 port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks; 1120 port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
1089 memcpy(port->guid, port_msg->peer_guid, 16);
1090 1121
1091 /* manage mstb port lists with mgr lock - take a reference 1122 /* manage mstb port lists with mgr lock - take a reference
1092 for this list */ 1123 for this list */
@@ -1099,11 +1130,9 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
1099 1130
1100 if (old_ddps != port->ddps) { 1131 if (old_ddps != port->ddps) {
1101 if (port->ddps) { 1132 if (port->ddps) {
1102 drm_dp_check_port_guid(mstb, port);
1103 if (!port->input) 1133 if (!port->input)
1104 drm_dp_send_enum_path_resources(mstb->mgr, mstb, port); 1134 drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);
1105 } else { 1135 } else {
1106 port->guid_valid = false;
1107 port->available_pbn = 0; 1136 port->available_pbn = 0;
1108 } 1137 }
1109 } 1138 }
@@ -1162,10 +1191,8 @@ static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
1162 1191
1163 if (old_ddps != port->ddps) { 1192 if (old_ddps != port->ddps) {
1164 if (port->ddps) { 1193 if (port->ddps) {
1165 drm_dp_check_port_guid(mstb, port);
1166 dowork = true; 1194 dowork = true;
1167 } else { 1195 } else {
1168 port->guid_valid = false;
1169 port->available_pbn = 0; 1196 port->available_pbn = 0;
1170 } 1197 }
1171 } 1198 }
@@ -1222,13 +1249,14 @@ static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
1222 struct drm_dp_mst_branch *found_mstb; 1249 struct drm_dp_mst_branch *found_mstb;
1223 struct drm_dp_mst_port *port; 1250 struct drm_dp_mst_port *port;
1224 1251
1252 if (memcmp(mstb->guid, guid, 16) == 0)
1253 return mstb;
1254
1255
1225 list_for_each_entry(port, &mstb->ports, next) { 1256 list_for_each_entry(port, &mstb->ports, next) {
1226 if (!port->mstb) 1257 if (!port->mstb)
1227 continue; 1258 continue;
1228 1259
1229 if (port->guid_valid && memcmp(port->guid, guid, 16) == 0)
1230 return port->mstb;
1231
1232 found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid); 1260 found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
1233 1261
1234 if (found_mstb) 1262 if (found_mstb)
@@ -1247,10 +1275,7 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid(
1247 /* find the port by iterating down */ 1275 /* find the port by iterating down */
1248 mutex_lock(&mgr->lock); 1276 mutex_lock(&mgr->lock);
1249 1277
1250 if (mgr->guid_valid && memcmp(mgr->guid, guid, 16) == 0) 1278 mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
1251 mstb = mgr->mst_primary;
1252 else
1253 mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
1254 1279
1255 if (mstb) 1280 if (mstb)
1256 kref_get(&mstb->kref); 1281 kref_get(&mstb->kref);
@@ -1555,6 +1580,9 @@ static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
1555 txmsg->reply.u.link_addr.ports[i].num_sdp_streams, 1580 txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
1556 txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks); 1581 txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
1557 } 1582 }
1583
1584 drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid);
1585
1558 for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) { 1586 for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
1559 drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]); 1587 drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
1560 } 1588 }
@@ -1602,6 +1630,37 @@ static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
1602 return 0; 1630 return 0;
1603} 1631}
1604 1632
1633static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
1634{
1635 if (!mstb->port_parent)
1636 return NULL;
1637
1638 if (mstb->port_parent->mstb != mstb)
1639 return mstb->port_parent;
1640
1641 return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
1642}
1643
1644static struct drm_dp_mst_branch *drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
1645 struct drm_dp_mst_branch *mstb,
1646 int *port_num)
1647{
1648 struct drm_dp_mst_branch *rmstb = NULL;
1649 struct drm_dp_mst_port *found_port;
1650 mutex_lock(&mgr->lock);
1651 if (mgr->mst_primary) {
1652 found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
1653
1654 if (found_port) {
1655 rmstb = found_port->parent;
1656 kref_get(&rmstb->kref);
1657 *port_num = found_port->port_num;
1658 }
1659 }
1660 mutex_unlock(&mgr->lock);
1661 return rmstb;
1662}
1663
1605static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr, 1664static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
1606 struct drm_dp_mst_port *port, 1665 struct drm_dp_mst_port *port,
1607 int id, 1666 int id,
@@ -1609,13 +1668,18 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
1609{ 1668{
1610 struct drm_dp_sideband_msg_tx *txmsg; 1669 struct drm_dp_sideband_msg_tx *txmsg;
1611 struct drm_dp_mst_branch *mstb; 1670 struct drm_dp_mst_branch *mstb;
1612 int len, ret; 1671 int len, ret, port_num;
1613 u8 sinks[DRM_DP_MAX_SDP_STREAMS]; 1672 u8 sinks[DRM_DP_MAX_SDP_STREAMS];
1614 int i; 1673 int i;
1615 1674
1675 port_num = port->port_num;
1616 mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent); 1676 mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
1617 if (!mstb) 1677 if (!mstb) {
1618 return -EINVAL; 1678 mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
1679
1680 if (!mstb)
1681 return -EINVAL;
1682 }
1619 1683
1620 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 1684 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
1621 if (!txmsg) { 1685 if (!txmsg) {
@@ -1627,7 +1691,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
1627 sinks[i] = i; 1691 sinks[i] = i;
1628 1692
1629 txmsg->dst = mstb; 1693 txmsg->dst = mstb;
1630 len = build_allocate_payload(txmsg, port->port_num, 1694 len = build_allocate_payload(txmsg, port_num,
1631 id, 1695 id,
1632 pbn, port->num_sdp_streams, sinks); 1696 pbn, port->num_sdp_streams, sinks);
1633 1697
@@ -1983,31 +2047,17 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
1983 mgr->mst_primary = mstb; 2047 mgr->mst_primary = mstb;
1984 kref_get(&mgr->mst_primary->kref); 2048 kref_get(&mgr->mst_primary->kref);
1985 2049
1986 {
1987 struct drm_dp_payload reset_pay;
1988 reset_pay.start_slot = 0;
1989 reset_pay.num_slots = 0x3f;
1990 drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
1991 }
1992
1993 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 2050 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
1994 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC); 2051 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
1995 if (ret < 0) { 2052 if (ret < 0) {
1996 goto out_unlock; 2053 goto out_unlock;
1997 } 2054 }
1998 2055
1999 2056 {
2000 /* sort out guid */ 2057 struct drm_dp_payload reset_pay;
2001 ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, mgr->guid, 16); 2058 reset_pay.start_slot = 0;
2002 if (ret != 16) { 2059 reset_pay.num_slots = 0x3f;
2003 DRM_DEBUG_KMS("failed to read DP GUID %d\n", ret); 2060 drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
2004 goto out_unlock;
2005 }
2006
2007 mgr->guid_valid = drm_dp_validate_guid(mgr, mgr->guid);
2008 if (!mgr->guid_valid) {
2009 ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, mgr->guid, 16);
2010 mgr->guid_valid = true;
2011 } 2061 }
2012 2062
2013 queue_work(system_long_wq, &mgr->work); 2063 queue_work(system_long_wq, &mgr->work);
@@ -2231,6 +2281,7 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
2231 } 2281 }
2232 2282
2233 drm_dp_update_port(mstb, &msg.u.conn_stat); 2283 drm_dp_update_port(mstb, &msg.u.conn_stat);
2284
2234 DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type); 2285 DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
2235 (*mgr->cbs->hotplug)(mgr); 2286 (*mgr->cbs->hotplug)(mgr);
2236 2287
@@ -2446,6 +2497,7 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp
2446 DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn); 2497 DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
2447 if (pbn == port->vcpi.pbn) { 2498 if (pbn == port->vcpi.pbn) {
2448 *slots = port->vcpi.num_slots; 2499 *slots = port->vcpi.num_slots;
2500 drm_dp_put_port(port);
2449 return true; 2501 return true;
2450 } 2502 }
2451 } 2503 }
@@ -2605,32 +2657,31 @@ EXPORT_SYMBOL(drm_dp_check_act_status);
2605 */ 2657 */
2606int drm_dp_calc_pbn_mode(int clock, int bpp) 2658int drm_dp_calc_pbn_mode(int clock, int bpp)
2607{ 2659{
2608 fixed20_12 pix_bw; 2660 u64 kbps;
2609 fixed20_12 fbpp; 2661 s64 peak_kbps;
2610 fixed20_12 result; 2662 u32 numerator;
2611 fixed20_12 margin, tmp; 2663 u32 denominator;
2612 u32 res; 2664
2613 2665 kbps = clock * bpp;
2614 pix_bw.full = dfixed_const(clock); 2666
2615 fbpp.full = dfixed_const(bpp); 2667 /*
2616 tmp.full = dfixed_const(8); 2668 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
2617 fbpp.full = dfixed_div(fbpp, tmp); 2669 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
2618 2670 * common multiplier to render an integer PBN for all link rate/lane
2619 result.full = dfixed_mul(pix_bw, fbpp); 2671 * counts combinations
2620 margin.full = dfixed_const(54); 2672 * calculate
2621 tmp.full = dfixed_const(64); 2673 * peak_kbps *= (1006/1000)
2622 margin.full = dfixed_div(margin, tmp); 2674 * peak_kbps *= (64/54)
2623 result.full = dfixed_div(result, margin); 2675 * peak_kbps *= 8 convert to bytes
2624 2676 */
2625 margin.full = dfixed_const(1006); 2677
2626 tmp.full = dfixed_const(1000); 2678 numerator = 64 * 1006;
2627 margin.full = dfixed_div(margin, tmp); 2679 denominator = 54 * 8 * 1000 * 1000;
2628 result.full = dfixed_mul(result, margin); 2680
2629 2681 kbps *= numerator;
2630 result.full = dfixed_div(result, tmp); 2682 peak_kbps = drm_fixp_from_fraction(kbps, denominator);
2631 result.full = dfixed_ceil(result); 2683
2632 res = dfixed_trunc(result); 2684 return drm_fixp2int_ceil(peak_kbps);
2633 return res;
2634} 2685}
2635EXPORT_SYMBOL(drm_dp_calc_pbn_mode); 2686EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
2636 2687
@@ -2638,11 +2689,23 @@ static int test_calc_pbn_mode(void)
2638{ 2689{
2639 int ret; 2690 int ret;
2640 ret = drm_dp_calc_pbn_mode(154000, 30); 2691 ret = drm_dp_calc_pbn_mode(154000, 30);
2641 if (ret != 689) 2692 if (ret != 689) {
 2693 DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
 2694 154000, 30, 689, ret);
2642 return -EINVAL; 2695 return -EINVAL;
2696 }
2643 ret = drm_dp_calc_pbn_mode(234000, 30); 2697 ret = drm_dp_calc_pbn_mode(234000, 30);
2644 if (ret != 1047) 2698 if (ret != 1047) {
2699 DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
2700 234000, 30, 1047, ret);
2701 return -EINVAL;
2702 }
2703 ret = drm_dp_calc_pbn_mode(297000, 24);
2704 if (ret != 1063) {
2705 DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
2706 297000, 24, 1063, ret);
2645 return -EINVAL; 2707 return -EINVAL;
2708 }
2646 return 0; 2709 return 0;
2647} 2710}
2648 2711
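
[Editor's note] The rewritten drm_dp_calc_pbn_mode() reduces to PBN = ceil(clock_kHz * bpp * 64 * 1006 / (54 * 8 * 1000 * 1000)): a 0.6% margin on top of the 54/64 MB/s PBN unit, with the /8 converting bits to bytes. A standalone check of the three self-test vectors above, written as plain userspace C rather than kernel code:

#include <stdint.h>
#include <stdio.h>

static int pbn(uint64_t clock_khz, uint64_t bpp)
{
	uint64_t num = clock_khz * bpp * 64 * 1006;
	uint64_t den = 54ULL * 8 * 1000 * 1000;

	return (int)((num + den - 1) / den);	/* round up */
}

int main(void)
{
	/* prints 689 1047 1063, matching test_calc_pbn_mode() */
	printf("%d %d %d\n", pbn(154000, 30), pbn(234000, 30), pbn(297000, 24));
	return 0;
}
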
@@ -2783,6 +2846,13 @@ static void drm_dp_tx_work(struct work_struct *work)
2783 mutex_unlock(&mgr->qlock); 2846 mutex_unlock(&mgr->qlock);
2784} 2847}
2785 2848
2849static void drm_dp_free_mst_port(struct kref *kref)
2850{
2851 struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
2852 kref_put(&port->parent->kref, drm_dp_free_mst_branch_device);
2853 kfree(port);
2854}
2855
2786static void drm_dp_destroy_connector_work(struct work_struct *work) 2856static void drm_dp_destroy_connector_work(struct work_struct *work)
2787{ 2857{
2788 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work); 2858 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
@@ -2803,13 +2873,22 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
2803 list_del(&port->next); 2873 list_del(&port->next);
2804 mutex_unlock(&mgr->destroy_connector_lock); 2874 mutex_unlock(&mgr->destroy_connector_lock);
2805 2875
2876 kref_init(&port->kref);
2877 INIT_LIST_HEAD(&port->next);
2878
2806 mgr->cbs->destroy_connector(mgr, port->connector); 2879 mgr->cbs->destroy_connector(mgr, port->connector);
2807 2880
2808 drm_dp_port_teardown_pdt(port, port->pdt); 2881 drm_dp_port_teardown_pdt(port, port->pdt);
2809 2882
2810 if (!port->input && port->vcpi.vcpi > 0) 2883 if (!port->input && port->vcpi.vcpi > 0) {
2811 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); 2884 if (mgr->mst_state) {
2812 kfree(port); 2885 drm_dp_mst_reset_vcpi_slots(mgr, port);
2886 drm_dp_update_payload_part1(mgr);
2887 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
2888 }
2889 }
2890
2891 kref_put(&port->kref, drm_dp_free_mst_port);
2813 send_hotplug = true; 2892 send_hotplug = true;
2814 } 2893 }
2815 if (send_hotplug) 2894 if (send_hotplug)
@@ -2847,6 +2926,9 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
2847 mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes; 2926 mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
2848 mgr->max_payloads = max_payloads; 2927 mgr->max_payloads = max_payloads;
2849 mgr->conn_base_id = conn_base_id; 2928 mgr->conn_base_id = conn_base_id;
2929 if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 ||
2930 max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8)
2931 return -EINVAL;
2850 mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL); 2932 mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
2851 if (!mgr->payloads) 2933 if (!mgr->payloads)
2852 return -ENOMEM; 2934 return -ENOMEM;
@@ -2854,7 +2936,9 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
2854 if (!mgr->proposed_vcpis) 2936 if (!mgr->proposed_vcpis)
2855 return -ENOMEM; 2937 return -ENOMEM;
2856 set_bit(0, &mgr->payload_mask); 2938 set_bit(0, &mgr->payload_mask);
2857 test_calc_pbn_mode(); 2939 if (test_calc_pbn_mode() < 0)
2940 DRM_ERROR("MST PBN self-test failed\n");
2941
2858 return 0; 2942 return 0;
2859} 2943}
2860EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init); 2944EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
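
[Editor's note] The new guard in drm_dp_mst_topology_mgr_init() rejects max_payloads values whose bits would not fit in the payload_mask/vcpi_mask bitmaps (assumed to be plain unsigned long fields in that era's struct, so at most BITS_PER_LONG ids including the reserved id 0). A reduced sketch of the check:

static int example_check_payloads(int max_payloads)
{
	unsigned long payload_mask = 0;

	/* id 0 is reserved, so max_payloads + 1 bits must fit */
	if (max_payloads + 1 > (int)(sizeof(payload_mask) * 8))
		return -EINVAL;

	set_bit(0, &payload_mask);	/* reserve id 0, as mgr_init does */
	return 0;
}
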
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index d12a4efa651b..1fe14579e8c9 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -224,6 +224,64 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
224 diff = (flags & DRM_CALLED_FROM_VBLIRQ) != 0; 224 diff = (flags & DRM_CALLED_FROM_VBLIRQ) != 0;
225 } 225 }
226 226
227 /*
228 * Within a drm_vblank_pre_modeset - drm_vblank_post_modeset
229 * interval? If so then vblank irqs keep running and it will likely
230 * happen that the hardware vblank counter is not trustworthy as it
231 * might reset at some point in that interval and vblank timestamps
232 * are not trustworthy either in that interval. Iow. this can result
233 * in a bogus diff >> 1 which must be avoided as it would cause
234 * random large forward jumps of the software vblank counter.
235 */
236 if (diff > 1 && (vblank->inmodeset & 0x2)) {
 237 DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diff=%u"
238 " due to pre-modeset.\n", pipe, diff);
239 diff = 1;
240 }
241
242 /*
 243 * FIXME: Need to replace this hack with proper seqlocks.
244 *
245 * Restrict the bump of the software vblank counter to a safe maximum
246 * value of +1 whenever there is the possibility that concurrent readers
247 * of vblank timestamps could be active at the moment, as the current
248 * implementation of the timestamp caching and updating is not safe
249 * against concurrent readers for calls to store_vblank() with a bump
250 * of anything but +1. A bump != 1 would very likely return corrupted
251 * timestamps to userspace, because the same slot in the cache could
252 * be concurrently written by store_vblank() and read by one of those
253 * readers without the read-retry logic detecting the collision.
254 *
255 * Concurrent readers can exist when we are called from the
256 * drm_vblank_off() or drm_vblank_on() functions and other non-vblank-
257 * irq callers. However, all those calls to us are happening with the
258 * vbl_lock locked to prevent drm_vblank_get(), so the vblank refcount
259 * can't increase while we are executing. Therefore a zero refcount at
260 * this point is safe for arbitrary counter bumps if we are called
261 * outside vblank irq, a non-zero count is not 100% safe. Unfortunately
262 * we must also accept a refcount of 1, as whenever we are called from
263 * drm_vblank_get() -> drm_vblank_enable() the refcount will be 1 and
264 * we must let that one pass through in order to not lose vblank counts
265 * during vblank irq off - which would completely defeat the whole
266 * point of this routine.
267 *
268 * Whenever we are called from vblank irq, we have to assume concurrent
269 * readers exist or can show up any time during our execution, even if
270 * the refcount is currently zero, as vblank irqs are usually only
271 * enabled due to the presence of readers, and because when we are called
272 * from vblank irq we can't hold the vbl_lock to protect us from sudden
273 * bumps in vblank refcount. Therefore also restrict bumps to +1 when
274 * called from vblank irq.
275 */
276 if ((diff > 1) && (atomic_read(&vblank->refcount) > 1 ||
277 (flags & DRM_CALLED_FROM_VBLIRQ))) {
 278 DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diff=%u "
279 "refcount %u, vblirq %u\n", pipe, diff,
280 atomic_read(&vblank->refcount),
281 (flags & DRM_CALLED_FROM_VBLIRQ) != 0);
282 diff = 1;
283 }
284
227 DRM_DEBUG_VBL("updating vblank count on crtc %u:" 285 DRM_DEBUG_VBL("updating vblank count on crtc %u:"
228 " current=%u, diff=%u, hw=%u hw_last=%u\n", 286 " current=%u, diff=%u, hw=%u hw_last=%u\n",
229 pipe, vblank->count, diff, cur_vblank, vblank->last); 287 pipe, vblank->count, diff, cur_vblank, vblank->last);
@@ -1316,7 +1374,13 @@ void drm_vblank_off(struct drm_device *dev, unsigned int pipe)
1316 spin_lock_irqsave(&dev->event_lock, irqflags); 1374 spin_lock_irqsave(&dev->event_lock, irqflags);
1317 1375
1318 spin_lock(&dev->vbl_lock); 1376 spin_lock(&dev->vbl_lock);
1319 vblank_disable_and_save(dev, pipe); 1377 DRM_DEBUG_VBL("crtc %d, vblank enabled %d, inmodeset %d\n",
1378 pipe, vblank->enabled, vblank->inmodeset);
1379
1380 /* Avoid redundant vblank disables without previous drm_vblank_on(). */
1381 if (drm_core_check_feature(dev, DRIVER_ATOMIC) || !vblank->inmodeset)
1382 vblank_disable_and_save(dev, pipe);
1383
1320 wake_up(&vblank->queue); 1384 wake_up(&vblank->queue);
1321 1385
1322 /* 1386 /*
@@ -1418,6 +1482,9 @@ void drm_vblank_on(struct drm_device *dev, unsigned int pipe)
1418 return; 1482 return;
1419 1483
1420 spin_lock_irqsave(&dev->vbl_lock, irqflags); 1484 spin_lock_irqsave(&dev->vbl_lock, irqflags);
1485 DRM_DEBUG_VBL("crtc %d, vblank enabled %d, inmodeset %d\n",
1486 pipe, vblank->enabled, vblank->inmodeset);
1487
1421 /* Drop our private "prevent drm_vblank_get" refcount */ 1488 /* Drop our private "prevent drm_vblank_get" refcount */
1422 if (vblank->inmodeset) { 1489 if (vblank->inmodeset) {
1423 atomic_dec(&vblank->refcount); 1490 atomic_dec(&vblank->refcount);
@@ -1430,8 +1497,7 @@ void drm_vblank_on(struct drm_device *dev, unsigned int pipe)
1430 * re-enable interrupts if there are users left, or the 1497 * re-enable interrupts if there are users left, or the
1431 * user wishes vblank interrupts to be enabled all the time. 1498 * user wishes vblank interrupts to be enabled all the time.
1432 */ 1499 */
1433 if (atomic_read(&vblank->refcount) != 0 || 1500 if (atomic_read(&vblank->refcount) != 0 || drm_vblank_offdelay == 0)
1434 (!dev->vblank_disable_immediate && drm_vblank_offdelay == 0))
1435 WARN_ON(drm_vblank_enable(dev, pipe)); 1501 WARN_ON(drm_vblank_enable(dev, pipe));
1436 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 1502 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
1437} 1503}
@@ -1526,6 +1592,7 @@ void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe)
1526 if (vblank->inmodeset) { 1592 if (vblank->inmodeset) {
1527 spin_lock_irqsave(&dev->vbl_lock, irqflags); 1593 spin_lock_irqsave(&dev->vbl_lock, irqflags);
1528 dev->vblank_disable_allowed = true; 1594 dev->vblank_disable_allowed = true;
1595 drm_reset_vblank_timestamp(dev, pipe);
1529 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 1596 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
1530 1597
1531 if (vblank->inmodeset & 0x2) 1598 if (vblank->inmodeset & 0x2)
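
A note on the drm_irq.c hunks above: when the update runs with more than one vblank reference held, or from the vblank interrupt itself (where readers can appear at any time), a hardware-reported jump larger than one is clamped to a single increment; drm_vblank_off() now skips the disable when no prior drm_vblank_on() armed it on non-atomic drivers, and drm_vblank_post_modeset() resets the vblank timestamp. The clamping rule in isolation, as a stand-alone sketch with illustrative names (not DRM API):

#include <stdbool.h>

/*
 * Mirror of the condition in the hunk above: with concurrent readers, or
 * when called from the vblank interrupt, never advance the software count
 * by more than one per update.
 */
static unsigned int clamp_vblank_bump(unsigned int diff,
				      unsigned int refcount,
				      bool called_from_vbl_irq)
{
	if (diff > 1 && (refcount > 1 || called_from_vbl_irq))
		return 1;
	return diff;
}
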
diff --git a/drivers/gpu/drm/etnaviv/common.xml.h b/drivers/gpu/drm/etnaviv/common.xml.h
index 9e585d51fb78..e881482b5971 100644
--- a/drivers/gpu/drm/etnaviv/common.xml.h
+++ b/drivers/gpu/drm/etnaviv/common.xml.h
@@ -8,8 +8,8 @@ http://0x04.net/cgit/index.cgi/rules-ng-ng
8git clone git://0x04.net/rules-ng-ng 8git clone git://0x04.net/rules-ng-ng
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- state_vg.xml ( 5973 bytes, from 2015-03-25 11:26:01) 11- state_hi.xml ( 24309 bytes, from 2015-12-12 09:02:53)
12- common.xml ( 18437 bytes, from 2015-03-25 11:27:41) 12- common.xml ( 18379 bytes, from 2015-12-12 09:02:53)
13 13
14Copyright (C) 2015 14Copyright (C) 2015
15*/ 15*/
@@ -30,15 +30,19 @@ Copyright (C) 2015
30#define ENDIAN_MODE_NO_SWAP 0x00000000 30#define ENDIAN_MODE_NO_SWAP 0x00000000
31#define ENDIAN_MODE_SWAP_16 0x00000001 31#define ENDIAN_MODE_SWAP_16 0x00000001
32#define ENDIAN_MODE_SWAP_32 0x00000002 32#define ENDIAN_MODE_SWAP_32 0x00000002
33#define chipModel_GC200 0x00000200
33#define chipModel_GC300 0x00000300 34#define chipModel_GC300 0x00000300
34#define chipModel_GC320 0x00000320 35#define chipModel_GC320 0x00000320
36#define chipModel_GC328 0x00000328
35#define chipModel_GC350 0x00000350 37#define chipModel_GC350 0x00000350
36#define chipModel_GC355 0x00000355 38#define chipModel_GC355 0x00000355
37#define chipModel_GC400 0x00000400 39#define chipModel_GC400 0x00000400
38#define chipModel_GC410 0x00000410 40#define chipModel_GC410 0x00000410
39#define chipModel_GC420 0x00000420 41#define chipModel_GC420 0x00000420
42#define chipModel_GC428 0x00000428
40#define chipModel_GC450 0x00000450 43#define chipModel_GC450 0x00000450
41#define chipModel_GC500 0x00000500 44#define chipModel_GC500 0x00000500
45#define chipModel_GC520 0x00000520
42#define chipModel_GC530 0x00000530 46#define chipModel_GC530 0x00000530
43#define chipModel_GC600 0x00000600 47#define chipModel_GC600 0x00000600
44#define chipModel_GC700 0x00000700 48#define chipModel_GC700 0x00000700
@@ -46,9 +50,16 @@ Copyright (C) 2015
46#define chipModel_GC860 0x00000860 50#define chipModel_GC860 0x00000860
47#define chipModel_GC880 0x00000880 51#define chipModel_GC880 0x00000880
48#define chipModel_GC1000 0x00001000 52#define chipModel_GC1000 0x00001000
53#define chipModel_GC1500 0x00001500
49#define chipModel_GC2000 0x00002000 54#define chipModel_GC2000 0x00002000
50#define chipModel_GC2100 0x00002100 55#define chipModel_GC2100 0x00002100
56#define chipModel_GC2200 0x00002200
57#define chipModel_GC2500 0x00002500
58#define chipModel_GC3000 0x00003000
51#define chipModel_GC4000 0x00004000 59#define chipModel_GC4000 0x00004000
60#define chipModel_GC5000 0x00005000
61#define chipModel_GC5200 0x00005200
62#define chipModel_GC6400 0x00006400
52#define RGBA_BITS_R 0x00000001 63#define RGBA_BITS_R 0x00000001
53#define RGBA_BITS_G 0x00000002 64#define RGBA_BITS_G 0x00000002
54#define RGBA_BITS_B 0x00000004 65#define RGBA_BITS_B 0x00000004
@@ -160,7 +171,7 @@ Copyright (C) 2015
160#define chipMinorFeatures2_UNK8 0x00000100 171#define chipMinorFeatures2_UNK8 0x00000100
161#define chipMinorFeatures2_UNK9 0x00000200 172#define chipMinorFeatures2_UNK9 0x00000200
162#define chipMinorFeatures2_UNK10 0x00000400 173#define chipMinorFeatures2_UNK10 0x00000400
163#define chipMinorFeatures2_SAMPLERBASE_16 0x00000800 174#define chipMinorFeatures2_HALTI1 0x00000800
164#define chipMinorFeatures2_UNK12 0x00001000 175#define chipMinorFeatures2_UNK12 0x00001000
165#define chipMinorFeatures2_UNK13 0x00002000 176#define chipMinorFeatures2_UNK13 0x00002000
166#define chipMinorFeatures2_UNK14 0x00004000 177#define chipMinorFeatures2_UNK14 0x00004000
@@ -189,7 +200,7 @@ Copyright (C) 2015
189#define chipMinorFeatures3_UNK5 0x00000020 200#define chipMinorFeatures3_UNK5 0x00000020
190#define chipMinorFeatures3_UNK6 0x00000040 201#define chipMinorFeatures3_UNK6 0x00000040
191#define chipMinorFeatures3_UNK7 0x00000080 202#define chipMinorFeatures3_UNK7 0x00000080
192#define chipMinorFeatures3_UNK8 0x00000100 203#define chipMinorFeatures3_FAST_MSAA 0x00000100
193#define chipMinorFeatures3_UNK9 0x00000200 204#define chipMinorFeatures3_UNK9 0x00000200
194#define chipMinorFeatures3_BUG_FIXES10 0x00000400 205#define chipMinorFeatures3_BUG_FIXES10 0x00000400
195#define chipMinorFeatures3_UNK11 0x00000800 206#define chipMinorFeatures3_UNK11 0x00000800
@@ -199,7 +210,7 @@ Copyright (C) 2015
199#define chipMinorFeatures3_UNK15 0x00008000 210#define chipMinorFeatures3_UNK15 0x00008000
200#define chipMinorFeatures3_UNK16 0x00010000 211#define chipMinorFeatures3_UNK16 0x00010000
201#define chipMinorFeatures3_UNK17 0x00020000 212#define chipMinorFeatures3_UNK17 0x00020000
202#define chipMinorFeatures3_UNK18 0x00040000 213#define chipMinorFeatures3_ACE 0x00040000
203#define chipMinorFeatures3_UNK19 0x00080000 214#define chipMinorFeatures3_UNK19 0x00080000
204#define chipMinorFeatures3_UNK20 0x00100000 215#define chipMinorFeatures3_UNK20 0x00100000
205#define chipMinorFeatures3_UNK21 0x00200000 216#define chipMinorFeatures3_UNK21 0x00200000
@@ -207,7 +218,7 @@ Copyright (C) 2015
207#define chipMinorFeatures3_UNK23 0x00800000 218#define chipMinorFeatures3_UNK23 0x00800000
208#define chipMinorFeatures3_UNK24 0x01000000 219#define chipMinorFeatures3_UNK24 0x01000000
209#define chipMinorFeatures3_UNK25 0x02000000 220#define chipMinorFeatures3_UNK25 0x02000000
210#define chipMinorFeatures3_UNK26 0x04000000 221#define chipMinorFeatures3_NEW_HZ 0x04000000
211#define chipMinorFeatures3_UNK27 0x08000000 222#define chipMinorFeatures3_UNK27 0x08000000
212#define chipMinorFeatures3_UNK28 0x10000000 223#define chipMinorFeatures3_UNK28 0x10000000
213#define chipMinorFeatures3_UNK29 0x20000000 224#define chipMinorFeatures3_UNK29 0x20000000
@@ -229,9 +240,9 @@ Copyright (C) 2015
229#define chipMinorFeatures4_UNK13 0x00002000 240#define chipMinorFeatures4_UNK13 0x00002000
230#define chipMinorFeatures4_UNK14 0x00004000 241#define chipMinorFeatures4_UNK14 0x00004000
231#define chipMinorFeatures4_UNK15 0x00008000 242#define chipMinorFeatures4_UNK15 0x00008000
232#define chipMinorFeatures4_UNK16 0x00010000 243#define chipMinorFeatures4_HALTI2 0x00010000
233#define chipMinorFeatures4_UNK17 0x00020000 244#define chipMinorFeatures4_UNK17 0x00020000
234#define chipMinorFeatures4_UNK18 0x00040000 245#define chipMinorFeatures4_SMALL_MSAA 0x00040000
235#define chipMinorFeatures4_UNK19 0x00080000 246#define chipMinorFeatures4_UNK19 0x00080000
236#define chipMinorFeatures4_UNK20 0x00100000 247#define chipMinorFeatures4_UNK20 0x00100000
237#define chipMinorFeatures4_UNK21 0x00200000 248#define chipMinorFeatures4_UNK21 0x00200000
@@ -245,5 +256,37 @@ Copyright (C) 2015
245#define chipMinorFeatures4_UNK29 0x20000000 256#define chipMinorFeatures4_UNK29 0x20000000
246#define chipMinorFeatures4_UNK30 0x40000000 257#define chipMinorFeatures4_UNK30 0x40000000
247#define chipMinorFeatures4_UNK31 0x80000000 258#define chipMinorFeatures4_UNK31 0x80000000
259#define chipMinorFeatures5_UNK0 0x00000001
260#define chipMinorFeatures5_UNK1 0x00000002
261#define chipMinorFeatures5_UNK2 0x00000004
262#define chipMinorFeatures5_UNK3 0x00000008
263#define chipMinorFeatures5_UNK4 0x00000010
264#define chipMinorFeatures5_UNK5 0x00000020
265#define chipMinorFeatures5_UNK6 0x00000040
266#define chipMinorFeatures5_UNK7 0x00000080
267#define chipMinorFeatures5_UNK8 0x00000100
268#define chipMinorFeatures5_HALTI3 0x00000200
269#define chipMinorFeatures5_UNK10 0x00000400
270#define chipMinorFeatures5_UNK11 0x00000800
271#define chipMinorFeatures5_UNK12 0x00001000
272#define chipMinorFeatures5_UNK13 0x00002000
273#define chipMinorFeatures5_UNK14 0x00004000
274#define chipMinorFeatures5_UNK15 0x00008000
275#define chipMinorFeatures5_UNK16 0x00010000
276#define chipMinorFeatures5_UNK17 0x00020000
277#define chipMinorFeatures5_UNK18 0x00040000
278#define chipMinorFeatures5_UNK19 0x00080000
279#define chipMinorFeatures5_UNK20 0x00100000
280#define chipMinorFeatures5_UNK21 0x00200000
281#define chipMinorFeatures5_UNK22 0x00400000
282#define chipMinorFeatures5_UNK23 0x00800000
283#define chipMinorFeatures5_UNK24 0x01000000
284#define chipMinorFeatures5_UNK25 0x02000000
285#define chipMinorFeatures5_UNK26 0x04000000
286#define chipMinorFeatures5_UNK27 0x08000000
287#define chipMinorFeatures5_UNK28 0x10000000
288#define chipMinorFeatures5_UNK29 0x20000000
289#define chipMinorFeatures5_UNK30 0x40000000
290#define chipMinorFeatures5_UNK31 0x80000000
248 291
249#endif /* COMMON_XML */ 292#endif /* COMMON_XML */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 5c89ebb52fd2..e8858985f01e 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -668,7 +668,6 @@ static struct platform_driver etnaviv_platform_driver = {
668 .probe = etnaviv_pdev_probe, 668 .probe = etnaviv_pdev_probe,
669 .remove = etnaviv_pdev_remove, 669 .remove = etnaviv_pdev_remove,
670 .driver = { 670 .driver = {
671 .owner = THIS_MODULE,
672 .name = "etnaviv", 671 .name = "etnaviv",
673 .of_match_table = dt_match, 672 .of_match_table = dt_match,
674 }, 673 },
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
index d6bd438bd5be..1cd6046e76b1 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -85,7 +85,7 @@ struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
85 struct dma_buf_attachment *attach, struct sg_table *sg); 85 struct dma_buf_attachment *attach, struct sg_table *sg);
86int etnaviv_gem_prime_pin(struct drm_gem_object *obj); 86int etnaviv_gem_prime_pin(struct drm_gem_object *obj);
87void etnaviv_gem_prime_unpin(struct drm_gem_object *obj); 87void etnaviv_gem_prime_unpin(struct drm_gem_object *obj);
88void *etnaviv_gem_vaddr(struct drm_gem_object *obj); 88void *etnaviv_gem_vmap(struct drm_gem_object *obj);
89int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op, 89int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
90 struct timespec *timeout); 90 struct timespec *timeout);
91int etnaviv_gem_cpu_fini(struct drm_gem_object *obj); 91int etnaviv_gem_cpu_fini(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
index bf8fa859e8be..4a29eeadbf1e 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
@@ -201,7 +201,9 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
201 201
202 obj = vram->object; 202 obj = vram->object;
203 203
204 mutex_lock(&obj->lock);
204 pages = etnaviv_gem_get_pages(obj); 205 pages = etnaviv_gem_get_pages(obj);
206 mutex_unlock(&obj->lock);
205 if (pages) { 207 if (pages) {
206 int j; 208 int j;
207 209
@@ -213,8 +215,8 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
213 215
214 iter.hdr->iova = cpu_to_le64(vram->iova); 216 iter.hdr->iova = cpu_to_le64(vram->iova);
215 217
216 vaddr = etnaviv_gem_vaddr(&obj->base); 218 vaddr = etnaviv_gem_vmap(&obj->base);
217 if (vaddr && !IS_ERR(vaddr)) 219 if (vaddr)
218 memcpy(iter.data, vaddr, obj->base.size); 220 memcpy(iter.data, vaddr, obj->base.size);
219 221
220 etnaviv_core_dump_header(&iter, ETDUMP_BUF_BO, iter.data + 222 etnaviv_core_dump_header(&iter, ETDUMP_BUF_BO, iter.data +
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 9f77c3b94cc6..4b519e4309b2 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -353,25 +353,39 @@ void etnaviv_gem_put_iova(struct etnaviv_gpu *gpu, struct drm_gem_object *obj)
353 drm_gem_object_unreference_unlocked(obj); 353 drm_gem_object_unreference_unlocked(obj);
354} 354}
355 355
356void *etnaviv_gem_vaddr(struct drm_gem_object *obj) 356void *etnaviv_gem_vmap(struct drm_gem_object *obj)
357{ 357{
358 struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); 358 struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
359 359
360 mutex_lock(&etnaviv_obj->lock); 360 if (etnaviv_obj->vaddr)
361 if (!etnaviv_obj->vaddr) { 361 return etnaviv_obj->vaddr;
362 struct page **pages = etnaviv_gem_get_pages(etnaviv_obj);
363
364 if (IS_ERR(pages))
365 return ERR_CAST(pages);
366 362
367 etnaviv_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT, 363 mutex_lock(&etnaviv_obj->lock);
368 VM_MAP, pgprot_writecombine(PAGE_KERNEL)); 364 /*
369 } 365 * Need to check again, as we might have raced with another thread
366 * while waiting for the mutex.
367 */
368 if (!etnaviv_obj->vaddr)
369 etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
370 mutex_unlock(&etnaviv_obj->lock); 370 mutex_unlock(&etnaviv_obj->lock);
371 371
372 return etnaviv_obj->vaddr; 372 return etnaviv_obj->vaddr;
373} 373}
374 374
375static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
376{
377 struct page **pages;
378
379 lockdep_assert_held(&obj->lock);
380
381 pages = etnaviv_gem_get_pages(obj);
382 if (IS_ERR(pages))
383 return NULL;
384
385 return vmap(pages, obj->base.size >> PAGE_SHIFT,
386 VM_MAP, pgprot_writecombine(PAGE_KERNEL));
387}
388
375static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op) 389static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
376{ 390{
377 if (op & ETNA_PREP_READ) 391 if (op & ETNA_PREP_READ)
@@ -522,6 +536,7 @@ static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
522static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = { 536static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
523 .get_pages = etnaviv_gem_shmem_get_pages, 537 .get_pages = etnaviv_gem_shmem_get_pages,
524 .release = etnaviv_gem_shmem_release, 538 .release = etnaviv_gem_shmem_release,
539 .vmap = etnaviv_gem_vmap_impl,
525}; 540};
526 541
527void etnaviv_gem_free_object(struct drm_gem_object *obj) 542void etnaviv_gem_free_object(struct drm_gem_object *obj)
@@ -866,6 +881,7 @@ static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
866static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = { 881static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
867 .get_pages = etnaviv_gem_userptr_get_pages, 882 .get_pages = etnaviv_gem_userptr_get_pages,
868 .release = etnaviv_gem_userptr_release, 883 .release = etnaviv_gem_userptr_release,
884 .vmap = etnaviv_gem_vmap_impl,
869}; 885};
870 886
871int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file, 887int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
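
The etnaviv_gem_vmap() rework above is a double-checked locking pattern: an unlocked fast path returns an existing mapping, and the check is repeated under obj->lock before the per-object ops->vmap backend is invoked, so two racing callers cannot both create a mapping. A userspace sketch of the same shape using pthreads; the struct and function names are illustrative, and a strictly conforming C11 version would make the fast-path pointer read atomic:

#include <pthread.h>
#include <stddef.h>

struct mapped_obj {
	pthread_mutex_t lock;
	void *vaddr;
	void *(*do_map)(struct mapped_obj *obj);  /* per-type backend, like ops->vmap */
};

static void *obj_vmap(struct mapped_obj *obj)
{
	/* Fast path: the mapping already exists, no lock taken. */
	if (obj->vaddr)
		return obj->vaddr;

	pthread_mutex_lock(&obj->lock);
	/* Re-check: another thread may have mapped it while we waited. */
	if (!obj->vaddr)
		obj->vaddr = obj->do_map(obj);
	pthread_mutex_unlock(&obj->lock);

	return obj->vaddr;
}
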
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index a300b4b3d545..ab5df8147a5f 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -78,6 +78,7 @@ struct etnaviv_gem_object *to_etnaviv_bo(struct drm_gem_object *obj)
78struct etnaviv_gem_ops { 78struct etnaviv_gem_ops {
79 int (*get_pages)(struct etnaviv_gem_object *); 79 int (*get_pages)(struct etnaviv_gem_object *);
80 void (*release)(struct etnaviv_gem_object *); 80 void (*release)(struct etnaviv_gem_object *);
81 void *(*vmap)(struct etnaviv_gem_object *);
81}; 82};
82 83
83static inline bool is_active(struct etnaviv_gem_object *etnaviv_obj) 84static inline bool is_active(struct etnaviv_gem_object *etnaviv_obj)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
index e94db4f95770..4e67395f5fa1 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -31,7 +31,7 @@ struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj)
31 31
32void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj) 32void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj)
33{ 33{
34 return etnaviv_gem_vaddr(obj); 34 return etnaviv_gem_vmap(obj);
35} 35}
36 36
37void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) 37void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
@@ -77,9 +77,17 @@ static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj)
77 drm_prime_gem_destroy(&etnaviv_obj->base, etnaviv_obj->sgt); 77 drm_prime_gem_destroy(&etnaviv_obj->base, etnaviv_obj->sgt);
78} 78}
79 79
80static void *etnaviv_gem_prime_vmap_impl(struct etnaviv_gem_object *etnaviv_obj)
81{
82 lockdep_assert_held(&etnaviv_obj->lock);
83
84 return dma_buf_vmap(etnaviv_obj->base.import_attach->dmabuf);
85}
86
80static const struct etnaviv_gem_ops etnaviv_gem_prime_ops = { 87static const struct etnaviv_gem_ops etnaviv_gem_prime_ops = {
81 /* .get_pages should never be called */ 88 /* .get_pages should never be called */
82 .release = etnaviv_gem_prime_release, 89 .release = etnaviv_gem_prime_release,
90 .vmap = etnaviv_gem_prime_vmap_impl,
83}; 91};
84 92
85struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev, 93struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 056a72e6ed26..a33162cf4f4c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -72,6 +72,14 @@ int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
72 *value = gpu->identity.minor_features3; 72 *value = gpu->identity.minor_features3;
73 break; 73 break;
74 74
75 case ETNAVIV_PARAM_GPU_FEATURES_5:
76 *value = gpu->identity.minor_features4;
77 break;
78
79 case ETNAVIV_PARAM_GPU_FEATURES_6:
80 *value = gpu->identity.minor_features5;
81 break;
82
75 case ETNAVIV_PARAM_GPU_STREAM_COUNT: 83 case ETNAVIV_PARAM_GPU_STREAM_COUNT:
76 *value = gpu->identity.stream_count; 84 *value = gpu->identity.stream_count;
77 break; 85 break;
@@ -112,6 +120,10 @@ int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
112 *value = gpu->identity.num_constants; 120 *value = gpu->identity.num_constants;
113 break; 121 break;
114 122
123 case ETNAVIV_PARAM_GPU_NUM_VARYINGS:
124 *value = gpu->identity.varyings_count;
125 break;
126
115 default: 127 default:
116 DBG("%s: invalid param: %u", dev_name(gpu->dev), param); 128 DBG("%s: invalid param: %u", dev_name(gpu->dev), param);
117 return -EINVAL; 129 return -EINVAL;
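
The new GET_PARAM cases above (FEATURES_5, FEATURES_6 and NUM_VARYINGS) are plain reads of the identity fields filled in during probe. A hedged userspace sketch of querying one of them; the header path and the use of plain ioctl() rather than drmIoctl() depend on the local libdrm setup, and error handling is trimmed:

#include <stdio.h>
#include <sys/ioctl.h>
#include <drm/etnaviv_drm.h>	/* UAPI header; the install path may differ */

static void print_num_varyings(int drm_fd)
{
	struct drm_etnaviv_param req = {
		.pipe = 0,	/* 3D pipe */
		.param = ETNAVIV_PARAM_GPU_NUM_VARYINGS,
	};

	if (ioctl(drm_fd, DRM_IOCTL_ETNAVIV_GET_PARAM, &req) == 0)
		printf("varyings: %llu\n", (unsigned long long)req.value);
}
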
@@ -120,46 +132,56 @@ int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
120 return 0; 132 return 0;
121} 133}
122 134
135
136#define etnaviv_is_model_rev(gpu, mod, rev) \
137 ((gpu)->identity.model == chipModel_##mod && \
138 (gpu)->identity.revision == rev)
139#define etnaviv_field(val, field) \
140 (((val) & field##__MASK) >> field##__SHIFT)
141
123static void etnaviv_hw_specs(struct etnaviv_gpu *gpu) 142static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
124{ 143{
125 if (gpu->identity.minor_features0 & 144 if (gpu->identity.minor_features0 &
126 chipMinorFeatures0_MORE_MINOR_FEATURES) { 145 chipMinorFeatures0_MORE_MINOR_FEATURES) {
127 u32 specs[2]; 146 u32 specs[4];
147 unsigned int streams;
128 148
129 specs[0] = gpu_read(gpu, VIVS_HI_CHIP_SPECS); 149 specs[0] = gpu_read(gpu, VIVS_HI_CHIP_SPECS);
130 specs[1] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_2); 150 specs[1] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_2);
131 151 specs[2] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_3);
132 gpu->identity.stream_count = 152 specs[3] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_4);
133 (specs[0] & VIVS_HI_CHIP_SPECS_STREAM_COUNT__MASK) 153
134 >> VIVS_HI_CHIP_SPECS_STREAM_COUNT__SHIFT; 154 gpu->identity.stream_count = etnaviv_field(specs[0],
135 gpu->identity.register_max = 155 VIVS_HI_CHIP_SPECS_STREAM_COUNT);
136 (specs[0] & VIVS_HI_CHIP_SPECS_REGISTER_MAX__MASK) 156 gpu->identity.register_max = etnaviv_field(specs[0],
137 >> VIVS_HI_CHIP_SPECS_REGISTER_MAX__SHIFT; 157 VIVS_HI_CHIP_SPECS_REGISTER_MAX);
138 gpu->identity.thread_count = 158 gpu->identity.thread_count = etnaviv_field(specs[0],
139 (specs[0] & VIVS_HI_CHIP_SPECS_THREAD_COUNT__MASK) 159 VIVS_HI_CHIP_SPECS_THREAD_COUNT);
140 >> VIVS_HI_CHIP_SPECS_THREAD_COUNT__SHIFT; 160 gpu->identity.vertex_cache_size = etnaviv_field(specs[0],
141 gpu->identity.vertex_cache_size = 161 VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE);
142 (specs[0] & VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__MASK) 162 gpu->identity.shader_core_count = etnaviv_field(specs[0],
143 >> VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__SHIFT; 163 VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT);
144 gpu->identity.shader_core_count = 164 gpu->identity.pixel_pipes = etnaviv_field(specs[0],
145 (specs[0] & VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__MASK) 165 VIVS_HI_CHIP_SPECS_PIXEL_PIPES);
146 >> VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__SHIFT;
147 gpu->identity.pixel_pipes =
148 (specs[0] & VIVS_HI_CHIP_SPECS_PIXEL_PIPES__MASK)
149 >> VIVS_HI_CHIP_SPECS_PIXEL_PIPES__SHIFT;
150 gpu->identity.vertex_output_buffer_size = 166 gpu->identity.vertex_output_buffer_size =
151 (specs[0] & VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__MASK) 167 etnaviv_field(specs[0],
152 >> VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__SHIFT; 168 VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE);
153 169
154 gpu->identity.buffer_size = 170 gpu->identity.buffer_size = etnaviv_field(specs[1],
155 (specs[1] & VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__MASK) 171 VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE);
156 >> VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__SHIFT; 172 gpu->identity.instruction_count = etnaviv_field(specs[1],
157 gpu->identity.instruction_count = 173 VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT);
158 (specs[1] & VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__MASK) 174 gpu->identity.num_constants = etnaviv_field(specs[1],
159 >> VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__SHIFT; 175 VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS);
160 gpu->identity.num_constants = 176
161 (specs[1] & VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__MASK) 177 gpu->identity.varyings_count = etnaviv_field(specs[2],
162 >> VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__SHIFT; 178 VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT);
179
180 /* This overrides the value from older register if non-zero */
181 streams = etnaviv_field(specs[3],
182 VIVS_HI_CHIP_SPECS_4_STREAM_COUNT);
183 if (streams)
184 gpu->identity.stream_count = streams;
163 } 185 }
164 186
165 /* Fill in the stream count if not specified */ 187 /* Fill in the stream count if not specified */
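
The etnaviv_field() macro introduced above replaces each hand-written mask-and-shift pair by token-pasting the __MASK and __SHIFT suffixes onto the field name. A stand-alone expansion with a made-up field shows exactly what it does:

/* Illustrative field, following the generated-header naming convention. */
#define EXAMPLE_FIELD__MASK	0x000001f0
#define EXAMPLE_FIELD__SHIFT	4

#define example_field(val, field) \
	(((val) & field##__MASK) >> field##__SHIFT)

/* example_field(0x123, EXAMPLE_FIELD) expands to
 * ((0x123 & 0x000001f0) >> 4), which evaluates to 0x12. */
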
@@ -173,7 +195,7 @@ static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
173 /* Convert the register max value */ 195 /* Convert the register max value */
174 if (gpu->identity.register_max) 196 if (gpu->identity.register_max)
175 gpu->identity.register_max = 1 << gpu->identity.register_max; 197 gpu->identity.register_max = 1 << gpu->identity.register_max;
176 else if (gpu->identity.model == 0x0400) 198 else if (gpu->identity.model == chipModel_GC400)
177 gpu->identity.register_max = 32; 199 gpu->identity.register_max = 32;
178 else 200 else
179 gpu->identity.register_max = 64; 201 gpu->identity.register_max = 64;
@@ -181,10 +203,10 @@ static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
181 /* Convert thread count */ 203 /* Convert thread count */
182 if (gpu->identity.thread_count) 204 if (gpu->identity.thread_count)
183 gpu->identity.thread_count = 1 << gpu->identity.thread_count; 205 gpu->identity.thread_count = 1 << gpu->identity.thread_count;
184 else if (gpu->identity.model == 0x0400) 206 else if (gpu->identity.model == chipModel_GC400)
185 gpu->identity.thread_count = 64; 207 gpu->identity.thread_count = 64;
186 else if (gpu->identity.model == 0x0500 || 208 else if (gpu->identity.model == chipModel_GC500 ||
187 gpu->identity.model == 0x0530) 209 gpu->identity.model == chipModel_GC530)
188 gpu->identity.thread_count = 128; 210 gpu->identity.thread_count = 128;
189 else 211 else
190 gpu->identity.thread_count = 256; 212 gpu->identity.thread_count = 256;
@@ -206,7 +228,7 @@ static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
206 if (gpu->identity.vertex_output_buffer_size) { 228 if (gpu->identity.vertex_output_buffer_size) {
207 gpu->identity.vertex_output_buffer_size = 229 gpu->identity.vertex_output_buffer_size =
208 1 << gpu->identity.vertex_output_buffer_size; 230 1 << gpu->identity.vertex_output_buffer_size;
209 } else if (gpu->identity.model == 0x0400) { 231 } else if (gpu->identity.model == chipModel_GC400) {
210 if (gpu->identity.revision < 0x4000) 232 if (gpu->identity.revision < 0x4000)
211 gpu->identity.vertex_output_buffer_size = 512; 233 gpu->identity.vertex_output_buffer_size = 512;
212 else if (gpu->identity.revision < 0x4200) 234 else if (gpu->identity.revision < 0x4200)
@@ -219,9 +241,8 @@ static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
219 241
220 switch (gpu->identity.instruction_count) { 242 switch (gpu->identity.instruction_count) {
221 case 0: 243 case 0:
222 if ((gpu->identity.model == 0x2000 && 244 if (etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
223 gpu->identity.revision == 0x5108) || 245 gpu->identity.model == chipModel_GC880)
224 gpu->identity.model == 0x880)
225 gpu->identity.instruction_count = 512; 246 gpu->identity.instruction_count = 512;
226 else 247 else
227 gpu->identity.instruction_count = 256; 248 gpu->identity.instruction_count = 256;
@@ -242,6 +263,30 @@ static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
242 263
243 if (gpu->identity.num_constants == 0) 264 if (gpu->identity.num_constants == 0)
244 gpu->identity.num_constants = 168; 265 gpu->identity.num_constants = 168;
266
267 if (gpu->identity.varyings_count == 0) {
268 if (gpu->identity.minor_features1 & chipMinorFeatures1_HALTI0)
269 gpu->identity.varyings_count = 12;
270 else
271 gpu->identity.varyings_count = 8;
272 }
273
274 /*
275 * For some cores, two varyings are consumed for position, so the
276 * maximum varying count needs to be reduced by one.
277 */
278 if (etnaviv_is_model_rev(gpu, GC5000, 0x5434) ||
279 etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
280 etnaviv_is_model_rev(gpu, GC4000, 0x5245) ||
281 etnaviv_is_model_rev(gpu, GC4000, 0x5208) ||
282 etnaviv_is_model_rev(gpu, GC3000, 0x5435) ||
283 etnaviv_is_model_rev(gpu, GC2200, 0x5244) ||
284 etnaviv_is_model_rev(gpu, GC2100, 0x5108) ||
285 etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
286 etnaviv_is_model_rev(gpu, GC1500, 0x5246) ||
287 etnaviv_is_model_rev(gpu, GC880, 0x5107) ||
288 etnaviv_is_model_rev(gpu, GC880, 0x5106))
289 gpu->identity.varyings_count -= 1;
245} 290}
246 291
247static void etnaviv_hw_identify(struct etnaviv_gpu *gpu) 292static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
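
The varyings fixup above first derives a default (12 with HALTI0, 8 otherwise) and then subtracts one on the listed model/revision pairs, because those cores spend an extra varying slot on the position output. Reduced to its essence, with an illustrative helper name and flag:

static unsigned int usable_varyings(unsigned int reported,
				    int position_takes_two_slots)
{
	/* Cores that burn two slots on position expose one fewer varying. */
	return position_takes_two_slots ? reported - 1 : reported;
}
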
@@ -251,12 +296,10 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
251 chipIdentity = gpu_read(gpu, VIVS_HI_CHIP_IDENTITY); 296 chipIdentity = gpu_read(gpu, VIVS_HI_CHIP_IDENTITY);
252 297
253 /* Special case for older graphic cores. */ 298 /* Special case for older graphic cores. */
254 if (((chipIdentity & VIVS_HI_CHIP_IDENTITY_FAMILY__MASK) 299 if (etnaviv_field(chipIdentity, VIVS_HI_CHIP_IDENTITY_FAMILY) == 0x01) {
255 >> VIVS_HI_CHIP_IDENTITY_FAMILY__SHIFT) == 0x01) { 300 gpu->identity.model = chipModel_GC500;
256 gpu->identity.model = 0x500; /* gc500 */ 301 gpu->identity.revision = etnaviv_field(chipIdentity,
257 gpu->identity.revision = 302 VIVS_HI_CHIP_IDENTITY_REVISION);
258 (chipIdentity & VIVS_HI_CHIP_IDENTITY_REVISION__MASK)
259 >> VIVS_HI_CHIP_IDENTITY_REVISION__SHIFT;
260 } else { 303 } else {
261 304
262 gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL); 305 gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL);
@@ -269,13 +312,12 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
269 * same. Only for GC400 family. 312 * same. Only for GC400 family.
270 */ 313 */
271 if ((gpu->identity.model & 0xff00) == 0x0400 && 314 if ((gpu->identity.model & 0xff00) == 0x0400 &&
272 gpu->identity.model != 0x0420) { 315 gpu->identity.model != chipModel_GC420) {
273 gpu->identity.model = gpu->identity.model & 0x0400; 316 gpu->identity.model = gpu->identity.model & 0x0400;
274 } 317 }
275 318
276 /* Another special case */ 319 /* Another special case */
277 if (gpu->identity.model == 0x300 && 320 if (etnaviv_is_model_rev(gpu, GC300, 0x2201)) {
278 gpu->identity.revision == 0x2201) {
279 u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE); 321 u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE);
280 u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME); 322 u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME);
281 323
@@ -295,11 +337,13 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
295 gpu->identity.features = gpu_read(gpu, VIVS_HI_CHIP_FEATURE); 337 gpu->identity.features = gpu_read(gpu, VIVS_HI_CHIP_FEATURE);
296 338
297 /* Disable fast clear on GC700. */ 339 /* Disable fast clear on GC700. */
298 if (gpu->identity.model == 0x700) 340 if (gpu->identity.model == chipModel_GC700)
299 gpu->identity.features &= ~chipFeatures_FAST_CLEAR; 341 gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
300 342
301 if ((gpu->identity.model == 0x500 && gpu->identity.revision < 2) || 343 if ((gpu->identity.model == chipModel_GC500 &&
302 (gpu->identity.model == 0x300 && gpu->identity.revision < 0x2000)) { 344 gpu->identity.revision < 2) ||
345 (gpu->identity.model == chipModel_GC300 &&
346 gpu->identity.revision < 0x2000)) {
303 347
304 /* 348 /*
305 * GC500 rev 1.x and GC300 rev < 2.0 doesn't have these 349 * GC500 rev 1.x and GC300 rev < 2.0 doesn't have these
@@ -309,6 +353,8 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
309 gpu->identity.minor_features1 = 0; 353 gpu->identity.minor_features1 = 0;
310 gpu->identity.minor_features2 = 0; 354 gpu->identity.minor_features2 = 0;
311 gpu->identity.minor_features3 = 0; 355 gpu->identity.minor_features3 = 0;
356 gpu->identity.minor_features4 = 0;
357 gpu->identity.minor_features5 = 0;
312 } else 358 } else
313 gpu->identity.minor_features0 = 359 gpu->identity.minor_features0 =
314 gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_0); 360 gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_0);
@@ -321,6 +367,10 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
321 gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_2); 367 gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_2);
322 gpu->identity.minor_features3 = 368 gpu->identity.minor_features3 =
323 gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_3); 369 gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_3);
370 gpu->identity.minor_features4 =
371 gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_4);
372 gpu->identity.minor_features5 =
373 gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_5);
324 } 374 }
325 375
326 /* GC600 idle register reports zero bits where modules aren't present */ 376 /* GC600 idle register reports zero bits where modules aren't present */
@@ -441,10 +491,9 @@ static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
441{ 491{
442 u16 prefetch; 492 u16 prefetch;
443 493
444 if (gpu->identity.model == chipModel_GC320 && 494 if ((etnaviv_is_model_rev(gpu, GC320, 0x5007) ||
445 gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400 && 495 etnaviv_is_model_rev(gpu, GC320, 0x5220)) &&
446 (gpu->identity.revision == 0x5007 || 496 gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400) {
447 gpu->identity.revision == 0x5220)) {
448 u32 mc_memory_debug; 497 u32 mc_memory_debug;
449 498
450 mc_memory_debug = gpu_read(gpu, VIVS_MC_DEBUG_MEMORY) & ~0xff; 499 mc_memory_debug = gpu_read(gpu, VIVS_MC_DEBUG_MEMORY) & ~0xff;
@@ -466,7 +515,7 @@ static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
466 VIVS_HI_AXI_CONFIG_ARCACHE(2)); 515 VIVS_HI_AXI_CONFIG_ARCACHE(2));
467 516
468 /* GC2000 rev 5108 needs a special bus config */ 517 /* GC2000 rev 5108 needs a special bus config */
469 if (gpu->identity.model == 0x2000 && gpu->identity.revision == 0x5108) { 518 if (etnaviv_is_model_rev(gpu, GC2000, 0x5108)) {
470 u32 bus_config = gpu_read(gpu, VIVS_MC_BUS_CONFIG); 519 u32 bus_config = gpu_read(gpu, VIVS_MC_BUS_CONFIG);
471 bus_config &= ~(VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK | 520 bus_config &= ~(VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK |
472 VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK); 521 VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK);
@@ -511,8 +560,16 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
511 560
512 if (gpu->identity.model == 0) { 561 if (gpu->identity.model == 0) {
513 dev_err(gpu->dev, "Unknown GPU model\n"); 562 dev_err(gpu->dev, "Unknown GPU model\n");
514 pm_runtime_put_autosuspend(gpu->dev); 563 ret = -ENXIO;
515 return -ENXIO; 564 goto fail;
565 }
566
567 /* Exclude VG cores with FE2.0 */
568 if (gpu->identity.features & chipFeatures_PIPE_VG &&
569 gpu->identity.features & chipFeatures_FE20) {
570 dev_info(gpu->dev, "Ignoring GPU with VG and FE2.0\n");
571 ret = -ENXIO;
572 goto fail;
516 } 573 }
517 574
518 ret = etnaviv_hw_reset(gpu); 575 ret = etnaviv_hw_reset(gpu);
@@ -539,10 +596,9 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
539 goto fail; 596 goto fail;
540 } 597 }
541 598
542 /* TODO: we will leak here memory - fix it! */
543
544 gpu->mmu = etnaviv_iommu_new(gpu, iommu, version); 599 gpu->mmu = etnaviv_iommu_new(gpu, iommu, version);
545 if (!gpu->mmu) { 600 if (!gpu->mmu) {
601 iommu_domain_free(iommu);
546 ret = -ENOMEM; 602 ret = -ENOMEM;
547 goto fail; 603 goto fail;
548 } 604 }
@@ -552,7 +608,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
552 if (!gpu->buffer) { 608 if (!gpu->buffer) {
553 ret = -ENOMEM; 609 ret = -ENOMEM;
554 dev_err(gpu->dev, "could not create command buffer\n"); 610 dev_err(gpu->dev, "could not create command buffer\n");
555 goto fail; 611 goto destroy_iommu;
556 } 612 }
557 if (gpu->buffer->paddr - gpu->memory_base > 0x80000000) { 613 if (gpu->buffer->paddr - gpu->memory_base > 0x80000000) {
558 ret = -EINVAL; 614 ret = -EINVAL;
@@ -582,6 +638,9 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
582free_buffer: 638free_buffer:
583 etnaviv_gpu_cmdbuf_free(gpu->buffer); 639 etnaviv_gpu_cmdbuf_free(gpu->buffer);
584 gpu->buffer = NULL; 640 gpu->buffer = NULL;
641destroy_iommu:
642 etnaviv_iommu_destroy(gpu->mmu);
643 gpu->mmu = NULL;
585fail: 644fail:
586 pm_runtime_mark_last_busy(gpu->dev); 645 pm_runtime_mark_last_busy(gpu->dev);
587 pm_runtime_put_autosuspend(gpu->dev); 646 pm_runtime_put_autosuspend(gpu->dev);
@@ -642,6 +701,10 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
642 gpu->identity.minor_features2); 701 gpu->identity.minor_features2);
643 seq_printf(m, "\t minor_features3: 0x%08x\n", 702 seq_printf(m, "\t minor_features3: 0x%08x\n",
644 gpu->identity.minor_features3); 703 gpu->identity.minor_features3);
704 seq_printf(m, "\t minor_features4: 0x%08x\n",
705 gpu->identity.minor_features4);
706 seq_printf(m, "\t minor_features5: 0x%08x\n",
707 gpu->identity.minor_features5);
645 708
646 seq_puts(m, "\tspecs\n"); 709 seq_puts(m, "\tspecs\n");
647 seq_printf(m, "\t stream_count: %d\n", 710 seq_printf(m, "\t stream_count: %d\n",
@@ -664,6 +727,8 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
664 gpu->identity.instruction_count); 727 gpu->identity.instruction_count);
665 seq_printf(m, "\t num_constants: %d\n", 728 seq_printf(m, "\t num_constants: %d\n",
666 gpu->identity.num_constants); 729 gpu->identity.num_constants);
730 seq_printf(m, "\t varyings_count: %d\n",
731 gpu->identity.varyings_count);
667 732
668 seq_printf(m, "\taxi: 0x%08x\n", axi); 733 seq_printf(m, "\taxi: 0x%08x\n", axi);
669 seq_printf(m, "\tidle: 0x%08x\n", idle); 734 seq_printf(m, "\tidle: 0x%08x\n", idle);
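
Besides rejecting VG-only cores with an FE2.0 front end, the init changes above close the leak flagged by the removed TODO: a failed etnaviv_iommu_new() now frees the bare IOMMU domain, and a later command-buffer failure unwinds through the new destroy_iommu label. A compressed, self-contained sketch of that unwind ordering; the malloc()/free() stand-ins are purely illustrative:

#include <stdlib.h>
#include <errno.h>

struct init_sketch {
	void *domain;	/* stands in for the iommu domain */
	void *mmu;	/* stands in for the etnaviv MMU, which owns the domain */
	void *buffer;	/* stands in for the ring buffer */
};

static int gpu_init_sketch(struct init_sketch *s)
{
	s->domain = malloc(1);
	if (!s->domain)
		return -ENOMEM;

	s->mmu = malloc(1);
	if (!s->mmu) {
		free(s->domain);	/* iommu_domain_free() in the driver */
		return -ENOMEM;
	}

	s->buffer = malloc(1);
	if (!s->buffer)
		goto destroy_mmu;

	return 0;

destroy_mmu:
	/*
	 * etnaviv_iommu_destroy() in the driver; the domain is freed
	 * explicitly here only because this sketch's "MMU" does not
	 * actually own it.
	 */
	free(s->mmu);
	free(s->domain);
	s->mmu = NULL;
	return -ENOMEM;
}
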
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index c75d50359ab0..f233ac4c7c1c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -46,6 +46,12 @@ struct etnaviv_chip_identity {
46 /* Supported minor feature 3 fields. */ 46 /* Supported minor feature 3 fields. */
47 u32 minor_features3; 47 u32 minor_features3;
48 48
49 /* Supported minor feature 4 fields. */
50 u32 minor_features4;
51
52 /* Supported minor feature 5 fields. */
53 u32 minor_features5;
54
49 /* Number of streams supported. */ 55 /* Number of streams supported. */
50 u32 stream_count; 56 u32 stream_count;
51 57
@@ -75,6 +81,9 @@ struct etnaviv_chip_identity {
75 81
76 /* Buffer size */ 82 /* Buffer size */
77 u32 buffer_size; 83 u32 buffer_size;
84
85 /* Number of varyings */
86 u8 varyings_count;
78}; 87};
79 88
80struct etnaviv_event { 89struct etnaviv_event {
diff --git a/drivers/gpu/drm/etnaviv/state_hi.xml.h b/drivers/gpu/drm/etnaviv/state_hi.xml.h
index 0064f2640396..6a7de5f1454a 100644
--- a/drivers/gpu/drm/etnaviv/state_hi.xml.h
+++ b/drivers/gpu/drm/etnaviv/state_hi.xml.h
@@ -8,8 +8,8 @@ http://0x04.net/cgit/index.cgi/rules-ng-ng
8git clone git://0x04.net/rules-ng-ng 8git clone git://0x04.net/rules-ng-ng
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- state_hi.xml ( 23420 bytes, from 2015-03-25 11:47:21) 11- state_hi.xml ( 24309 bytes, from 2015-12-12 09:02:53)
12- common.xml ( 18437 bytes, from 2015-03-25 11:27:41) 12- common.xml ( 18437 bytes, from 2015-12-12 09:02:53)
13 13
14Copyright (C) 2015 14Copyright (C) 2015
15*/ 15*/
@@ -182,8 +182,25 @@ Copyright (C) 2015
182 182
183#define VIVS_HI_CHIP_MINOR_FEATURE_3 0x00000088 183#define VIVS_HI_CHIP_MINOR_FEATURE_3 0x00000088
184 184
185#define VIVS_HI_CHIP_SPECS_3 0x0000008c
186#define VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT__MASK 0x000001f0
187#define VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT__SHIFT 4
188#define VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT(x) (((x) << VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT__MASK)
189#define VIVS_HI_CHIP_SPECS_3_GPU_CORE_COUNT__MASK 0x00000007
190#define VIVS_HI_CHIP_SPECS_3_GPU_CORE_COUNT__SHIFT 0
191#define VIVS_HI_CHIP_SPECS_3_GPU_CORE_COUNT(x) (((x) << VIVS_HI_CHIP_SPECS_3_GPU_CORE_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_3_GPU_CORE_COUNT__MASK)
192
185#define VIVS_HI_CHIP_MINOR_FEATURE_4 0x00000094 193#define VIVS_HI_CHIP_MINOR_FEATURE_4 0x00000094
186 194
195#define VIVS_HI_CHIP_SPECS_4 0x0000009c
196#define VIVS_HI_CHIP_SPECS_4_STREAM_COUNT__MASK 0x0001f000
197#define VIVS_HI_CHIP_SPECS_4_STREAM_COUNT__SHIFT 12
198#define VIVS_HI_CHIP_SPECS_4_STREAM_COUNT(x) (((x) << VIVS_HI_CHIP_SPECS_4_STREAM_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_4_STREAM_COUNT__MASK)
199
200#define VIVS_HI_CHIP_MINOR_FEATURE_5 0x000000a0
201
202#define VIVS_HI_CHIP_PRODUCT_ID 0x000000a8
203
187#define VIVS_PM 0x00000000 204#define VIVS_PM 0x00000000
188 205
189#define VIVS_PM_POWER_CONTROLS 0x00000100 206#define VIVS_PM_POWER_CONTROLS 0x00000100
@@ -206,6 +223,11 @@ Copyright (C) 2015
206#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_FE 0x00000001 223#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_FE 0x00000001
207#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_DE 0x00000002 224#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_DE 0x00000002
208#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_PE 0x00000004 225#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_PE 0x00000004
226#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_SH 0x00000008
227#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_PA 0x00000010
228#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_SE 0x00000020
229#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_RA 0x00000040
230#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_TX 0x00000080
209 231
210#define VIVS_PM_PULSE_EATER 0x0000010c 232#define VIVS_PM_PULSE_EATER 0x0000010c
211 233
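
The new SPECS_3 and SPECS_4 registers above carry the varyings count, the GPU core count and a wider stream count; as the etnaviv_gpu.c hunk shows, a non-zero SPECS_4 stream count takes precedence over the value from the older SPECS register. A decoding sketch using only the masks and shifts defined above (variable names are illustrative):

/* specs3 and specs4 are raw reads of VIVS_HI_CHIP_SPECS_3 and _4. */
static void decode_new_specs(unsigned int specs3, unsigned int specs4,
			     unsigned int *varyings, unsigned int *streams)
{
	*varyings = (specs3 & VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT__MASK)
		    >> VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT__SHIFT;

	/* Only trust the newer register when it reports something. */
	if (specs4 & VIVS_HI_CHIP_SPECS_4_STREAM_COUNT__MASK)
		*streams = (specs4 & VIVS_HI_CHIP_SPECS_4_STREAM_COUNT__MASK)
			   >> VIVS_HI_CHIP_SPECS_4_STREAM_COUNT__SHIFT;
}
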
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 83efca941388..f17d39279596 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -1,6 +1,6 @@
1config DRM_EXYNOS 1config DRM_EXYNOS
2 tristate "DRM Support for Samsung SoC EXYNOS Series" 2 tristate "DRM Support for Samsung SoC EXYNOS Series"
3 depends on OF && DRM && (PLAT_SAMSUNG || ARCH_MULTIPLATFORM) 3 depends on OF && DRM && (ARCH_S3C64XX || ARCH_EXYNOS || ARCH_MULTIPLATFORM)
4 select DRM_KMS_HELPER 4 select DRM_KMS_HELPER
5 select DRM_KMS_FB_HELPER 5 select DRM_KMS_FB_HELPER
6 select FB_CFB_FILLRECT 6 select FB_CFB_FILLRECT
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 1bf6a21130c7..162ab93e99cb 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -93,7 +93,7 @@ static int decon_enable_vblank(struct exynos_drm_crtc *crtc)
93 if (test_bit(BIT_SUSPENDED, &ctx->flags)) 93 if (test_bit(BIT_SUSPENDED, &ctx->flags))
94 return -EPERM; 94 return -EPERM;
95 95
96 if (test_and_set_bit(BIT_IRQS_ENABLED, &ctx->flags)) { 96 if (!test_and_set_bit(BIT_IRQS_ENABLED, &ctx->flags)) {
97 val = VIDINTCON0_INTEN; 97 val = VIDINTCON0_INTEN;
98 if (ctx->out_type == IFTYPE_I80) 98 if (ctx->out_type == IFTYPE_I80)
99 val |= VIDINTCON0_FRAMEDONE; 99 val |= VIDINTCON0_FRAMEDONE;
@@ -402,8 +402,6 @@ static void decon_enable(struct exynos_drm_crtc *crtc)
402 decon_enable_vblank(ctx->crtc); 402 decon_enable_vblank(ctx->crtc);
403 403
404 decon_commit(ctx->crtc); 404 decon_commit(ctx->crtc);
405
406 set_bit(BIT_SUSPENDED, &ctx->flags);
407} 405}
408 406
409static void decon_disable(struct exynos_drm_crtc *crtc) 407static void decon_disable(struct exynos_drm_crtc *crtc)
@@ -582,9 +580,9 @@ out:
582static int exynos5433_decon_suspend(struct device *dev) 580static int exynos5433_decon_suspend(struct device *dev)
583{ 581{
584 struct decon_context *ctx = dev_get_drvdata(dev); 582 struct decon_context *ctx = dev_get_drvdata(dev);
585 int i; 583 int i = ARRAY_SIZE(decon_clks_name);
586 584
587 for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++) 585 while (--i >= 0)
588 clk_disable_unprepare(ctx->clks[i]); 586 clk_disable_unprepare(ctx->clks[i]);
589 587
590 return 0; 588 return 0;
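
The decon fix above hinges on test_and_set_bit() returning the previous bit value: the hardware should be programmed by the caller that finds the bit clear, hence the added negation. The suspend path also releases the clocks in reverse order now, and decon_enable() no longer re-marks the context as suspended. A userspace illustration of the test-and-set semantics with C11 atomics (names illustrative):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_flag irqs_enabled = ATOMIC_FLAG_INIT;

/*
 * Returns true only for the first caller, which must program the hardware;
 * atomic_flag_test_and_set(), like test_and_set_bit(), reports the old state.
 */
static bool enable_vblank_once(void (*hw_enable)(void))
{
	if (!atomic_flag_test_and_set(&irqs_enabled)) {
		hw_enable();
		return true;
	}
	return false;
}
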
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
index b79c316c2ad2..673164b331c8 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.c
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.c
@@ -1392,7 +1392,7 @@ static const struct component_ops exynos_dp_ops = {
1392static int exynos_dp_probe(struct platform_device *pdev) 1392static int exynos_dp_probe(struct platform_device *pdev)
1393{ 1393{
1394 struct device *dev = &pdev->dev; 1394 struct device *dev = &pdev->dev;
1395 struct device_node *panel_node = NULL, *bridge_node, *endpoint = NULL; 1395 struct device_node *np = NULL, *endpoint = NULL;
1396 struct exynos_dp_device *dp; 1396 struct exynos_dp_device *dp;
1397 int ret; 1397 int ret;
1398 1398
@@ -1404,41 +1404,36 @@ static int exynos_dp_probe(struct platform_device *pdev)
1404 platform_set_drvdata(pdev, dp); 1404 platform_set_drvdata(pdev, dp);
1405 1405
1406 /* This is for the backward compatibility. */ 1406 /* This is for the backward compatibility. */
1407 panel_node = of_parse_phandle(dev->of_node, "panel", 0); 1407 np = of_parse_phandle(dev->of_node, "panel", 0);
1408 if (panel_node) { 1408 if (np) {
1409 dp->panel = of_drm_find_panel(panel_node); 1409 dp->panel = of_drm_find_panel(np);
1410 of_node_put(panel_node); 1410 of_node_put(np);
1411 if (!dp->panel) 1411 if (!dp->panel)
1412 return -EPROBE_DEFER; 1412 return -EPROBE_DEFER;
1413 } else {
1414 endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
1415 if (endpoint) {
1416 panel_node = of_graph_get_remote_port_parent(endpoint);
1417 if (panel_node) {
1418 dp->panel = of_drm_find_panel(panel_node);
1419 of_node_put(panel_node);
1420 if (!dp->panel)
1421 return -EPROBE_DEFER;
1422 } else {
1423 DRM_ERROR("no port node for panel device.\n");
1424 return -EINVAL;
1425 }
1426 }
1427 }
1428
1429 if (endpoint)
1430 goto out; 1413 goto out;
1414 }
1431 1415
1432 endpoint = of_graph_get_next_endpoint(dev->of_node, NULL); 1416 endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
1433 if (endpoint) { 1417 if (endpoint) {
1434 bridge_node = of_graph_get_remote_port_parent(endpoint); 1418 np = of_graph_get_remote_port_parent(endpoint);
1435 if (bridge_node) { 1419 if (np) {
1436 dp->ptn_bridge = of_drm_find_bridge(bridge_node); 1420 /* The remote port can be either a panel or a bridge */
1437 of_node_put(bridge_node); 1421 dp->panel = of_drm_find_panel(np);
1438 if (!dp->ptn_bridge) 1422 if (!dp->panel) {
1439 return -EPROBE_DEFER; 1423 dp->ptn_bridge = of_drm_find_bridge(np);
1440 } else 1424 if (!dp->ptn_bridge) {
1441 return -EPROBE_DEFER; 1425 of_node_put(np);
1426 return -EPROBE_DEFER;
1427 }
1428 }
1429 of_node_put(np);
1430 } else {
1431 DRM_ERROR("no remote endpoint device node found.\n");
1432 return -EINVAL;
1433 }
1434 } else {
1435 DRM_ERROR("no port endpoint subnode found.\n");
1436 return -EINVAL;
1442 } 1437 }
1443 1438
1444out: 1439out:
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index d84a498ef099..26e81d191f56 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1782,6 +1782,7 @@ static int exynos_dsi_bind(struct device *dev, struct device *master,
1782 1782
1783 bridge = of_drm_find_bridge(dsi->bridge_node); 1783 bridge = of_drm_find_bridge(dsi->bridge_node);
1784 if (bridge) { 1784 if (bridge) {
1785 encoder->bridge = bridge;
1785 drm_bridge_attach(drm_dev, bridge); 1786 drm_bridge_attach(drm_dev, bridge);
1786 } 1787 }
1787 1788
@@ -1906,8 +1907,7 @@ static int exynos_dsi_remove(struct platform_device *pdev)
1906 return 0; 1907 return 0;
1907} 1908}
1908 1909
1909#ifdef CONFIG_PM 1910static int __maybe_unused exynos_dsi_suspend(struct device *dev)
1910static int exynos_dsi_suspend(struct device *dev)
1911{ 1911{
1912 struct drm_encoder *encoder = dev_get_drvdata(dev); 1912 struct drm_encoder *encoder = dev_get_drvdata(dev);
1913 struct exynos_dsi *dsi = encoder_to_dsi(encoder); 1913 struct exynos_dsi *dsi = encoder_to_dsi(encoder);
@@ -1938,7 +1938,7 @@ static int exynos_dsi_suspend(struct device *dev)
1938 return 0; 1938 return 0;
1939} 1939}
1940 1940
1941static int exynos_dsi_resume(struct device *dev) 1941static int __maybe_unused exynos_dsi_resume(struct device *dev)
1942{ 1942{
1943 struct drm_encoder *encoder = dev_get_drvdata(dev); 1943 struct drm_encoder *encoder = dev_get_drvdata(dev);
1944 struct exynos_dsi *dsi = encoder_to_dsi(encoder); 1944 struct exynos_dsi *dsi = encoder_to_dsi(encoder);
@@ -1972,7 +1972,6 @@ err_clk:
1972 1972
1973 return ret; 1973 return ret;
1974} 1974}
1975#endif
1976 1975
1977static const struct dev_pm_ops exynos_dsi_pm_ops = { 1976static const struct dev_pm_ops exynos_dsi_pm_ops = {
1978 SET_RUNTIME_PM_OPS(exynos_dsi_suspend, exynos_dsi_resume, NULL) 1977 SET_RUNTIME_PM_OPS(exynos_dsi_suspend, exynos_dsi_resume, NULL)
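
Replacing the CONFIG_PM #ifdef with __maybe_unused keeps the suspend/resume callbacks compiling in every configuration, so they cannot silently bit-rot, while the compiler still drops them when SET_RUNTIME_PM_OPS() compiles out the references. A minimal stand-alone illustration; in the kernel the attribute is spelled __maybe_unused and comes from <linux/compiler.h>, it is defined locally here only to keep the snippet self-contained:

#define maybe_unused __attribute__((unused))

static int maybe_unused dsi_suspend_sketch(void *dev)
{
	/*
	 * Still parsed and type-checked in every build, but quietly
	 * discarded when nothing references it.
	 */
	(void)dev;
	return 0;
}
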
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index f6118baa8e3e..8baabd813ff5 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -50,7 +50,7 @@ static int exynos_drm_fb_mmap(struct fb_info *info,
50 if (vm_size > exynos_gem->size) 50 if (vm_size > exynos_gem->size)
51 return -EINVAL; 51 return -EINVAL;
52 52
53 ret = dma_mmap_attrs(helper->dev->dev, vma, exynos_gem->pages, 53 ret = dma_mmap_attrs(helper->dev->dev, vma, exynos_gem->cookie,
54 exynos_gem->dma_addr, exynos_gem->size, 54 exynos_gem->dma_addr, exynos_gem->size,
55 &exynos_gem->dma_attrs); 55 &exynos_gem->dma_attrs);
56 if (ret < 0) { 56 if (ret < 0) {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index c747824f3c98..8a4f4a0211d0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1723,7 +1723,7 @@ static int fimc_probe(struct platform_device *pdev)
1723 goto err_put_clk; 1723 goto err_put_clk;
1724 } 1724 }
1725 1725
1726 DRM_DEBUG_KMS("id[%d]ippdrv[0x%x]\n", ctx->id, (int)ippdrv); 1726 DRM_DEBUG_KMS("id[%d]ippdrv[%p]\n", ctx->id, ippdrv);
1727 1727
1728 spin_lock_init(&ctx->lock); 1728 spin_lock_init(&ctx->lock);
1729 platform_set_drvdata(pdev, ctx); 1729 platform_set_drvdata(pdev, ctx);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index c17efdb238a6..8dfe6e113a88 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -1166,7 +1166,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
1166 goto err_free_event; 1166 goto err_free_event;
1167 } 1167 }
1168 1168
1169 cmd = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd; 1169 cmd = (struct drm_exynos_g2d_cmd *)(unsigned long)req->cmd;
1170 1170
1171 if (copy_from_user(cmdlist->data + cmdlist->last, 1171 if (copy_from_user(cmdlist->data + cmdlist->last,
1172 (void __user *)cmd, 1172 (void __user *)cmd,
@@ -1184,7 +1184,8 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
1184 if (req->cmd_buf_nr) { 1184 if (req->cmd_buf_nr) {
1185 struct drm_exynos_g2d_cmd *cmd_buf; 1185 struct drm_exynos_g2d_cmd *cmd_buf;
1186 1186
1187 cmd_buf = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_buf; 1187 cmd_buf = (struct drm_exynos_g2d_cmd *)
1188 (unsigned long)req->cmd_buf;
1188 1189
1189 if (copy_from_user(cmdlist->data + cmdlist->last, 1190 if (copy_from_user(cmdlist->data + cmdlist->last,
1190 (void __user *)cmd_buf, 1191 (void __user *)cmd_buf,
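
The g2d change above fixes a pointer-truncation bug: req->cmd and req->cmd_buf are __u64 fields carrying user pointers, and casting them through uint32_t discards the upper half on 64-bit kernels. The safe idiom is to widen through unsigned long or uintptr_t, as in this stand-alone helper (the name is illustrative, not a kernel API):

#include <stdint.h>

/*
 * Convert a 64-bit ioctl field holding a user pointer back to a pointer
 * without losing the upper 32 bits on LP64 targets.
 */
static inline void *u64_to_ptr_sketch(uint64_t value)
{
	return (void *)(uintptr_t)value;
}
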
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 32358c5e3db4..26b5e4bd55b6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -218,7 +218,7 @@ static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
218 return ERR_PTR(ret); 218 return ERR_PTR(ret);
219 } 219 }
220 220
221 DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp); 221 DRM_DEBUG_KMS("created file object = %p\n", obj->filp);
222 222
223 return exynos_gem; 223 return exynos_gem;
224} 224}
@@ -335,7 +335,7 @@ static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
335 if (vm_size > exynos_gem->size) 335 if (vm_size > exynos_gem->size)
336 return -EINVAL; 336 return -EINVAL;
337 337
338 ret = dma_mmap_attrs(drm_dev->dev, vma, exynos_gem->pages, 338 ret = dma_mmap_attrs(drm_dev->dev, vma, exynos_gem->cookie,
339 exynos_gem->dma_addr, exynos_gem->size, 339 exynos_gem->dma_addr, exynos_gem->size,
340 &exynos_gem->dma_attrs); 340 &exynos_gem->dma_attrs);
341 if (ret < 0) { 341 if (ret < 0) {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 7aecd23cfa11..5d20da8f957e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -1723,7 +1723,7 @@ static int gsc_probe(struct platform_device *pdev)
1723 return ret; 1723 return ret;
1724 } 1724 }
1725 1725
1726 DRM_DEBUG_KMS("id[%d]ippdrv[0x%x]\n", ctx->id, (int)ippdrv); 1726 DRM_DEBUG_KMS("id[%d]ippdrv[%p]\n", ctx->id, ippdrv);
1727 1727
1728 mutex_init(&ctx->lock); 1728 mutex_init(&ctx->lock);
1729 platform_set_drvdata(pdev, ctx); 1729 platform_set_drvdata(pdev, ctx);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index 67d24236e745..95eeb9116f10 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -208,7 +208,7 @@ static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
208 * e.g PAUSE state, queue buf, command control. 208 * e.g PAUSE state, queue buf, command control.
209 */ 209 */
210 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) { 210 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
211 DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n", count++, (int)ippdrv); 211 DRM_DEBUG_KMS("count[%d]ippdrv[%p]\n", count++, ippdrv);
212 212
213 mutex_lock(&ippdrv->cmd_lock); 213 mutex_lock(&ippdrv->cmd_lock);
214 list_for_each_entry(c_node, &ippdrv->cmd_list, list) { 214 list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
@@ -388,8 +388,8 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
388 } 388 }
389 property->prop_id = ret; 389 property->prop_id = ret;
390 390
391 DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[0x%x]\n", 391 DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[%p]\n",
392 property->prop_id, property->cmd, (int)ippdrv); 392 property->prop_id, property->cmd, ippdrv);
393 393
394 /* stored property information and ippdrv in private data */ 394 /* stored property information and ippdrv in private data */
395 c_node->property = *property; 395 c_node->property = *property;
@@ -518,7 +518,7 @@ static int ipp_put_mem_node(struct drm_device *drm_dev,
518{ 518{
519 int i; 519 int i;
520 520
521 DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node); 521 DRM_DEBUG_KMS("node[%p]\n", m_node);
522 522
523 if (!m_node) { 523 if (!m_node) {
524 DRM_ERROR("invalid dequeue node.\n"); 524 DRM_ERROR("invalid dequeue node.\n");
@@ -562,7 +562,7 @@ static struct drm_exynos_ipp_mem_node
562 m_node->buf_id = qbuf->buf_id; 562 m_node->buf_id = qbuf->buf_id;
563 INIT_LIST_HEAD(&m_node->list); 563 INIT_LIST_HEAD(&m_node->list);
564 564
565 DRM_DEBUG_KMS("m_node[0x%x]ops_id[%d]\n", (int)m_node, qbuf->ops_id); 565 DRM_DEBUG_KMS("m_node[%p]ops_id[%d]\n", m_node, qbuf->ops_id);
566 DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id); 566 DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id);
567 567
568 for_each_ipp_planar(i) { 568 for_each_ipp_planar(i) {
@@ -582,8 +582,8 @@ static struct drm_exynos_ipp_mem_node
582 582
583 buf_info->handles[i] = qbuf->handle[i]; 583 buf_info->handles[i] = qbuf->handle[i];
584 buf_info->base[i] = *addr; 584 buf_info->base[i] = *addr;
585 DRM_DEBUG_KMS("i[%d]base[0x%x]hd[0x%lx]\n", i, 585 DRM_DEBUG_KMS("i[%d]base[%pad]hd[0x%lx]\n", i,
586 buf_info->base[i], buf_info->handles[i]); 586 &buf_info->base[i], buf_info->handles[i]);
587 } 587 }
588 } 588 }
589 589
@@ -664,7 +664,7 @@ static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
664 664
665 mutex_lock(&c_node->event_lock); 665 mutex_lock(&c_node->event_lock);
666 list_for_each_entry_safe(e, te, &c_node->event_list, base.link) { 666 list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
667 DRM_DEBUG_KMS("count[%d]e[0x%x]\n", count++, (int)e); 667 DRM_DEBUG_KMS("count[%d]e[%p]\n", count++, e);
668 668
669 /* 669 /*
670 * qbuf == NULL condition means all event deletion. 670 * qbuf == NULL condition means all event deletion.
@@ -755,7 +755,7 @@ static struct drm_exynos_ipp_mem_node
755 755
756 /* find memory node from memory list */ 756 /* find memory node from memory list */
757 list_for_each_entry(m_node, head, list) { 757 list_for_each_entry(m_node, head, list) {
758 DRM_DEBUG_KMS("count[%d]m_node[0x%x]\n", count++, (int)m_node); 758 DRM_DEBUG_KMS("count[%d]m_node[%p]\n", count++, m_node);
759 759
760 /* compare buffer id */ 760 /* compare buffer id */
761 if (m_node->buf_id == qbuf->buf_id) 761 if (m_node->buf_id == qbuf->buf_id)
@@ -772,7 +772,7 @@ static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
772 struct exynos_drm_ipp_ops *ops = NULL; 772 struct exynos_drm_ipp_ops *ops = NULL;
773 int ret = 0; 773 int ret = 0;
774 774
775 DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node); 775 DRM_DEBUG_KMS("node[%p]\n", m_node);
776 776
777 if (!m_node) { 777 if (!m_node) {
778 DRM_ERROR("invalid queue node.\n"); 778 DRM_ERROR("invalid queue node.\n");
@@ -1237,7 +1237,7 @@ static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
1237 m_node = list_first_entry(head, 1237 m_node = list_first_entry(head,
1238 struct drm_exynos_ipp_mem_node, list); 1238 struct drm_exynos_ipp_mem_node, list);
1239 1239
1240 DRM_DEBUG_KMS("m_node[0x%x]\n", (int)m_node); 1240 DRM_DEBUG_KMS("m_node[%p]\n", m_node);
1241 1241
1242 ret = ipp_set_mem_node(ippdrv, c_node, m_node); 1242 ret = ipp_set_mem_node(ippdrv, c_node, m_node);
1243 if (ret) { 1243 if (ret) {
@@ -1610,8 +1610,8 @@ static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
1610 } 1610 }
1611 ippdrv->prop_list.ipp_id = ret; 1611 ippdrv->prop_list.ipp_id = ret;
1612 1612
1613 DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]ipp_id[%d]\n", 1613 DRM_DEBUG_KMS("count[%d]ippdrv[%p]ipp_id[%d]\n",
1614 count++, (int)ippdrv, ret); 1614 count++, ippdrv, ret);
1615 1615
1616 /* store parent device for node */ 1616 /* store parent device for node */
1617 ippdrv->parent_dev = dev; 1617 ippdrv->parent_dev = dev;
@@ -1668,7 +1668,7 @@ static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
1668 1668
1669 file_priv->ipp_dev = dev; 1669 file_priv->ipp_dev = dev;
1670 1670
1671 DRM_DEBUG_KMS("done priv[0x%x]\n", (int)dev); 1671 DRM_DEBUG_KMS("done priv[%p]\n", dev);
1672 1672
1673 return 0; 1673 return 0;
1674} 1674}
@@ -1685,8 +1685,8 @@ static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
1685 mutex_lock(&ippdrv->cmd_lock); 1685 mutex_lock(&ippdrv->cmd_lock);
1686 list_for_each_entry_safe(c_node, tc_node, 1686 list_for_each_entry_safe(c_node, tc_node,
1687 &ippdrv->cmd_list, list) { 1687 &ippdrv->cmd_list, list) {
1688 DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n", 1688 DRM_DEBUG_KMS("count[%d]ippdrv[%p]\n",
1689 count++, (int)ippdrv); 1689 count++, ippdrv);
1690 1690
1691 if (c_node->filp == file) { 1691 if (c_node->filp == file) {
1692 /* 1692 /*
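
A note on the conversions above: every exynos_drm_ipp.c hunk (and the rotator hunk further down) replaces "(int)pointer" casts in debug messages with the pointer-aware printk specifiers %p and %pad. A minimal sketch of the idiom, not taken from the patch itself:

#include <linux/printk.h>
#include <linux/types.h>

/* Hypothetical helper, for illustration only. */
static void debug_print_node(void *node, dma_addr_t base)
{
	/*
	 * %p prints a pointer at its native width; casting to int would
	 * truncate the value on 64-bit kernels and trip
	 * -Wpointer-to-int-cast warnings.
	 */
	pr_debug("node[%p]\n", node);

	/*
	 * %pad takes a pointer *to* a dma_addr_t, so it prints correctly
	 * whether dma_addr_t is 32 or 64 bits wide on the build.
	 */
	pr_debug("base[%pad]\n", &base);
}
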
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index 4eaef36aec5a..9869d70e9e54 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -18,6 +18,7 @@
18#include <linux/of.h> 18#include <linux/of.h>
19#include <linux/of_graph.h> 19#include <linux/of_graph.h>
20#include <linux/clk.h> 20#include <linux/clk.h>
21#include <linux/component.h>
21#include <drm/drmP.h> 22#include <drm/drmP.h>
22#include <linux/mfd/syscon.h> 23#include <linux/mfd/syscon.h>
23#include <linux/regmap.h> 24#include <linux/regmap.h>
@@ -306,9 +307,9 @@ exit:
306 return ret; 307 return ret;
307} 308}
308 309
309void mic_disable(struct drm_bridge *bridge) { } 310static void mic_disable(struct drm_bridge *bridge) { }
310 311
311void mic_post_disable(struct drm_bridge *bridge) 312static void mic_post_disable(struct drm_bridge *bridge)
312{ 313{
313 struct exynos_mic *mic = bridge->driver_private; 314 struct exynos_mic *mic = bridge->driver_private;
314 int i; 315 int i;
@@ -328,7 +329,7 @@ already_disabled:
328 mutex_unlock(&mic_mutex); 329 mutex_unlock(&mic_mutex);
329} 330}
330 331
331void mic_pre_enable(struct drm_bridge *bridge) 332static void mic_pre_enable(struct drm_bridge *bridge)
332{ 333{
333 struct exynos_mic *mic = bridge->driver_private; 334 struct exynos_mic *mic = bridge->driver_private;
334 int ret, i; 335 int ret, i;
@@ -371,11 +372,35 @@ already_enabled:
371 mutex_unlock(&mic_mutex); 372 mutex_unlock(&mic_mutex);
372} 373}
373 374
374void mic_enable(struct drm_bridge *bridge) { } 375static void mic_enable(struct drm_bridge *bridge) { }
375 376
376void mic_destroy(struct drm_bridge *bridge) 377static const struct drm_bridge_funcs mic_bridge_funcs = {
378 .disable = mic_disable,
379 .post_disable = mic_post_disable,
380 .pre_enable = mic_pre_enable,
381 .enable = mic_enable,
382};
383
384static int exynos_mic_bind(struct device *dev, struct device *master,
385 void *data)
377{ 386{
378 struct exynos_mic *mic = bridge->driver_private; 387 struct exynos_mic *mic = dev_get_drvdata(dev);
388 int ret;
389
390 mic->bridge.funcs = &mic_bridge_funcs;
391 mic->bridge.of_node = dev->of_node;
392 mic->bridge.driver_private = mic;
393 ret = drm_bridge_add(&mic->bridge);
394 if (ret)
395 DRM_ERROR("mic: Failed to add MIC to the global bridge list\n");
396
397 return ret;
398}
399
400static void exynos_mic_unbind(struct device *dev, struct device *master,
401 void *data)
402{
403 struct exynos_mic *mic = dev_get_drvdata(dev);
379 int i; 404 int i;
380 405
381 mutex_lock(&mic_mutex); 406 mutex_lock(&mic_mutex);
@@ -387,16 +412,16 @@ void mic_destroy(struct drm_bridge *bridge)
387 412
388already_disabled: 413already_disabled:
389 mutex_unlock(&mic_mutex); 414 mutex_unlock(&mic_mutex);
415
416 drm_bridge_remove(&mic->bridge);
390} 417}
391 418
392static const struct drm_bridge_funcs mic_bridge_funcs = { 419static const struct component_ops exynos_mic_component_ops = {
393 .disable = mic_disable, 420 .bind = exynos_mic_bind,
394 .post_disable = mic_post_disable, 421 .unbind = exynos_mic_unbind,
395 .pre_enable = mic_pre_enable,
396 .enable = mic_enable,
397}; 422};
398 423
399int exynos_mic_probe(struct platform_device *pdev) 424static int exynos_mic_probe(struct platform_device *pdev)
400{ 425{
401 struct device *dev = &pdev->dev; 426 struct device *dev = &pdev->dev;
402 struct exynos_mic *mic; 427 struct exynos_mic *mic;
@@ -435,17 +460,8 @@ int exynos_mic_probe(struct platform_device *pdev)
435 goto err; 460 goto err;
436 } 461 }
437 462
438 mic->bridge.funcs = &mic_bridge_funcs;
439 mic->bridge.of_node = dev->of_node;
440 mic->bridge.driver_private = mic;
441 ret = drm_bridge_add(&mic->bridge);
442 if (ret) {
443 DRM_ERROR("mic: Failed to add MIC to the global bridge list\n");
444 goto err;
445 }
446
447 for (i = 0; i < NUM_CLKS; i++) { 463 for (i = 0; i < NUM_CLKS; i++) {
448 mic->clks[i] = of_clk_get_by_name(dev->of_node, clk_names[i]); 464 mic->clks[i] = devm_clk_get(dev, clk_names[i]);
449 if (IS_ERR(mic->clks[i])) { 465 if (IS_ERR(mic->clks[i])) {
450 DRM_ERROR("mic: Failed to get clock (%s)\n", 466 DRM_ERROR("mic: Failed to get clock (%s)\n",
451 clk_names[i]); 467 clk_names[i]);
@@ -454,7 +470,10 @@ int exynos_mic_probe(struct platform_device *pdev)
454 } 470 }
455 } 471 }
456 472
473 platform_set_drvdata(pdev, mic);
474
457 DRM_DEBUG_KMS("MIC has been probed\n"); 475 DRM_DEBUG_KMS("MIC has been probed\n");
476 return component_add(dev, &exynos_mic_component_ops);
458 477
459err: 478err:
460 return ret; 479 return ret;
@@ -462,14 +481,7 @@ err:
462 481
463static int exynos_mic_remove(struct platform_device *pdev) 482static int exynos_mic_remove(struct platform_device *pdev)
464{ 483{
465 struct exynos_mic *mic = platform_get_drvdata(pdev); 484 component_del(&pdev->dev, &exynos_mic_component_ops);
466 int i;
467
468 drm_bridge_remove(&mic->bridge);
469
470 for (i = NUM_CLKS - 1; i > -1; i--)
471 clk_put(mic->clks[i]);
472
473 return 0; 485 return 0;
474} 486}
475 487
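
The exynos_drm_mic.c conversion above moves the DRM-facing setup (drm_bridge_add and its teardown) out of probe/remove and behind the component framework, so the bridge is only registered once the master Exynos DRM device binds all of its pieces. A minimal sketch of that structure with invented "foo" names, not the MIC driver itself:

#include <linux/component.h>
#include <linux/platform_device.h>

static int foo_bind(struct device *dev, struct device *master, void *data)
{
	/*
	 * Runs when the master DRM device assembles its components; this
	 * is where drm_bridge_add() moved in the patch above.
	 */
	return 0;
}

static void foo_unbind(struct device *dev, struct device *master, void *data)
{
	/* Mirror of foo_bind(); drm_bridge_remove() moved here. */
}

static const struct component_ops foo_component_ops = {
	.bind	= foo_bind,
	.unbind	= foo_unbind,
};

static int foo_probe(struct platform_device *pdev)
{
	/*
	 * Grab resources that do not need the DRM device (clocks, regmaps),
	 * stash driver data, then hand off to the component framework.
	 */
	return component_add(&pdev->dev, &foo_component_ops);
}

static int foo_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &foo_component_ops);
	return 0;
}

The switch from of_clk_get_by_name() to devm_clk_get() in the same hunk fits the same split: once teardown is divided between unbind and remove, letting devm manage the clock references removes the manual clk_put() loop entirely.
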
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index bea0f7826d30..ce59f4443394 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -754,7 +754,7 @@ static int rotator_probe(struct platform_device *pdev)
754 goto err_ippdrv_register; 754 goto err_ippdrv_register;
755 } 755 }
756 756
757 DRM_DEBUG_KMS("ippdrv[0x%x]\n", (int)ippdrv); 757 DRM_DEBUG_KMS("ippdrv[%p]\n", ippdrv);
758 758
759 platform_set_drvdata(pdev, rot); 759 platform_set_drvdata(pdev, rot);
760 760
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 62ac4e5fa51d..b605bd7395ec 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -223,7 +223,7 @@ static void vidi_fake_vblank_handler(struct work_struct *work)
223 } 223 }
224} 224}
225 225
226static int vidi_show_connection(struct device *dev, 226static ssize_t vidi_show_connection(struct device *dev,
227 struct device_attribute *attr, char *buf) 227 struct device_attribute *attr, char *buf)
228{ 228{
229 struct vidi_context *ctx = dev_get_drvdata(dev); 229 struct vidi_context *ctx = dev_get_drvdata(dev);
@@ -238,7 +238,7 @@ static int vidi_show_connection(struct device *dev,
238 return rc; 238 return rc;
239} 239}
240 240
241static int vidi_store_connection(struct device *dev, 241static ssize_t vidi_store_connection(struct device *dev,
242 struct device_attribute *attr, 242 struct device_attribute *attr,
243 const char *buf, size_t len) 243 const char *buf, size_t len)
244{ 244{
@@ -294,7 +294,9 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
294 } 294 }
295 295
296 if (vidi->connection) { 296 if (vidi->connection) {
297 struct edid *raw_edid = (struct edid *)(uint32_t)vidi->edid; 297 struct edid *raw_edid;
298
299 raw_edid = (struct edid *)(unsigned long)vidi->edid;
298 if (!drm_edid_is_valid(raw_edid)) { 300 if (!drm_edid_is_valid(raw_edid)) {
299 DRM_DEBUG_KMS("edid data is invalid.\n"); 301 DRM_DEBUG_KMS("edid data is invalid.\n");
300 return -EINVAL; 302 return -EINVAL;
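
The vidi change above fixes a 32/64-bit truncation bug: the EDID value arrives in what appears to be a 64-bit field (hence the double cast), and narrowing it through (uint32_t) before casting to a pointer silently drops the upper half on 64-bit kernels. Going through unsigned long is the usual idiom, since unsigned long matches pointer width on every Linux ABI. Illustrative only:

#include <linux/types.h>

struct edid;	/* opaque here; the real definition lives in the DRM headers */

static inline struct edid *edid_from_u64(__u64 blob)
{
	/*
	 * unsigned long is 32 bits on 32-bit kernels and 64 bits on
	 * 64-bit kernels, so the full pointer value survives the cast.
	 */
	return (struct edid *)(unsigned long)blob;
}
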
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index b5fbc1cbf024..0a5a60005f7e 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -1289,8 +1289,7 @@ static int mixer_remove(struct platform_device *pdev)
1289 return 0; 1289 return 0;
1290} 1290}
1291 1291
1292#ifdef CONFIG_PM_SLEEP 1292static int __maybe_unused exynos_mixer_suspend(struct device *dev)
1293static int exynos_mixer_suspend(struct device *dev)
1294{ 1293{
1295 struct mixer_context *ctx = dev_get_drvdata(dev); 1294 struct mixer_context *ctx = dev_get_drvdata(dev);
1296 struct mixer_resources *res = &ctx->mixer_res; 1295 struct mixer_resources *res = &ctx->mixer_res;
@@ -1306,7 +1305,7 @@ static int exynos_mixer_suspend(struct device *dev)
1306 return 0; 1305 return 0;
1307} 1306}
1308 1307
1309static int exynos_mixer_resume(struct device *dev) 1308static int __maybe_unused exynos_mixer_resume(struct device *dev)
1310{ 1309{
1311 struct mixer_context *ctx = dev_get_drvdata(dev); 1310 struct mixer_context *ctx = dev_get_drvdata(dev);
1312 struct mixer_resources *res = &ctx->mixer_res; 1311 struct mixer_resources *res = &ctx->mixer_res;
@@ -1342,7 +1341,6 @@ static int exynos_mixer_resume(struct device *dev)
1342 1341
1343 return 0; 1342 return 0;
1344} 1343}
1345#endif
1346 1344
1347static const struct dev_pm_ops exynos_mixer_pm_ops = { 1345static const struct dev_pm_ops exynos_mixer_pm_ops = {
1348 SET_RUNTIME_PM_OPS(exynos_mixer_suspend, exynos_mixer_resume, NULL) 1346 SET_RUNTIME_PM_OPS(exynos_mixer_suspend, exynos_mixer_resume, NULL)
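
The mixer diff drops the #ifdef CONFIG_PM_SLEEP guard and marks the callbacks __maybe_unused instead. The functions are now always parsed and compiled (so they cannot bit-rot in !PM configs) and are simply discarded when unreferenced, while the attribute silences the defined-but-not-used warning when the SET_*_PM_OPS macro expands to nothing. A schematic example, not the mixer code itself:

#include <linux/device.h>
#include <linux/pm.h>

static int __maybe_unused foo_suspend(struct device *dev)
{
	/* stop clocks, save hardware state ... */
	return 0;
}

static int __maybe_unused foo_resume(struct device *dev)
{
	/* restore hardware state, restart clocks ... */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	/* Expands to nothing when PM support is compiled out. */
	SET_RUNTIME_PM_OPS(foo_suspend, foo_resume, NULL)
};
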
diff --git a/drivers/gpu/drm/i2c/adv7511.c b/drivers/gpu/drm/i2c/adv7511.c
index 533d1e3d4a99..a02112ba1c3d 100644
--- a/drivers/gpu/drm/i2c/adv7511.c
+++ b/drivers/gpu/drm/i2c/adv7511.c
@@ -136,6 +136,7 @@ static bool adv7511_register_volatile(struct device *dev, unsigned int reg)
136 case ADV7511_REG_BKSV(3): 136 case ADV7511_REG_BKSV(3):
137 case ADV7511_REG_BKSV(4): 137 case ADV7511_REG_BKSV(4):
138 case ADV7511_REG_DDC_STATUS: 138 case ADV7511_REG_DDC_STATUS:
139 case ADV7511_REG_EDID_READ_CTRL:
139 case ADV7511_REG_BSTATUS(0): 140 case ADV7511_REG_BSTATUS(0):
140 case ADV7511_REG_BSTATUS(1): 141 case ADV7511_REG_BSTATUS(1):
141 case ADV7511_REG_CHIP_ID_HIGH: 142 case ADV7511_REG_CHIP_ID_HIGH:
@@ -362,24 +363,31 @@ static void adv7511_power_on(struct adv7511 *adv7511)
362{ 363{
363 adv7511->current_edid_segment = -1; 364 adv7511->current_edid_segment = -1;
364 365
365 regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
366 ADV7511_INT0_EDID_READY);
367 regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
368 ADV7511_INT1_DDC_ERROR);
369 regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER, 366 regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
370 ADV7511_POWER_POWER_DOWN, 0); 367 ADV7511_POWER_POWER_DOWN, 0);
368 if (adv7511->i2c_main->irq) {
369 /*
370 * Documentation says the INT_ENABLE registers are reset in
371 * POWER_DOWN mode. My 7511w preserved the bits, however.
372 * Still, let's be safe and stick to the documentation.
373 */
374 regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
375 ADV7511_INT0_EDID_READY);
376 regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
377 ADV7511_INT1_DDC_ERROR);
378 }
371 379
372 /* 380 /*
373 * Per spec it is allowed to pulse the HDP signal to indicate that the 381 * Per spec it is allowed to pulse the HPD signal to indicate that the
374 * EDID information has changed. Some monitors do this when they wakeup 382 * EDID information has changed. Some monitors do this when they wakeup
375 * from standby or are enabled. When the HDP goes low the adv7511 is 383 * from standby or are enabled. When the HPD goes low the adv7511 is
376 * reset and the outputs are disabled which might cause the monitor to 384 * reset and the outputs are disabled which might cause the monitor to
377 * go to standby again. To avoid this we ignore the HDP pin for the 385 * go to standby again. To avoid this we ignore the HPD pin for the
378 * first few seconds after enabling the output. 386 * first few seconds after enabling the output.
379 */ 387 */
380 regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2, 388 regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
381 ADV7511_REG_POWER2_HDP_SRC_MASK, 389 ADV7511_REG_POWER2_HPD_SRC_MASK,
382 ADV7511_REG_POWER2_HDP_SRC_NONE); 390 ADV7511_REG_POWER2_HPD_SRC_NONE);
383 391
384 /* 392 /*
385 * Most of the registers are reset during power down or when HPD is low. 393 * Most of the registers are reset during power down or when HPD is low.
@@ -413,9 +421,9 @@ static bool adv7511_hpd(struct adv7511 *adv7511)
413 if (ret < 0) 421 if (ret < 0)
414 return false; 422 return false;
415 423
416 if (irq0 & ADV7511_INT0_HDP) { 424 if (irq0 & ADV7511_INT0_HPD) {
417 regmap_write(adv7511->regmap, ADV7511_REG_INT(0), 425 regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
418 ADV7511_INT0_HDP); 426 ADV7511_INT0_HPD);
419 return true; 427 return true;
420 } 428 }
421 429
@@ -438,7 +446,7 @@ static int adv7511_irq_process(struct adv7511 *adv7511)
438 regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0); 446 regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0);
439 regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1); 447 regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1);
440 448
441 if (irq0 & ADV7511_INT0_HDP && adv7511->encoder) 449 if (irq0 & ADV7511_INT0_HPD && adv7511->encoder)
442 drm_helper_hpd_irq_event(adv7511->encoder->dev); 450 drm_helper_hpd_irq_event(adv7511->encoder->dev);
443 451
444 if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) { 452 if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) {
@@ -567,12 +575,14 @@ static int adv7511_get_modes(struct drm_encoder *encoder,
567 575
568 /* Reading the EDID only works if the device is powered */ 576 /* Reading the EDID only works if the device is powered */
569 if (!adv7511->powered) { 577 if (!adv7511->powered) {
570 regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
571 ADV7511_INT0_EDID_READY);
572 regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
573 ADV7511_INT1_DDC_ERROR);
574 regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER, 578 regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
575 ADV7511_POWER_POWER_DOWN, 0); 579 ADV7511_POWER_POWER_DOWN, 0);
580 if (adv7511->i2c_main->irq) {
581 regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
582 ADV7511_INT0_EDID_READY);
583 regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
584 ADV7511_INT1_DDC_ERROR);
585 }
576 adv7511->current_edid_segment = -1; 586 adv7511->current_edid_segment = -1;
577 } 587 }
578 588
@@ -638,10 +648,10 @@ adv7511_encoder_detect(struct drm_encoder *encoder,
638 if (adv7511->status == connector_status_connected) 648 if (adv7511->status == connector_status_connected)
639 status = connector_status_disconnected; 649 status = connector_status_disconnected;
640 } else { 650 } else {
641 /* Renable HDP sensing */ 651 /* Renable HPD sensing */
642 regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2, 652 regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
643 ADV7511_REG_POWER2_HDP_SRC_MASK, 653 ADV7511_REG_POWER2_HPD_SRC_MASK,
644 ADV7511_REG_POWER2_HDP_SRC_BOTH); 654 ADV7511_REG_POWER2_HPD_SRC_BOTH);
645 } 655 }
646 656
647 adv7511->status = status; 657 adv7511->status = status;
diff --git a/drivers/gpu/drm/i2c/adv7511.h b/drivers/gpu/drm/i2c/adv7511.h
index 6599ed538426..38515b30cedf 100644
--- a/drivers/gpu/drm/i2c/adv7511.h
+++ b/drivers/gpu/drm/i2c/adv7511.h
@@ -90,7 +90,7 @@
90#define ADV7511_CSC_ENABLE BIT(7) 90#define ADV7511_CSC_ENABLE BIT(7)
91#define ADV7511_CSC_UPDATE_MODE BIT(5) 91#define ADV7511_CSC_UPDATE_MODE BIT(5)
92 92
93#define ADV7511_INT0_HDP BIT(7) 93#define ADV7511_INT0_HPD BIT(7)
94#define ADV7511_INT0_VSYNC BIT(5) 94#define ADV7511_INT0_VSYNC BIT(5)
95#define ADV7511_INT0_AUDIO_FIFO_FULL BIT(4) 95#define ADV7511_INT0_AUDIO_FIFO_FULL BIT(4)
96#define ADV7511_INT0_EDID_READY BIT(2) 96#define ADV7511_INT0_EDID_READY BIT(2)
@@ -157,11 +157,11 @@
157#define ADV7511_PACKET_ENABLE_SPARE2 BIT(1) 157#define ADV7511_PACKET_ENABLE_SPARE2 BIT(1)
158#define ADV7511_PACKET_ENABLE_SPARE1 BIT(0) 158#define ADV7511_PACKET_ENABLE_SPARE1 BIT(0)
159 159
160#define ADV7511_REG_POWER2_HDP_SRC_MASK 0xc0 160#define ADV7511_REG_POWER2_HPD_SRC_MASK 0xc0
161#define ADV7511_REG_POWER2_HDP_SRC_BOTH 0x00 161#define ADV7511_REG_POWER2_HPD_SRC_BOTH 0x00
162#define ADV7511_REG_POWER2_HDP_SRC_HDP 0x40 162#define ADV7511_REG_POWER2_HPD_SRC_HPD 0x40
163#define ADV7511_REG_POWER2_HDP_SRC_CEC 0x80 163#define ADV7511_REG_POWER2_HPD_SRC_CEC 0x80
164#define ADV7511_REG_POWER2_HDP_SRC_NONE 0xc0 164#define ADV7511_REG_POWER2_HPD_SRC_NONE 0xc0
165#define ADV7511_REG_POWER2_TDMS_ENABLE BIT(4) 165#define ADV7511_REG_POWER2_TDMS_ENABLE BIT(4)
166#define ADV7511_REG_POWER2_GATE_INPUT_CLK BIT(0) 166#define ADV7511_REG_POWER2_GATE_INPUT_CLK BIT(0)
167 167
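
Besides the mechanical HDP -> HPD rename, the adv7511 hunks make the EDID-ready/DDC-error unmasking conditional on an interrupt line actually being wired up (client->irq != 0), and write the INT_ENABLE registers rather than the latched status registers. A sketch of that shape for a hypothetical chip; the FOO_* names and register address are invented:

#include <linux/i2c.h>
#include <linux/regmap.h>

#define FOO_REG_INT_ENABLE0	0x94		/* made-up address */
#define FOO_INT0_EDID_READY	(1 << 2)	/* made-up bit */

static void foo_arm_edid_irq(struct regmap *map, struct i2c_client *client)
{
	/*
	 * No interrupt routed: leave the sources masked and let the driver
	 * poll for EDID completion instead.
	 */
	if (!client->irq)
		return;

	regmap_write(map, FOO_REG_INT_ENABLE0, FOO_INT0_EDID_READY);
}
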
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 34e38749a817..f8ee740c0e26 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -1382,8 +1382,16 @@ static void tda998x_connector_destroy(struct drm_connector *connector)
1382 drm_connector_cleanup(connector); 1382 drm_connector_cleanup(connector);
1383} 1383}
1384 1384
1385static int tda998x_connector_dpms(struct drm_connector *connector, int mode)
1386{
1387 if (drm_core_check_feature(connector->dev, DRIVER_ATOMIC))
1388 return drm_atomic_helper_connector_dpms(connector, mode);
1389 else
1390 return drm_helper_connector_dpms(connector, mode);
1391}
1392
1385static const struct drm_connector_funcs tda998x_connector_funcs = { 1393static const struct drm_connector_funcs tda998x_connector_funcs = {
1386 .dpms = drm_atomic_helper_connector_dpms, 1394 .dpms = tda998x_connector_dpms,
1387 .reset = drm_atomic_helper_connector_reset, 1395 .reset = drm_atomic_helper_connector_reset,
1388 .fill_modes = drm_helper_probe_single_connector_modes, 1396 .fill_modes = drm_helper_probe_single_connector_modes,
1389 .detect = tda998x_connector_detect, 1397 .detect = tda998x_connector_detect,
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index fcd77b27514d..051eab33e4c7 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -10,7 +10,6 @@ config DRM_I915
10 # the shmem_readpage() which depends upon tmpfs 10 # the shmem_readpage() which depends upon tmpfs
11 select SHMEM 11 select SHMEM
12 select TMPFS 12 select TMPFS
13 select STOP_MACHINE
14 select DRM_KMS_HELPER 13 select DRM_KMS_HELPER
15 select DRM_PANEL 14 select DRM_PANEL
16 select DRM_MIPI_DSI 15 select DRM_MIPI_DSI
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 0fc38bb7276c..cf39ed3133d6 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -825,8 +825,11 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
825 } 825 }
826 826
827 for_each_pipe(dev_priv, pipe) { 827 for_each_pipe(dev_priv, pipe) {
828 if (!intel_display_power_is_enabled(dev_priv, 828 enum intel_display_power_domain power_domain;
829 POWER_DOMAIN_PIPE(pipe))) { 829
830 power_domain = POWER_DOMAIN_PIPE(pipe);
831 if (!intel_display_power_get_if_enabled(dev_priv,
832 power_domain)) {
830 seq_printf(m, "Pipe %c power disabled\n", 833 seq_printf(m, "Pipe %c power disabled\n",
831 pipe_name(pipe)); 834 pipe_name(pipe));
832 continue; 835 continue;
@@ -840,6 +843,8 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
840 seq_printf(m, "Pipe %c IER:\t%08x\n", 843 seq_printf(m, "Pipe %c IER:\t%08x\n",
841 pipe_name(pipe), 844 pipe_name(pipe),
842 I915_READ(GEN8_DE_PIPE_IER(pipe))); 845 I915_READ(GEN8_DE_PIPE_IER(pipe)));
846
847 intel_display_power_put(dev_priv, power_domain);
843 } 848 }
844 849
845 seq_printf(m, "Display Engine port interrupt mask:\t%08x\n", 850 seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
@@ -3985,6 +3990,7 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
3985 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 3990 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
3986 struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, 3991 struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev,
3987 pipe)); 3992 pipe));
3993 enum intel_display_power_domain power_domain;
3988 u32 val = 0; /* shut up gcc */ 3994 u32 val = 0; /* shut up gcc */
3989 int ret; 3995 int ret;
3990 3996
@@ -3995,7 +4001,8 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
3995 if (pipe_crc->source && source) 4001 if (pipe_crc->source && source)
3996 return -EINVAL; 4002 return -EINVAL;
3997 4003
3998 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe))) { 4004 power_domain = POWER_DOMAIN_PIPE(pipe);
4005 if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) {
3999 DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n"); 4006 DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
4000 return -EIO; 4007 return -EIO;
4001 } 4008 }
@@ -4012,7 +4019,7 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
4012 ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val); 4019 ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val);
4013 4020
4014 if (ret != 0) 4021 if (ret != 0)
4015 return ret; 4022 goto out;
4016 4023
4017 /* none -> real source transition */ 4024 /* none -> real source transition */
4018 if (source) { 4025 if (source) {
@@ -4024,8 +4031,10 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
4024 entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR, 4031 entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR,
4025 sizeof(pipe_crc->entries[0]), 4032 sizeof(pipe_crc->entries[0]),
4026 GFP_KERNEL); 4033 GFP_KERNEL);
4027 if (!entries) 4034 if (!entries) {
4028 return -ENOMEM; 4035 ret = -ENOMEM;
4036 goto out;
4037 }
4029 4038
4030 /* 4039 /*
4031 * When IPS gets enabled, the pipe CRC changes. Since IPS gets 4040 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
@@ -4081,7 +4090,12 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
4081 hsw_enable_ips(crtc); 4090 hsw_enable_ips(crtc);
4082 } 4091 }
4083 4092
4084 return 0; 4093 ret = 0;
4094
4095out:
4096 intel_display_power_put(dev_priv, power_domain);
4097
4098 return ret;
4085} 4099}
4086 4100
4087/* 4101/*
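
The i915_debugfs.c hunks above are the first instances of a pattern repeated through the rest of this series: intel_display_power_is_enabled() only answers a question and guarantees nothing afterwards, so callers switch to intel_display_power_get_if_enabled(), which takes a reference only if the power well is already up, and every exit path then drops it with intel_display_power_put(). A schematic of the pattern using the names from the patch; the surrounding function is invented for illustration and assumes the driver's internal headers (i915_drv.h) are in scope:

static bool foo_read_pipe_state(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	enum intel_display_power_domain power_domain;
	bool ret = false;

	power_domain = POWER_DOMAIN_PIPE(pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;	/* well is off: nothing to read */

	/* MMIO accesses are safe here: the reference keeps the well up. */
	ret = true;

	intel_display_power_put(dev_priv, power_domain);
	return ret;
}

The goto-out rewrites in the larger functions below (intel_crt, intel_ddi, intel_display, intel_dp) exist purely so that this put() runs on every early return.
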
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 3ac616d7363b..f357058c74d9 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -501,7 +501,9 @@ void intel_detect_pch(struct drm_device *dev)
501 WARN_ON(!IS_SKYLAKE(dev) && 501 WARN_ON(!IS_SKYLAKE(dev) &&
502 !IS_KABYLAKE(dev)); 502 !IS_KABYLAKE(dev));
503 } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) || 503 } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
504 (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE)) { 504 ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
505 pch->subsystem_vendor == 0x1af4 &&
506 pch->subsystem_device == 0x1100)) {
505 dev_priv->pch_type = intel_virt_detect_pch(dev); 507 dev_priv->pch_type = intel_virt_detect_pch(dev);
506 } else 508 } else
507 continue; 509 continue;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f0f75d7c0d94..b0847b915545 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -751,6 +751,7 @@ struct intel_csr {
751 uint32_t mmio_count; 751 uint32_t mmio_count;
752 i915_reg_t mmioaddr[8]; 752 i915_reg_t mmioaddr[8];
753 uint32_t mmiodata[8]; 753 uint32_t mmiodata[8];
754 uint32_t dc_state;
754}; 755};
755 756
756#define DEV_INFO_FOR_EACH_FLAG(func, sep) \ 757#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
@@ -1988,6 +1989,9 @@ enum hdmi_force_audio {
1988#define I915_GTT_OFFSET_NONE ((u32)-1) 1989#define I915_GTT_OFFSET_NONE ((u32)-1)
1989 1990
1990struct drm_i915_gem_object_ops { 1991struct drm_i915_gem_object_ops {
1992 unsigned int flags;
1993#define I915_GEM_OBJECT_HAS_STRUCT_PAGE 0x1
1994
1991 /* Interface between the GEM object and its backing storage. 1995 /* Interface between the GEM object and its backing storage.
1992 * get_pages() is called once prior to the use of the associated set 1996 * get_pages() is called once prior to the use of the associated set
1993 * of pages before to binding them into the GTT, and put_pages() is 1997 * of pages before to binding them into the GTT, and put_pages() is
@@ -2003,6 +2007,7 @@ struct drm_i915_gem_object_ops {
2003 */ 2007 */
2004 int (*get_pages)(struct drm_i915_gem_object *); 2008 int (*get_pages)(struct drm_i915_gem_object *);
2005 void (*put_pages)(struct drm_i915_gem_object *); 2009 void (*put_pages)(struct drm_i915_gem_object *);
2010
2006 int (*dmabuf_export)(struct drm_i915_gem_object *); 2011 int (*dmabuf_export)(struct drm_i915_gem_object *);
2007 void (*release)(struct drm_i915_gem_object *); 2012 void (*release)(struct drm_i915_gem_object *);
2008}; 2013};
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ddc21d4b388d..bb44bad15403 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4425,6 +4425,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
4425} 4425}
4426 4426
4427static const struct drm_i915_gem_object_ops i915_gem_object_ops = { 4427static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4428 .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
4428 .get_pages = i915_gem_object_get_pages_gtt, 4429 .get_pages = i915_gem_object_get_pages_gtt,
4429 .put_pages = i915_gem_object_put_pages_gtt, 4430 .put_pages = i915_gem_object_put_pages_gtt,
4430}; 4431};
@@ -5261,7 +5262,7 @@ i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
5261 struct page *page; 5262 struct page *page;
5262 5263
5263 /* Only default objects have per-page dirty tracking */ 5264 /* Only default objects have per-page dirty tracking */
5264 if (WARN_ON(obj->ops != &i915_gem_object_ops)) 5265 if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0))
5265 return NULL; 5266 return NULL;
5266 5267
5267 page = i915_gem_object_get_page(obj, n); 5268 page = i915_gem_object_get_page(obj, n);
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 19fb0bddc1cd..59e45b3a6937 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -789,9 +789,10 @@ i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
789} 789}
790 790
791static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = { 791static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
792 .dmabuf_export = i915_gem_userptr_dmabuf_export, 792 .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
793 .get_pages = i915_gem_userptr_get_pages, 793 .get_pages = i915_gem_userptr_get_pages,
794 .put_pages = i915_gem_userptr_put_pages, 794 .put_pages = i915_gem_userptr_put_pages,
795 .dmabuf_export = i915_gem_userptr_dmabuf_export,
795 .release = i915_gem_userptr_release, 796 .release = i915_gem_userptr_release,
796}; 797};
797 798
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 007ae83a4086..4897728713f6 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3287,19 +3287,20 @@ enum skl_disp_power_wells {
3287 3287
3288#define PORT_HOTPLUG_STAT _MMIO(dev_priv->info.display_mmio_offset + 0x61114) 3288#define PORT_HOTPLUG_STAT _MMIO(dev_priv->info.display_mmio_offset + 0x61114)
3289/* 3289/*
3290 * HDMI/DP bits are gen4+ 3290 * HDMI/DP bits are g4x+
3291 * 3291 *
3292 * WARNING: Bspec for hpd status bits on gen4 seems to be completely confused. 3292 * WARNING: Bspec for hpd status bits on gen4 seems to be completely confused.
3293 * Please check the detailed lore in the commit message for for experimental 3293 * Please check the detailed lore in the commit message for for experimental
3294 * evidence. 3294 * evidence.
3295 */ 3295 */
3296#define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 29) 3296/* Bspec says GM45 should match G4X/VLV/CHV, but reality disagrees */
3297#define PORTD_HOTPLUG_LIVE_STATUS_GM45 (1 << 29)
3298#define PORTC_HOTPLUG_LIVE_STATUS_GM45 (1 << 28)
3299#define PORTB_HOTPLUG_LIVE_STATUS_GM45 (1 << 27)
3300/* G4X/VLV/CHV DP/HDMI bits again match Bspec */
3301#define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 27)
3297#define PORTC_HOTPLUG_LIVE_STATUS_G4X (1 << 28) 3302#define PORTC_HOTPLUG_LIVE_STATUS_G4X (1 << 28)
3298#define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 27) 3303#define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 29)
3299/* VLV DP/HDMI bits again match Bspec */
3300#define PORTD_HOTPLUG_LIVE_STATUS_VLV (1 << 27)
3301#define PORTC_HOTPLUG_LIVE_STATUS_VLV (1 << 28)
3302#define PORTB_HOTPLUG_LIVE_STATUS_VLV (1 << 29)
3303#define PORTD_HOTPLUG_INT_STATUS (3 << 21) 3304#define PORTD_HOTPLUG_INT_STATUS (3 << 21)
3304#define PORTD_HOTPLUG_INT_LONG_PULSE (2 << 21) 3305#define PORTD_HOTPLUG_INT_LONG_PULSE (2 << 21)
3305#define PORTD_HOTPLUG_INT_SHORT_PULSE (1 << 21) 3306#define PORTD_HOTPLUG_INT_SHORT_PULSE (1 << 21)
@@ -7514,7 +7515,7 @@ enum skl_disp_power_wells {
7514#define DPLL_CFGCR2_PDIV_7 (4<<2) 7515#define DPLL_CFGCR2_PDIV_7 (4<<2)
7515#define DPLL_CFGCR2_CENTRAL_FREQ_MASK (3) 7516#define DPLL_CFGCR2_CENTRAL_FREQ_MASK (3)
7516 7517
7517#define DPLL_CFGCR1(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR1, _DPLL2_CFGCR2) 7518#define DPLL_CFGCR1(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR1, _DPLL2_CFGCR1)
7518#define DPLL_CFGCR2(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR2, _DPLL2_CFGCR2) 7519#define DPLL_CFGCR2(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR2, _DPLL2_CFGCR2)
7519 7520
7520/* BXT display engine PLL */ 7521/* BXT display engine PLL */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index a2aa09ce3202..a8af594fbd00 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -49,7 +49,7 @@ static void i915_save_display(struct drm_device *dev)
49 dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS); 49 dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
50 dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS); 50 dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
51 dev_priv->regfile.savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR); 51 dev_priv->regfile.savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
52 } else if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) { 52 } else if (INTEL_INFO(dev)->gen <= 4) {
53 dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL); 53 dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
54 dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS); 54 dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
55 dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS); 55 dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
@@ -84,7 +84,7 @@ static void i915_restore_display(struct drm_device *dev)
84 I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS); 84 I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
85 I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR); 85 I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
86 I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL); 86 I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
87 } else if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) { 87 } else if (INTEL_INFO(dev)->gen <= 4) {
88 I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS); 88 I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
89 I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS); 89 I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
90 I915_WRITE(PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR); 90 I915_WRITE(PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 9c89df1af036..a7b4a524fadd 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -71,22 +71,29 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
71 struct intel_crt *crt = intel_encoder_to_crt(encoder); 71 struct intel_crt *crt = intel_encoder_to_crt(encoder);
72 enum intel_display_power_domain power_domain; 72 enum intel_display_power_domain power_domain;
73 u32 tmp; 73 u32 tmp;
74 bool ret;
74 75
75 power_domain = intel_display_port_power_domain(encoder); 76 power_domain = intel_display_port_power_domain(encoder);
76 if (!intel_display_power_is_enabled(dev_priv, power_domain)) 77 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
77 return false; 78 return false;
78 79
80 ret = false;
81
79 tmp = I915_READ(crt->adpa_reg); 82 tmp = I915_READ(crt->adpa_reg);
80 83
81 if (!(tmp & ADPA_DAC_ENABLE)) 84 if (!(tmp & ADPA_DAC_ENABLE))
82 return false; 85 goto out;
83 86
84 if (HAS_PCH_CPT(dev)) 87 if (HAS_PCH_CPT(dev))
85 *pipe = PORT_TO_PIPE_CPT(tmp); 88 *pipe = PORT_TO_PIPE_CPT(tmp);
86 else 89 else
87 *pipe = PORT_TO_PIPE(tmp); 90 *pipe = PORT_TO_PIPE(tmp);
88 91
89 return true; 92 ret = true;
93out:
94 intel_display_power_put(dev_priv, power_domain);
95
96 return ret;
90} 97}
91 98
92static unsigned int intel_crt_get_flags(struct intel_encoder *encoder) 99static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 9bb63a85997a..647d85e77c2f 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -240,6 +240,8 @@ void intel_csr_load_program(struct drm_i915_private *dev_priv)
240 I915_WRITE(dev_priv->csr.mmioaddr[i], 240 I915_WRITE(dev_priv->csr.mmioaddr[i],
241 dev_priv->csr.mmiodata[i]); 241 dev_priv->csr.mmiodata[i]);
242 } 242 }
243
244 dev_priv->csr.dc_state = 0;
243} 245}
244 246
245static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, 247static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index e6408e5583d7..0f3df2c39f7c 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1589,7 +1589,8 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc,
1589 DPLL_CFGCR2_KDIV(wrpll_params.kdiv) | 1589 DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
1590 DPLL_CFGCR2_PDIV(wrpll_params.pdiv) | 1590 DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
1591 wrpll_params.central_freq; 1591 wrpll_params.central_freq;
1592 } else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) { 1592 } else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
1593 intel_encoder->type == INTEL_OUTPUT_DP_MST) {
1593 switch (crtc_state->port_clock / 2) { 1594 switch (crtc_state->port_clock / 2) {
1594 case 81000: 1595 case 81000:
1595 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0); 1596 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
@@ -1968,13 +1969,16 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
1968 enum transcoder cpu_transcoder; 1969 enum transcoder cpu_transcoder;
1969 enum intel_display_power_domain power_domain; 1970 enum intel_display_power_domain power_domain;
1970 uint32_t tmp; 1971 uint32_t tmp;
1972 bool ret;
1971 1973
1972 power_domain = intel_display_port_power_domain(intel_encoder); 1974 power_domain = intel_display_port_power_domain(intel_encoder);
1973 if (!intel_display_power_is_enabled(dev_priv, power_domain)) 1975 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
1974 return false; 1976 return false;
1975 1977
1976 if (!intel_encoder->get_hw_state(intel_encoder, &pipe)) 1978 if (!intel_encoder->get_hw_state(intel_encoder, &pipe)) {
1977 return false; 1979 ret = false;
1980 goto out;
1981 }
1978 1982
1979 if (port == PORT_A) 1983 if (port == PORT_A)
1980 cpu_transcoder = TRANSCODER_EDP; 1984 cpu_transcoder = TRANSCODER_EDP;
@@ -1986,23 +1990,33 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
1986 switch (tmp & TRANS_DDI_MODE_SELECT_MASK) { 1990 switch (tmp & TRANS_DDI_MODE_SELECT_MASK) {
1987 case TRANS_DDI_MODE_SELECT_HDMI: 1991 case TRANS_DDI_MODE_SELECT_HDMI:
1988 case TRANS_DDI_MODE_SELECT_DVI: 1992 case TRANS_DDI_MODE_SELECT_DVI:
1989 return (type == DRM_MODE_CONNECTOR_HDMIA); 1993 ret = type == DRM_MODE_CONNECTOR_HDMIA;
1994 break;
1990 1995
1991 case TRANS_DDI_MODE_SELECT_DP_SST: 1996 case TRANS_DDI_MODE_SELECT_DP_SST:
1992 if (type == DRM_MODE_CONNECTOR_eDP) 1997 ret = type == DRM_MODE_CONNECTOR_eDP ||
1993 return true; 1998 type == DRM_MODE_CONNECTOR_DisplayPort;
1994 return (type == DRM_MODE_CONNECTOR_DisplayPort); 1999 break;
2000
1995 case TRANS_DDI_MODE_SELECT_DP_MST: 2001 case TRANS_DDI_MODE_SELECT_DP_MST:
1996 /* if the transcoder is in MST state then 2002 /* if the transcoder is in MST state then
1997 * connector isn't connected */ 2003 * connector isn't connected */
1998 return false; 2004 ret = false;
2005 break;
1999 2006
2000 case TRANS_DDI_MODE_SELECT_FDI: 2007 case TRANS_DDI_MODE_SELECT_FDI:
2001 return (type == DRM_MODE_CONNECTOR_VGA); 2008 ret = type == DRM_MODE_CONNECTOR_VGA;
2009 break;
2002 2010
2003 default: 2011 default:
2004 return false; 2012 ret = false;
2013 break;
2005 } 2014 }
2015
2016out:
2017 intel_display_power_put(dev_priv, power_domain);
2018
2019 return ret;
2006} 2020}
2007 2021
2008bool intel_ddi_get_hw_state(struct intel_encoder *encoder, 2022bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
@@ -2014,15 +2028,18 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
2014 enum intel_display_power_domain power_domain; 2028 enum intel_display_power_domain power_domain;
2015 u32 tmp; 2029 u32 tmp;
2016 int i; 2030 int i;
2031 bool ret;
2017 2032
2018 power_domain = intel_display_port_power_domain(encoder); 2033 power_domain = intel_display_port_power_domain(encoder);
2019 if (!intel_display_power_is_enabled(dev_priv, power_domain)) 2034 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
2020 return false; 2035 return false;
2021 2036
2037 ret = false;
2038
2022 tmp = I915_READ(DDI_BUF_CTL(port)); 2039 tmp = I915_READ(DDI_BUF_CTL(port));
2023 2040
2024 if (!(tmp & DDI_BUF_CTL_ENABLE)) 2041 if (!(tmp & DDI_BUF_CTL_ENABLE))
2025 return false; 2042 goto out;
2026 2043
2027 if (port == PORT_A) { 2044 if (port == PORT_A) {
2028 tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)); 2045 tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
@@ -2040,25 +2057,32 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
2040 break; 2057 break;
2041 } 2058 }
2042 2059
2043 return true; 2060 ret = true;
2044 } else {
2045 for (i = TRANSCODER_A; i <= TRANSCODER_C; i++) {
2046 tmp = I915_READ(TRANS_DDI_FUNC_CTL(i));
2047 2061
2048 if ((tmp & TRANS_DDI_PORT_MASK) 2062 goto out;
2049 == TRANS_DDI_SELECT_PORT(port)) { 2063 }
2050 if ((tmp & TRANS_DDI_MODE_SELECT_MASK) == TRANS_DDI_MODE_SELECT_DP_MST)
2051 return false;
2052 2064
2053 *pipe = i; 2065 for (i = TRANSCODER_A; i <= TRANSCODER_C; i++) {
2054 return true; 2066 tmp = I915_READ(TRANS_DDI_FUNC_CTL(i));
2055 } 2067
2068 if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(port)) {
2069 if ((tmp & TRANS_DDI_MODE_SELECT_MASK) ==
2070 TRANS_DDI_MODE_SELECT_DP_MST)
2071 goto out;
2072
2073 *pipe = i;
2074 ret = true;
2075
2076 goto out;
2056 } 2077 }
2057 } 2078 }
2058 2079
2059 DRM_DEBUG_KMS("No pipe for ddi port %c found\n", port_name(port)); 2080 DRM_DEBUG_KMS("No pipe for ddi port %c found\n", port_name(port));
2060 2081
2061 return false; 2082out:
2083 intel_display_power_put(dev_priv, power_domain);
2084
2085 return ret;
2062} 2086}
2063 2087
2064void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc) 2088void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
@@ -2507,12 +2531,14 @@ static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
2507{ 2531{
2508 uint32_t val; 2532 uint32_t val;
2509 2533
2510 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS)) 2534 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
2511 return false; 2535 return false;
2512 2536
2513 val = I915_READ(WRPLL_CTL(pll->id)); 2537 val = I915_READ(WRPLL_CTL(pll->id));
2514 hw_state->wrpll = val; 2538 hw_state->wrpll = val;
2515 2539
2540 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
2541
2516 return val & WRPLL_PLL_ENABLE; 2542 return val & WRPLL_PLL_ENABLE;
2517} 2543}
2518 2544
@@ -2522,12 +2548,14 @@ static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
2522{ 2548{
2523 uint32_t val; 2549 uint32_t val;
2524 2550
2525 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS)) 2551 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
2526 return false; 2552 return false;
2527 2553
2528 val = I915_READ(SPLL_CTL); 2554 val = I915_READ(SPLL_CTL);
2529 hw_state->spll = val; 2555 hw_state->spll = val;
2530 2556
2557 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
2558
2531 return val & SPLL_PLL_ENABLE; 2559 return val & SPLL_PLL_ENABLE;
2532} 2560}
2533 2561
@@ -2644,16 +2672,19 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
2644 uint32_t val; 2672 uint32_t val;
2645 unsigned int dpll; 2673 unsigned int dpll;
2646 const struct skl_dpll_regs *regs = skl_dpll_regs; 2674 const struct skl_dpll_regs *regs = skl_dpll_regs;
2675 bool ret;
2647 2676
2648 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS)) 2677 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
2649 return false; 2678 return false;
2650 2679
2680 ret = false;
2681
2651 /* DPLL0 is not part of the shared DPLLs, so pll->id is 0 for DPLL1 */ 2682 /* DPLL0 is not part of the shared DPLLs, so pll->id is 0 for DPLL1 */
2652 dpll = pll->id + 1; 2683 dpll = pll->id + 1;
2653 2684
2654 val = I915_READ(regs[pll->id].ctl); 2685 val = I915_READ(regs[pll->id].ctl);
2655 if (!(val & LCPLL_PLL_ENABLE)) 2686 if (!(val & LCPLL_PLL_ENABLE))
2656 return false; 2687 goto out;
2657 2688
2658 val = I915_READ(DPLL_CTRL1); 2689 val = I915_READ(DPLL_CTRL1);
2659 hw_state->ctrl1 = (val >> (dpll * 6)) & 0x3f; 2690 hw_state->ctrl1 = (val >> (dpll * 6)) & 0x3f;
@@ -2663,8 +2694,12 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
2663 hw_state->cfgcr1 = I915_READ(regs[pll->id].cfgcr1); 2694 hw_state->cfgcr1 = I915_READ(regs[pll->id].cfgcr1);
2664 hw_state->cfgcr2 = I915_READ(regs[pll->id].cfgcr2); 2695 hw_state->cfgcr2 = I915_READ(regs[pll->id].cfgcr2);
2665 } 2696 }
2697 ret = true;
2666 2698
2667 return true; 2699out:
2700 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
2701
2702 return ret;
2668} 2703}
2669 2704
2670static void skl_shared_dplls_init(struct drm_i915_private *dev_priv) 2705static void skl_shared_dplls_init(struct drm_i915_private *dev_priv)
@@ -2931,13 +2966,16 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
2931{ 2966{
2932 enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */ 2967 enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
2933 uint32_t val; 2968 uint32_t val;
2969 bool ret;
2934 2970
2935 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS)) 2971 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
2936 return false; 2972 return false;
2937 2973
2974 ret = false;
2975
2938 val = I915_READ(BXT_PORT_PLL_ENABLE(port)); 2976 val = I915_READ(BXT_PORT_PLL_ENABLE(port));
2939 if (!(val & PORT_PLL_ENABLE)) 2977 if (!(val & PORT_PLL_ENABLE))
2940 return false; 2978 goto out;
2941 2979
2942 hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(port)); 2980 hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(port));
2943 hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK; 2981 hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
@@ -2984,7 +3022,12 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
2984 I915_READ(BXT_PORT_PCS_DW12_LN23(port))); 3022 I915_READ(BXT_PORT_PCS_DW12_LN23(port)));
2985 hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD; 3023 hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
2986 3024
2987 return true; 3025 ret = true;
3026
3027out:
3028 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
3029
3030 return ret;
2988} 3031}
2989 3032
2990static void bxt_shared_dplls_init(struct drm_i915_private *dev_priv) 3033static void bxt_shared_dplls_init(struct drm_i915_private *dev_priv)
@@ -3119,11 +3162,15 @@ bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
3119{ 3162{
3120 u32 temp; 3163 u32 temp;
3121 3164
3122 if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) { 3165 if (intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
3123 temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD); 3166 temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
3167
3168 intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
3169
3124 if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe)) 3170 if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe))
3125 return true; 3171 return true;
3126 } 3172 }
3173
3127 return false; 3174 return false;
3128} 3175}
3129 3176
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 2f00828ccc6e..46947fffd599 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1351,18 +1351,21 @@ void assert_pipe(struct drm_i915_private *dev_priv,
1351 bool cur_state; 1351 bool cur_state;
1352 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 1352 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1353 pipe); 1353 pipe);
1354 enum intel_display_power_domain power_domain;
1354 1355
1355 /* if we need the pipe quirk it must be always on */ 1356 /* if we need the pipe quirk it must be always on */
1356 if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) || 1357 if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
1357 (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)) 1358 (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
1358 state = true; 1359 state = true;
1359 1360
1360 if (!intel_display_power_is_enabled(dev_priv, 1361 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
1361 POWER_DOMAIN_TRANSCODER(cpu_transcoder))) { 1362 if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
1362 cur_state = false;
1363 } else {
1364 u32 val = I915_READ(PIPECONF(cpu_transcoder)); 1363 u32 val = I915_READ(PIPECONF(cpu_transcoder));
1365 cur_state = !!(val & PIPECONF_ENABLE); 1364 cur_state = !!(val & PIPECONF_ENABLE);
1365
1366 intel_display_power_put(dev_priv, power_domain);
1367 } else {
1368 cur_state = false;
1366 } 1369 }
1367 1370
1368 I915_STATE_WARN(cur_state != state, 1371 I915_STATE_WARN(cur_state != state,
@@ -2946,7 +2949,7 @@ u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
2946 struct i915_vma *vma; 2949 struct i915_vma *vma;
2947 u64 offset; 2950 u64 offset;
2948 2951
2949 intel_fill_fb_ggtt_view(&view, intel_plane->base.fb, 2952 intel_fill_fb_ggtt_view(&view, intel_plane->base.state->fb,
2950 intel_plane->base.state); 2953 intel_plane->base.state);
2951 2954
2952 vma = i915_gem_obj_to_ggtt_view(obj, &view); 2955 vma = i915_gem_obj_to_ggtt_view(obj, &view);
@@ -8171,18 +8174,22 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
8171{ 8174{
8172 struct drm_device *dev = crtc->base.dev; 8175 struct drm_device *dev = crtc->base.dev;
8173 struct drm_i915_private *dev_priv = dev->dev_private; 8176 struct drm_i915_private *dev_priv = dev->dev_private;
8177 enum intel_display_power_domain power_domain;
8174 uint32_t tmp; 8178 uint32_t tmp;
8179 bool ret;
8175 8180
8176 if (!intel_display_power_is_enabled(dev_priv, 8181 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
8177 POWER_DOMAIN_PIPE(crtc->pipe))) 8182 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
8178 return false; 8183 return false;
8179 8184
8180 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 8185 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
8181 pipe_config->shared_dpll = DPLL_ID_PRIVATE; 8186 pipe_config->shared_dpll = DPLL_ID_PRIVATE;
8182 8187
8188 ret = false;
8189
8183 tmp = I915_READ(PIPECONF(crtc->pipe)); 8190 tmp = I915_READ(PIPECONF(crtc->pipe));
8184 if (!(tmp & PIPECONF_ENABLE)) 8191 if (!(tmp & PIPECONF_ENABLE))
8185 return false; 8192 goto out;
8186 8193
8187 if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { 8194 if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
8188 switch (tmp & PIPECONF_BPC_MASK) { 8195 switch (tmp & PIPECONF_BPC_MASK) {
@@ -8262,7 +8269,12 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
8262 pipe_config->base.adjusted_mode.crtc_clock = 8269 pipe_config->base.adjusted_mode.crtc_clock =
8263 pipe_config->port_clock / pipe_config->pixel_multiplier; 8270 pipe_config->port_clock / pipe_config->pixel_multiplier;
8264 8271
8265 return true; 8272 ret = true;
8273
8274out:
8275 intel_display_power_put(dev_priv, power_domain);
8276
8277 return ret;
8266} 8278}
8267 8279
8268static void ironlake_init_pch_refclk(struct drm_device *dev) 8280static void ironlake_init_pch_refclk(struct drm_device *dev)
@@ -9366,18 +9378,21 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
9366{ 9378{
9367 struct drm_device *dev = crtc->base.dev; 9379 struct drm_device *dev = crtc->base.dev;
9368 struct drm_i915_private *dev_priv = dev->dev_private; 9380 struct drm_i915_private *dev_priv = dev->dev_private;
9381 enum intel_display_power_domain power_domain;
9369 uint32_t tmp; 9382 uint32_t tmp;
9383 bool ret;
9370 9384
9371 if (!intel_display_power_is_enabled(dev_priv, 9385 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
9372 POWER_DOMAIN_PIPE(crtc->pipe))) 9386 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
9373 return false; 9387 return false;
9374 9388
9375 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 9389 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9376 pipe_config->shared_dpll = DPLL_ID_PRIVATE; 9390 pipe_config->shared_dpll = DPLL_ID_PRIVATE;
9377 9391
9392 ret = false;
9378 tmp = I915_READ(PIPECONF(crtc->pipe)); 9393 tmp = I915_READ(PIPECONF(crtc->pipe));
9379 if (!(tmp & PIPECONF_ENABLE)) 9394 if (!(tmp & PIPECONF_ENABLE))
9380 return false; 9395 goto out;
9381 9396
9382 switch (tmp & PIPECONF_BPC_MASK) { 9397 switch (tmp & PIPECONF_BPC_MASK) {
9383 case PIPECONF_6BPC: 9398 case PIPECONF_6BPC:
@@ -9440,7 +9455,12 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
9440 9455
9441 ironlake_get_pfit_config(crtc, pipe_config); 9456 ironlake_get_pfit_config(crtc, pipe_config);
9442 9457
9443 return true; 9458 ret = true;
9459
9460out:
9461 intel_display_power_put(dev_priv, power_domain);
9462
9463 return ret;
9444} 9464}
9445 9465
9446static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) 9466static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
@@ -9950,12 +9970,17 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
9950{ 9970{
9951 struct drm_device *dev = crtc->base.dev; 9971 struct drm_device *dev = crtc->base.dev;
9952 struct drm_i915_private *dev_priv = dev->dev_private; 9972 struct drm_i915_private *dev_priv = dev->dev_private;
9953 enum intel_display_power_domain pfit_domain; 9973 enum intel_display_power_domain power_domain;
9974 unsigned long power_domain_mask;
9954 uint32_t tmp; 9975 uint32_t tmp;
9976 bool ret;
9955 9977
9956 if (!intel_display_power_is_enabled(dev_priv, 9978 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
9957 POWER_DOMAIN_PIPE(crtc->pipe))) 9979 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
9958 return false; 9980 return false;
9981 power_domain_mask = BIT(power_domain);
9982
9983 ret = false;
9959 9984
9960 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 9985 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9961 pipe_config->shared_dpll = DPLL_ID_PRIVATE; 9986 pipe_config->shared_dpll = DPLL_ID_PRIVATE;
@@ -9982,13 +10007,14 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
9982 pipe_config->cpu_transcoder = TRANSCODER_EDP; 10007 pipe_config->cpu_transcoder = TRANSCODER_EDP;
9983 } 10008 }
9984 10009
9985 if (!intel_display_power_is_enabled(dev_priv, 10010 power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
9986 POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder))) 10011 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
9987 return false; 10012 goto out;
10013 power_domain_mask |= BIT(power_domain);
9988 10014
9989 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder)); 10015 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
9990 if (!(tmp & PIPECONF_ENABLE)) 10016 if (!(tmp & PIPECONF_ENABLE))
9991 return false; 10017 goto out;
9992 10018
9993 haswell_get_ddi_port_state(crtc, pipe_config); 10019 haswell_get_ddi_port_state(crtc, pipe_config);
9994 10020
@@ -9998,14 +10024,14 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
9998 skl_init_scalers(dev, crtc, pipe_config); 10024 skl_init_scalers(dev, crtc, pipe_config);
9999 } 10025 }
10000 10026
10001 pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
10002
10003 if (INTEL_INFO(dev)->gen >= 9) { 10027 if (INTEL_INFO(dev)->gen >= 9) {
10004 pipe_config->scaler_state.scaler_id = -1; 10028 pipe_config->scaler_state.scaler_id = -1;
10005 pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX); 10029 pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
10006 } 10030 }
10007 10031
10008 if (intel_display_power_is_enabled(dev_priv, pfit_domain)) { 10032 power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
10033 if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
10034 power_domain_mask |= BIT(power_domain);
10009 if (INTEL_INFO(dev)->gen >= 9) 10035 if (INTEL_INFO(dev)->gen >= 9)
10010 skylake_get_pfit_config(crtc, pipe_config); 10036 skylake_get_pfit_config(crtc, pipe_config);
10011 else 10037 else
@@ -10023,7 +10049,13 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
10023 pipe_config->pixel_multiplier = 1; 10049 pipe_config->pixel_multiplier = 1;
10024 } 10050 }
10025 10051
10026 return true; 10052 ret = true;
10053
10054out:
10055 for_each_power_domain(power_domain, power_domain_mask)
10056 intel_display_power_put(dev_priv, power_domain);
10057
10058 return ret;
10027} 10059}
10028 10060
10029static void i845_update_cursor(struct drm_crtc *crtc, u32 base, bool on) 10061static void i845_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
@@ -12075,11 +12107,21 @@ connected_sink_compute_bpp(struct intel_connector *connector,
12075 pipe_config->pipe_bpp = connector->base.display_info.bpc*3; 12107 pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
12076 } 12108 }
12077 12109
12078 /* Clamp bpp to 8 on screens without EDID 1.4 */ 12110 /* Clamp bpp to default limit on screens without EDID 1.4 */
12079 if (connector->base.display_info.bpc == 0 && bpp > 24) { 12111 if (connector->base.display_info.bpc == 0) {
12080 DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n", 12112 int type = connector->base.connector_type;
12081 bpp); 12113 int clamp_bpp = 24;
12082 pipe_config->pipe_bpp = 24; 12114
12115 /* Fall back to 18 bpp when DP sink capability is unknown. */
12116 if (type == DRM_MODE_CONNECTOR_DisplayPort ||
12117 type == DRM_MODE_CONNECTOR_eDP)
12118 clamp_bpp = 18;
12119
12120 if (bpp > clamp_bpp) {
12121 DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n",
12122 bpp, clamp_bpp);
12123 pipe_config->pipe_bpp = clamp_bpp;
12124 }
12083 } 12125 }
12084} 12126}
12085 12127
@@ -13620,7 +13662,7 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
13620{ 13662{
13621 uint32_t val; 13663 uint32_t val;
13622 13664
13623 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS)) 13665 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
13624 return false; 13666 return false;
13625 13667
13626 val = I915_READ(PCH_DPLL(pll->id)); 13668 val = I915_READ(PCH_DPLL(pll->id));
@@ -13628,6 +13670,8 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
13628 hw_state->fp0 = I915_READ(PCH_FP0(pll->id)); 13670 hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
13629 hw_state->fp1 = I915_READ(PCH_FP1(pll->id)); 13671 hw_state->fp1 = I915_READ(PCH_FP1(pll->id));
13630 13672
13673 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
13674
13631 return val & DPLL_VCO_ENABLE; 13675 return val & DPLL_VCO_ENABLE;
13632} 13676}
13633 13677
@@ -13883,11 +13927,12 @@ intel_check_primary_plane(struct drm_plane *plane,
13883 int max_scale = DRM_PLANE_HELPER_NO_SCALING; 13927 int max_scale = DRM_PLANE_HELPER_NO_SCALING;
13884 bool can_position = false; 13928 bool can_position = false;
13885 13929
13886 /* use scaler when colorkey is not required */ 13930 if (INTEL_INFO(plane->dev)->gen >= 9) {
13887 if (INTEL_INFO(plane->dev)->gen >= 9 && 13931 /* use scaler when colorkey is not required */
13888 state->ckey.flags == I915_SET_COLORKEY_NONE) { 13932 if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
13889 min_scale = 1; 13933 min_scale = 1;
13890 max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state); 13934 max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
13935 }
13891 can_position = true; 13936 can_position = true;
13892 } 13937 }
13893 13938
@@ -15557,10 +15602,12 @@ void i915_redisable_vga(struct drm_device *dev)
15557 * level, just check if the power well is enabled instead of trying to 15602 * level, just check if the power well is enabled instead of trying to
15558 * follow the "don't touch the power well if we don't need it" policy 15603 * follow the "don't touch the power well if we don't need it" policy
15559 * the rest of the driver uses. */ 15604 * the rest of the driver uses. */
15560 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_VGA)) 15605 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
15561 return; 15606 return;
15562 15607
15563 i915_redisable_vga_power_on(dev); 15608 i915_redisable_vga_power_on(dev);
15609
15610 intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
15564} 15611}
15565 15612
15566static bool primary_get_hw_state(struct intel_plane *plane) 15613static bool primary_get_hw_state(struct intel_plane *plane)
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 796e3d313cb9..1d8de43bed56 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -2362,15 +2362,18 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2362 struct drm_i915_private *dev_priv = dev->dev_private; 2362 struct drm_i915_private *dev_priv = dev->dev_private;
2363 enum intel_display_power_domain power_domain; 2363 enum intel_display_power_domain power_domain;
2364 u32 tmp; 2364 u32 tmp;
2365 bool ret;
2365 2366
2366 power_domain = intel_display_port_power_domain(encoder); 2367 power_domain = intel_display_port_power_domain(encoder);
2367 if (!intel_display_power_is_enabled(dev_priv, power_domain)) 2368 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
2368 return false; 2369 return false;
2369 2370
2371 ret = false;
2372
2370 tmp = I915_READ(intel_dp->output_reg); 2373 tmp = I915_READ(intel_dp->output_reg);
2371 2374
2372 if (!(tmp & DP_PORT_EN)) 2375 if (!(tmp & DP_PORT_EN))
2373 return false; 2376 goto out;
2374 2377
2375 if (IS_GEN7(dev) && port == PORT_A) { 2378 if (IS_GEN7(dev) && port == PORT_A) {
2376 *pipe = PORT_TO_PIPE_CPT(tmp); 2379 *pipe = PORT_TO_PIPE_CPT(tmp);
@@ -2381,7 +2384,9 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2381 u32 trans_dp = I915_READ(TRANS_DP_CTL(p)); 2384 u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2382 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) { 2385 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2383 *pipe = p; 2386 *pipe = p;
2384 return true; 2387 ret = true;
2388
2389 goto out;
2385 } 2390 }
2386 } 2391 }
2387 2392
@@ -2393,7 +2398,12 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2393 *pipe = PORT_TO_PIPE(tmp); 2398 *pipe = PORT_TO_PIPE(tmp);
2394 } 2399 }
2395 2400
2396 return true; 2401 ret = true;
2402
2403out:
2404 intel_display_power_put(dev_priv, power_domain);
2405
2406 return ret;
2397} 2407}
2398 2408
2399static void intel_dp_get_config(struct intel_encoder *encoder, 2409static void intel_dp_get_config(struct intel_encoder *encoder,
@@ -4493,20 +4503,20 @@ static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
4493 return I915_READ(PORT_HOTPLUG_STAT) & bit; 4503 return I915_READ(PORT_HOTPLUG_STAT) & bit;
4494} 4504}
4495 4505
4496static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv, 4506static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
4497 struct intel_digital_port *port) 4507 struct intel_digital_port *port)
4498{ 4508{
4499 u32 bit; 4509 u32 bit;
4500 4510
4501 switch (port->port) { 4511 switch (port->port) {
4502 case PORT_B: 4512 case PORT_B:
4503 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV; 4513 bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
4504 break; 4514 break;
4505 case PORT_C: 4515 case PORT_C:
4506 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV; 4516 bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
4507 break; 4517 break;
4508 case PORT_D: 4518 case PORT_D:
4509 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV; 4519 bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
4510 break; 4520 break;
4511 default: 4521 default:
4512 MISSING_CASE(port->port); 4522 MISSING_CASE(port->port);
@@ -4558,8 +4568,8 @@ bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
4558 return cpt_digital_port_connected(dev_priv, port); 4568 return cpt_digital_port_connected(dev_priv, port);
4559 else if (IS_BROXTON(dev_priv)) 4569 else if (IS_BROXTON(dev_priv))
4560 return bxt_digital_port_connected(dev_priv, port); 4570 return bxt_digital_port_connected(dev_priv, port);
4561 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 4571 else if (IS_GM45(dev_priv))
4562 return vlv_digital_port_connected(dev_priv, port); 4572 return gm45_digital_port_connected(dev_priv, port);
4563 else 4573 else
4564 return g4x_digital_port_connected(dev_priv, port); 4574 return g4x_digital_port_connected(dev_priv, port);
4565} 4575}
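All of the hw-state readout hooks converted above follow the same shape: take the power domain reference only if the domain is already enabled, bail out immediately if it is not, do the register reads, and drop the reference on a single exit path so the early-return cases cannot leak it. The stand-alone sketch below only illustrates that shape; the names (power_get_if_enabled, power_put, get_hw_state) are invented and are not the i915 API:

    #include <stdbool.h>
    #include <stdio.h>

    struct domain { int refcount; bool enabled; };

    /* Take a reference only when the domain is already powered. */
    static bool power_get_if_enabled(struct domain *d)
    {
        if (!d->enabled)
            return false;
        d->refcount++;
        return true;
    }

    static void power_put(struct domain *d)
    {
        d->refcount--;
    }

    /* Hypothetical readout: a single exit path keeps get/put balanced. */
    static bool get_hw_state(struct domain *d, unsigned int reg)
    {
        bool ret = false;

        if (!power_get_if_enabled(d))
            return false;       /* nothing acquired, nothing to release */

        if (!(reg & 0x1))       /* "port enable" bit clear: not active */
            goto out;

        ret = true;             /* pipe decoding etc. would go here */
    out:
        power_put(d);           /* every acquired reference is dropped */
        return ret;
    }

    int main(void)
    {
        struct domain d = { .refcount = 0, .enabled = true };

        printf("state=%d refcount=%d\n", get_hw_state(&d, 0x1), d.refcount);
        printf("state=%d refcount=%d\n", get_hw_state(&d, 0x0), d.refcount);
        return 0;
    }

The important property is that every successful acquisition is paired with exactly one release, even when the readout bails out before finding an enabled port.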
diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c
index 88887938e0bf..0b8eefc2acc5 100644
--- a/drivers/gpu/drm/i915/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/intel_dp_link_training.c
@@ -215,27 +215,46 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
215 } 215 }
216} 216}
217 217
218static void 218/*
219intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp) 219 * Pick training pattern for channel equalization. Training Pattern 3 for HBR2
220 * or 1.2 devices that support it, Training Pattern 2 otherwise.
221 */
222static u32 intel_dp_training_pattern(struct intel_dp *intel_dp)
220{ 223{
221 bool channel_eq = false; 224 u32 training_pattern = DP_TRAINING_PATTERN_2;
222 int tries, cr_tries; 225 bool source_tps3, sink_tps3;
223 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
224 226
225 /* 227 /*
226 * Training Pattern 3 for HBR2 or 1.2 devices that support it.
227 *
228 * Intel platforms that support HBR2 also support TPS3. TPS3 support is 228 * Intel platforms that support HBR2 also support TPS3. TPS3 support is
229 * also mandatory for downstream devices that support HBR2. 229 * also mandatory for downstream devices that support HBR2. However, not
230 * all sinks follow the spec.
230 * 231 *
231 * Due to WaDisableHBR2 SKL < B0 is the only exception where TPS3 is 232 * Due to WaDisableHBR2 SKL < B0 is the only exception where TPS3 is
232 * supported but still not enabled. 233 * supported in source but still not enabled.
233 */ 234 */
234 if (intel_dp_source_supports_hbr2(intel_dp) && 235 source_tps3 = intel_dp_source_supports_hbr2(intel_dp);
235 drm_dp_tps3_supported(intel_dp->dpcd)) 236 sink_tps3 = drm_dp_tps3_supported(intel_dp->dpcd);
237
238 if (source_tps3 && sink_tps3) {
236 training_pattern = DP_TRAINING_PATTERN_3; 239 training_pattern = DP_TRAINING_PATTERN_3;
237 else if (intel_dp->link_rate == 540000) 240 } else if (intel_dp->link_rate == 540000) {
238 DRM_ERROR("5.4 Gbps link rate without HBR2/TPS3 support\n"); 241 if (!source_tps3)
242 DRM_DEBUG_KMS("5.4 Gbps link rate without source HBR2/TPS3 support\n");
243 if (!sink_tps3)
244 DRM_DEBUG_KMS("5.4 Gbps link rate without sink TPS3 support\n");
245 }
246
247 return training_pattern;
248}
249
250static void
251intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
252{
253 bool channel_eq = false;
254 int tries, cr_tries;
255 u32 training_pattern;
256
257 training_pattern = intel_dp_training_pattern(intel_dp);
239 258
240 /* channel equalization */ 259 /* channel equalization */
241 if (!intel_dp_set_link_train(intel_dp, 260 if (!intel_dp_set_link_train(intel_dp,
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index ea5415851c6e..df7f3cb66056 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1428,6 +1428,8 @@ bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
1428 enum intel_display_power_domain domain); 1428 enum intel_display_power_domain domain);
1429void intel_display_power_get(struct drm_i915_private *dev_priv, 1429void intel_display_power_get(struct drm_i915_private *dev_priv,
1430 enum intel_display_power_domain domain); 1430 enum intel_display_power_domain domain);
1431bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
1432 enum intel_display_power_domain domain);
1431void intel_display_power_put(struct drm_i915_private *dev_priv, 1433void intel_display_power_put(struct drm_i915_private *dev_priv,
1432 enum intel_display_power_domain domain); 1434 enum intel_display_power_domain domain);
1433 1435
@@ -1514,6 +1516,7 @@ enable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv)
1514 enable_rpm_wakeref_asserts(dev_priv) 1516 enable_rpm_wakeref_asserts(dev_priv)
1515 1517
1516void intel_runtime_pm_get(struct drm_i915_private *dev_priv); 1518void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
1519bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv);
1517void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv); 1520void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
1518void intel_runtime_pm_put(struct drm_i915_private *dev_priv); 1521void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
1519 1522
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 44742fa2f616..0193c62a53ef 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -664,13 +664,16 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
664 struct drm_device *dev = encoder->base.dev; 664 struct drm_device *dev = encoder->base.dev;
665 enum intel_display_power_domain power_domain; 665 enum intel_display_power_domain power_domain;
666 enum port port; 666 enum port port;
667 bool ret;
667 668
668 DRM_DEBUG_KMS("\n"); 669 DRM_DEBUG_KMS("\n");
669 670
670 power_domain = intel_display_port_power_domain(encoder); 671 power_domain = intel_display_port_power_domain(encoder);
671 if (!intel_display_power_is_enabled(dev_priv, power_domain)) 672 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
672 return false; 673 return false;
673 674
675 ret = false;
676
674 /* XXX: this only works for one DSI output */ 677 /* XXX: this only works for one DSI output */
675 for_each_dsi_port(port, intel_dsi->ports) { 678 for_each_dsi_port(port, intel_dsi->ports) {
676 i915_reg_t ctrl_reg = IS_BROXTON(dev) ? 679 i915_reg_t ctrl_reg = IS_BROXTON(dev) ?
@@ -691,12 +694,16 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
691 if (dpi_enabled || (func & CMD_MODE_DATA_WIDTH_MASK)) { 694 if (dpi_enabled || (func & CMD_MODE_DATA_WIDTH_MASK)) {
692 if (I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY) { 695 if (I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY) {
693 *pipe = port == PORT_A ? PIPE_A : PIPE_B; 696 *pipe = port == PORT_A ? PIPE_A : PIPE_B;
694 return true; 697 ret = true;
698
699 goto out;
695 } 700 }
696 } 701 }
697 } 702 }
703out:
704 intel_display_power_put(dev_priv, power_domain);
698 705
699 return false; 706 return ret;
700} 707}
701 708
702static void intel_dsi_get_config(struct intel_encoder *encoder, 709static void intel_dsi_get_config(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
index a5e99ac305da..e8113ad65477 100644
--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -204,10 +204,28 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
204 struct drm_device *dev = intel_dsi->base.base.dev; 204 struct drm_device *dev = intel_dsi->base.base.dev;
205 struct drm_i915_private *dev_priv = dev->dev_private; 205 struct drm_i915_private *dev_priv = dev->dev_private;
206 206
207 if (dev_priv->vbt.dsi.seq_version >= 3)
208 data++;
209
207 gpio = *data++; 210 gpio = *data++;
208 211
209 /* pull up/down */ 212 /* pull up/down */
210 action = *data++; 213 action = *data++ & 1;
214
215 if (gpio >= ARRAY_SIZE(gtable)) {
216 DRM_DEBUG_KMS("unknown gpio %u\n", gpio);
217 goto out;
218 }
219
220 if (!IS_VALLEYVIEW(dev_priv)) {
221 DRM_DEBUG_KMS("GPIO element not supported on this platform\n");
222 goto out;
223 }
224
225 if (dev_priv->vbt.dsi.seq_version >= 3) {
226 DRM_DEBUG_KMS("GPIO element v3 not supported\n");
227 goto out;
228 }
211 229
212 function = gtable[gpio].function_reg; 230 function = gtable[gpio].function_reg;
213 pad = gtable[gpio].pad_reg; 231 pad = gtable[gpio].pad_reg;
@@ -226,6 +244,7 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
226 vlv_gpio_nc_write(dev_priv, pad, val); 244 vlv_gpio_nc_write(dev_priv, pad, val);
227 mutex_unlock(&dev_priv->sb_lock); 245 mutex_unlock(&dev_priv->sb_lock);
228 246
247out:
229 return data; 248 return data;
230} 249}
231 250
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 4a77639a489d..cb5d1b15755c 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -880,15 +880,18 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
880 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); 880 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
881 enum intel_display_power_domain power_domain; 881 enum intel_display_power_domain power_domain;
882 u32 tmp; 882 u32 tmp;
883 bool ret;
883 884
884 power_domain = intel_display_port_power_domain(encoder); 885 power_domain = intel_display_port_power_domain(encoder);
885 if (!intel_display_power_is_enabled(dev_priv, power_domain)) 886 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
886 return false; 887 return false;
887 888
889 ret = false;
890
888 tmp = I915_READ(intel_hdmi->hdmi_reg); 891 tmp = I915_READ(intel_hdmi->hdmi_reg);
889 892
890 if (!(tmp & SDVO_ENABLE)) 893 if (!(tmp & SDVO_ENABLE))
891 return false; 894 goto out;
892 895
893 if (HAS_PCH_CPT(dev)) 896 if (HAS_PCH_CPT(dev))
894 *pipe = PORT_TO_PIPE_CPT(tmp); 897 *pipe = PORT_TO_PIPE_CPT(tmp);
@@ -897,7 +900,12 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
897 else 900 else
898 *pipe = PORT_TO_PIPE(tmp); 901 *pipe = PORT_TO_PIPE(tmp);
899 902
900 return true; 903 ret = true;
904
905out:
906 intel_display_power_put(dev_priv, power_domain);
907
908 return ret;
901} 909}
902 910
903static void intel_hdmi_get_config(struct intel_encoder *encoder, 911static void intel_hdmi_get_config(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 25254b5c1ac5..deb8282c26d8 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -683,7 +683,7 @@ int intel_setup_gmbus(struct drm_device *dev)
683 return 0; 683 return 0;
684 684
685err: 685err:
686 while (--pin) { 686 while (pin--) {
687 if (!intel_gmbus_is_valid_pin(dev_priv, pin)) 687 if (!intel_gmbus_is_valid_pin(dev_priv, pin))
688 continue; 688 continue;
689 689
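The gmbus error-path change above (and the identical fix in nouveau_ttm_tt_populate further down) is the classic unwind-loop off-by-one: with `while (--pin)` the body never runs for index 0, and if the counter is unsigned and the failure happened at index 0 the pre-decrement wraps and the loop walks far past the array; `while (pin--)` visits every index below the failing one, 0 included. A small stand-alone illustration:

    #include <stdio.h>

    int main(void)
    {
        unsigned int i;

        /* Suppose initialisation failed at index i == 3; undo indices 2, 1, 0. */

        i = 3;
        printf("while (--i):");
        while (--i)
            printf(" %u", i);   /* prints 2 1 -- index 0 is never cleaned up */
        printf("\n");

        i = 3;
        printf("while (i--):");
        while (i--)
            printf(" %u", i);   /* prints 2 1 0 -- every earlier index */
        printf("\n");

        return 0;
    }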
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 3aa614731d7e..f1fa756c5d5d 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1707,6 +1707,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
1707 if (flush_domains) { 1707 if (flush_domains) {
1708 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; 1708 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
1709 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; 1709 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
1710 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
1710 flags |= PIPE_CONTROL_FLUSH_ENABLE; 1711 flags |= PIPE_CONTROL_FLUSH_ENABLE;
1711 } 1712 }
1712 1713
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 0da0240caf81..bc04d8d29acb 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -75,22 +75,30 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
75 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); 75 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
76 enum intel_display_power_domain power_domain; 76 enum intel_display_power_domain power_domain;
77 u32 tmp; 77 u32 tmp;
78 bool ret;
78 79
79 power_domain = intel_display_port_power_domain(encoder); 80 power_domain = intel_display_port_power_domain(encoder);
80 if (!intel_display_power_is_enabled(dev_priv, power_domain)) 81 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
81 return false; 82 return false;
82 83
84 ret = false;
85
83 tmp = I915_READ(lvds_encoder->reg); 86 tmp = I915_READ(lvds_encoder->reg);
84 87
85 if (!(tmp & LVDS_PORT_EN)) 88 if (!(tmp & LVDS_PORT_EN))
86 return false; 89 goto out;
87 90
88 if (HAS_PCH_CPT(dev)) 91 if (HAS_PCH_CPT(dev))
89 *pipe = PORT_TO_PIPE_CPT(tmp); 92 *pipe = PORT_TO_PIPE_CPT(tmp);
90 else 93 else
91 *pipe = PORT_TO_PIPE(tmp); 94 *pipe = PORT_TO_PIPE(tmp);
92 95
93 return true; 96 ret = true;
97
98out:
99 intel_display_power_put(dev_priv, power_domain);
100
101 return ret;
94} 102}
95 103
96static void intel_lvds_get_config(struct intel_encoder *encoder, 104static void intel_lvds_get_config(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index eb5fa05cf476..b28c29f20e75 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -1783,16 +1783,20 @@ static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
1783 const struct intel_plane_state *pstate, 1783 const struct intel_plane_state *pstate,
1784 uint32_t mem_value) 1784 uint32_t mem_value)
1785{ 1785{
1786 int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0; 1786 /*
1787 * We treat the cursor plane as always-on for the purposes of watermark
1788 * calculation. Until we have two-stage watermark programming merged,
1789 * this is necessary to avoid flickering.
1790 */
1791 int cpp = 4;
1792 int width = pstate->visible ? pstate->base.crtc_w : 64;
1787 1793
1788 if (!cstate->base.active || !pstate->visible) 1794 if (!cstate->base.active)
1789 return 0; 1795 return 0;
1790 1796
1791 return ilk_wm_method2(ilk_pipe_pixel_rate(cstate), 1797 return ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
1792 cstate->base.adjusted_mode.crtc_htotal, 1798 cstate->base.adjusted_mode.crtc_htotal,
1793 drm_rect_width(&pstate->dst), 1799 width, cpp, mem_value);
1794 bpp,
1795 mem_value);
1796} 1800}
1797 1801
1798/* Only for WM_LP. */ 1802/* Only for WM_LP. */
@@ -2825,7 +2829,10 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
2825 memset(ddb, 0, sizeof(*ddb)); 2829 memset(ddb, 0, sizeof(*ddb));
2826 2830
2827 for_each_pipe(dev_priv, pipe) { 2831 for_each_pipe(dev_priv, pipe) {
2828 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe))) 2832 enum intel_display_power_domain power_domain;
2833
2834 power_domain = POWER_DOMAIN_PIPE(pipe);
2835 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
2829 continue; 2836 continue;
2830 2837
2831 for_each_plane(dev_priv, pipe, plane) { 2838 for_each_plane(dev_priv, pipe, plane) {
@@ -2837,6 +2844,8 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
2837 val = I915_READ(CUR_BUF_CFG(pipe)); 2844 val = I915_READ(CUR_BUF_CFG(pipe));
2838 skl_ddb_entry_init_from_hw(&ddb->plane[pipe][PLANE_CURSOR], 2845 skl_ddb_entry_init_from_hw(&ddb->plane[pipe][PLANE_CURSOR],
2839 val); 2846 val);
2847
2848 intel_display_power_put(dev_priv, power_domain);
2840 } 2849 }
2841} 2850}
2842 2851
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 339701d7a9a5..40c6aff57256 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -331,6 +331,7 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req,
331 if (flush_domains) { 331 if (flush_domains) {
332 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; 332 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
333 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; 333 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
334 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
334 flags |= PIPE_CONTROL_FLUSH_ENABLE; 335 flags |= PIPE_CONTROL_FLUSH_ENABLE;
335 } 336 }
336 if (invalidate_domains) { 337 if (invalidate_domains) {
@@ -403,6 +404,7 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req,
403 if (flush_domains) { 404 if (flush_domains) {
404 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; 405 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
405 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; 406 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
407 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
406 flags |= PIPE_CONTROL_FLUSH_ENABLE; 408 flags |= PIPE_CONTROL_FLUSH_ENABLE;
407 } 409 }
408 if (invalidate_domains) { 410 if (invalidate_domains) {
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index ddbdbffe829a..4f43d9b32e66 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -470,6 +470,43 @@ static void gen9_set_dc_state_debugmask_memory_up(
470 } 470 }
471} 471}
472 472
473static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
474 u32 state)
475{
476 int rewrites = 0;
477 int rereads = 0;
478 u32 v;
479
480 I915_WRITE(DC_STATE_EN, state);
481
 482 /* It has been observed that disabling the DC6 state sometimes
 483 * doesn't stick and the DMC keeps returning the old value. Make sure
 484 * the write really sticks by re-reading it enough times, and keep
 485 * rewriting it until we are confident the state is exactly what we want.
486 */
487 do {
488 v = I915_READ(DC_STATE_EN);
489
490 if (v != state) {
491 I915_WRITE(DC_STATE_EN, state);
492 rewrites++;
493 rereads = 0;
494 } else if (rereads++ > 5) {
495 break;
496 }
497
498 } while (rewrites < 100);
499
500 if (v != state)
501 DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
502 state, v);
503
 504 /* Most of the time at most one rewrite is needed; avoid log spam */
505 if (rewrites > 1)
506 DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
507 state, rewrites);
508}
509
473static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state) 510static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
474{ 511{
475 uint32_t val; 512 uint32_t val;
@@ -494,10 +531,18 @@ static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
494 val = I915_READ(DC_STATE_EN); 531 val = I915_READ(DC_STATE_EN);
495 DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n", 532 DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
496 val & mask, state); 533 val & mask, state);
534
535 /* Check if DMC is ignoring our DC state requests */
536 if ((val & mask) != dev_priv->csr.dc_state)
537 DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
538 dev_priv->csr.dc_state, val & mask);
539
497 val &= ~mask; 540 val &= ~mask;
498 val |= state; 541 val |= state;
499 I915_WRITE(DC_STATE_EN, val); 542
500 POSTING_READ(DC_STATE_EN); 543 gen9_write_dc_state(dev_priv, val);
544
545 dev_priv->csr.dc_state = val & mask;
501} 546}
502 547
503void bxt_enable_dc9(struct drm_i915_private *dev_priv) 548void bxt_enable_dc9(struct drm_i915_private *dev_priv)
@@ -1442,6 +1487,22 @@ static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
1442 chv_set_pipe_power_well(dev_priv, power_well, false); 1487 chv_set_pipe_power_well(dev_priv, power_well, false);
1443} 1488}
1444 1489
1490static void
1491__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
1492 enum intel_display_power_domain domain)
1493{
1494 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1495 struct i915_power_well *power_well;
1496 int i;
1497
1498 for_each_power_well(i, power_well, BIT(domain), power_domains) {
1499 if (!power_well->count++)
1500 intel_power_well_enable(dev_priv, power_well);
1501 }
1502
1503 power_domains->domain_use_count[domain]++;
1504}
1505
1445/** 1506/**
1446 * intel_display_power_get - grab a power domain reference 1507 * intel_display_power_get - grab a power domain reference
1447 * @dev_priv: i915 device instance 1508 * @dev_priv: i915 device instance
@@ -1457,24 +1518,53 @@ static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
1457void intel_display_power_get(struct drm_i915_private *dev_priv, 1518void intel_display_power_get(struct drm_i915_private *dev_priv,
1458 enum intel_display_power_domain domain) 1519 enum intel_display_power_domain domain)
1459{ 1520{
1460 struct i915_power_domains *power_domains; 1521 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1461 struct i915_power_well *power_well;
1462 int i;
1463 1522
1464 intel_runtime_pm_get(dev_priv); 1523 intel_runtime_pm_get(dev_priv);
1465 1524
1466 power_domains = &dev_priv->power_domains; 1525 mutex_lock(&power_domains->lock);
1526
1527 __intel_display_power_get_domain(dev_priv, domain);
1528
1529 mutex_unlock(&power_domains->lock);
1530}
1531
1532/**
1533 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
1534 * @dev_priv: i915 device instance
1535 * @domain: power domain to reference
1536 *
 1537 * This function grabs a power domain reference for @domain, but only if the
 1538 * domain is already enabled; otherwise it returns false and takes no reference.
 1539 * Users should only grab a reference to the innermost power domain they need.
1540 *
1541 * Any power domain reference obtained by this function must have a symmetric
1542 * call to intel_display_power_put() to release the reference again.
1543 */
1544bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
1545 enum intel_display_power_domain domain)
1546{
1547 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1548 bool is_enabled;
1549
1550 if (!intel_runtime_pm_get_if_in_use(dev_priv))
1551 return false;
1467 1552
1468 mutex_lock(&power_domains->lock); 1553 mutex_lock(&power_domains->lock);
1469 1554
1470 for_each_power_well(i, power_well, BIT(domain), power_domains) { 1555 if (__intel_display_power_is_enabled(dev_priv, domain)) {
1471 if (!power_well->count++) 1556 __intel_display_power_get_domain(dev_priv, domain);
1472 intel_power_well_enable(dev_priv, power_well); 1557 is_enabled = true;
1558 } else {
1559 is_enabled = false;
1473 } 1560 }
1474 1561
1475 power_domains->domain_use_count[domain]++;
1476
1477 mutex_unlock(&power_domains->lock); 1562 mutex_unlock(&power_domains->lock);
1563
1564 if (!is_enabled)
1565 intel_runtime_pm_put(dev_priv);
1566
1567 return is_enabled;
1478} 1568}
1479 1569
1480/** 1570/**
@@ -2213,15 +2303,15 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
2213 */ 2303 */
2214void intel_power_domains_suspend(struct drm_i915_private *dev_priv) 2304void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
2215{ 2305{
2216 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
2217 skl_display_core_uninit(dev_priv);
2218
2219 /* 2306 /*
2220 * Even if power well support was disabled we still want to disable 2307 * Even if power well support was disabled we still want to disable
2221 * power wells while we are system suspended. 2308 * power wells while we are system suspended.
2222 */ 2309 */
2223 if (!i915.disable_power_well) 2310 if (!i915.disable_power_well)
2224 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); 2311 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
2312
2313 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
2314 skl_display_core_uninit(dev_priv);
2225} 2315}
2226 2316
2227/** 2317/**
@@ -2246,6 +2336,41 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
2246} 2336}
2247 2337
2248/** 2338/**
2339 * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
2340 * @dev_priv: i915 device instance
2341 *
2342 * This function grabs a device-level runtime pm reference if the device is
2343 * already in use and ensures that it is powered up.
2344 *
2345 * Any runtime pm reference obtained by this function must have a symmetric
2346 * call to intel_runtime_pm_put() to release the reference again.
2347 */
2348bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
2349{
2350 struct drm_device *dev = dev_priv->dev;
2351 struct device *device = &dev->pdev->dev;
2352
2353 if (IS_ENABLED(CONFIG_PM)) {
2354 int ret = pm_runtime_get_if_in_use(device);
2355
2356 /*
 2357 * If runtime PM is disabled by the RPM core we get an -EINVAL
 2358 * return value; in that case we are not supposed to have called
 2359 * this function at all, since the power state is undefined. At the
 2360 * moment this applies to the late/early system suspend/resume handlers.
2361 */
2362 WARN_ON_ONCE(ret < 0);
2363 if (ret <= 0)
2364 return false;
2365 }
2366
2367 atomic_inc(&dev_priv->pm.wakeref_count);
2368 assert_rpm_wakelock_held(dev_priv);
2369
2370 return true;
2371}
2372
2373/**
2249 * intel_runtime_pm_get_noresume - grab a runtime pm reference 2374 * intel_runtime_pm_get_noresume - grab a runtime pm reference
2250 * @dev_priv: i915 device instance 2375 * @dev_priv: i915 device instance
2251 * 2376 *
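intel_runtime_pm_get_if_in_use() above leans on pm_runtime_get_if_in_use(), which reports three outcomes: a negative errno (notably -EINVAL when runtime PM is disabled), 0 when the device is not in active use, and a positive value when a usage-count reference was actually taken. Only the last case may proceed, which is why the helper treats ret <= 0 as failure and only warns on the negative case. A hedged stand-alone sketch of that three-way handling, with a made-up try_get() standing in for the runtime-PM call:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for pm_runtime_get_if_in_use(): <0 error, 0 not in use, >0 got a reference. */
    static int try_get(int state)
    {
        return state;
    }

    static bool get_if_in_use(int state)
    {
        int ret = try_get(state);

        if (ret < 0)                  /* disabled/undefined: caller bug */
            fprintf(stderr, "WARN: unexpected error %d\n", ret);
        if (ret <= 0)
            return false;             /* no reference was taken */

        /* reference held: bump local book-keeping here */
        return true;
    }

    int main(void)
    {
        printf("%d %d %d\n", get_if_in_use(-22), get_if_in_use(0), get_if_in_use(1));
        return 0;
    }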
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 30a57185bdb4..287226311413 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -64,6 +64,7 @@ static void ipu_fb_enable(struct ipu_crtc *ipu_crtc)
64 /* Start DC channel and DI after IDMAC */ 64 /* Start DC channel and DI after IDMAC */
65 ipu_dc_enable_channel(ipu_crtc->dc); 65 ipu_dc_enable_channel(ipu_crtc->dc);
66 ipu_di_enable(ipu_crtc->di); 66 ipu_di_enable(ipu_crtc->di);
67 drm_crtc_vblank_on(&ipu_crtc->base);
67 68
68 ipu_crtc->enabled = 1; 69 ipu_crtc->enabled = 1;
69} 70}
@@ -80,6 +81,7 @@ static void ipu_fb_disable(struct ipu_crtc *ipu_crtc)
80 ipu_di_disable(ipu_crtc->di); 81 ipu_di_disable(ipu_crtc->di);
81 ipu_plane_disable(ipu_crtc->plane[0]); 82 ipu_plane_disable(ipu_crtc->plane[0]);
82 ipu_dc_disable(ipu); 83 ipu_dc_disable(ipu);
84 drm_crtc_vblank_off(&ipu_crtc->base);
83 85
84 ipu_crtc->enabled = 0; 86 ipu_crtc->enabled = 0;
85} 87}
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 591ba2f1ae03..26bb1b626fe3 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -42,6 +42,7 @@ static const uint32_t ipu_plane_formats[] = {
42 DRM_FORMAT_YVYU, 42 DRM_FORMAT_YVYU,
43 DRM_FORMAT_YUV420, 43 DRM_FORMAT_YUV420,
44 DRM_FORMAT_YVU420, 44 DRM_FORMAT_YVU420,
45 DRM_FORMAT_RGB565,
45}; 46};
46 47
47int ipu_plane_irq(struct ipu_plane *ipu_plane) 48int ipu_plane_irq(struct ipu_plane *ipu_plane)
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 78f520d05de9..e3acc35e3805 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1520,7 +1520,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
1520 DMA_BIDIRECTIONAL); 1520 DMA_BIDIRECTIONAL);
1521 1521
1522 if (dma_mapping_error(pdev, addr)) { 1522 if (dma_mapping_error(pdev, addr)) {
1523 while (--i) { 1523 while (i--) {
1524 dma_unmap_page(pdev, ttm_dma->dma_address[i], 1524 dma_unmap_page(pdev, ttm_dma->dma_address[i],
1525 PAGE_SIZE, DMA_BIDIRECTIONAL); 1525 PAGE_SIZE, DMA_BIDIRECTIONAL);
1526 ttm_dma->dma_address[i] = 0; 1526 ttm_dma->dma_address[i] = 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 24be27d3cd18..20935eb2a09e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -635,10 +635,6 @@ nouveau_display_resume(struct drm_device *dev, bool runtime)
635 nv_crtc->lut.depth = 0; 635 nv_crtc->lut.depth = 0;
636 } 636 }
637 637
638 /* Make sure that drm and hw vblank irqs get resumed if needed. */
639 for (head = 0; head < dev->mode_config.num_crtc; head++)
640 drm_vblank_on(dev, head);
641
642 /* This should ensure we don't hit a locking problem when someone 638 /* This should ensure we don't hit a locking problem when someone
643 * wakes us up via a connector. We should never go into suspend 639 * wakes us up via a connector. We should never go into suspend
644 * while the display is on anyways. 640 * while the display is on anyways.
@@ -648,6 +644,10 @@ nouveau_display_resume(struct drm_device *dev, bool runtime)
648 644
649 drm_helper_resume_force_mode(dev); 645 drm_helper_resume_force_mode(dev);
650 646
647 /* Make sure that drm and hw vblank irqs get resumed if needed. */
648 for (head = 0; head < dev->mode_config.num_crtc; head++)
649 drm_vblank_on(dev, head);
650
651 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 651 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
652 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 652 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
653 653
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index 8a70cec59bcd..2dfe58af12e4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -24,7 +24,7 @@
24static int nouveau_platform_probe(struct platform_device *pdev) 24static int nouveau_platform_probe(struct platform_device *pdev)
25{ 25{
26 const struct nvkm_device_tegra_func *func; 26 const struct nvkm_device_tegra_func *func;
27 struct nvkm_device *device; 27 struct nvkm_device *device = NULL;
28 struct drm_device *drm; 28 struct drm_device *drm;
29 int ret; 29 int ret;
30 30
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index 7f8a42721eb2..e7e581d6a8ff 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -252,32 +252,40 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
252 252
253 if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL))) 253 if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL)))
254 return -ENOMEM; 254 return -ENOMEM;
255 *pdevice = &tdev->device; 255
256 tdev->func = func; 256 tdev->func = func;
257 tdev->pdev = pdev; 257 tdev->pdev = pdev;
258 tdev->irq = -1; 258 tdev->irq = -1;
259 259
260 tdev->vdd = devm_regulator_get(&pdev->dev, "vdd"); 260 tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
261 if (IS_ERR(tdev->vdd)) 261 if (IS_ERR(tdev->vdd)) {
262 return PTR_ERR(tdev->vdd); 262 ret = PTR_ERR(tdev->vdd);
263 goto free;
264 }
263 265
264 tdev->rst = devm_reset_control_get(&pdev->dev, "gpu"); 266 tdev->rst = devm_reset_control_get(&pdev->dev, "gpu");
265 if (IS_ERR(tdev->rst)) 267 if (IS_ERR(tdev->rst)) {
266 return PTR_ERR(tdev->rst); 268 ret = PTR_ERR(tdev->rst);
269 goto free;
270 }
267 271
268 tdev->clk = devm_clk_get(&pdev->dev, "gpu"); 272 tdev->clk = devm_clk_get(&pdev->dev, "gpu");
269 if (IS_ERR(tdev->clk)) 273 if (IS_ERR(tdev->clk)) {
270 return PTR_ERR(tdev->clk); 274 ret = PTR_ERR(tdev->clk);
275 goto free;
276 }
271 277
272 tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr"); 278 tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
273 if (IS_ERR(tdev->clk_pwr)) 279 if (IS_ERR(tdev->clk_pwr)) {
274 return PTR_ERR(tdev->clk_pwr); 280 ret = PTR_ERR(tdev->clk_pwr);
281 goto free;
282 }
275 283
276 nvkm_device_tegra_probe_iommu(tdev); 284 nvkm_device_tegra_probe_iommu(tdev);
277 285
278 ret = nvkm_device_tegra_power_up(tdev); 286 ret = nvkm_device_tegra_power_up(tdev);
279 if (ret) 287 if (ret)
280 return ret; 288 goto remove;
281 289
282 tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value; 290 tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value;
283 ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev, 291 ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
@@ -285,9 +293,19 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
285 cfg, dbg, detect, mmio, subdev_mask, 293 cfg, dbg, detect, mmio, subdev_mask,
286 &tdev->device); 294 &tdev->device);
287 if (ret) 295 if (ret)
288 return ret; 296 goto powerdown;
297
298 *pdevice = &tdev->device;
289 299
290 return 0; 300 return 0;
301
302powerdown:
303 nvkm_device_tegra_power_down(tdev);
304remove:
305 nvkm_device_tegra_remove_iommu(tdev);
306free:
307 kfree(tdev);
308 return ret;
291} 309}
292#else 310#else
293int 311int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
index 74e2f7c6c07e..9688970eca47 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
@@ -328,6 +328,7 @@ nvkm_dp_train(struct work_struct *w)
328 .outp = outp, 328 .outp = outp,
329 }, *dp = &_dp; 329 }, *dp = &_dp;
330 u32 datarate = 0; 330 u32 datarate = 0;
331 u8 pwr;
331 int ret; 332 int ret;
332 333
333 if (!outp->base.info.location && disp->func->sor.magic) 334 if (!outp->base.info.location && disp->func->sor.magic)
@@ -355,6 +356,15 @@ nvkm_dp_train(struct work_struct *w)
355 /* disable link interrupt handling during link training */ 356 /* disable link interrupt handling during link training */
356 nvkm_notify_put(&outp->irq); 357 nvkm_notify_put(&outp->irq);
357 358
359 /* ensure sink is not in a low-power state */
360 if (!nvkm_rdaux(outp->aux, DPCD_SC00, &pwr, 1)) {
361 if ((pwr & DPCD_SC00_SET_POWER) != DPCD_SC00_SET_POWER_D0) {
362 pwr &= ~DPCD_SC00_SET_POWER;
363 pwr |= DPCD_SC00_SET_POWER_D0;
364 nvkm_wraux(outp->aux, DPCD_SC00, &pwr, 1);
365 }
366 }
367
358 /* enable down-spreading and execute pre-train script from vbios */ 368 /* enable down-spreading and execute pre-train script from vbios */
359 dp_link_train_init(dp, outp->dpcd[3] & 0x01); 369 dp_link_train_init(dp, outp->dpcd[3] & 0x01);
360 370
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
index 9596290329c7..6e10c5e0ef11 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
@@ -71,5 +71,11 @@
71#define DPCD_LS0C_LANE1_POST_CURSOR2 0x0c 71#define DPCD_LS0C_LANE1_POST_CURSOR2 0x0c
72#define DPCD_LS0C_LANE0_POST_CURSOR2 0x03 72#define DPCD_LS0C_LANE0_POST_CURSOR2 0x03
73 73
74/* DPCD Sink Control */
75#define DPCD_SC00 0x00600
76#define DPCD_SC00_SET_POWER 0x03
77#define DPCD_SC00_SET_POWER_D0 0x01
78#define DPCD_SC00_SET_POWER_D3 0x03
79
74void nvkm_dp_train(struct work_struct *); 80void nvkm_dp_train(struct work_struct *);
75#endif 81#endif
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 2ae8577497ca..7c2e78201ead 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -168,7 +168,8 @@ static int qxl_process_single_command(struct qxl_device *qdev,
168 cmd->command_size)) 168 cmd->command_size))
169 return -EFAULT; 169 return -EFAULT;
170 170
171 reloc_info = kmalloc(sizeof(struct qxl_reloc_info) * cmd->relocs_num, GFP_KERNEL); 171 reloc_info = kmalloc_array(cmd->relocs_num,
172 sizeof(struct qxl_reloc_info), GFP_KERNEL);
172 if (!reloc_info) 173 if (!reloc_info)
173 return -ENOMEM; 174 return -ENOMEM;
174 175
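Switching the reloc table to kmalloc_array() matters because relocs_num comes from userspace: an open-coded size * count multiplication can wrap around and silently allocate a buffer that is too small, whereas kmalloc_array() refuses the request (returns NULL) when the product would overflow. The check it performs is essentially the following user-space sketch, not the kernel implementation itself:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Roughly what an overflow-checked array allocation has to do. */
    static void *alloc_array(size_t n, size_t size)
    {
        if (size != 0 && n > SIZE_MAX / size)
            return NULL;              /* n * size would wrap around */
        return malloc(n * size);
    }

    int main(void)
    {
        printf("%p\n", alloc_array(16, 32));           /* fine */
        printf("%p\n", alloc_array(SIZE_MAX, 32));     /* rejected: NULL */
        return 0;
    }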
diff --git a/drivers/gpu/drm/qxl/qxl_prime.c b/drivers/gpu/drm/qxl/qxl_prime.c
index 3d031b50a8fd..9f029dda1f07 100644
--- a/drivers/gpu/drm/qxl/qxl_prime.c
+++ b/drivers/gpu/drm/qxl/qxl_prime.c
@@ -68,5 +68,5 @@ int qxl_gem_prime_mmap(struct drm_gem_object *obj,
68 struct vm_area_struct *area) 68 struct vm_area_struct *area)
69{ 69{
70 WARN_ONCE(1, "not implemented"); 70 WARN_ONCE(1, "not implemented");
71 return ENOSYS; 71 return -ENOSYS;
72} 72}
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 44ee72e04df9..6af832545bc5 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -315,15 +315,27 @@ int radeon_dp_get_dp_link_config(struct drm_connector *connector,
315 unsigned max_lane_num = drm_dp_max_lane_count(dpcd); 315 unsigned max_lane_num = drm_dp_max_lane_count(dpcd);
316 unsigned lane_num, i, max_pix_clock; 316 unsigned lane_num, i, max_pix_clock;
317 317
318 for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) { 318 if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
319 for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) { 319 ENCODER_OBJECT_ID_NUTMEG) {
320 max_pix_clock = (lane_num * link_rates[i] * 8) / bpp; 320 for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
321 max_pix_clock = (lane_num * 270000 * 8) / bpp;
321 if (max_pix_clock >= pix_clock) { 322 if (max_pix_clock >= pix_clock) {
322 *dp_lanes = lane_num; 323 *dp_lanes = lane_num;
323 *dp_rate = link_rates[i]; 324 *dp_rate = 270000;
324 return 0; 325 return 0;
325 } 326 }
326 } 327 }
328 } else {
329 for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
330 for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
331 max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
332 if (max_pix_clock >= pix_clock) {
333 *dp_lanes = lane_num;
334 *dp_rate = link_rates[i];
335 return 0;
336 }
337 }
338 }
327 } 339 }
328 340
329 return -EINVAL; 341 return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 6bfc46369db1..367a916f364e 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -304,18 +304,10 @@ void dce6_dp_audio_set_dto(struct radeon_device *rdev,
304 unsigned int div = (RREG32(DENTIST_DISPCLK_CNTL) & 304 unsigned int div = (RREG32(DENTIST_DISPCLK_CNTL) &
305 DENTIST_DPREFCLK_WDIVIDER_MASK) >> 305 DENTIST_DPREFCLK_WDIVIDER_MASK) >>
306 DENTIST_DPREFCLK_WDIVIDER_SHIFT; 306 DENTIST_DPREFCLK_WDIVIDER_SHIFT;
307 307 div = radeon_audio_decode_dfs_div(div);
308 if (div < 128 && div >= 96)
309 div -= 64;
310 else if (div >= 64)
311 div = div / 2 - 16;
312 else if (div >= 8)
313 div /= 4;
314 else
315 div = 0;
316 308
317 if (div) 309 if (div)
318 clock = rdev->clock.gpupll_outputfreq * 10 / div; 310 clock = clock * 100 / div;
319 311
320 WREG32(DCE8_DCCG_AUDIO_DTO1_PHASE, 24000); 312 WREG32(DCE8_DCCG_AUDIO_DTO1_PHASE, 24000);
321 WREG32(DCE8_DCCG_AUDIO_DTO1_MODULE, clock); 313 WREG32(DCE8_DCCG_AUDIO_DTO1_MODULE, clock);
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index 9953356fe263..3cf04a2f44bb 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -289,6 +289,16 @@ void dce4_dp_audio_set_dto(struct radeon_device *rdev,
289 * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE 289 * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE
290 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator 290 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
291 */ 291 */
292 if (ASIC_IS_DCE41(rdev)) {
293 unsigned int div = (RREG32(DCE41_DENTIST_DISPCLK_CNTL) &
294 DENTIST_DPREFCLK_WDIVIDER_MASK) >>
295 DENTIST_DPREFCLK_WDIVIDER_SHIFT;
296 div = radeon_audio_decode_dfs_div(div);
297
298 if (div)
299 clock = 100 * clock / div;
300 }
301
292 WREG32(DCCG_AUDIO_DTO1_PHASE, 24000); 302 WREG32(DCCG_AUDIO_DTO1_PHASE, 24000);
293 WREG32(DCCG_AUDIO_DTO1_MODULE, clock); 303 WREG32(DCCG_AUDIO_DTO1_MODULE, clock);
294} 304}
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 4aa5f755572b..13b6029d65cc 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -511,6 +511,11 @@
511#define DCCG_AUDIO_DTO1_CNTL 0x05cc 511#define DCCG_AUDIO_DTO1_CNTL 0x05cc
512# define DCCG_AUDIO_DTO1_USE_512FBR_DTO (1 << 3) 512# define DCCG_AUDIO_DTO1_USE_512FBR_DTO (1 << 3)
513 513
514#define DCE41_DENTIST_DISPCLK_CNTL 0x049c
515# define DENTIST_DPREFCLK_WDIVIDER(x) (((x) & 0x7f) << 24)
516# define DENTIST_DPREFCLK_WDIVIDER_MASK (0x7f << 24)
517# define DENTIST_DPREFCLK_WDIVIDER_SHIFT 24
518
514/* DCE 4.0 AFMT */ 519/* DCE 4.0 AFMT */
515#define HDMI_CONTROL 0x7030 520#define HDMI_CONTROL 0x7030
516# define HDMI_KEEPOUT_MODE (1 << 0) 521# define HDMI_KEEPOUT_MODE (1 << 0)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 5ae6db98aa4d..78a51b3eda10 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -268,7 +268,7 @@ struct radeon_clock {
268 uint32_t current_dispclk; 268 uint32_t current_dispclk;
269 uint32_t dp_extclk; 269 uint32_t dp_extclk;
270 uint32_t max_pixel_clock; 270 uint32_t max_pixel_clock;
271 uint32_t gpupll_outputfreq; 271 uint32_t vco_freq;
272}; 272};
273 273
274/* 274/*
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 08fc1b5effa8..de9a2ffcf5f7 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -1106,6 +1106,31 @@ union firmware_info {
1106 ATOM_FIRMWARE_INFO_V2_2 info_22; 1106 ATOM_FIRMWARE_INFO_V2_2 info_22;
1107}; 1107};
1108 1108
1109union igp_info {
1110 struct _ATOM_INTEGRATED_SYSTEM_INFO info;
1111 struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
1112 struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
1113 struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
1114 struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
1115};
1116
1117static void radeon_atombios_get_dentist_vco_freq(struct radeon_device *rdev)
1118{
1119 struct radeon_mode_info *mode_info = &rdev->mode_info;
1120 int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
1121 union igp_info *igp_info;
1122 u8 frev, crev;
1123 u16 data_offset;
1124
1125 if (atom_parse_data_header(mode_info->atom_context, index, NULL,
1126 &frev, &crev, &data_offset)) {
1127 igp_info = (union igp_info *)(mode_info->atom_context->bios +
1128 data_offset);
1129 rdev->clock.vco_freq =
1130 le32_to_cpu(igp_info->info_6.ulDentistVCOFreq);
1131 }
1132}
1133
1109bool radeon_atom_get_clock_info(struct drm_device *dev) 1134bool radeon_atom_get_clock_info(struct drm_device *dev)
1110{ 1135{
1111 struct radeon_device *rdev = dev->dev_private; 1136 struct radeon_device *rdev = dev->dev_private;
@@ -1257,12 +1282,18 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
1257 rdev->mode_info.firmware_flags = 1282 rdev->mode_info.firmware_flags =
1258 le16_to_cpu(firmware_info->info.usFirmwareCapability.susAccess); 1283 le16_to_cpu(firmware_info->info.usFirmwareCapability.susAccess);
1259 1284
1260 if (ASIC_IS_DCE8(rdev)) { 1285 if (ASIC_IS_DCE8(rdev))
1261 rdev->clock.gpupll_outputfreq = 1286 rdev->clock.vco_freq =
1262 le32_to_cpu(firmware_info->info_22.ulGPUPLL_OutputFreq); 1287 le32_to_cpu(firmware_info->info_22.ulGPUPLL_OutputFreq);
1263 if (rdev->clock.gpupll_outputfreq == 0) 1288 else if (ASIC_IS_DCE5(rdev))
1264 rdev->clock.gpupll_outputfreq = 360000; /* 3.6 GHz */ 1289 rdev->clock.vco_freq = rdev->clock.current_dispclk;
1265 } 1290 else if (ASIC_IS_DCE41(rdev))
1291 radeon_atombios_get_dentist_vco_freq(rdev);
1292 else
1293 rdev->clock.vco_freq = rdev->clock.current_dispclk;
1294
1295 if (rdev->clock.vco_freq == 0)
1296 rdev->clock.vco_freq = 360000; /* 3.6 GHz */
1266 1297
1267 return true; 1298 return true;
1268 } 1299 }
@@ -1270,14 +1301,6 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
1270 return false; 1301 return false;
1271} 1302}
1272 1303
1273union igp_info {
1274 struct _ATOM_INTEGRATED_SYSTEM_INFO info;
1275 struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
1276 struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
1277 struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
1278 struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
1279};
1280
1281bool radeon_atombios_sideport_present(struct radeon_device *rdev) 1304bool radeon_atombios_sideport_present(struct radeon_device *rdev)
1282{ 1305{
1283 struct radeon_mode_info *mode_info = &rdev->mode_info; 1306 struct radeon_mode_info *mode_info = &rdev->mode_info;
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
index 2c02e99b5f95..b214663b370d 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.c
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -739,9 +739,6 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
739 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 739 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
740 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 740 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
741 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); 741 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
742 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
743 struct radeon_connector_atom_dig *dig_connector =
744 radeon_connector->con_priv;
745 742
746 if (!dig || !dig->afmt) 743 if (!dig || !dig->afmt)
747 return; 744 return;
@@ -753,10 +750,7 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
753 radeon_audio_write_speaker_allocation(encoder); 750 radeon_audio_write_speaker_allocation(encoder);
754 radeon_audio_write_sad_regs(encoder); 751 radeon_audio_write_sad_regs(encoder);
755 radeon_audio_write_latency_fields(encoder, mode); 752 radeon_audio_write_latency_fields(encoder, mode);
756 if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev)) 753 radeon_audio_set_dto(encoder, rdev->clock.vco_freq * 10);
757 radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
758 else
759 radeon_audio_set_dto(encoder, dig_connector->dp_clock);
760 radeon_audio_set_audio_packet(encoder); 754 radeon_audio_set_audio_packet(encoder);
761 radeon_audio_select_pin(encoder); 755 radeon_audio_select_pin(encoder);
762 756
@@ -781,3 +775,15 @@ void radeon_audio_dpms(struct drm_encoder *encoder, int mode)
781 if (radeon_encoder->audio && radeon_encoder->audio->dpms) 775 if (radeon_encoder->audio && radeon_encoder->audio->dpms)
782 radeon_encoder->audio->dpms(encoder, mode == DRM_MODE_DPMS_ON); 776 radeon_encoder->audio->dpms(encoder, mode == DRM_MODE_DPMS_ON);
783} 777}
778
779unsigned int radeon_audio_decode_dfs_div(unsigned int div)
780{
781 if (div >= 8 && div < 64)
782 return (div - 8) * 25 + 200;
783 else if (div >= 64 && div < 96)
784 return (div - 64) * 50 + 1600;
785 else if (div >= 96 && div < 128)
786 return (div - 96) * 100 + 3200;
787 else
788 return 0;
789}
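The new helper returns the DENTIST divider scaled by 100, so the DTO callers can stick to integer math: a raw register field of 40 decodes to (40 - 8) * 25 + 200 = 1000, i.e. a divider of 10.00, and the caller's clock = clock * 100 / div then yields clock / 10. A field below 8 decodes to 0, in which case the clock is left untouched. This replaces the open-coded piecewise decode that dce6_dp_audio_set_dto used to carry, and lets dce4_dp_audio_set_dto reuse the same mapping for DCE 4.1.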
diff --git a/drivers/gpu/drm/radeon/radeon_audio.h b/drivers/gpu/drm/radeon/radeon_audio.h
index 059cc3012062..5c70cceaa4a6 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.h
+++ b/drivers/gpu/drm/radeon/radeon_audio.h
@@ -79,5 +79,6 @@ void radeon_audio_fini(struct radeon_device *rdev);
79void radeon_audio_mode_set(struct drm_encoder *encoder, 79void radeon_audio_mode_set(struct drm_encoder *encoder,
80 struct drm_display_mode *mode); 80 struct drm_display_mode *mode);
81void radeon_audio_dpms(struct drm_encoder *encoder, int mode); 81void radeon_audio_dpms(struct drm_encoder *encoder, int mode);
82unsigned int radeon_audio_decode_dfs_div(unsigned int div);
82 83
83#endif 84#endif
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 902b59cebac5..4197ca1bb1e4 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1744,7 +1744,6 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
1744 } 1744 }
1745 1745
1746 drm_kms_helper_poll_enable(dev); 1746 drm_kms_helper_poll_enable(dev);
1747 drm_helper_hpd_irq_event(dev);
1748 1747
1749 /* set the power state here in case we are a PX system or headless */ 1748 /* set the power state here in case we are a PX system or headless */
1750 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) 1749 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index b3bb92368ae0..2d9196a447fd 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -403,7 +403,8 @@ static void radeon_flip_work_func(struct work_struct *__work)
403 struct drm_crtc *crtc = &radeon_crtc->base; 403 struct drm_crtc *crtc = &radeon_crtc->base;
404 unsigned long flags; 404 unsigned long flags;
405 int r; 405 int r;
406 int vpos, hpos, stat, min_udelay; 406 int vpos, hpos, stat, min_udelay = 0;
407 unsigned repcnt = 4;
407 struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id]; 408 struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
408 409
409 down_read(&rdev->exclusive_lock); 410 down_read(&rdev->exclusive_lock);
@@ -454,7 +455,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
454 * In practice this won't execute very often unless on very fast 455 * In practice this won't execute very often unless on very fast
455 * machines because the time window for this to happen is very small. 456 * machines because the time window for this to happen is very small.
456 */ 457 */
457 for (;;) { 458 while (radeon_crtc->enabled && --repcnt) {
458 /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank 459 /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
459 * start in hpos, and to the "fudged earlier" vblank start in 460 * start in hpos, and to the "fudged earlier" vblank start in
460 * vpos. 461 * vpos.
@@ -470,12 +471,24 @@ static void radeon_flip_work_func(struct work_struct *__work)
470 break; 471 break;
471 472
472 /* Sleep at least until estimated real start of hw vblank */ 473 /* Sleep at least until estimated real start of hw vblank */
473 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
474 min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5); 474 min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
475 if (min_udelay > vblank->framedur_ns / 2000) {
476 /* Don't wait ridiculously long - something is wrong */
477 repcnt = 0;
478 break;
479 }
480 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
475 usleep_range(min_udelay, 2 * min_udelay); 481 usleep_range(min_udelay, 2 * min_udelay);
476 spin_lock_irqsave(&crtc->dev->event_lock, flags); 482 spin_lock_irqsave(&crtc->dev->event_lock, flags);
477 }; 483 };
478 484
485 if (!repcnt)
486 DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, "
487 "framedur %d, linedur %d, stat %d, vpos %d, "
488 "hpos %d\n", work->crtc_id, min_udelay,
489 vblank->framedur_ns / 1000,
490 vblank->linedur_ns / 1000, stat, vpos, hpos);
491
479 /* do the flip (mmio) */ 492 /* do the flip (mmio) */
480 radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base); 493 radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base);
481 494
@@ -1670,8 +1683,10 @@ int radeon_modeset_init(struct radeon_device *rdev)
1670 /* setup afmt */ 1683 /* setup afmt */
1671 radeon_afmt_init(rdev); 1684 radeon_afmt_init(rdev);
1672 1685
1673 radeon_fbdev_init(rdev); 1686 if (!list_empty(&rdev->ddev->mode_config.connector_list)) {
1674 drm_kms_helper_poll_init(rdev->ddev); 1687 radeon_fbdev_init(rdev);
1688 drm_kms_helper_poll_init(rdev->ddev);
1689 }
1675 1690
1676 /* do pm late init */ 1691 /* do pm late init */
1677 ret = radeon_pm_late_init(rdev); 1692 ret = radeon_pm_late_init(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 3dcc5733ff69..e26c963f2e93 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -663,6 +663,7 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
663 bo_va = radeon_vm_bo_find(&fpriv->vm, rbo); 663 bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
664 if (!bo_va) { 664 if (!bo_va) {
665 args->operation = RADEON_VA_RESULT_ERROR; 665 args->operation = RADEON_VA_RESULT_ERROR;
666 radeon_bo_unreserve(rbo);
666 drm_gem_object_unreference_unlocked(gobj); 667 drm_gem_object_unreference_unlocked(gobj);
667 return -ENOENT; 668 return -ENOENT;
668 } 669 }
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 84d45633d28c..fb6ad143873f 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -33,6 +33,7 @@
33#include <linux/slab.h> 33#include <linux/slab.h>
34#include <drm/drmP.h> 34#include <drm/drmP.h>
35#include <drm/radeon_drm.h> 35#include <drm/radeon_drm.h>
36#include <drm/drm_cache.h>
36#include "radeon.h" 37#include "radeon.h"
37#include "radeon_trace.h" 38#include "radeon_trace.h"
38 39
@@ -245,6 +246,12 @@ int radeon_bo_create(struct radeon_device *rdev,
245 DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for " 246 DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
246 "better performance thanks to write-combining\n"); 247 "better performance thanks to write-combining\n");
247 bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC); 248 bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
249#else
250 /* For architectures that don't support WC memory,
251 * mask out the WC flag from the BO
252 */
253 if (!drm_arch_can_wc_memory())
254 bo->flags &= ~RADEON_GEM_GTT_WC;
248#endif 255#endif
249 256
250 radeon_ttm_placement_from_domain(bo, domain); 257 radeon_ttm_placement_from_domain(bo, domain);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 460c8f2989da..7a98823bacd1 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -276,8 +276,12 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
276 if (rdev->irq.installed) { 276 if (rdev->irq.installed) {
277 for (i = 0; i < rdev->num_crtc; i++) { 277 for (i = 0; i < rdev->num_crtc; i++) {
278 if (rdev->pm.active_crtcs & (1 << i)) { 278 if (rdev->pm.active_crtcs & (1 << i)) {
279 rdev->pm.req_vblank |= (1 << i); 279 /* This can fail if a modeset is in progress */
280 drm_vblank_get(rdev->ddev, i); 280 if (drm_vblank_get(rdev->ddev, i) == 0)
281 rdev->pm.req_vblank |= (1 << i);
282 else
283 DRM_DEBUG_DRIVER("crtc %d no vblank, can glitch\n",
284 i);
281 } 285 }
282 } 286 }
283 } 287 }
@@ -1078,10 +1082,6 @@ force:
1078 /* update displays */ 1082 /* update displays */
1079 radeon_dpm_display_configuration_changed(rdev); 1083 radeon_dpm_display_configuration_changed(rdev);
1080 1084
1081 rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
1082 rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
1083 rdev->pm.dpm.single_display = single_display;
1084
1085 /* wait for the rings to drain */ 1085 /* wait for the rings to drain */
1086 for (i = 0; i < RADEON_NUM_RINGS; i++) { 1086 for (i = 0; i < RADEON_NUM_RINGS; i++) {
1087 struct radeon_ring *ring = &rdev->ring[i]; 1087 struct radeon_ring *ring = &rdev->ring[i];
@@ -1097,6 +1097,10 @@ force:
1097 1097
1098 radeon_dpm_post_set_power_state(rdev); 1098 radeon_dpm_post_set_power_state(rdev);
1099 1099
1100 rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
1101 rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
1102 rdev->pm.dpm.single_display = single_display;
1103
1100 if (rdev->asic->dpm.force_performance_level) { 1104 if (rdev->asic->dpm.force_performance_level) {
1101 if (rdev->pm.dpm.thermal_active) { 1105 if (rdev->pm.dpm.thermal_active) {
1102 enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level; 1106 enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index c507896aca45..197b157b73d0 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -349,8 +349,13 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
349 /* see if we can skip over some allocations */ 349 /* see if we can skip over some allocations */
350 } while (radeon_sa_bo_next_hole(sa_manager, fences, tries)); 350 } while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
351 351
352 for (i = 0; i < RADEON_NUM_RINGS; ++i)
353 radeon_fence_ref(fences[i]);
354
352 spin_unlock(&sa_manager->wq.lock); 355 spin_unlock(&sa_manager->wq.lock);
353 r = radeon_fence_wait_any(rdev, fences, false); 356 r = radeon_fence_wait_any(rdev, fences, false);
357 for (i = 0; i < RADEON_NUM_RINGS; ++i)
358 radeon_fence_unref(&fences[i]);
354 spin_lock(&sa_manager->wq.lock); 359 spin_lock(&sa_manager->wq.lock);
355 /* if we have nothing to wait for block */ 360 /* if we have nothing to wait for block */
356 if (r == -ENOENT) { 361 if (r == -ENOENT) {
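[editor note] The radeon_sa.c hunk takes a reference on each fence while still holding the wait-queue lock, waits with the lock dropped, then drops the references. A toy sketch of that "reference under the lock, use outside the lock" pattern follows, using pthreads and a hand-rolled refcount; all names are illustrative, none of this is the radeon API.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy refcounted object standing in for a fence. */
struct obj {
	int refcount;                     /* protected by 'lock' below */
	int id;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static struct obj *obj_ref(struct obj *o)
{
	if (o)
		o->refcount++;            /* caller holds 'lock' */
	return o;
}

static void obj_unref(struct obj **po)
{
	struct obj *o = *po;

	if (!o)
		return;
	pthread_mutex_lock(&lock);
	if (--o->refcount == 0)
		free(o);
	pthread_mutex_unlock(&lock);
	*po = NULL;
}

/* Pretend to wait on the object; must not be called under 'lock'. */
static void slow_wait(struct obj *o)
{
	if (o)
		printf("waiting on obj %d\n", o->id);
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));
	struct obj *pinned;

	o->id = 1;
	o->refcount = 1;

	pthread_mutex_lock(&lock);
	pinned = obj_ref(o);              /* pin it before dropping the lock */
	pthread_mutex_unlock(&lock);

	slow_wait(pinned);                /* safe: the extra ref keeps it alive */
	obj_unref(&pinned);               /* drop the temporary reference */
	obj_unref(&o);                    /* drop the original reference */
	return 0;
}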
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index e34307459e50..e06ac546a90f 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -758,7 +758,7 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
758 0, PAGE_SIZE, 758 0, PAGE_SIZE,
759 PCI_DMA_BIDIRECTIONAL); 759 PCI_DMA_BIDIRECTIONAL);
760 if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) { 760 if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
761 while (--i) { 761 while (i--) {
762 pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i], 762 pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
763 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 763 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
764 gtt->ttm.dma_address[i] = 0; 764 gtt->ttm.dma_address[i] = 0;
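[editor note] The one-character radeon_ttm.c fix ("--i" to "i--") decides whether the unwind loop touches entry 0 and whether it can underflow when the very first mapping fails. A standalone sketch of the failure-unwind idiom, with a fake map/unmap pair standing in for the DMA calls:

#include <stdio.h>
#include <stdbool.h>

#define N 4

static bool fake_map(int idx)
{
	return idx != 2;                  /* pretend the third mapping fails */
}

static void fake_unmap(int idx)
{
	printf("unmap %d\n", idx);
}

int main(void)
{
	int i;

	for (i = 0; i < N; i++) {
		if (!fake_map(i)) {
			/*
			 * Entries 0..i-1 were mapped and must be undone.
			 * 'while (i--)' tests first, then decrements, so it
			 * unmaps i-1 down to 0 and stops cleanly even when
			 * i == 0.  The buggy 'while (--i)' would skip entry 0
			 * and, if the first map failed, decrement past zero.
			 */
			while (i--)
				fake_unmap(i);
			return 1;
		}
	}
	return 0;
}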
diff --git a/drivers/gpu/drm/radeon/vce_v1_0.c b/drivers/gpu/drm/radeon/vce_v1_0.c
index 07a0d378e122..a01efe39a820 100644
--- a/drivers/gpu/drm/radeon/vce_v1_0.c
+++ b/drivers/gpu/drm/radeon/vce_v1_0.c
@@ -178,12 +178,12 @@ int vce_v1_0_load_fw(struct radeon_device *rdev, uint32_t *data)
178 return -EINVAL; 178 return -EINVAL;
179 } 179 }
180 180
181 for (i = 0; i < sign->num; ++i) { 181 for (i = 0; i < le32_to_cpu(sign->num); ++i) {
182 if (sign->val[i].chip_id == chip_id) 182 if (le32_to_cpu(sign->val[i].chip_id) == chip_id)
183 break; 183 break;
184 } 184 }
185 185
186 if (i == sign->num) 186 if (i == le32_to_cpu(sign->num))
187 return -EINVAL; 187 return -EINVAL;
188 188
189 data += (256 - 64) / 4; 189 data += (256 - 64) / 4;
@@ -191,18 +191,18 @@ int vce_v1_0_load_fw(struct radeon_device *rdev, uint32_t *data)
191 data[1] = sign->val[i].nonce[1]; 191 data[1] = sign->val[i].nonce[1];
192 data[2] = sign->val[i].nonce[2]; 192 data[2] = sign->val[i].nonce[2];
193 data[3] = sign->val[i].nonce[3]; 193 data[3] = sign->val[i].nonce[3];
194 data[4] = sign->len + 64; 194 data[4] = cpu_to_le32(le32_to_cpu(sign->len) + 64);
195 195
196 memset(&data[5], 0, 44); 196 memset(&data[5], 0, 44);
197 memcpy(&data[16], &sign[1], rdev->vce_fw->size - sizeof(*sign)); 197 memcpy(&data[16], &sign[1], rdev->vce_fw->size - sizeof(*sign));
198 198
199 data += data[4] / 4; 199 data += le32_to_cpu(data[4]) / 4;
200 data[0] = sign->val[i].sigval[0]; 200 data[0] = sign->val[i].sigval[0];
201 data[1] = sign->val[i].sigval[1]; 201 data[1] = sign->val[i].sigval[1];
202 data[2] = sign->val[i].sigval[2]; 202 data[2] = sign->val[i].sigval[2];
203 data[3] = sign->val[i].sigval[3]; 203 data[3] = sign->val[i].sigval[3];
204 204
205 rdev->vce.keyselect = sign->val[i].keyselect; 205 rdev->vce.keyselect = le32_to_cpu(sign->val[i].keyselect);
206 206
207 return 0; 207 return 0;
208} 208}
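[editor note] The vce_v1_0.c hunk wraps every multi-byte firmware field in le32_to_cpu()/cpu_to_le32() so the loader also behaves on big-endian hosts. A small userspace sketch of reading and writing a little-endian 32-bit field portably; the blob layout is invented for illustration, only the byte-order handling mirrors the patch.

#include <stdint.h>
#include <stdio.h>

/* Decode a 32-bit little-endian value regardless of host byte order. */
static uint32_t le32_to_host(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	/* Pretend this came from a firmware file: one LE32 field holding 64. */
	uint8_t blob[4] = { 0x40, 0x00, 0x00, 0x00 };
	uint32_t len = le32_to_host(blob);

	/* Like 'data[4] = cpu_to_le32(le32_to_cpu(sign->len) + 64)': do the
	 * arithmetic in host order, store back in little-endian order. */
	uint32_t total = len + 64;
	uint8_t out[4];

	out[0] = total & 0xff;
	out[1] = (total >> 8) & 0xff;
	out[2] = (total >> 16) & 0xff;
	out[3] = (total >> 24) & 0xff;

	printf("len=%u total=%u first byte=0x%02x\n", len, total, out[0]);
	return 0;
}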
diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile
index d1dc0f7b01db..f6a809afceec 100644
--- a/drivers/gpu/drm/rockchip/Makefile
+++ b/drivers/gpu/drm/rockchip/Makefile
@@ -2,11 +2,11 @@
2# Makefile for the drm device driver. This driver provides support for the 2# Makefile for the drm device driver. This driver provides support for the
3# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. 3# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
4 4
5rockchipdrm-y := rockchip_drm_drv.o rockchip_drm_fb.o rockchip_drm_fbdev.o \ 5rockchipdrm-y := rockchip_drm_drv.o rockchip_drm_fb.o \
6 rockchip_drm_gem.o 6 rockchip_drm_gem.o rockchip_drm_vop.o
7rockchipdrm-$(CONFIG_DRM_FBDEV_EMULATION) += rockchip_drm_fbdev.o
7 8
8obj-$(CONFIG_ROCKCHIP_DW_HDMI) += dw_hdmi-rockchip.o 9obj-$(CONFIG_ROCKCHIP_DW_HDMI) += dw_hdmi-rockchip.o
9obj-$(CONFIG_ROCKCHIP_DW_MIPI_DSI) += dw-mipi-dsi.o 10obj-$(CONFIG_ROCKCHIP_DW_MIPI_DSI) += dw-mipi-dsi.o
10 11
11obj-$(CONFIG_DRM_ROCKCHIP) += rockchipdrm.o rockchip_drm_vop.o \ 12obj-$(CONFIG_DRM_ROCKCHIP) += rockchipdrm.o rockchip_vop_reg.o
12 rockchip_vop_reg.o
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
index 7bfe243c6173..f8f8f29fb7c3 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
@@ -461,10 +461,11 @@ static int dw_mipi_dsi_phy_init(struct dw_mipi_dsi *dsi)
461 461
462static int dw_mipi_dsi_get_lane_bps(struct dw_mipi_dsi *dsi) 462static int dw_mipi_dsi_get_lane_bps(struct dw_mipi_dsi *dsi)
463{ 463{
464 unsigned int bpp, i, pre; 464 unsigned int i, pre;
465 unsigned long mpclk, pllref, tmp; 465 unsigned long mpclk, pllref, tmp;
466 unsigned int m = 1, n = 1, target_mbps = 1000; 466 unsigned int m = 1, n = 1, target_mbps = 1000;
467 unsigned int max_mbps = dptdin_map[ARRAY_SIZE(dptdin_map) - 1].max_mbps; 467 unsigned int max_mbps = dptdin_map[ARRAY_SIZE(dptdin_map) - 1].max_mbps;
468 int bpp;
468 469
469 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format); 470 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
470 if (bpp < 0) { 471 if (bpp < 0) {
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 8397d1b62ef9..a0d51ccb6ea4 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -55,14 +55,12 @@ int rockchip_drm_dma_attach_device(struct drm_device *drm_dev,
55 55
56 return arm_iommu_attach_device(dev, mapping); 56 return arm_iommu_attach_device(dev, mapping);
57} 57}
58EXPORT_SYMBOL_GPL(rockchip_drm_dma_attach_device);
59 58
60void rockchip_drm_dma_detach_device(struct drm_device *drm_dev, 59void rockchip_drm_dma_detach_device(struct drm_device *drm_dev,
61 struct device *dev) 60 struct device *dev)
62{ 61{
63 arm_iommu_detach_device(dev); 62 arm_iommu_detach_device(dev);
64} 63}
65EXPORT_SYMBOL_GPL(rockchip_drm_dma_detach_device);
66 64
67int rockchip_register_crtc_funcs(struct drm_crtc *crtc, 65int rockchip_register_crtc_funcs(struct drm_crtc *crtc,
68 const struct rockchip_crtc_funcs *crtc_funcs) 66 const struct rockchip_crtc_funcs *crtc_funcs)
@@ -77,7 +75,6 @@ int rockchip_register_crtc_funcs(struct drm_crtc *crtc,
77 75
78 return 0; 76 return 0;
79} 77}
80EXPORT_SYMBOL_GPL(rockchip_register_crtc_funcs);
81 78
82void rockchip_unregister_crtc_funcs(struct drm_crtc *crtc) 79void rockchip_unregister_crtc_funcs(struct drm_crtc *crtc)
83{ 80{
@@ -89,7 +86,6 @@ void rockchip_unregister_crtc_funcs(struct drm_crtc *crtc)
89 86
90 priv->crtc_funcs[pipe] = NULL; 87 priv->crtc_funcs[pipe] = NULL;
91} 88}
92EXPORT_SYMBOL_GPL(rockchip_unregister_crtc_funcs);
93 89
94static struct drm_crtc *rockchip_crtc_from_pipe(struct drm_device *drm, 90static struct drm_crtc *rockchip_crtc_from_pipe(struct drm_device *drm,
95 int pipe) 91 int pipe)
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index f7844883cb76..3b8f652698f8 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -39,7 +39,6 @@ struct drm_gem_object *rockchip_fb_get_gem_obj(struct drm_framebuffer *fb,
39 39
40 return rk_fb->obj[plane]; 40 return rk_fb->obj[plane];
41} 41}
42EXPORT_SYMBOL_GPL(rockchip_fb_get_gem_obj);
43 42
44static void rockchip_drm_fb_destroy(struct drm_framebuffer *fb) 43static void rockchip_drm_fb_destroy(struct drm_framebuffer *fb)
45{ 44{
@@ -177,8 +176,23 @@ static void rockchip_crtc_wait_for_update(struct drm_crtc *crtc)
177 crtc_funcs->wait_for_update(crtc); 176 crtc_funcs->wait_for_update(crtc);
178} 177}
179 178
179/*
180 * We can't use drm_atomic_helper_wait_for_vblanks() because rk3288 and rk3066
181 * have hardware counters for neither vblanks nor scanlines, which results in
182 * a race where:
183 * | <-- HW vsync irq and reg take effect
184 * plane_commit --> |
185 * get_vblank and wait --> |
186 * | <-- handle_vblank, vblank->count + 1
187 * cleanup_fb --> |
188 * iommu crash --> |
189 * | <-- HW vsync irq and reg take effect
190 *
191 * This function is equivalent but uses rockchip_crtc_wait_for_update() instead
192 * of waiting for vblank_count to change.
193 */
180static void 194static void
181rockchip_atomic_wait_for_complete(struct drm_atomic_state *old_state) 195rockchip_atomic_wait_for_complete(struct drm_device *dev, struct drm_atomic_state *old_state)
182{ 196{
183 struct drm_crtc_state *old_crtc_state; 197 struct drm_crtc_state *old_crtc_state;
184 struct drm_crtc *crtc; 198 struct drm_crtc *crtc;
@@ -194,6 +208,10 @@ rockchip_atomic_wait_for_complete(struct drm_atomic_state *old_state)
194 if (!crtc->state->active) 208 if (!crtc->state->active)
195 continue; 209 continue;
196 210
211 if (!drm_atomic_helper_framebuffer_changed(dev,
212 old_state, crtc))
213 continue;
214
197 ret = drm_crtc_vblank_get(crtc); 215 ret = drm_crtc_vblank_get(crtc);
198 if (ret != 0) 216 if (ret != 0)
199 continue; 217 continue;
@@ -241,7 +259,7 @@ rockchip_atomic_commit_complete(struct rockchip_atomic_commit *commit)
241 259
242 drm_atomic_helper_commit_planes(dev, state, true); 260 drm_atomic_helper_commit_planes(dev, state, true);
243 261
244 rockchip_atomic_wait_for_complete(state); 262 rockchip_atomic_wait_for_complete(dev, state);
245 263
246 drm_atomic_helper_cleanup_planes(dev, state); 264 drm_atomic_helper_cleanup_planes(dev, state);
247 265
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h
index 50432e9b5b37..73718c5f5bbf 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h
@@ -15,7 +15,18 @@
15#ifndef _ROCKCHIP_DRM_FBDEV_H 15#ifndef _ROCKCHIP_DRM_FBDEV_H
16#define _ROCKCHIP_DRM_FBDEV_H 16#define _ROCKCHIP_DRM_FBDEV_H
17 17
18#ifdef CONFIG_DRM_FBDEV_EMULATION
18int rockchip_drm_fbdev_init(struct drm_device *dev); 19int rockchip_drm_fbdev_init(struct drm_device *dev);
19void rockchip_drm_fbdev_fini(struct drm_device *dev); 20void rockchip_drm_fbdev_fini(struct drm_device *dev);
21#else
22static inline int rockchip_drm_fbdev_init(struct drm_device *dev)
23{
24 return 0;
25}
26
27static inline void rockchip_drm_fbdev_fini(struct drm_device *dev)
28{
29}
30#endif
20 31
21#endif /* _ROCKCHIP_DRM_FBDEV_H */ 32#endif /* _ROCKCHIP_DRM_FBDEV_H */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index d908321b94ce..18e07338c6e5 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -234,13 +234,8 @@ int rockchip_gem_dumb_create(struct drm_file *file_priv,
234 /* 234 /*
235 * align to 64 bytes since Mali requires it. 235 * align to 64 bytes since Mali requires it.
236 */ 236 */
237 min_pitch = ALIGN(min_pitch, 64); 237 args->pitch = ALIGN(min_pitch, 64);
238 238 args->size = args->pitch * args->height;
239 if (args->pitch < min_pitch)
240 args->pitch = min_pitch;
241
242 if (args->size < args->pitch * args->height)
243 args->size = args->pitch * args->height;
244 239
245 rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size, 240 rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size,
246 &args->handle); 241 &args->handle);
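[editor note] The rockchip dumb_create change stops honouring caller-supplied pitch/size and derives both from the aligned minimum, since the comment in the hunk says Mali requires a 64-byte pitch alignment. A tiny sketch of that computation with a generic align-up helper; the width/height/cpp values are made up.

#include <stdio.h>
#include <stdint.h>

/* Round 'x' up to the next multiple of 'a' (a must be a power of two). */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((uint32_t)(a) - 1))

int main(void)
{
	uint32_t width = 1366, height = 768, cpp = 4;   /* bytes per pixel */
	uint32_t min_pitch = width * cpp;               /* 5464 */
	uint32_t pitch = ALIGN_UP(min_pitch, 64);       /* 5504 */
	uint64_t size = (uint64_t)pitch * height;

	printf("pitch=%u size=%llu\n", pitch, (unsigned long long)size);
	return 0;
}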
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 46c2a8dfd8aa..fd370548d7d7 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -43,8 +43,8 @@
43 43
44#define REG_SET(x, base, reg, v, mode) \ 44#define REG_SET(x, base, reg, v, mode) \
45 __REG_SET_##mode(x, base + reg.offset, reg.mask, reg.shift, v) 45 __REG_SET_##mode(x, base + reg.offset, reg.mask, reg.shift, v)
46#define REG_SET_MASK(x, base, reg, v, mode) \ 46#define REG_SET_MASK(x, base, reg, mask, v, mode) \
47 __REG_SET_##mode(x, base + reg.offset, reg.mask, reg.shift, v) 47 __REG_SET_##mode(x, base + reg.offset, mask, reg.shift, v)
48 48
49#define VOP_WIN_SET(x, win, name, v) \ 49#define VOP_WIN_SET(x, win, name, v) \
50 REG_SET(x, win->base, win->phy->name, v, RELAXED) 50 REG_SET(x, win->base, win->phy->name, v, RELAXED)
@@ -58,16 +58,18 @@
58#define VOP_INTR_GET(vop, name) \ 58#define VOP_INTR_GET(vop, name) \
59 vop_read_reg(vop, 0, &vop->data->ctrl->name) 59 vop_read_reg(vop, 0, &vop->data->ctrl->name)
60 60
61#define VOP_INTR_SET(vop, name, v) \ 61#define VOP_INTR_SET(vop, name, mask, v) \
62 REG_SET(vop, 0, vop->data->intr->name, v, NORMAL) 62 REG_SET_MASK(vop, 0, vop->data->intr->name, mask, v, NORMAL)
63#define VOP_INTR_SET_TYPE(vop, name, type, v) \ 63#define VOP_INTR_SET_TYPE(vop, name, type, v) \
64 do { \ 64 do { \
65 int i, reg = 0; \ 65 int i, reg = 0, mask = 0; \
66 for (i = 0; i < vop->data->intr->nintrs; i++) { \ 66 for (i = 0; i < vop->data->intr->nintrs; i++) { \
67 if (vop->data->intr->intrs[i] & type) \ 67 if (vop->data->intr->intrs[i] & type) { \
68 reg |= (v) << i; \ 68 reg |= (v) << i; \
69 mask |= 1 << i; \
70 } \
69 } \ 71 } \
70 VOP_INTR_SET(vop, name, reg); \ 72 VOP_INTR_SET(vop, name, mask, reg); \
71 } while (0) 73 } while (0)
72#define VOP_INTR_GET_TYPE(vop, name, type) \ 74#define VOP_INTR_GET_TYPE(vop, name, type) \
73 vop_get_intr_type(vop, &vop->data->intr->name, type) 75 vop_get_intr_type(vop, &vop->data->intr->name, type)
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 18dfe3ec9a62..22278bcfc60e 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -215,7 +215,7 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
215 struct drm_gem_cma_object *cma_obj; 215 struct drm_gem_cma_object *cma_obj;
216 216
217 if (size == 0) 217 if (size == 0)
218 return NULL; 218 return ERR_PTR(-EINVAL);
219 219
220 /* First, try to get a vc4_bo from the kernel BO cache. */ 220 /* First, try to get a vc4_bo from the kernel BO cache. */
221 if (from_cache) { 221 if (from_cache) {
@@ -237,7 +237,7 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
237 if (IS_ERR(cma_obj)) { 237 if (IS_ERR(cma_obj)) {
238 DRM_ERROR("Failed to allocate from CMA:\n"); 238 DRM_ERROR("Failed to allocate from CMA:\n");
239 vc4_bo_stats_dump(vc4); 239 vc4_bo_stats_dump(vc4);
240 return NULL; 240 return ERR_PTR(-ENOMEM);
241 } 241 }
242 } 242 }
243 243
@@ -259,8 +259,8 @@ int vc4_dumb_create(struct drm_file *file_priv,
259 args->size = args->pitch * args->height; 259 args->size = args->pitch * args->height;
260 260
261 bo = vc4_bo_create(dev, args->size, false); 261 bo = vc4_bo_create(dev, args->size, false);
262 if (!bo) 262 if (IS_ERR(bo))
263 return -ENOMEM; 263 return PTR_ERR(bo);
264 264
265 ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle); 265 ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
266 drm_gem_object_unreference_unlocked(&bo->base.base); 266 drm_gem_object_unreference_unlocked(&bo->base.base);
@@ -443,8 +443,8 @@ int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
443 * get zeroed, and that might leak data between users. 443 * get zeroed, and that might leak data between users.
444 */ 444 */
445 bo = vc4_bo_create(dev, args->size, false); 445 bo = vc4_bo_create(dev, args->size, false);
446 if (!bo) 446 if (IS_ERR(bo))
447 return -ENOMEM; 447 return PTR_ERR(bo);
448 448
449 ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle); 449 ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
450 drm_gem_object_unreference_unlocked(&bo->base.base); 450 drm_gem_object_unreference_unlocked(&bo->base.base);
@@ -496,8 +496,8 @@ vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
496 } 496 }
497 497
498 bo = vc4_bo_create(dev, args->size, true); 498 bo = vc4_bo_create(dev, args->size, true);
499 if (!bo) 499 if (IS_ERR(bo))
500 return -ENOMEM; 500 return PTR_ERR(bo);
501 501
502 ret = copy_from_user(bo->base.vaddr, 502 ret = copy_from_user(bo->base.vaddr,
503 (void __user *)(uintptr_t)args->data, 503 (void __user *)(uintptr_t)args->data,
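[editor note] The vc4_bo.c conversion switches the allocator from returning NULL to returning ERR_PTR(-errno), so callers propagate the real error with PTR_ERR() instead of guessing -ENOMEM. The kernel's helpers live in <linux/err.h>; the sketch below is a simplified userspace stand-in for the idiom, not the real implementation.

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-ins for ERR_PTR/IS_ERR/PTR_ERR: encode a small negative
 * errno in an (invalid) pointer value near the top of the address space. */
#define MAX_ERRNO 4095

static void *err_ptr(long error)      { return (void *)(intptr_t)error; }
static bool  is_err(const void *ptr)  { return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO; }
static long  ptr_err(const void *ptr) { return (long)(intptr_t)ptr; }

static void *create_buffer(size_t size)
{
	if (size == 0)
		return err_ptr(-EINVAL);  /* bad argument, not out of memory */

	void *p = malloc(size);
	if (!p)
		return err_ptr(-ENOMEM);
	return p;
}

int main(void)
{
	void *buf = create_buffer(0);

	if (is_err(buf)) {
		/* Propagate the original error instead of a blanket -ENOMEM. */
		fprintf(stderr, "create_buffer failed: %ld\n", ptr_err(buf));
		return 1;
	}
	free(buf);
	return 0;
}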
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 080865ec2bae..51a63330d4f8 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -91,8 +91,12 @@ struct vc4_dev {
91 struct vc4_bo *overflow_mem; 91 struct vc4_bo *overflow_mem;
92 struct work_struct overflow_mem_work; 92 struct work_struct overflow_mem_work;
93 93
94 int power_refcount;
95
96 /* Mutex controlling the power refcount. */
97 struct mutex power_lock;
98
94 struct { 99 struct {
95 uint32_t last_ct0ca, last_ct1ca;
96 struct timer_list timer; 100 struct timer_list timer;
97 struct work_struct reset_work; 101 struct work_struct reset_work;
98 } hangcheck; 102 } hangcheck;
@@ -142,6 +146,7 @@ struct vc4_seqno_cb {
142}; 146};
143 147
144struct vc4_v3d { 148struct vc4_v3d {
149 struct vc4_dev *vc4;
145 struct platform_device *pdev; 150 struct platform_device *pdev;
146 void __iomem *regs; 151 void __iomem *regs;
147}; 152};
@@ -192,6 +197,11 @@ struct vc4_exec_info {
192 /* Sequence number for this bin/render job. */ 197 /* Sequence number for this bin/render job. */
193 uint64_t seqno; 198 uint64_t seqno;
194 199
200 /* Last current addresses the hardware was processing when the
201 * hangcheck timer checked on us.
202 */
203 uint32_t last_ct0ca, last_ct1ca;
204
195 /* Kernel-space copy of the ioctl arguments */ 205 /* Kernel-space copy of the ioctl arguments */
196 struct drm_vc4_submit_cl *args; 206 struct drm_vc4_submit_cl *args;
197 207
@@ -434,7 +444,6 @@ void vc4_plane_async_set_fb(struct drm_plane *plane,
434extern struct platform_driver vc4_v3d_driver; 444extern struct platform_driver vc4_v3d_driver;
435int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused); 445int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused);
436int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused); 446int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused);
437int vc4_v3d_set_power(struct vc4_dev *vc4, bool on);
438 447
439/* vc4_validate.c */ 448/* vc4_validate.c */
440int 449int
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 48ce30a6f4b5..202aa1544acc 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -23,6 +23,7 @@
23 23
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/platform_device.h> 25#include <linux/platform_device.h>
26#include <linux/pm_runtime.h>
26#include <linux/device.h> 27#include <linux/device.h>
27#include <linux/io.h> 28#include <linux/io.h>
28 29
@@ -228,8 +229,16 @@ vc4_reset(struct drm_device *dev)
228 struct vc4_dev *vc4 = to_vc4_dev(dev); 229 struct vc4_dev *vc4 = to_vc4_dev(dev);
229 230
230 DRM_INFO("Resetting GPU.\n"); 231 DRM_INFO("Resetting GPU.\n");
231 vc4_v3d_set_power(vc4, false); 232
232 vc4_v3d_set_power(vc4, true); 233 mutex_lock(&vc4->power_lock);
234 if (vc4->power_refcount) {
 235 /* Power the device off and back on by dropping the
236 * reference on runtime PM.
237 */
238 pm_runtime_put_sync_suspend(&vc4->v3d->pdev->dev);
239 pm_runtime_get_sync(&vc4->v3d->pdev->dev);
240 }
241 mutex_unlock(&vc4->power_lock);
233 242
234 vc4_irq_reset(dev); 243 vc4_irq_reset(dev);
235 244
@@ -257,10 +266,17 @@ vc4_hangcheck_elapsed(unsigned long data)
257 struct drm_device *dev = (struct drm_device *)data; 266 struct drm_device *dev = (struct drm_device *)data;
258 struct vc4_dev *vc4 = to_vc4_dev(dev); 267 struct vc4_dev *vc4 = to_vc4_dev(dev);
259 uint32_t ct0ca, ct1ca; 268 uint32_t ct0ca, ct1ca;
269 unsigned long irqflags;
270 struct vc4_exec_info *exec;
271
272 spin_lock_irqsave(&vc4->job_lock, irqflags);
273 exec = vc4_first_job(vc4);
260 274
261 /* If idle, we can stop watching for hangs. */ 275 /* If idle, we can stop watching for hangs. */
262 if (list_empty(&vc4->job_list)) 276 if (!exec) {
277 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
263 return; 278 return;
279 }
264 280
265 ct0ca = V3D_READ(V3D_CTNCA(0)); 281 ct0ca = V3D_READ(V3D_CTNCA(0));
266 ct1ca = V3D_READ(V3D_CTNCA(1)); 282 ct1ca = V3D_READ(V3D_CTNCA(1));
@@ -268,14 +284,16 @@ vc4_hangcheck_elapsed(unsigned long data)
268 /* If we've made any progress in execution, rearm the timer 284 /* If we've made any progress in execution, rearm the timer
269 * and wait. 285 * and wait.
270 */ 286 */
271 if (ct0ca != vc4->hangcheck.last_ct0ca || 287 if (ct0ca != exec->last_ct0ca || ct1ca != exec->last_ct1ca) {
272 ct1ca != vc4->hangcheck.last_ct1ca) { 288 exec->last_ct0ca = ct0ca;
273 vc4->hangcheck.last_ct0ca = ct0ca; 289 exec->last_ct1ca = ct1ca;
274 vc4->hangcheck.last_ct1ca = ct1ca; 290 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
275 vc4_queue_hangcheck(dev); 291 vc4_queue_hangcheck(dev);
276 return; 292 return;
277 } 293 }
278 294
295 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
296
279 /* We've gone too long with no progress, reset. This has to 297 /* We've gone too long with no progress, reset. This has to
280 * be done from a work struct, since resetting can sleep and 298 * be done from a work struct, since resetting can sleep and
281 * this timer hook isn't allowed to. 299 * this timer hook isn't allowed to.
@@ -340,12 +358,7 @@ vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns,
340 finish_wait(&vc4->job_wait_queue, &wait); 358 finish_wait(&vc4->job_wait_queue, &wait);
341 trace_vc4_wait_for_seqno_end(dev, seqno); 359 trace_vc4_wait_for_seqno_end(dev, seqno);
342 360
343 if (ret && ret != -ERESTARTSYS) { 361 return ret;
344 DRM_ERROR("timeout waiting for render thread idle\n");
345 return ret;
346 }
347
348 return 0;
349} 362}
350 363
351static void 364static void
@@ -578,9 +591,9 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
578 } 591 }
579 592
580 bo = vc4_bo_create(dev, exec_size, true); 593 bo = vc4_bo_create(dev, exec_size, true);
581 if (!bo) { 594 if (IS_ERR(bo)) {
582 DRM_ERROR("Couldn't allocate BO for binning\n"); 595 DRM_ERROR("Couldn't allocate BO for binning\n");
583 ret = -ENOMEM; 596 ret = PTR_ERR(bo);
584 goto fail; 597 goto fail;
585 } 598 }
586 exec->exec_bo = &bo->base; 599 exec->exec_bo = &bo->base;
@@ -617,6 +630,7 @@ fail:
617static void 630static void
618vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec) 631vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
619{ 632{
633 struct vc4_dev *vc4 = to_vc4_dev(dev);
620 unsigned i; 634 unsigned i;
621 635
622 /* Need the struct lock for drm_gem_object_unreference(). */ 636 /* Need the struct lock for drm_gem_object_unreference(). */
@@ -635,6 +649,11 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
635 } 649 }
636 mutex_unlock(&dev->struct_mutex); 650 mutex_unlock(&dev->struct_mutex);
637 651
652 mutex_lock(&vc4->power_lock);
653 if (--vc4->power_refcount == 0)
654 pm_runtime_put(&vc4->v3d->pdev->dev);
655 mutex_unlock(&vc4->power_lock);
656
638 kfree(exec); 657 kfree(exec);
639} 658}
640 659
@@ -746,6 +765,9 @@ vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
746 struct drm_gem_object *gem_obj; 765 struct drm_gem_object *gem_obj;
747 struct vc4_bo *bo; 766 struct vc4_bo *bo;
748 767
768 if (args->pad != 0)
769 return -EINVAL;
770
749 gem_obj = drm_gem_object_lookup(dev, file_priv, args->handle); 771 gem_obj = drm_gem_object_lookup(dev, file_priv, args->handle);
750 if (!gem_obj) { 772 if (!gem_obj) {
751 DRM_ERROR("Failed to look up GEM BO %d\n", args->handle); 773 DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
@@ -772,7 +794,7 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
772 struct vc4_dev *vc4 = to_vc4_dev(dev); 794 struct vc4_dev *vc4 = to_vc4_dev(dev);
773 struct drm_vc4_submit_cl *args = data; 795 struct drm_vc4_submit_cl *args = data;
774 struct vc4_exec_info *exec; 796 struct vc4_exec_info *exec;
775 int ret; 797 int ret = 0;
776 798
777 if ((args->flags & ~VC4_SUBMIT_CL_USE_CLEAR_COLOR) != 0) { 799 if ((args->flags & ~VC4_SUBMIT_CL_USE_CLEAR_COLOR) != 0) {
778 DRM_ERROR("Unknown flags: 0x%02x\n", args->flags); 800 DRM_ERROR("Unknown flags: 0x%02x\n", args->flags);
@@ -785,6 +807,15 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
785 return -ENOMEM; 807 return -ENOMEM;
786 } 808 }
787 809
810 mutex_lock(&vc4->power_lock);
811 if (vc4->power_refcount++ == 0)
812 ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
813 mutex_unlock(&vc4->power_lock);
814 if (ret < 0) {
815 kfree(exec);
816 return ret;
817 }
818
788 exec->args = args; 819 exec->args = args;
789 INIT_LIST_HEAD(&exec->unref_list); 820 INIT_LIST_HEAD(&exec->unref_list);
790 821
@@ -839,6 +870,8 @@ vc4_gem_init(struct drm_device *dev)
839 (unsigned long)dev); 870 (unsigned long)dev);
840 871
841 INIT_WORK(&vc4->job_done_work, vc4_job_done_work); 872 INIT_WORK(&vc4->job_done_work, vc4_job_done_work);
873
874 mutex_init(&vc4->power_lock);
842} 875}
843 876
844void 877void
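[editor note] The vc4_gem.c changes introduce a driver-level power refcount under power_lock: the first submitted job takes a runtime-PM reference, the last completed job releases it, and the reset path cycles power only when something is actually running. A toy sketch of that first-user/last-user pattern with pthreads; power_on()/power_off() stand in for the pm_runtime calls and are not real kernel APIs.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t power_lock = PTHREAD_MUTEX_INITIALIZER;
static int power_refcount;

static void power_on(void)  { printf("powering block on\n"); }
static void power_off(void) { printf("powering block off\n"); }

/* Called when a job is submitted. */
static void job_begin(void)
{
	pthread_mutex_lock(&power_lock);
	if (power_refcount++ == 0)        /* first user powers the block on */
		power_on();
	pthread_mutex_unlock(&power_lock);
}

/* Called when a job completes. */
static void job_end(void)
{
	pthread_mutex_lock(&power_lock);
	if (--power_refcount == 0)        /* last user powers it back off */
		power_off();
	pthread_mutex_unlock(&power_lock);
}

/* Called on a hang: only cycle power if someone holds a reference. */
static void reset_if_busy(void)
{
	pthread_mutex_lock(&power_lock);
	if (power_refcount) {
		power_off();
		power_on();
	}
	pthread_mutex_unlock(&power_lock);
}

int main(void)
{
	job_begin();
	job_begin();
	reset_if_busy();   /* cycles power: two jobs outstanding */
	job_end();
	job_end();         /* refcount hits zero, block powers off */
	reset_if_busy();   /* no-op: nothing running */
	return 0;
}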
diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
index b68060e758db..78a21357fb2d 100644
--- a/drivers/gpu/drm/vc4/vc4_irq.c
+++ b/drivers/gpu/drm/vc4/vc4_irq.c
@@ -57,7 +57,7 @@ vc4_overflow_mem_work(struct work_struct *work)
57 struct vc4_bo *bo; 57 struct vc4_bo *bo;
58 58
59 bo = vc4_bo_create(dev, 256 * 1024, true); 59 bo = vc4_bo_create(dev, 256 * 1024, true);
60 if (!bo) { 60 if (IS_ERR(bo)) {
61 DRM_ERROR("Couldn't allocate binner overflow mem\n"); 61 DRM_ERROR("Couldn't allocate binner overflow mem\n");
62 return; 62 return;
63 } 63 }
diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c
index 8a2a312e2c1b..0f12418725e5 100644
--- a/drivers/gpu/drm/vc4/vc4_render_cl.c
+++ b/drivers/gpu/drm/vc4/vc4_render_cl.c
@@ -316,20 +316,11 @@ static int vc4_create_rcl_bo(struct drm_device *dev, struct vc4_exec_info *exec,
316 size += xtiles * ytiles * loop_body_size; 316 size += xtiles * ytiles * loop_body_size;
317 317
318 setup->rcl = &vc4_bo_create(dev, size, true)->base; 318 setup->rcl = &vc4_bo_create(dev, size, true)->base;
319 if (!setup->rcl) 319 if (IS_ERR(setup->rcl))
320 return -ENOMEM; 320 return PTR_ERR(setup->rcl);
321 list_add_tail(&to_vc4_bo(&setup->rcl->base)->unref_head, 321 list_add_tail(&to_vc4_bo(&setup->rcl->base)->unref_head,
322 &exec->unref_list); 322 &exec->unref_list);
323 323
324 rcl_u8(setup, VC4_PACKET_TILE_RENDERING_MODE_CONFIG);
325 rcl_u32(setup,
326 (setup->color_write ? (setup->color_write->paddr +
327 args->color_write.offset) :
328 0));
329 rcl_u16(setup, args->width);
330 rcl_u16(setup, args->height);
331 rcl_u16(setup, args->color_write.bits);
332
333 /* The tile buffer gets cleared when the previous tile is stored. If 324 /* The tile buffer gets cleared when the previous tile is stored. If
334 * the clear values changed between frames, then the tile buffer has 325 * the clear values changed between frames, then the tile buffer has
335 * stale clear values in it, so we have to do a store in None mode (no 326 * stale clear values in it, so we have to do a store in None mode (no
@@ -349,6 +340,15 @@ static int vc4_create_rcl_bo(struct drm_device *dev, struct vc4_exec_info *exec,
349 rcl_u32(setup, 0); /* no address, since we're in None mode */ 340 rcl_u32(setup, 0); /* no address, since we're in None mode */
350 } 341 }
351 342
343 rcl_u8(setup, VC4_PACKET_TILE_RENDERING_MODE_CONFIG);
344 rcl_u32(setup,
345 (setup->color_write ? (setup->color_write->paddr +
346 args->color_write.offset) :
347 0));
348 rcl_u16(setup, args->width);
349 rcl_u16(setup, args->height);
350 rcl_u16(setup, args->color_write.bits);
351
352 for (y = min_y_tile; y <= max_y_tile; y++) { 352 for (y = min_y_tile; y <= max_y_tile; y++) {
353 for (x = min_x_tile; x <= max_x_tile; x++) { 353 for (x = min_x_tile; x <= max_x_tile; x++) {
354 bool first = (x == min_x_tile && y == min_y_tile); 354 bool first = (x == min_x_tile && y == min_y_tile);
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index 424d515ffcda..31de5d17bc85 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -17,6 +17,7 @@
17 */ 17 */
18 18
19#include "linux/component.h" 19#include "linux/component.h"
20#include "linux/pm_runtime.h"
20#include "vc4_drv.h" 21#include "vc4_drv.h"
21#include "vc4_regs.h" 22#include "vc4_regs.h"
22 23
@@ -144,21 +145,6 @@ int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused)
144} 145}
145#endif /* CONFIG_DEBUG_FS */ 146#endif /* CONFIG_DEBUG_FS */
146 147
147/*
148 * Asks the firmware to turn on power to the V3D engine.
149 *
150 * This may be doable with just the clocks interface, though this
151 * packet does some other register setup from the firmware, too.
152 */
153int
154vc4_v3d_set_power(struct vc4_dev *vc4, bool on)
155{
156 if (on)
157 return pm_generic_poweroff(&vc4->v3d->pdev->dev);
158 else
159 return pm_generic_resume(&vc4->v3d->pdev->dev);
160}
161
162static void vc4_v3d_init_hw(struct drm_device *dev) 148static void vc4_v3d_init_hw(struct drm_device *dev)
163{ 149{
164 struct vc4_dev *vc4 = to_vc4_dev(dev); 150 struct vc4_dev *vc4 = to_vc4_dev(dev);
@@ -170,6 +156,29 @@ static void vc4_v3d_init_hw(struct drm_device *dev)
170 V3D_WRITE(V3D_VPMBASE, 0); 156 V3D_WRITE(V3D_VPMBASE, 0);
171} 157}
172 158
159#ifdef CONFIG_PM
160static int vc4_v3d_runtime_suspend(struct device *dev)
161{
162 struct vc4_v3d *v3d = dev_get_drvdata(dev);
163 struct vc4_dev *vc4 = v3d->vc4;
164
165 vc4_irq_uninstall(vc4->dev);
166
167 return 0;
168}
169
170static int vc4_v3d_runtime_resume(struct device *dev)
171{
172 struct vc4_v3d *v3d = dev_get_drvdata(dev);
173 struct vc4_dev *vc4 = v3d->vc4;
174
175 vc4_v3d_init_hw(vc4->dev);
176 vc4_irq_postinstall(vc4->dev);
177
178 return 0;
179}
180#endif
181
173static int vc4_v3d_bind(struct device *dev, struct device *master, void *data) 182static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
174{ 183{
175 struct platform_device *pdev = to_platform_device(dev); 184 struct platform_device *pdev = to_platform_device(dev);
@@ -182,6 +191,8 @@ static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
182 if (!v3d) 191 if (!v3d)
183 return -ENOMEM; 192 return -ENOMEM;
184 193
194 dev_set_drvdata(dev, v3d);
195
185 v3d->pdev = pdev; 196 v3d->pdev = pdev;
186 197
187 v3d->regs = vc4_ioremap_regs(pdev, 0); 198 v3d->regs = vc4_ioremap_regs(pdev, 0);
@@ -189,6 +200,7 @@ static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
189 return PTR_ERR(v3d->regs); 200 return PTR_ERR(v3d->regs);
190 201
191 vc4->v3d = v3d; 202 vc4->v3d = v3d;
203 v3d->vc4 = vc4;
192 204
193 if (V3D_READ(V3D_IDENT0) != V3D_EXPECTED_IDENT0) { 205 if (V3D_READ(V3D_IDENT0) != V3D_EXPECTED_IDENT0) {
194 DRM_ERROR("V3D_IDENT0 read 0x%08x instead of 0x%08x\n", 206 DRM_ERROR("V3D_IDENT0 read 0x%08x instead of 0x%08x\n",
@@ -210,6 +222,8 @@ static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
210 return ret; 222 return ret;
211 } 223 }
212 224
225 pm_runtime_enable(dev);
226
213 return 0; 227 return 0;
214} 228}
215 229
@@ -219,6 +233,8 @@ static void vc4_v3d_unbind(struct device *dev, struct device *master,
219 struct drm_device *drm = dev_get_drvdata(master); 233 struct drm_device *drm = dev_get_drvdata(master);
220 struct vc4_dev *vc4 = to_vc4_dev(drm); 234 struct vc4_dev *vc4 = to_vc4_dev(drm);
221 235
236 pm_runtime_disable(dev);
237
222 drm_irq_uninstall(drm); 238 drm_irq_uninstall(drm);
223 239
224 /* Disable the binner's overflow memory address, so the next 240 /* Disable the binner's overflow memory address, so the next
@@ -231,6 +247,10 @@ static void vc4_v3d_unbind(struct device *dev, struct device *master,
231 vc4->v3d = NULL; 247 vc4->v3d = NULL;
232} 248}
233 249
250static const struct dev_pm_ops vc4_v3d_pm_ops = {
251 SET_RUNTIME_PM_OPS(vc4_v3d_runtime_suspend, vc4_v3d_runtime_resume, NULL)
252};
253
234static const struct component_ops vc4_v3d_ops = { 254static const struct component_ops vc4_v3d_ops = {
235 .bind = vc4_v3d_bind, 255 .bind = vc4_v3d_bind,
236 .unbind = vc4_v3d_unbind, 256 .unbind = vc4_v3d_unbind,
@@ -258,5 +278,6 @@ struct platform_driver vc4_v3d_driver = {
258 .driver = { 278 .driver = {
259 .name = "vc4_v3d", 279 .name = "vc4_v3d",
260 .of_match_table = vc4_v3d_dt_match, 280 .of_match_table = vc4_v3d_dt_match,
281 .pm = &vc4_v3d_pm_ops,
261 }, 282 },
262}; 283};
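[editor note] Wiring vc4_v3d to runtime PM above replaces the ad-hoc firmware power calls with suspend/resume callbacks registered in a dev_pm_ops table, so the PM core decides when they run. The sketch below only mirrors the shape, an ops table whose callbacks are selected per build configuration; the struct and function names are invented and this is not the kernel's dev_pm_ops.

#include <stdio.h>

/* A miniature stand-in for dev_pm_ops: optional callbacks in a table. */
struct pm_ops {
	int (*runtime_suspend)(void *ctx);
	int (*runtime_resume)(void *ctx);
};

#ifdef CONFIG_PM
static int my_runtime_suspend(void *ctx) { printf("suspend %s\n", (char *)ctx); return 0; }
static int my_runtime_resume(void *ctx)  { printf("resume %s\n",  (char *)ctx); return 0; }
#define RUNTIME_PM_OPS(s, r)	.runtime_suspend = (s), .runtime_resume = (r)
#else
#define RUNTIME_PM_OPS(s, r)	.runtime_suspend = NULL, .runtime_resume = NULL
#endif

static const struct pm_ops my_pm_ops = {
	RUNTIME_PM_OPS(my_runtime_suspend, my_runtime_resume)
};

int main(void)
{
	char ctx[] = "v3d";

	/* The framework (here: main) invokes whatever callbacks were built in. */
	if (my_pm_ops.runtime_suspend)
		my_pm_ops.runtime_suspend(ctx);
	if (my_pm_ops.runtime_resume)
		my_pm_ops.runtime_resume(ctx);
	return 0;
}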
diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c
index e26d9f6face3..24c2c746e8f3 100644
--- a/drivers/gpu/drm/vc4/vc4_validate.c
+++ b/drivers/gpu/drm/vc4/vc4_validate.c
@@ -401,8 +401,8 @@ validate_tile_binning_config(VALIDATE_ARGS)
401 tile_bo = vc4_bo_create(dev, exec->tile_alloc_offset + tile_alloc_size, 401 tile_bo = vc4_bo_create(dev, exec->tile_alloc_offset + tile_alloc_size,
402 true); 402 true);
403 exec->tile_bo = &tile_bo->base; 403 exec->tile_bo = &tile_bo->base;
404 if (!exec->tile_bo) 404 if (IS_ERR(exec->tile_bo))
405 return -ENOMEM; 405 return PTR_ERR(exec->tile_bo);
406 list_add_tail(&tile_bo->unref_head, &exec->unref_list); 406 list_add_tail(&tile_bo->unref_head, &exec->unref_list);
407 407
408 /* tile alloc address. */ 408 /* tile alloc address. */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index c49812b80dd0..24fb348a44e1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -25,6 +25,7 @@
25 * 25 *
26 **************************************************************************/ 26 **************************************************************************/
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/console.h>
28 29
29#include <drm/drmP.h> 30#include <drm/drmP.h>
30#include "vmwgfx_drv.h" 31#include "vmwgfx_drv.h"
@@ -1538,6 +1539,12 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1538static int __init vmwgfx_init(void) 1539static int __init vmwgfx_init(void)
1539{ 1540{
1540 int ret; 1541 int ret;
1542
1543#ifdef CONFIG_VGA_CONSOLE
1544 if (vgacon_text_force())
1545 return -EINVAL;
1546#endif
1547
1541 ret = drm_pci_init(&driver, &vmw_pci_driver); 1548 ret = drm_pci_init(&driver, &vmw_pci_driver);
1542 if (ret) 1549 if (ret)
1543 DRM_ERROR("Failed initializing DRM.\n"); 1550 DRM_ERROR("Failed initializing DRM.\n");
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index db082bea8daf..c5a1a08b0449 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -563,6 +563,8 @@ static void vmw_sou_connector_destroy(struct drm_connector *connector)
563 563
564static const struct drm_connector_funcs vmw_sou_connector_funcs = { 564static const struct drm_connector_funcs vmw_sou_connector_funcs = {
565 .dpms = vmw_du_connector_dpms, 565 .dpms = vmw_du_connector_dpms,
566 .detect = vmw_du_connector_detect,
567 .fill_modes = vmw_du_connector_fill_modes,
566 .set_property = vmw_du_connector_set_property, 568 .set_property = vmw_du_connector_set_property,
567 .destroy = vmw_sou_connector_destroy, 569 .destroy = vmw_sou_connector_destroy,
568}; 570};
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index da462afcb225..dd2dbb9746ce 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -18,6 +18,7 @@
18#include <linux/host1x.h> 18#include <linux/host1x.h>
19#include <linux/of.h> 19#include <linux/of.h>
20#include <linux/slab.h> 20#include <linux/slab.h>
21#include <linux/of_device.h>
21 22
22#include "bus.h" 23#include "bus.h"
23#include "dev.h" 24#include "dev.h"
@@ -394,6 +395,7 @@ static int host1x_device_add(struct host1x *host1x,
394 device->dev.coherent_dma_mask = host1x->dev->coherent_dma_mask; 395 device->dev.coherent_dma_mask = host1x->dev->coherent_dma_mask;
395 device->dev.dma_mask = &device->dev.coherent_dma_mask; 396 device->dev.dma_mask = &device->dev.coherent_dma_mask;
396 dev_set_name(&device->dev, "%s", driver->driver.name); 397 dev_set_name(&device->dev, "%s", driver->driver.name);
398 of_dma_configure(&device->dev, host1x->dev->of_node);
397 device->dev.release = host1x_device_release; 399 device->dev.release = host1x_device_release;
398 device->dev.bus = &host1x_bus_type; 400 device->dev.bus = &host1x_bus_type;
399 device->dev.parent = host1x->dev; 401 device->dev.parent = host1x->dev;
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 314bf3718cc7..ff348690df94 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -23,6 +23,7 @@
23#include <linux/of_device.h> 23#include <linux/of_device.h>
24#include <linux/clk.h> 24#include <linux/clk.h>
25#include <linux/io.h> 25#include <linux/io.h>
26#include <linux/dma-mapping.h>
26 27
27#define CREATE_TRACE_POINTS 28#define CREATE_TRACE_POINTS
28#include <trace/events/host1x.h> 29#include <trace/events/host1x.h>
@@ -68,6 +69,7 @@ static const struct host1x_info host1x01_info = {
68 .nb_bases = 8, 69 .nb_bases = 8,
69 .init = host1x01_init, 70 .init = host1x01_init,
70 .sync_offset = 0x3000, 71 .sync_offset = 0x3000,
72 .dma_mask = DMA_BIT_MASK(32),
71}; 73};
72 74
73static const struct host1x_info host1x02_info = { 75static const struct host1x_info host1x02_info = {
@@ -77,6 +79,7 @@ static const struct host1x_info host1x02_info = {
77 .nb_bases = 12, 79 .nb_bases = 12,
78 .init = host1x02_init, 80 .init = host1x02_init,
79 .sync_offset = 0x3000, 81 .sync_offset = 0x3000,
82 .dma_mask = DMA_BIT_MASK(32),
80}; 83};
81 84
82static const struct host1x_info host1x04_info = { 85static const struct host1x_info host1x04_info = {
@@ -86,6 +89,7 @@ static const struct host1x_info host1x04_info = {
86 .nb_bases = 64, 89 .nb_bases = 64,
87 .init = host1x04_init, 90 .init = host1x04_init,
88 .sync_offset = 0x2100, 91 .sync_offset = 0x2100,
92 .dma_mask = DMA_BIT_MASK(34),
89}; 93};
90 94
91static const struct host1x_info host1x05_info = { 95static const struct host1x_info host1x05_info = {
@@ -95,6 +99,7 @@ static const struct host1x_info host1x05_info = {
95 .nb_bases = 64, 99 .nb_bases = 64,
96 .init = host1x05_init, 100 .init = host1x05_init,
97 .sync_offset = 0x2100, 101 .sync_offset = 0x2100,
102 .dma_mask = DMA_BIT_MASK(34),
98}; 103};
99 104
100static struct of_device_id host1x_of_match[] = { 105static struct of_device_id host1x_of_match[] = {
@@ -148,6 +153,8 @@ static int host1x_probe(struct platform_device *pdev)
148 if (IS_ERR(host->regs)) 153 if (IS_ERR(host->regs))
149 return PTR_ERR(host->regs); 154 return PTR_ERR(host->regs);
150 155
156 dma_set_mask_and_coherent(host->dev, host->info->dma_mask);
157
151 if (host->info->init) { 158 if (host->info->init) {
152 err = host->info->init(host); 159 err = host->info->init(host);
153 if (err) 160 if (err)
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
index 0b6e8e9629c5..dace124994bb 100644
--- a/drivers/gpu/host1x/dev.h
+++ b/drivers/gpu/host1x/dev.h
@@ -96,6 +96,7 @@ struct host1x_info {
96 int nb_mlocks; /* host1x: number of mlocks */ 96 int nb_mlocks; /* host1x: number of mlocks */
97 int (*init)(struct host1x *); /* initialize per SoC ops */ 97 int (*init)(struct host1x *); /* initialize per SoC ops */
98 int sync_offset; 98 int sync_offset;
99 u64 dma_mask; /* mask of addressable memory */
99}; 100};
100 101
101struct host1x { 102struct host1x {
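[editor note] The host1x change stores a per-SoC dma_mask in each static host1x_info and applies it at probe time with dma_set_mask_and_coherent(), since newer SoCs address 34-bit memory while older ones are limited to 32 bits. A small sketch of the per-variant info-struct pattern; the variant table, compatible strings, and probe function here are invented.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

/* Per-variant configuration, one static instance per SoC generation. */
struct soc_info {
	const char *compatible;
	int nb_channels;
	uint64_t dma_mask;
};

static const struct soc_info soc_table[] = {
	{ "vendor,ctrl-v1", 8,  DMA_BIT_MASK(32) },
	{ "vendor,ctrl-v4", 12, DMA_BIT_MASK(34) },
};

/* Pretend probe: look up the variant and report its DMA mask. */
static int probe(const char *compatible)
{
	for (size_t i = 0; i < sizeof(soc_table) / sizeof(soc_table[0]); i++) {
		if (strcmp(soc_table[i].compatible, compatible) == 0) {
			printf("%s: %d channels, dma mask 0x%llx\n",
			       compatible, soc_table[i].nb_channels,
			       (unsigned long long)soc_table[i].dma_mask);
			return 0;
		}
	}
	return -1;
}

int main(void)
{
	probe("vendor,ctrl-v4");
	return 0;
}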
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index f2e13eb8339f..e00db3f510dd 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -1050,6 +1050,17 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
1050 for (i = 0; i < ARRAY_SIZE(client_reg); i++) { 1050 for (i = 0; i < ARRAY_SIZE(client_reg); i++) {
1051 const struct ipu_platform_reg *reg = &client_reg[i]; 1051 const struct ipu_platform_reg *reg = &client_reg[i];
1052 struct platform_device *pdev; 1052 struct platform_device *pdev;
1053 struct device_node *of_node;
1054
1055 /* Associate subdevice with the corresponding port node */
1056 of_node = of_graph_get_port_by_id(dev->of_node, i);
1057 if (!of_node) {
1058 dev_info(dev,
1059 "no port@%d node in %s, not using %s%d\n",
1060 i, dev->of_node->full_name,
1061 (i / 2) ? "DI" : "CSI", i % 2);
1062 continue;
1063 }
1053 1064
1054 pdev = platform_device_alloc(reg->name, id++); 1065 pdev = platform_device_alloc(reg->name, id++);
1055 if (!pdev) { 1066 if (!pdev) {
@@ -1057,17 +1068,9 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
1057 goto err_register; 1068 goto err_register;
1058 } 1069 }
1059 1070
1071 pdev->dev.of_node = of_node;
1060 pdev->dev.parent = dev; 1072 pdev->dev.parent = dev;
1061 1073
1062 /* Associate subdevice with the corresponding port node */
1063 pdev->dev.of_node = of_graph_get_port_by_id(dev->of_node, i);
1064 if (!pdev->dev.of_node) {
1065 dev_err(dev, "missing port@%d node in %s\n", i,
1066 dev->of_node->full_name);
1067 ret = -ENODEV;
1068 goto err_register;
1069 }
1070
1071 ret = platform_device_add_data(pdev, &reg->pdata, 1074 ret = platform_device_add_data(pdev, &reg->pdata,
1072 sizeof(reg->pdata)); 1075 sizeof(reg->pdata));
1073 if (!ret) 1076 if (!ret)
@@ -1289,10 +1292,6 @@ static int ipu_probe(struct platform_device *pdev)
1289 ipu->irq_sync = irq_sync; 1292 ipu->irq_sync = irq_sync;
1290 ipu->irq_err = irq_err; 1293 ipu->irq_err = irq_err;
1291 1294
1292 ret = ipu_irq_init(ipu);
1293 if (ret)
1294 goto out_failed_irq;
1295
1296 ret = device_reset(&pdev->dev); 1295 ret = device_reset(&pdev->dev);
1297 if (ret) { 1296 if (ret) {
1298 dev_err(&pdev->dev, "failed to reset: %d\n", ret); 1297 dev_err(&pdev->dev, "failed to reset: %d\n", ret);
@@ -1302,6 +1301,10 @@ static int ipu_probe(struct platform_device *pdev)
1302 if (ret) 1301 if (ret)
1303 goto out_failed_reset; 1302 goto out_failed_reset;
1304 1303
1304 ret = ipu_irq_init(ipu);
1305 if (ret)
1306 goto out_failed_irq;
1307
1305 /* Set MCU_T to divide MCU access window into 2 */ 1308 /* Set MCU_T to divide MCU access window into 2 */
1306 ipu_cm_write(ipu, 0x00400000L | (IPU_MCU_T_DEFAULT << 18), 1309 ipu_cm_write(ipu, 0x00400000L | (IPU_MCU_T_DEFAULT << 18),
1307 IPU_DISP_GEN); 1310 IPU_DISP_GEN);
@@ -1324,9 +1327,9 @@ static int ipu_probe(struct platform_device *pdev)
1324failed_add_clients: 1327failed_add_clients:
1325 ipu_submodules_exit(ipu); 1328 ipu_submodules_exit(ipu);
1326failed_submodules_init: 1329failed_submodules_init:
1327out_failed_reset:
1328 ipu_irq_exit(ipu); 1330 ipu_irq_exit(ipu);
1329out_failed_irq: 1331out_failed_irq:
1332out_failed_reset:
1330 clk_disable_unprepare(ipu->clk); 1333 clk_disable_unprepare(ipu->clk);
1331 return ret; 1334 return ret;
1332} 1335}
diff --git a/drivers/hwmon/ads1015.c b/drivers/hwmon/ads1015.c
index f155b8380481..2b3105c8aed3 100644
--- a/drivers/hwmon/ads1015.c
+++ b/drivers/hwmon/ads1015.c
@@ -126,7 +126,7 @@ static int ads1015_reg_to_mv(struct i2c_client *client, unsigned int channel,
126 struct ads1015_data *data = i2c_get_clientdata(client); 126 struct ads1015_data *data = i2c_get_clientdata(client);
127 unsigned int pga = data->channel_data[channel].pga; 127 unsigned int pga = data->channel_data[channel].pga;
128 int fullscale = fullscale_table[pga]; 128 int fullscale = fullscale_table[pga];
129 const unsigned mask = data->id == ads1115 ? 0x7fff : 0x7ff0; 129 const int mask = data->id == ads1115 ? 0x7fff : 0x7ff0;
130 130
131 return DIV_ROUND_CLOSEST(reg * fullscale, mask); 131 return DIV_ROUND_CLOSEST(reg * fullscale, mask);
132} 132}
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index c8487894b312..c43318d3416e 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -932,6 +932,17 @@ MODULE_DEVICE_TABLE(dmi, i8k_dmi_table);
932static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = { 932static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = {
933 { 933 {
934 /* 934 /*
935 * CPU fan speed going up and down on Dell Studio XPS 8000
936 * for unknown reasons.
937 */
938 .ident = "Dell Studio XPS 8000",
939 .matches = {
940 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
941 DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8000"),
942 },
943 },
944 {
945 /*
935 * CPU fan speed going up and down on Dell Studio XPS 8100 946 * CPU fan speed going up and down on Dell Studio XPS 8100
936 * for unknown reasons. 947 * for unknown reasons.
937 */ 948 */
diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
index f77eb971ce95..4f695d8fcafa 100644
--- a/drivers/hwmon/fam15h_power.c
+++ b/drivers/hwmon/fam15h_power.c
@@ -90,7 +90,15 @@ static ssize_t show_power(struct device *dev,
90 pci_bus_read_config_dword(f4->bus, PCI_DEVFN(PCI_SLOT(f4->devfn), 5), 90 pci_bus_read_config_dword(f4->bus, PCI_DEVFN(PCI_SLOT(f4->devfn), 5),
91 REG_TDP_LIMIT3, &val); 91 REG_TDP_LIMIT3, &val);
92 92
93 tdp_limit = val >> 16; 93 /*
94 * On Carrizo and later platforms, ApmTdpLimit bit field
95 * is extended to 16:31 from 16:28.
96 */
97 if (boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model >= 0x60)
98 tdp_limit = val >> 16;
99 else
100 tdp_limit = (val >> 16) & 0x1fff;
101
94 curr_pwr_watts = ((u64)(tdp_limit + 102 curr_pwr_watts = ((u64)(tdp_limit +
95 data->base_tdp)) << running_avg_range; 103 data->base_tdp)) << running_avg_range;
96 curr_pwr_watts -= running_avg_capture; 104 curr_pwr_watts -= running_avg_capture;
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index 82de3deeb18a..685568b1236d 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -406,16 +406,11 @@ static int gpio_fan_get_cur_state(struct thermal_cooling_device *cdev,
406 unsigned long *state) 406 unsigned long *state)
407{ 407{
408 struct gpio_fan_data *fan_data = cdev->devdata; 408 struct gpio_fan_data *fan_data = cdev->devdata;
409 int r;
410 409
411 if (!fan_data) 410 if (!fan_data)
412 return -EINVAL; 411 return -EINVAL;
413 412
414 r = get_fan_speed_index(fan_data); 413 *state = fan_data->speed_index;
415 if (r < 0)
416 return r;
417
418 *state = r;
419 return 0; 414 return 0;
420} 415}
421 416
diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
index 52f708bcf77f..d50c701b19d6 100644
--- a/drivers/hwspinlock/hwspinlock_core.c
+++ b/drivers/hwspinlock/hwspinlock_core.c
@@ -313,6 +313,10 @@ int of_hwspin_lock_get_id(struct device_node *np, int index)
313 hwlock = radix_tree_deref_slot(slot); 313 hwlock = radix_tree_deref_slot(slot);
314 if (unlikely(!hwlock)) 314 if (unlikely(!hwlock))
315 continue; 315 continue;
316 if (radix_tree_is_indirect_ptr(hwlock)) {
317 slot = radix_tree_iter_retry(&iter);
318 continue;
319 }
316 320
317 if (hwlock->bank->dev->of_node == args.np) { 321 if (hwlock->bank->dev->of_node == args.np) {
318 ret = 0; 322 ret = 0;
diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c
index 3711df1d4526..4a45408dd820 100644
--- a/drivers/i2c/busses/i2c-brcmstb.c
+++ b/drivers/i2c/busses/i2c-brcmstb.c
@@ -586,8 +586,7 @@ static int brcmstb_i2c_probe(struct platform_device *pdev)
586 if (!dev) 586 if (!dev)
587 return -ENOMEM; 587 return -ENOMEM;
588 588
589 dev->bsc_regmap = devm_kzalloc(&pdev->dev, sizeof(struct bsc_regs *), 589 dev->bsc_regmap = devm_kzalloc(&pdev->dev, sizeof(*dev->bsc_regmap), GFP_KERNEL);
590 GFP_KERNEL);
591 if (!dev->bsc_regmap) 590 if (!dev->bsc_regmap)
592 return -ENOMEM; 591 return -ENOMEM;
593 592
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index ba9732c236c5..10fbd6d841e0 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -874,7 +874,8 @@ int i2c_dw_probe(struct dw_i2c_dev *dev)
874 i2c_set_adapdata(adap, dev); 874 i2c_set_adapdata(adap, dev);
875 875
876 i2c_dw_disable_int(dev); 876 i2c_dw_disable_int(dev);
877 r = devm_request_irq(dev->dev, dev->irq, i2c_dw_isr, IRQF_SHARED, 877 r = devm_request_irq(dev->dev, dev->irq, i2c_dw_isr,
878 IRQF_SHARED | IRQF_COND_SUSPEND,
878 dev_name(dev->dev), dev); 879 dev_name(dev->dev), dev);
879 if (r) { 880 if (r) {
880 dev_err(dev->dev, "failure requesting irq %i: %d\n", 881 dev_err(dev->dev, "failure requesting irq %i: %d\n",
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index f62d69799a9c..27fa0cb09538 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -1271,6 +1271,8 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
1271 switch (dev->device) { 1271 switch (dev->device) {
1272 case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS: 1272 case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS:
1273 case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS: 1273 case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS:
1274 case PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS:
1275 case PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS:
1274 case PCI_DEVICE_ID_INTEL_DNV_SMBUS: 1276 case PCI_DEVICE_ID_INTEL_DNV_SMBUS:
1275 priv->features |= FEATURE_I2C_BLOCK_READ; 1277 priv->features |= FEATURE_I2C_BLOCK_READ;
1276 priv->features |= FEATURE_IRQ; 1278 priv->features |= FEATURE_IRQ;
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 08d26ba61ed3..13c45296ce5b 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1450,7 +1450,8 @@ omap_i2c_probe(struct platform_device *pdev)
1450 1450
1451err_unuse_clocks: 1451err_unuse_clocks:
1452 omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, 0); 1452 omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, 0);
1453 pm_runtime_put(omap->dev); 1453 pm_runtime_dont_use_autosuspend(omap->dev);
1454 pm_runtime_put_sync(omap->dev);
1454 pm_runtime_disable(&pdev->dev); 1455 pm_runtime_disable(&pdev->dev);
1455err_free_mem: 1456err_free_mem:
1456 1457
@@ -1468,6 +1469,7 @@ static int omap_i2c_remove(struct platform_device *pdev)
1468 return ret; 1469 return ret;
1469 1470
1470 omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, 0); 1471 omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, 0);
1472 pm_runtime_dont_use_autosuspend(&pdev->dev);
1471 pm_runtime_put_sync(&pdev->dev); 1473 pm_runtime_put_sync(&pdev->dev);
1472 pm_runtime_disable(&pdev->dev); 1474 pm_runtime_disable(&pdev->dev);
1473 return 0; 1475 return 0;
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index e04598595073..93f2895383ee 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -137,10 +137,11 @@ static const struct dmi_system_id piix4_dmi_ibm[] = {
137}; 137};
138 138
139/* SB800 globals */ 139/* SB800 globals */
140static DEFINE_MUTEX(piix4_mutex_sb800);
140static const char *piix4_main_port_names_sb800[PIIX4_MAX_ADAPTERS] = { 141static const char *piix4_main_port_names_sb800[PIIX4_MAX_ADAPTERS] = {
141 "SDA0", "SDA2", "SDA3", "SDA4" 142 " port 0", " port 2", " port 3", " port 4"
142}; 143};
143static const char *piix4_aux_port_name_sb800 = "SDA1"; 144static const char *piix4_aux_port_name_sb800 = " port 1";
144 145
145struct i2c_piix4_adapdata { 146struct i2c_piix4_adapdata {
146 unsigned short smba; 147 unsigned short smba;
@@ -148,7 +149,6 @@ struct i2c_piix4_adapdata {
148 /* SB800 */ 149 /* SB800 */
149 bool sb800_main; 150 bool sb800_main;
150 unsigned short port; 151 unsigned short port;
151 struct mutex *mutex;
152}; 152};
153 153
154static int piix4_setup(struct pci_dev *PIIX4_dev, 154static int piix4_setup(struct pci_dev *PIIX4_dev,
@@ -275,10 +275,12 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
275 else 275 else
276 smb_en = (aux) ? 0x28 : 0x2c; 276 smb_en = (aux) ? 0x28 : 0x2c;
277 277
278 mutex_lock(&piix4_mutex_sb800);
278 outb_p(smb_en, SB800_PIIX4_SMB_IDX); 279 outb_p(smb_en, SB800_PIIX4_SMB_IDX);
279 smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1); 280 smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1);
280 outb_p(smb_en + 1, SB800_PIIX4_SMB_IDX); 281 outb_p(smb_en + 1, SB800_PIIX4_SMB_IDX);
281 smba_en_hi = inb_p(SB800_PIIX4_SMB_IDX + 1); 282 smba_en_hi = inb_p(SB800_PIIX4_SMB_IDX + 1);
283 mutex_unlock(&piix4_mutex_sb800);
282 284
283 if (!smb_en) { 285 if (!smb_en) {
284 smb_en_status = smba_en_lo & 0x10; 286 smb_en_status = smba_en_lo & 0x10;
@@ -559,7 +561,7 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
559 u8 port; 561 u8 port;
560 int retval; 562 int retval;
561 563
562 mutex_lock(adapdata->mutex); 564 mutex_lock(&piix4_mutex_sb800);
563 565
564 outb_p(SB800_PIIX4_PORT_IDX, SB800_PIIX4_SMB_IDX); 566 outb_p(SB800_PIIX4_PORT_IDX, SB800_PIIX4_SMB_IDX);
565 smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1); 567 smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1);
@@ -574,7 +576,7 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
574 576
575 outb_p(smba_en_lo, SB800_PIIX4_SMB_IDX + 1); 577 outb_p(smba_en_lo, SB800_PIIX4_SMB_IDX + 1);
576 578
577 mutex_unlock(adapdata->mutex); 579 mutex_unlock(&piix4_mutex_sb800);
578 580
579 return retval; 581 return retval;
580} 582}
@@ -625,6 +627,7 @@ static struct i2c_adapter *piix4_main_adapters[PIIX4_MAX_ADAPTERS];
625static struct i2c_adapter *piix4_aux_adapter; 627static struct i2c_adapter *piix4_aux_adapter;
626 628
627static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba, 629static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
630 bool sb800_main, unsigned short port,
628 const char *name, struct i2c_adapter **padap) 631 const char *name, struct i2c_adapter **padap)
629{ 632{
630 struct i2c_adapter *adap; 633 struct i2c_adapter *adap;
@@ -639,7 +642,8 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
639 642
640 adap->owner = THIS_MODULE; 643 adap->owner = THIS_MODULE;
641 adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD; 644 adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
642 adap->algo = &smbus_algorithm; 645 adap->algo = sb800_main ? &piix4_smbus_algorithm_sb800
646 : &smbus_algorithm;
643 647
644 adapdata = kzalloc(sizeof(*adapdata), GFP_KERNEL); 648 adapdata = kzalloc(sizeof(*adapdata), GFP_KERNEL);
645 if (adapdata == NULL) { 649 if (adapdata == NULL) {
@@ -649,12 +653,14 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
649 } 653 }
650 654
651 adapdata->smba = smba; 655 adapdata->smba = smba;
656 adapdata->sb800_main = sb800_main;
657 adapdata->port = port;
652 658
653 /* set up the sysfs linkage to our parent device */ 659 /* set up the sysfs linkage to our parent device */
654 adap->dev.parent = &dev->dev; 660 adap->dev.parent = &dev->dev;
655 661
656 snprintf(adap->name, sizeof(adap->name), 662 snprintf(adap->name, sizeof(adap->name),
657 "SMBus PIIX4 adapter %s at %04x", name, smba); 663 "SMBus PIIX4 adapter%s at %04x", name, smba);
658 664
659 i2c_set_adapdata(adap, adapdata); 665 i2c_set_adapdata(adap, adapdata);
660 666
@@ -673,30 +679,16 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
673 679
674static int piix4_add_adapters_sb800(struct pci_dev *dev, unsigned short smba) 680static int piix4_add_adapters_sb800(struct pci_dev *dev, unsigned short smba)
675{ 681{
676 struct mutex *mutex;
677 struct i2c_piix4_adapdata *adapdata; 682 struct i2c_piix4_adapdata *adapdata;
678 int port; 683 int port;
679 int retval; 684 int retval;
680 685
681 mutex = kzalloc(sizeof(*mutex), GFP_KERNEL);
682 if (mutex == NULL)
683 return -ENOMEM;
684
685 mutex_init(mutex);
686
687 for (port = 0; port < PIIX4_MAX_ADAPTERS; port++) { 686 for (port = 0; port < PIIX4_MAX_ADAPTERS; port++) {
688 retval = piix4_add_adapter(dev, smba, 687 retval = piix4_add_adapter(dev, smba, true, port,
689 piix4_main_port_names_sb800[port], 688 piix4_main_port_names_sb800[port],
690 &piix4_main_adapters[port]); 689 &piix4_main_adapters[port]);
691 if (retval < 0) 690 if (retval < 0)
692 goto error; 691 goto error;
693
694 piix4_main_adapters[port]->algo = &piix4_smbus_algorithm_sb800;
695
696 adapdata = i2c_get_adapdata(piix4_main_adapters[port]);
697 adapdata->sb800_main = true;
698 adapdata->port = port;
699 adapdata->mutex = mutex;
700 } 692 }
701 693
702 return retval; 694 return retval;
@@ -714,19 +706,20 @@ error:
714 } 706 }
715 } 707 }
716 708
717 kfree(mutex);
718
719 return retval; 709 return retval;
720} 710}
721 711
722static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id) 712static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
723{ 713{
724 int retval; 714 int retval;
715 bool is_sb800 = false;
725 716
726 if ((dev->vendor == PCI_VENDOR_ID_ATI && 717 if ((dev->vendor == PCI_VENDOR_ID_ATI &&
727 dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS && 718 dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS &&
728 dev->revision >= 0x40) || 719 dev->revision >= 0x40) ||
729 dev->vendor == PCI_VENDOR_ID_AMD) { 720 dev->vendor == PCI_VENDOR_ID_AMD) {
721 is_sb800 = true;
722
730 if (!request_region(SB800_PIIX4_SMB_IDX, 2, "smba_idx")) { 723 if (!request_region(SB800_PIIX4_SMB_IDX, 2, "smba_idx")) {
731 dev_err(&dev->dev, 724 dev_err(&dev->dev,
732 "SMBus base address index region 0x%x already in use!\n", 725 "SMBus base address index region 0x%x already in use!\n",
@@ -756,7 +749,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
756 return retval; 749 return retval;
757 750
758 /* Try to register main SMBus adapter, give up if we can't */ 751 /* Try to register main SMBus adapter, give up if we can't */
759 retval = piix4_add_adapter(dev, retval, "main", 752 retval = piix4_add_adapter(dev, retval, false, 0, "",
760 &piix4_main_adapters[0]); 753 &piix4_main_adapters[0]);
761 if (retval < 0) 754 if (retval < 0)
762 return retval; 755 return retval;
@@ -783,7 +776,8 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
783 if (retval > 0) { 776 if (retval > 0) {
784 /* Try to add the aux adapter if it exists, 777 /* Try to add the aux adapter if it exists,
785 * piix4_add_adapter will clean up if this fails */ 778 * piix4_add_adapter will clean up if this fails */
786 piix4_add_adapter(dev, retval, piix4_aux_port_name_sb800, 779 piix4_add_adapter(dev, retval, false, 0,
780 is_sb800 ? piix4_aux_port_name_sb800 : "",
787 &piix4_aux_adapter); 781 &piix4_aux_adapter);
788 } 782 }
789 783
@@ -798,10 +792,8 @@ static void piix4_adap_remove(struct i2c_adapter *adap)
798 i2c_del_adapter(adap); 792 i2c_del_adapter(adap);
799 if (adapdata->port == 0) { 793 if (adapdata->port == 0) {
800 release_region(adapdata->smba, SMBIOSIZE); 794 release_region(adapdata->smba, SMBIOSIZE);
801 if (adapdata->sb800_main) { 795 if (adapdata->sb800_main)
802 kfree(adapdata->mutex);
803 release_region(SB800_PIIX4_SMB_IDX, 2); 796 release_region(SB800_PIIX4_SMB_IDX, 2);
804 }
805 } 797 }
806 kfree(adapdata); 798 kfree(adapdata);
807 kfree(adap); 799 kfree(adap);
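The i2c-piix4 hunks above replace a per-adapter, heap-allocated mutex with a single file-scope mutex, so every access to the SB800 index/data register pair is serialized, including the probe-time reads in piix4_setup_sb800(). A minimal sketch of that pattern follows; the port number and helper name are placeholders, not the driver's actual definitions.

    #include <linux/mutex.h>
    #include <linux/io.h>
    #include <linux/types.h>

    #define IDX_PORT  0x0cd6            /* hypothetical index register */
    #define DATA_PORT (IDX_PORT + 1)    /* hypothetical data register  */

    static DEFINE_MUTEX(idx_lock);      /* one lock shared by all users of the pair */

    /* Read one byte through the index/data register pair. */
    static u8 indexed_read(u8 index)
    {
            u8 val;

            mutex_lock(&idx_lock);
            outb_p(index, IDX_PORT);    /* select the register...  */
            val = inb_p(DATA_PORT);     /* ...then read it back    */
            mutex_unlock(&idx_lock);

            return val;
    }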
diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c
index f3e5ff8522f0..213ba55e17c3 100644
--- a/drivers/i2c/busses/i2c-uniphier-f.c
+++ b/drivers/i2c/busses/i2c-uniphier-f.c
@@ -467,7 +467,7 @@ static int uniphier_fi2c_clk_init(struct device *dev,
467 bus_speed = UNIPHIER_FI2C_DEFAULT_SPEED; 467 bus_speed = UNIPHIER_FI2C_DEFAULT_SPEED;
468 468
469 if (!bus_speed) { 469 if (!bus_speed) {
470 dev_err(dev, "clock-freqyency should not be zero\n"); 470 dev_err(dev, "clock-frequency should not be zero\n");
471 return -EINVAL; 471 return -EINVAL;
472 } 472 }
473 473
diff --git a/drivers/i2c/busses/i2c-uniphier.c b/drivers/i2c/busses/i2c-uniphier.c
index 1f4f3f53819c..89eaa8a7e1e0 100644
--- a/drivers/i2c/busses/i2c-uniphier.c
+++ b/drivers/i2c/busses/i2c-uniphier.c
@@ -328,7 +328,7 @@ static int uniphier_i2c_clk_init(struct device *dev,
328 bus_speed = UNIPHIER_I2C_DEFAULT_SPEED; 328 bus_speed = UNIPHIER_I2C_DEFAULT_SPEED;
329 329
330 if (!bus_speed) { 330 if (!bus_speed) {
331 dev_err(dev, "clock-freqyency should not be zero\n"); 331 dev_err(dev, "clock-frequency should not be zero\n");
332 return -EINVAL; 332 return -EINVAL;
333 } 333 }
334 334
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index edc29b173f6c..833ea9dd4464 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -213,6 +213,7 @@ config STK8312
213config STK8BA50 213config STK8BA50
214 tristate "Sensortek STK8BA50 3-Axis Accelerometer Driver" 214 tristate "Sensortek STK8BA50 3-Axis Accelerometer Driver"
215 depends on I2C 215 depends on I2C
216 depends on IIO_TRIGGER
216 help 217 help
217 Say yes here to get support for the Sensortek STK8BA50 3-axis 218 Say yes here to get support for the Sensortek STK8BA50 3-axis
218 accelerometer. 219 accelerometer.
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 605ff42c4631..283ded7747a9 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -175,6 +175,7 @@ config DA9150_GPADC
175config EXYNOS_ADC 175config EXYNOS_ADC
176 tristate "Exynos ADC driver support" 176 tristate "Exynos ADC driver support"
177 depends on ARCH_EXYNOS || ARCH_S3C24XX || ARCH_S3C64XX || (OF && COMPILE_TEST) 177 depends on ARCH_EXYNOS || ARCH_S3C24XX || ARCH_S3C64XX || (OF && COMPILE_TEST)
178 depends on HAS_IOMEM
178 help 179 help
179 Core support for the ADC block found in the Samsung EXYNOS series 180 Core support for the ADC block found in the Samsung EXYNOS series
180 of SoCs for drivers such as the touchscreen and hwmon to use to share 181 of SoCs for drivers such as the touchscreen and hwmon to use to share
@@ -207,6 +208,7 @@ config INA2XX_ADC
207config IMX7D_ADC 208config IMX7D_ADC
208 tristate "IMX7D ADC driver" 209 tristate "IMX7D ADC driver"
209 depends on ARCH_MXC || COMPILE_TEST 210 depends on ARCH_MXC || COMPILE_TEST
211 depends on HAS_IOMEM
210 help 212 help
211 Say yes here to build support for IMX7D ADC. 213 Say yes here to build support for IMX7D ADC.
212 214
@@ -409,6 +411,7 @@ config TWL6030_GPADC
409config VF610_ADC 411config VF610_ADC
410 tristate "Freescale vf610 ADC driver" 412 tristate "Freescale vf610 ADC driver"
411 depends on OF 413 depends on OF
414 depends on HAS_IOMEM
412 select IIO_BUFFER 415 select IIO_BUFFER
413 select IIO_TRIGGERED_BUFFER 416 select IIO_TRIGGERED_BUFFER
414 help 417 help
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index 942320e32753..c1e05532d437 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -289,7 +289,7 @@ static int tiadc_iio_buffered_hardware_setup(struct iio_dev *indio_dev,
289 goto error_kfifo_free; 289 goto error_kfifo_free;
290 290
291 indio_dev->setup_ops = setup_ops; 291 indio_dev->setup_ops = setup_ops;
292 indio_dev->modes |= INDIO_BUFFER_HARDWARE; 292 indio_dev->modes |= INDIO_BUFFER_SOFTWARE;
293 293
294 return 0; 294 return 0;
295 295
diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
index 43d14588448d..b4dde8315210 100644
--- a/drivers/iio/dac/mcp4725.c
+++ b/drivers/iio/dac/mcp4725.c
@@ -300,6 +300,7 @@ static int mcp4725_probe(struct i2c_client *client,
300 data->client = client; 300 data->client = client;
301 301
302 indio_dev->dev.parent = &client->dev; 302 indio_dev->dev.parent = &client->dev;
303 indio_dev->name = id->name;
303 indio_dev->info = &mcp4725_info; 304 indio_dev->info = &mcp4725_info;
304 indio_dev->channels = &mcp4725_channel; 305 indio_dev->channels = &mcp4725_channel;
305 indio_dev->num_channels = 1; 306 indio_dev->num_channels = 1;
diff --git a/drivers/iio/humidity/dht11.c b/drivers/iio/humidity/dht11.c
index 1165b1c4f9d6..cfc5a051ab9f 100644
--- a/drivers/iio/humidity/dht11.c
+++ b/drivers/iio/humidity/dht11.c
@@ -117,7 +117,7 @@ static int dht11_decode(struct dht11 *dht11, int offset, int timeres)
117 if (((hum_int + hum_dec + temp_int + temp_dec) & 0xff) != checksum) 117 if (((hum_int + hum_dec + temp_int + temp_dec) & 0xff) != checksum)
118 return -EIO; 118 return -EIO;
119 119
120 dht11->timestamp = ktime_get_real_ns(); 120 dht11->timestamp = ktime_get_boot_ns();
121 if (hum_int < 20) { /* DHT22 */ 121 if (hum_int < 20) { /* DHT22 */
122 dht11->temperature = (((temp_int & 0x7f) << 8) + temp_dec) * 122 dht11->temperature = (((temp_int & 0x7f) << 8) + temp_dec) *
123 ((temp_int & 0x80) ? -100 : 100); 123 ((temp_int & 0x80) ? -100 : 100);
@@ -145,7 +145,7 @@ static irqreturn_t dht11_handle_irq(int irq, void *data)
145 145
146 /* TODO: Consider making the handler safe for IRQ sharing */ 146 /* TODO: Consider making the handler safe for IRQ sharing */
147 if (dht11->num_edges < DHT11_EDGES_PER_READ && dht11->num_edges >= 0) { 147 if (dht11->num_edges < DHT11_EDGES_PER_READ && dht11->num_edges >= 0) {
148 dht11->edges[dht11->num_edges].ts = ktime_get_real_ns(); 148 dht11->edges[dht11->num_edges].ts = ktime_get_boot_ns();
149 dht11->edges[dht11->num_edges++].value = 149 dht11->edges[dht11->num_edges++].value =
150 gpio_get_value(dht11->gpio); 150 gpio_get_value(dht11->gpio);
151 151
@@ -164,7 +164,7 @@ static int dht11_read_raw(struct iio_dev *iio_dev,
164 int ret, timeres; 164 int ret, timeres;
165 165
166 mutex_lock(&dht11->lock); 166 mutex_lock(&dht11->lock);
167 if (dht11->timestamp + DHT11_DATA_VALID_TIME < ktime_get_real_ns()) { 167 if (dht11->timestamp + DHT11_DATA_VALID_TIME < ktime_get_boot_ns()) {
168 timeres = ktime_get_resolution_ns(); 168 timeres = ktime_get_resolution_ns();
169 if (DHT11_DATA_BIT_HIGH < 2 * timeres) { 169 if (DHT11_DATA_BIT_HIGH < 2 * timeres) {
170 dev_err(dht11->dev, "timeresolution %dns too low\n", 170 dev_err(dht11->dev, "timeresolution %dns too low\n",
@@ -279,7 +279,7 @@ static int dht11_probe(struct platform_device *pdev)
279 return -EINVAL; 279 return -EINVAL;
280 } 280 }
281 281
282 dht11->timestamp = ktime_get_real_ns() - DHT11_DATA_VALID_TIME - 1; 282 dht11->timestamp = ktime_get_boot_ns() - DHT11_DATA_VALID_TIME - 1;
283 dht11->num_edges = -1; 283 dht11->num_edges = -1;
284 284
285 platform_set_drvdata(pdev, iio); 285 platform_set_drvdata(pdev, iio);
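The dht11.c hunks above move every timestamp from ktime_get_real_ns() to ktime_get_boot_ns(), so a wall-clock step (settimeofday, NTP) can no longer make cached sensor data look fresher or staler than it is. A rough sketch of the freshness test, assuming a made-up two-second validity window and a hypothetical struct:

    #include <linux/ktime.h>
    #include <linux/types.h>

    #define DATA_VALID_NS (2ULL * NSEC_PER_SEC)   /* hypothetical window */

    struct cached_reading {
            u64 timestamp;    /* boot-time based, in nanoseconds */
            int value;
    };

    /* True if the cached value is still recent enough to reuse. */
    static bool reading_is_fresh(const struct cached_reading *r)
    {
            /*
             * ktime_get_boot_ns() is monotonic (and keeps counting across
             * suspend), so it never jumps backwards like wall-clock time.
             */
            return r->timestamp + DATA_VALID_NS > ktime_get_boot_ns();
    }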
diff --git a/drivers/iio/imu/adis_buffer.c b/drivers/iio/imu/adis_buffer.c
index cb32b593f1c5..36607d52fee0 100644
--- a/drivers/iio/imu/adis_buffer.c
+++ b/drivers/iio/imu/adis_buffer.c
@@ -43,7 +43,7 @@ int adis_update_scan_mode(struct iio_dev *indio_dev,
43 return -ENOMEM; 43 return -ENOMEM;
44 44
45 rx = adis->buffer; 45 rx = adis->buffer;
46 tx = rx + indio_dev->scan_bytes; 46 tx = rx + scan_count;
47 47
48 spi_message_init(&adis->msg); 48 spi_message_init(&adis->msg);
49 49
diff --git a/drivers/iio/imu/inv_mpu6050/Kconfig b/drivers/iio/imu/inv_mpu6050/Kconfig
index 48fbc0bc7e2a..8f8d1370ed8b 100644
--- a/drivers/iio/imu/inv_mpu6050/Kconfig
+++ b/drivers/iio/imu/inv_mpu6050/Kconfig
@@ -5,9 +5,9 @@
5config INV_MPU6050_IIO 5config INV_MPU6050_IIO
6 tristate "Invensense MPU6050 devices" 6 tristate "Invensense MPU6050 devices"
7 depends on I2C && SYSFS 7 depends on I2C && SYSFS
8 depends on I2C_MUX
8 select IIO_BUFFER 9 select IIO_BUFFER
9 select IIO_TRIGGERED_BUFFER 10 select IIO_TRIGGERED_BUFFER
10 select I2C_MUX
11 help 11 help
12 This driver supports the Invensense MPU6050 devices. 12 This driver supports the Invensense MPU6050 devices.
13 This driver can also support MPU6500 in MPU6050 compatibility mode 13 This driver can also support MPU6500 in MPU6050 compatibility mode
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index 80fbbfd76faf..734a0042de0c 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -349,6 +349,8 @@ EXPORT_SYMBOL_GPL(iio_channel_get);
349 349
350void iio_channel_release(struct iio_channel *channel) 350void iio_channel_release(struct iio_channel *channel)
351{ 351{
352 if (!channel)
353 return;
352 iio_device_put(channel->indio_dev); 354 iio_device_put(channel->indio_dev);
353 kfree(channel); 355 kfree(channel);
354} 356}
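The inkern.c hunk above makes iio_channel_release() tolerate a NULL channel, mirroring the kfree(NULL) convention so callers can release unconditionally on error paths. A tiny illustration with a made-up resource type:

    #include <linux/slab.h>

    struct my_handle {                  /* hypothetical wrapper */
            void *priv;
    };

    /* Safe to call with NULL, just like kfree(). */
    static void my_handle_release(struct my_handle *h)
    {
            if (!h)
                    return;
            kfree(h->priv);
            kfree(h);
    }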
diff --git a/drivers/iio/light/acpi-als.c b/drivers/iio/light/acpi-als.c
index 60537ec0c923..53201d99a16c 100644
--- a/drivers/iio/light/acpi-als.c
+++ b/drivers/iio/light/acpi-als.c
@@ -54,7 +54,9 @@ static const struct iio_chan_spec acpi_als_channels[] = {
54 .realbits = 32, 54 .realbits = 32,
55 .storagebits = 32, 55 .storagebits = 32,
56 }, 56 },
57 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), 57 /* _RAW is here for backward ABI compatibility */
58 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
59 BIT(IIO_CHAN_INFO_PROCESSED),
58 }, 60 },
59}; 61};
60 62
@@ -152,7 +154,7 @@ static int acpi_als_read_raw(struct iio_dev *indio_dev,
152 s32 temp_val; 154 s32 temp_val;
153 int ret; 155 int ret;
154 156
155 if (mask != IIO_CHAN_INFO_RAW) 157 if ((mask != IIO_CHAN_INFO_PROCESSED) && (mask != IIO_CHAN_INFO_RAW))
156 return -EINVAL; 158 return -EINVAL;
157 159
158 /* we support only illumination (_ALI) so far. */ 160 /* we support only illumination (_ALI) so far. */
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
index 809a961b9a7f..6bf89d8f3741 100644
--- a/drivers/iio/light/ltr501.c
+++ b/drivers/iio/light/ltr501.c
@@ -180,7 +180,7 @@ static const struct ltr501_samp_table ltr501_ps_samp_table[] = {
180 {500000, 2000000} 180 {500000, 2000000}
181}; 181};
182 182
183static unsigned int ltr501_match_samp_freq(const struct ltr501_samp_table *tab, 183static int ltr501_match_samp_freq(const struct ltr501_samp_table *tab,
184 int len, int val, int val2) 184 int len, int val, int val2)
185{ 185{
186 int i, freq; 186 int i, freq;
diff --git a/drivers/iio/pressure/mpl115.c b/drivers/iio/pressure/mpl115.c
index f5ecd6e19f5d..a0d7deeac62f 100644
--- a/drivers/iio/pressure/mpl115.c
+++ b/drivers/iio/pressure/mpl115.c
@@ -117,7 +117,7 @@ static int mpl115_read_raw(struct iio_dev *indio_dev,
117 *val = ret >> 6; 117 *val = ret >> 6;
118 return IIO_VAL_INT; 118 return IIO_VAL_INT;
119 case IIO_CHAN_INFO_OFFSET: 119 case IIO_CHAN_INFO_OFFSET:
120 *val = 605; 120 *val = -605;
121 *val2 = 750000; 121 *val2 = 750000;
122 return IIO_VAL_INT_PLUS_MICRO; 122 return IIO_VAL_INT_PLUS_MICRO;
123 case IIO_CHAN_INFO_SCALE: 123 case IIO_CHAN_INFO_SCALE:
diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
index 93e29fb67fa0..db35e04a0637 100644
--- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
+++ b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
@@ -87,7 +87,7 @@ static int lidar_i2c_xfer(struct lidar_data *data, u8 reg, u8 *val, int len)
87 87
88 ret = i2c_transfer(client->adapter, msg, 2); 88 ret = i2c_transfer(client->adapter, msg, 2);
89 89
90 return (ret == 2) ? 0 : ret; 90 return (ret == 2) ? 0 : -EIO;
91} 91}
92 92
93static int lidar_smbus_xfer(struct lidar_data *data, u8 reg, u8 *val, int len) 93static int lidar_smbus_xfer(struct lidar_data *data, u8 reg, u8 *val, int len)
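The pulsedlight hunk above fixes a return-value bug: i2c_transfer() returns the number of messages completed on success, so a short transfer used to be propagated to callers as a small positive value instead of an error. The sketch below shows the usual idiom; the function name and buffer layout are illustrative only, not the driver's real transfer routine.

    #include <linux/i2c.h>
    #include <linux/errno.h>

    /* Write a register address, then read len bytes back from it. */
    static int reg_read(struct i2c_client *client, u8 reg, u8 *buf, int len)
    {
            struct i2c_msg msg[2] = {
                    { .addr = client->addr, .flags = 0,        .len = 1,   .buf = &reg },
                    { .addr = client->addr, .flags = I2C_M_RD, .len = len, .buf = buf  },
            };
            int ret = i2c_transfer(client->adapter, msg, 2);

            if (ret < 0)
                    return ret;     /* bus error: pass the errno through    */
            if (ret != 2)
                    return -EIO;    /* partial transfer: turn into an error */
            return 0;
    }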
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 00da80e02154..94b80a51ab68 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -358,6 +358,7 @@ int ib_register_device(struct ib_device *device,
358 ret = device->query_device(device, &device->attrs, &uhw); 358 ret = device->query_device(device, &device->attrs, &uhw);
359 if (ret) { 359 if (ret) {
360 printk(KERN_WARNING "Couldn't query the device attributes\n"); 360 printk(KERN_WARNING "Couldn't query the device attributes\n");
361 ib_cache_cleanup_one(device);
361 goto out; 362 goto out;
362 } 363 }
363 364
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index f334090bb612..1e37f3515d98 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1071,7 +1071,7 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
1071 } 1071 }
1072 } 1072 }
1073 1073
1074 if (rec->hop_limit > 1 || use_roce) { 1074 if (rec->hop_limit > 0 || use_roce) {
1075 ah_attr->ah_flags = IB_AH_GRH; 1075 ah_attr->ah_flags = IB_AH_GRH;
1076 ah_attr->grh.dgid = rec->dgid; 1076 ah_attr->grh.dgid = rec->dgid;
1077 1077
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 3de93517efe4..14606afbfaa8 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -336,7 +336,6 @@ static ssize_t _show_port_gid_attr(struct ib_port *p,
336 union ib_gid gid; 336 union ib_gid gid;
337 struct ib_gid_attr gid_attr = {}; 337 struct ib_gid_attr gid_attr = {};
338 ssize_t ret; 338 ssize_t ret;
339 va_list args;
340 339
341 ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid, 340 ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid,
342 &gid_attr); 341 &gid_attr);
@@ -348,7 +347,6 @@ static ssize_t _show_port_gid_attr(struct ib_port *p,
348err: 347err:
349 if (gid_attr.ndev) 348 if (gid_attr.ndev)
350 dev_put(gid_attr.ndev); 349 dev_put(gid_attr.ndev);
351 va_end(args);
352 return ret; 350 return ret;
353} 351}
354 352
@@ -722,12 +720,11 @@ static struct attribute_group *get_counter_table(struct ib_device *dev,
722 720
723 if (get_perf_mad(dev, port_num, IB_PMA_CLASS_PORT_INFO, 721 if (get_perf_mad(dev, port_num, IB_PMA_CLASS_PORT_INFO,
724 &cpi, 40, sizeof(cpi)) >= 0) { 722 &cpi, 40, sizeof(cpi)) >= 0) {
725 723 if (cpi.capability_mask & IB_PMA_CLASS_CAP_EXT_WIDTH)
726 if (cpi.capability_mask && IB_PMA_CLASS_CAP_EXT_WIDTH)
727 /* We have extended counters */ 724 /* We have extended counters */
728 return &pma_group_ext; 725 return &pma_group_ext;
729 726
730 if (cpi.capability_mask && IB_PMA_CLASS_CAP_EXT_WIDTH_NOIETF) 727 if (cpi.capability_mask & IB_PMA_CLASS_CAP_EXT_WIDTH_NOIETF)
731 /* But not the IETF ones */ 728 /* But not the IETF ones */
732 return &pma_group_noietf; 729 return &pma_group_noietf;
733 } 730 }
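The sysfs.c hunk above also corrects a classic '&&' vs '&' mix-up: "capability_mask && FLAG" is true whenever both operands are non-zero, so every device appeared to advertise extended counters. Testing a single capability bit needs bitwise AND, roughly as below (the bit value is a placeholder, not the real IB_PMA_* definition):

    #include <linux/types.h>

    #define CAP_EXT_WIDTH (1u << 9)     /* hypothetical capability bit */

    static bool has_ext_width(u16 capability_mask)
    {
            /* '&' isolates the bit; '&&' only asks "are both non-zero?". */
            return capability_mask & CAP_EXT_WIDTH;
    }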
diff --git a/drivers/infiniband/core/ud_header.c b/drivers/infiniband/core/ud_header.c
index 19837d270278..2116132568e7 100644
--- a/drivers/infiniband/core/ud_header.c
+++ b/drivers/infiniband/core/ud_header.c
@@ -322,6 +322,8 @@ int ib_ud_header_init(int payload_bytes,
322 int immediate_present, 322 int immediate_present,
323 struct ib_ud_header *header) 323 struct ib_ud_header *header)
324{ 324{
325 size_t udp_bytes = udp_present ? IB_UDP_BYTES : 0;
326
325 grh_present = grh_present && !ip_version; 327 grh_present = grh_present && !ip_version;
326 memset(header, 0, sizeof *header); 328 memset(header, 0, sizeof *header);
327 329
@@ -353,7 +355,8 @@ int ib_ud_header_init(int payload_bytes,
353 if (ip_version == 6 || grh_present) { 355 if (ip_version == 6 || grh_present) {
354 header->grh.ip_version = 6; 356 header->grh.ip_version = 6;
355 header->grh.payload_length = 357 header->grh.payload_length =
356 cpu_to_be16((IB_BTH_BYTES + 358 cpu_to_be16((udp_bytes +
359 IB_BTH_BYTES +
357 IB_DETH_BYTES + 360 IB_DETH_BYTES +
358 payload_bytes + 361 payload_bytes +
359 4 + /* ICRC */ 362 4 + /* ICRC */
@@ -362,8 +365,6 @@ int ib_ud_header_init(int payload_bytes,
362 } 365 }
363 366
364 if (ip_version == 4) { 367 if (ip_version == 4) {
365 int udp_bytes = udp_present ? IB_UDP_BYTES : 0;
366
367 header->ip4.ver = 4; /* version 4 */ 368 header->ip4.ver = 4; /* version 4 */
368 header->ip4.hdr_len = 5; /* 5 words */ 369 header->ip4.hdr_len = 5; /* 5 words */
369 header->ip4.tot_len = 370 header->ip4.tot_len =
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 6ffc9c4e93af..6c6fbff19752 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1970,7 +1970,8 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
1970 resp_size); 1970 resp_size);
1971 INIT_UDATA(&uhw, buf + sizeof(cmd), 1971 INIT_UDATA(&uhw, buf + sizeof(cmd),
1972 (unsigned long)cmd.response + resp_size, 1972 (unsigned long)cmd.response + resp_size,
1973 in_len - sizeof(cmd), out_len - resp_size); 1973 in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
1974 out_len - resp_size);
1974 1975
1975 memset(&cmd_ex, 0, sizeof(cmd_ex)); 1976 memset(&cmd_ex, 0, sizeof(cmd_ex));
1976 cmd_ex.user_handle = cmd.user_handle; 1977 cmd_ex.user_handle = cmd.user_handle;
@@ -3413,7 +3414,8 @@ ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
3413 3414
3414 INIT_UDATA(&udata, buf + sizeof cmd, 3415 INIT_UDATA(&udata, buf + sizeof cmd,
3415 (unsigned long) cmd.response + sizeof resp, 3416 (unsigned long) cmd.response + sizeof resp,
3416 in_len - sizeof cmd, out_len - sizeof resp); 3417 in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr),
3418 out_len - sizeof resp);
3417 3419
3418 ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata); 3420 ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata);
3419 if (ret) 3421 if (ret)
@@ -3439,7 +3441,8 @@ ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
3439 3441
3440 INIT_UDATA(&udata, buf + sizeof cmd, 3442 INIT_UDATA(&udata, buf + sizeof cmd,
3441 (unsigned long) cmd.response + sizeof resp, 3443 (unsigned long) cmd.response + sizeof resp,
3442 in_len - sizeof cmd, out_len - sizeof resp); 3444 in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr),
3445 out_len - sizeof resp);
3443 3446
3444 ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata); 3447 ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata);
3445 if (ret) 3448 if (ret)
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 26833bfa639b..d68f506c1922 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -817,17 +817,48 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
817 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; 817 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
818} 818}
819 819
820static void edit_counter(struct mlx4_counter *cnt, 820static void edit_counter(struct mlx4_counter *cnt, void *counters,
821 struct ib_pma_portcounters *pma_cnt) 821 __be16 attr_id)
822{ 822{
823 ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data, 823 switch (attr_id) {
824 (be64_to_cpu(cnt->tx_bytes) >> 2)); 824 case IB_PMA_PORT_COUNTERS:
825 ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data, 825 {
826 (be64_to_cpu(cnt->rx_bytes) >> 2)); 826 struct ib_pma_portcounters *pma_cnt =
827 ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets, 827 (struct ib_pma_portcounters *)counters;
828 be64_to_cpu(cnt->tx_frames)); 828
829 ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets, 829 ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data,
830 be64_to_cpu(cnt->rx_frames)); 830 (be64_to_cpu(cnt->tx_bytes) >> 2));
831 ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data,
832 (be64_to_cpu(cnt->rx_bytes) >> 2));
833 ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets,
834 be64_to_cpu(cnt->tx_frames));
835 ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets,
836 be64_to_cpu(cnt->rx_frames));
837 break;
838 }
839 case IB_PMA_PORT_COUNTERS_EXT:
840 {
841 struct ib_pma_portcounters_ext *pma_cnt_ext =
842 (struct ib_pma_portcounters_ext *)counters;
843
844 pma_cnt_ext->port_xmit_data =
845 cpu_to_be64(be64_to_cpu(cnt->tx_bytes) >> 2);
846 pma_cnt_ext->port_rcv_data =
847 cpu_to_be64(be64_to_cpu(cnt->rx_bytes) >> 2);
848 pma_cnt_ext->port_xmit_packets = cnt->tx_frames;
849 pma_cnt_ext->port_rcv_packets = cnt->rx_frames;
850 break;
851 }
852 }
853}
854
855static int iboe_process_mad_port_info(void *out_mad)
856{
857 struct ib_class_port_info cpi = {};
858
859 cpi.capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
860 memcpy(out_mad, &cpi, sizeof(cpi));
861 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
831} 862}
832 863
833static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, 864static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
@@ -842,6 +873,9 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
842 if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT) 873 if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
843 return -EINVAL; 874 return -EINVAL;
844 875
876 if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO)
877 return iboe_process_mad_port_info((void *)(out_mad->data + 40));
878
845 memset(&counter_stats, 0, sizeof(counter_stats)); 879 memset(&counter_stats, 0, sizeof(counter_stats));
846 mutex_lock(&dev->counters_table[port_num - 1].mutex); 880 mutex_lock(&dev->counters_table[port_num - 1].mutex);
847 list_for_each_entry(tmp_counter, 881 list_for_each_entry(tmp_counter,
@@ -863,7 +897,8 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
863 switch (counter_stats.counter_mode & 0xf) { 897 switch (counter_stats.counter_mode & 0xf) {
864 case 0: 898 case 0:
865 edit_counter(&counter_stats, 899 edit_counter(&counter_stats,
866 (void *)(out_mad->data + 40)); 900 (void *)(out_mad->data + 40),
901 in_mad->mad_hdr.attr_id);
867 err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; 902 err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
868 break; 903 break;
869 default: 904 default:
@@ -894,8 +929,10 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
894 */ 929 */
895 if (link == IB_LINK_LAYER_INFINIBAND) { 930 if (link == IB_LINK_LAYER_INFINIBAND) {
896 if (mlx4_is_slave(dev->dev) && 931 if (mlx4_is_slave(dev->dev) &&
897 in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT && 932 (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
898 in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS) 933 (in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS ||
934 in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT ||
935 in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO)))
899 return iboe_process_mad(ibdev, mad_flags, port_num, in_wc, 936 return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
900 in_grh, in_mad, out_mad); 937 in_grh, in_mad, out_mad);
901 938
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index bc5536f00b6c..fd97534762b8 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1681,9 +1681,12 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
1681 } 1681 }
1682 1682
1683 if (qp->ibqp.uobject) 1683 if (qp->ibqp.uobject)
1684 context->usr_page = cpu_to_be32(to_mucontext(ibqp->uobject->context)->uar.index); 1684 context->usr_page = cpu_to_be32(
1685 mlx4_to_hw_uar_index(dev->dev,
1686 to_mucontext(ibqp->uobject->context)->uar.index));
1685 else 1687 else
1686 context->usr_page = cpu_to_be32(dev->priv_uar.index); 1688 context->usr_page = cpu_to_be32(
1689 mlx4_to_hw_uar_index(dev->dev, dev->priv_uar.index));
1687 1690
1688 if (attr_mask & IB_QP_DEST_QPN) 1691 if (attr_mask & IB_QP_DEST_QPN)
1689 context->remote_qpn = cpu_to_be32(attr->dest_qp_num); 1692 context->remote_qpn = cpu_to_be32(attr->dest_qp_num);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index ec737e2287fe..03c418ccbc98 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -844,6 +844,8 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
844 int err; 844 int err;
845 int i; 845 int i;
846 size_t reqlen; 846 size_t reqlen;
847 size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2,
848 max_cqe_version);
847 849
848 if (!dev->ib_active) 850 if (!dev->ib_active)
849 return ERR_PTR(-EAGAIN); 851 return ERR_PTR(-EAGAIN);
@@ -854,7 +856,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
854 reqlen = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr); 856 reqlen = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
855 if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req)) 857 if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
856 ver = 0; 858 ver = 0;
857 else if (reqlen >= sizeof(struct mlx5_ib_alloc_ucontext_req_v2)) 859 else if (reqlen >= min_req_v2)
858 ver = 2; 860 ver = 2;
859 else 861 else
860 return ERR_PTR(-EINVAL); 862 return ERR_PTR(-EINVAL);
@@ -2214,7 +2216,9 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
2214 (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) | 2216 (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) |
2215 (1ull << IB_USER_VERBS_CMD_OPEN_QP); 2217 (1ull << IB_USER_VERBS_CMD_OPEN_QP);
2216 dev->ib_dev.uverbs_ex_cmd_mask = 2218 dev->ib_dev.uverbs_ex_cmd_mask =
2217 (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE); 2219 (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
2220 (1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
2221 (1ull << IB_USER_VERBS_EX_CMD_CREATE_QP);
2218 2222
2219 dev->ib_dev.query_device = mlx5_ib_query_device; 2223 dev->ib_dev.query_device = mlx5_ib_query_device;
2220 dev->ib_dev.query_port = mlx5_ib_query_port; 2224 dev->ib_dev.query_port = mlx5_ib_query_port;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 8fb9c27485e1..34cb8e87c7b8 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -270,8 +270,10 @@ static int sq_overhead(enum ib_qp_type qp_type)
270 /* fall through */ 270 /* fall through */
271 case IB_QPT_RC: 271 case IB_QPT_RC:
272 size += sizeof(struct mlx5_wqe_ctrl_seg) + 272 size += sizeof(struct mlx5_wqe_ctrl_seg) +
273 sizeof(struct mlx5_wqe_atomic_seg) + 273 max(sizeof(struct mlx5_wqe_atomic_seg) +
274 sizeof(struct mlx5_wqe_raddr_seg); 274 sizeof(struct mlx5_wqe_raddr_seg),
275 sizeof(struct mlx5_wqe_umr_ctrl_seg) +
276 sizeof(struct mlx5_mkey_seg));
275 break; 277 break;
276 278
277 case IB_QPT_XRC_TGT: 279 case IB_QPT_XRC_TGT:
@@ -279,9 +281,9 @@ static int sq_overhead(enum ib_qp_type qp_type)
279 281
280 case IB_QPT_UC: 282 case IB_QPT_UC:
281 size += sizeof(struct mlx5_wqe_ctrl_seg) + 283 size += sizeof(struct mlx5_wqe_ctrl_seg) +
282 sizeof(struct mlx5_wqe_raddr_seg) + 284 max(sizeof(struct mlx5_wqe_raddr_seg),
283 sizeof(struct mlx5_wqe_umr_ctrl_seg) + 285 sizeof(struct mlx5_wqe_umr_ctrl_seg) +
284 sizeof(struct mlx5_mkey_seg); 286 sizeof(struct mlx5_mkey_seg));
285 break; 287 break;
286 288
287 case IB_QPT_UD: 289 case IB_QPT_UD:
@@ -1036,7 +1038,7 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
1036 wq = MLX5_ADDR_OF(rqc, rqc, wq); 1038 wq = MLX5_ADDR_OF(rqc, rqc, wq);
1037 MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC); 1039 MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
1038 MLX5_SET(wq, wq, end_padding_mode, 1040 MLX5_SET(wq, wq, end_padding_mode,
1039 MLX5_GET64(qpc, qpc, end_padding_mode)); 1041 MLX5_GET(qpc, qpc, end_padding_mode));
1040 MLX5_SET(wq, wq, page_offset, MLX5_GET(qpc, qpc, page_offset)); 1042 MLX5_SET(wq, wq, page_offset, MLX5_GET(qpc, qpc, page_offset));
1041 MLX5_SET(wq, wq, pd, MLX5_GET(qpc, qpc, pd)); 1043 MLX5_SET(wq, wq, pd, MLX5_GET(qpc, qpc, pd));
1042 MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(qpc, qpc, dbr_addr)); 1044 MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(qpc, qpc, dbr_addr));
@@ -1615,15 +1617,6 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
1615 1617
1616 if (pd) { 1618 if (pd) {
1617 dev = to_mdev(pd->device); 1619 dev = to_mdev(pd->device);
1618 } else {
1619 /* being cautious here */
1620 if (init_attr->qp_type != IB_QPT_XRC_TGT &&
1621 init_attr->qp_type != MLX5_IB_QPT_REG_UMR) {
1622 pr_warn("%s: no PD for transport %s\n", __func__,
1623 ib_qp_type_str(init_attr->qp_type));
1624 return ERR_PTR(-EINVAL);
1625 }
1626 dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
1627 1620
1628 if (init_attr->qp_type == IB_QPT_RAW_PACKET) { 1621 if (init_attr->qp_type == IB_QPT_RAW_PACKET) {
1629 if (!pd->uobject) { 1622 if (!pd->uobject) {
@@ -1634,6 +1627,15 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
1634 return ERR_PTR(-EINVAL); 1627 return ERR_PTR(-EINVAL);
1635 } 1628 }
1636 } 1629 }
1630 } else {
1631 /* being cautious here */
1632 if (init_attr->qp_type != IB_QPT_XRC_TGT &&
1633 init_attr->qp_type != MLX5_IB_QPT_REG_UMR) {
1634 pr_warn("%s: no PD for transport %s\n", __func__,
1635 ib_qp_type_str(init_attr->qp_type));
1636 return ERR_PTR(-EINVAL);
1637 }
1638 dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
1637 } 1639 }
1638 1640
1639 switch (init_attr->qp_type) { 1641 switch (init_attr->qp_type) {
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 4659256cd95e..3b2ddd64a371 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -75,7 +75,8 @@ static void mlx5_ib_srq_event(struct mlx5_core_srq *srq, enum mlx5_event type)
75 75
76static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq, 76static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
77 struct mlx5_create_srq_mbox_in **in, 77 struct mlx5_create_srq_mbox_in **in,
78 struct ib_udata *udata, int buf_size, int *inlen) 78 struct ib_udata *udata, int buf_size, int *inlen,
79 int is_xrc)
79{ 80{
80 struct mlx5_ib_dev *dev = to_mdev(pd->device); 81 struct mlx5_ib_dev *dev = to_mdev(pd->device);
81 struct mlx5_ib_create_srq ucmd = {}; 82 struct mlx5_ib_create_srq ucmd = {};
@@ -87,13 +88,8 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
87 int ncont; 88 int ncont;
88 u32 offset; 89 u32 offset;
89 u32 uidx = MLX5_IB_DEFAULT_UIDX; 90 u32 uidx = MLX5_IB_DEFAULT_UIDX;
90 int drv_data = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
91 91
92 if (drv_data < 0) 92 ucmdlen = min(udata->inlen, sizeof(ucmd));
93 return -EINVAL;
94
95 ucmdlen = (drv_data < sizeof(ucmd)) ?
96 drv_data : sizeof(ucmd);
97 93
98 if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) { 94 if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) {
99 mlx5_ib_dbg(dev, "failed copy udata\n"); 95 mlx5_ib_dbg(dev, "failed copy udata\n");
@@ -103,15 +99,17 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
103 if (ucmd.reserved0 || ucmd.reserved1) 99 if (ucmd.reserved0 || ucmd.reserved1)
104 return -EINVAL; 100 return -EINVAL;
105 101
106 if (drv_data > sizeof(ucmd) && 102 if (udata->inlen > sizeof(ucmd) &&
107 !ib_is_udata_cleared(udata, sizeof(ucmd), 103 !ib_is_udata_cleared(udata, sizeof(ucmd),
108 drv_data - sizeof(ucmd))) 104 udata->inlen - sizeof(ucmd)))
109 return -EINVAL; 105 return -EINVAL;
110 106
111 err = get_srq_user_index(to_mucontext(pd->uobject->context), 107 if (is_xrc) {
112 &ucmd, udata->inlen, &uidx); 108 err = get_srq_user_index(to_mucontext(pd->uobject->context),
113 if (err) 109 &ucmd, udata->inlen, &uidx);
114 return err; 110 if (err)
111 return err;
112 }
115 113
116 srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE); 114 srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);
117 115
@@ -151,7 +149,8 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
151 (*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT; 149 (*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
152 (*in)->ctx.pgoff_cqn = cpu_to_be32(offset << 26); 150 (*in)->ctx.pgoff_cqn = cpu_to_be32(offset << 26);
153 151
154 if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) { 152 if ((MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) &&
153 is_xrc){
155 xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in, 154 xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in,
156 xrc_srq_context_entry); 155 xrc_srq_context_entry);
157 MLX5_SET(xrc_srqc, xsrqc, user_index, uidx); 156 MLX5_SET(xrc_srqc, xsrqc, user_index, uidx);
@@ -170,7 +169,7 @@ err_umem:
170 169
171static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq, 170static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
172 struct mlx5_create_srq_mbox_in **in, int buf_size, 171 struct mlx5_create_srq_mbox_in **in, int buf_size,
173 int *inlen) 172 int *inlen, int is_xrc)
174{ 173{
175 int err; 174 int err;
176 int i; 175 int i;
@@ -224,7 +223,8 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
224 223
225 (*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT; 224 (*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
226 225
227 if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) { 226 if ((MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) &&
227 is_xrc){
228 xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in, 228 xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in,
229 xrc_srq_context_entry); 229 xrc_srq_context_entry);
230 /* 0xffffff means we ask to work with cqe version 0 */ 230 /* 0xffffff means we ask to work with cqe version 0 */
@@ -302,10 +302,14 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
302 desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs, 302 desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs,
303 srq->msrq.max_avail_gather); 303 srq->msrq.max_avail_gather);
304 304
305 is_xrc = (init_attr->srq_type == IB_SRQT_XRC);
306
305 if (pd->uobject) 307 if (pd->uobject)
306 err = create_srq_user(pd, srq, &in, udata, buf_size, &inlen); 308 err = create_srq_user(pd, srq, &in, udata, buf_size, &inlen,
309 is_xrc);
307 else 310 else
308 err = create_srq_kernel(dev, srq, &in, buf_size, &inlen); 311 err = create_srq_kernel(dev, srq, &in, buf_size, &inlen,
312 is_xrc);
309 313
310 if (err) { 314 if (err) {
311 mlx5_ib_warn(dev, "create srq %s failed, err %d\n", 315 mlx5_ib_warn(dev, "create srq %s failed, err %d\n",
@@ -313,7 +317,6 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
313 goto err_srq; 317 goto err_srq;
314 } 318 }
315 319
316 is_xrc = (init_attr->srq_type == IB_SRQT_XRC);
317 in->ctx.state_log_sz = ilog2(srq->msrq.max); 320 in->ctx.state_log_sz = ilog2(srq->msrq.max);
318 flgs = ((srq->msrq.wqe_shift - 4) | (is_xrc << 5) | (srq->wq_sig << 7)) << 24; 321 flgs = ((srq->msrq.wqe_shift - 4) | (is_xrc << 5) | (srq->wq_sig << 7)) << 24;
319 xrcdn = 0; 322 xrcdn = 0;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
index 040bb8b5cb15..12503f15fbd6 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -323,9 +323,6 @@ struct ocrdma_cq {
323 */ 323 */
324 u32 max_hw_cqe; 324 u32 max_hw_cqe;
325 bool phase_change; 325 bool phase_change;
326 bool deferred_arm, deferred_sol;
327 bool first_arm;
328
329 spinlock_t cq_lock ____cacheline_aligned; /* provide synchronization 326 spinlock_t cq_lock ____cacheline_aligned; /* provide synchronization
330 * to cq polling 327 * to cq polling
331 */ 328 */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 573849354cb9..f38743018cb4 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -228,6 +228,11 @@ static int ocrdma_alloc_resources(struct ocrdma_dev *dev)
228 228
229 ocrdma_alloc_pd_pool(dev); 229 ocrdma_alloc_pd_pool(dev);
230 230
231 if (!ocrdma_alloc_stats_resources(dev)) {
232 pr_err("%s: stats resource allocation failed\n", __func__);
233 goto alloc_err;
234 }
235
231 spin_lock_init(&dev->av_tbl.lock); 236 spin_lock_init(&dev->av_tbl.lock);
232 spin_lock_init(&dev->flush_q_lock); 237 spin_lock_init(&dev->flush_q_lock);
233 return 0; 238 return 0;
@@ -238,6 +243,7 @@ alloc_err:
238 243
239static void ocrdma_free_resources(struct ocrdma_dev *dev) 244static void ocrdma_free_resources(struct ocrdma_dev *dev)
240{ 245{
246 ocrdma_release_stats_resources(dev);
241 kfree(dev->stag_arr); 247 kfree(dev->stag_arr);
242 kfree(dev->qp_tbl); 248 kfree(dev->qp_tbl);
243 kfree(dev->cq_tbl); 249 kfree(dev->cq_tbl);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
index 86c303a620c1..255f774080a4 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -64,10 +64,11 @@ static int ocrdma_add_stat(char *start, char *pcur,
64 return cpy_len; 64 return cpy_len;
65} 65}
66 66
67static bool ocrdma_alloc_stats_mem(struct ocrdma_dev *dev) 67bool ocrdma_alloc_stats_resources(struct ocrdma_dev *dev)
68{ 68{
69 struct stats_mem *mem = &dev->stats_mem; 69 struct stats_mem *mem = &dev->stats_mem;
70 70
71 mutex_init(&dev->stats_lock);
71 /* Alloc mbox command mem*/ 72 /* Alloc mbox command mem*/
72 mem->size = max_t(u32, sizeof(struct ocrdma_rdma_stats_req), 73 mem->size = max_t(u32, sizeof(struct ocrdma_rdma_stats_req),
73 sizeof(struct ocrdma_rdma_stats_resp)); 74 sizeof(struct ocrdma_rdma_stats_resp));
@@ -91,13 +92,14 @@ static bool ocrdma_alloc_stats_mem(struct ocrdma_dev *dev)
91 return true; 92 return true;
92} 93}
93 94
94static void ocrdma_release_stats_mem(struct ocrdma_dev *dev) 95void ocrdma_release_stats_resources(struct ocrdma_dev *dev)
95{ 96{
96 struct stats_mem *mem = &dev->stats_mem; 97 struct stats_mem *mem = &dev->stats_mem;
97 98
98 if (mem->va) 99 if (mem->va)
99 dma_free_coherent(&dev->nic_info.pdev->dev, mem->size, 100 dma_free_coherent(&dev->nic_info.pdev->dev, mem->size,
100 mem->va, mem->pa); 101 mem->va, mem->pa);
102 mem->va = NULL;
101 kfree(mem->debugfs_mem); 103 kfree(mem->debugfs_mem);
102} 104}
103 105
@@ -838,15 +840,9 @@ void ocrdma_add_port_stats(struct ocrdma_dev *dev)
838 &dev->reset_stats, &ocrdma_dbg_ops)) 840 &dev->reset_stats, &ocrdma_dbg_ops))
839 goto err; 841 goto err;
840 842
841 /* Now create dma_mem for stats mbx command */
842 if (!ocrdma_alloc_stats_mem(dev))
843 goto err;
844
845 mutex_init(&dev->stats_lock);
846 843
847 return; 844 return;
848err: 845err:
849 ocrdma_release_stats_mem(dev);
850 debugfs_remove_recursive(dev->dir); 846 debugfs_remove_recursive(dev->dir);
851 dev->dir = NULL; 847 dev->dir = NULL;
852} 848}
@@ -855,9 +851,7 @@ void ocrdma_rem_port_stats(struct ocrdma_dev *dev)
855{ 851{
856 if (!dev->dir) 852 if (!dev->dir)
857 return; 853 return;
858 debugfs_remove(dev->dir); 854 debugfs_remove_recursive(dev->dir);
859 mutex_destroy(&dev->stats_lock);
860 ocrdma_release_stats_mem(dev);
861} 855}
862 856
863void ocrdma_init_debugfs(void) 857void ocrdma_init_debugfs(void)
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
index c9e58d04c7b8..bba1fec4f11f 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
@@ -65,6 +65,8 @@ enum OCRDMA_STATS_TYPE {
65 65
66void ocrdma_rem_debugfs(void); 66void ocrdma_rem_debugfs(void);
67void ocrdma_init_debugfs(void); 67void ocrdma_init_debugfs(void);
68bool ocrdma_alloc_stats_resources(struct ocrdma_dev *dev);
69void ocrdma_release_stats_resources(struct ocrdma_dev *dev);
68void ocrdma_rem_port_stats(struct ocrdma_dev *dev); 70void ocrdma_rem_port_stats(struct ocrdma_dev *dev);
69void ocrdma_add_port_stats(struct ocrdma_dev *dev); 71void ocrdma_add_port_stats(struct ocrdma_dev *dev);
70int ocrdma_pma_counters(struct ocrdma_dev *dev, 72int ocrdma_pma_counters(struct ocrdma_dev *dev,
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index d4c687b548d8..12420e4ecf3d 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -125,8 +125,8 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
125 IB_DEVICE_SYS_IMAGE_GUID | 125 IB_DEVICE_SYS_IMAGE_GUID |
126 IB_DEVICE_LOCAL_DMA_LKEY | 126 IB_DEVICE_LOCAL_DMA_LKEY |
127 IB_DEVICE_MEM_MGT_EXTENSIONS; 127 IB_DEVICE_MEM_MGT_EXTENSIONS;
128 attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_srq_sge); 128 attr->max_sge = dev->attr.max_send_sge;
129 attr->max_sge_rd = 0; 129 attr->max_sge_rd = attr->max_sge;
130 attr->max_cq = dev->attr.max_cq; 130 attr->max_cq = dev->attr.max_cq;
131 attr->max_cqe = dev->attr.max_cqe; 131 attr->max_cqe = dev->attr.max_cqe;
132 attr->max_mr = dev->attr.max_mr; 132 attr->max_mr = dev->attr.max_mr;
@@ -1094,7 +1094,6 @@ struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev,
1094 spin_lock_init(&cq->comp_handler_lock); 1094 spin_lock_init(&cq->comp_handler_lock);
1095 INIT_LIST_HEAD(&cq->sq_head); 1095 INIT_LIST_HEAD(&cq->sq_head);
1096 INIT_LIST_HEAD(&cq->rq_head); 1096 INIT_LIST_HEAD(&cq->rq_head);
1097 cq->first_arm = true;
1098 1097
1099 if (ib_ctx) { 1098 if (ib_ctx) {
1100 uctx = get_ocrdma_ucontext(ib_ctx); 1099 uctx = get_ocrdma_ucontext(ib_ctx);
@@ -2726,8 +2725,7 @@ static int ocrdma_update_ud_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe)
2726 OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT; 2725 OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT;
2727 ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) & 2726 ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) &
2728 OCRDMA_CQE_SRCQP_MASK; 2727 OCRDMA_CQE_SRCQP_MASK;
2729 ibwc->pkey_index = le32_to_cpu(cqe->ud.rxlen_pkey) & 2728 ibwc->pkey_index = 0;
2730 OCRDMA_CQE_PKEY_MASK;
2731 ibwc->wc_flags = IB_WC_GRH; 2729 ibwc->wc_flags = IB_WC_GRH;
2732 ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >> 2730 ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
2733 OCRDMA_CQE_UD_XFER_LEN_SHIFT); 2731 OCRDMA_CQE_UD_XFER_LEN_SHIFT);
@@ -2911,12 +2909,9 @@ expand_cqe:
2911 } 2909 }
2912stop_cqe: 2910stop_cqe:
2913 cq->getp = cur_getp; 2911 cq->getp = cur_getp;
2914 if (cq->deferred_arm || polled_hw_cqes) { 2912
2915 ocrdma_ring_cq_db(dev, cq->id, cq->deferred_arm, 2913 if (polled_hw_cqes)
2916 cq->deferred_sol, polled_hw_cqes); 2914 ocrdma_ring_cq_db(dev, cq->id, false, false, polled_hw_cqes);
2917 cq->deferred_arm = false;
2918 cq->deferred_sol = false;
2919 }
2920 2915
2921 return i; 2916 return i;
2922} 2917}
@@ -3000,13 +2995,7 @@ int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
3000 if (cq_flags & IB_CQ_SOLICITED) 2995 if (cq_flags & IB_CQ_SOLICITED)
3001 sol_needed = true; 2996 sol_needed = true;
3002 2997
3003 if (cq->first_arm) { 2998 ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
3004 ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
3005 cq->first_arm = false;
3006 }
3007
3008 cq->deferred_arm = true;
3009 cq->deferred_sol = sol_needed;
3010 spin_unlock_irqrestore(&cq->cq_lock, flags); 2999 spin_unlock_irqrestore(&cq->cq_lock, flags);
3011 3000
3012 return 0; 3001 return 0;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 5ea0c14070d1..fa9c42ff1fb0 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -245,8 +245,6 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
245 skb_reset_mac_header(skb); 245 skb_reset_mac_header(skb);
246 skb_pull(skb, IPOIB_ENCAP_LEN); 246 skb_pull(skb, IPOIB_ENCAP_LEN);
247 247
248 skb->truesize = SKB_TRUESIZE(skb->len);
249
250 ++dev->stats.rx_packets; 248 ++dev->stats.rx_packets;
251 dev->stats.rx_bytes += skb->len; 249 dev->stats.rx_bytes += skb->len;
252 250
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 050dfa175d16..25889311b1e9 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -456,7 +456,10 @@ out_locked:
456 return status; 456 return status;
457} 457}
458 458
459static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast) 459/*
460 * Caller must hold 'priv->lock'
461 */
462static int ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
460{ 463{
461 struct ipoib_dev_priv *priv = netdev_priv(dev); 464 struct ipoib_dev_priv *priv = netdev_priv(dev);
462 struct ib_sa_multicast *multicast; 465 struct ib_sa_multicast *multicast;
@@ -466,6 +469,10 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
466 ib_sa_comp_mask comp_mask; 469 ib_sa_comp_mask comp_mask;
467 int ret = 0; 470 int ret = 0;
468 471
472 if (!priv->broadcast ||
473 !test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
474 return -EINVAL;
475
469 ipoib_dbg_mcast(priv, "joining MGID %pI6\n", mcast->mcmember.mgid.raw); 476 ipoib_dbg_mcast(priv, "joining MGID %pI6\n", mcast->mcmember.mgid.raw);
470 477
471 rec.mgid = mcast->mcmember.mgid; 478 rec.mgid = mcast->mcmember.mgid;
@@ -525,20 +532,23 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
525 rec.join_state = 4; 532 rec.join_state = 4;
526#endif 533#endif
527 } 534 }
535 spin_unlock_irq(&priv->lock);
528 536
529 multicast = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port, 537 multicast = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
530 &rec, comp_mask, GFP_KERNEL, 538 &rec, comp_mask, GFP_KERNEL,
531 ipoib_mcast_join_complete, mcast); 539 ipoib_mcast_join_complete, mcast);
540 spin_lock_irq(&priv->lock);
532 if (IS_ERR(multicast)) { 541 if (IS_ERR(multicast)) {
533 ret = PTR_ERR(multicast); 542 ret = PTR_ERR(multicast);
534 ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret); 543 ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret);
535 spin_lock_irq(&priv->lock);
536 /* Requeue this join task with a backoff delay */ 544 /* Requeue this join task with a backoff delay */
537 __ipoib_mcast_schedule_join_thread(priv, mcast, 1); 545 __ipoib_mcast_schedule_join_thread(priv, mcast, 1);
538 clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); 546 clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
539 spin_unlock_irq(&priv->lock); 547 spin_unlock_irq(&priv->lock);
540 complete(&mcast->done); 548 complete(&mcast->done);
549 spin_lock_irq(&priv->lock);
541 } 550 }
551 return 0;
542} 552}
543 553
544void ipoib_mcast_join_task(struct work_struct *work) 554void ipoib_mcast_join_task(struct work_struct *work)
@@ -620,9 +630,10 @@ void ipoib_mcast_join_task(struct work_struct *work)
620 /* Found the next unjoined group */ 630 /* Found the next unjoined group */
621 init_completion(&mcast->done); 631 init_completion(&mcast->done);
622 set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); 632 set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
623 spin_unlock_irq(&priv->lock); 633 if (ipoib_mcast_join(dev, mcast)) {
624 ipoib_mcast_join(dev, mcast); 634 spin_unlock_irq(&priv->lock);
625 spin_lock_irq(&priv->lock); 635 return;
636 }
626 } else if (!delay_until || 637 } else if (!delay_until ||
627 time_before(mcast->delay_until, delay_until)) 638 time_before(mcast->delay_until, delay_until))
628 delay_until = mcast->delay_until; 639 delay_until = mcast->delay_until;
@@ -641,10 +652,9 @@ out:
641 if (mcast) { 652 if (mcast) {
642 init_completion(&mcast->done); 653 init_completion(&mcast->done);
643 set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); 654 set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
655 ipoib_mcast_join(dev, mcast);
644 } 656 }
645 spin_unlock_irq(&priv->lock); 657 spin_unlock_irq(&priv->lock);
646 if (mcast)
647 ipoib_mcast_join(dev, mcast);
648} 658}
649 659
650int ipoib_mcast_start_thread(struct net_device *dev) 660int ipoib_mcast_start_thread(struct net_device *dev)
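The ipoib_multicast.c hunks above restructure ipoib_mcast_join() so the caller keeps priv->lock and the lock is dropped only around ib_sa_join_multicast(), which can sleep (it allocates with GFP_KERNEL). A skeletal sketch of the "unlock around a sleeping call, re-lock, re-check state" pattern, with all names hypothetical:

    #include <linux/spinlock.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    struct obj {
            spinlock_t lock;
            bool still_valid;
    };

    /* Stands in for a call that may sleep (GFP_KERNEL allocation, etc.). */
    static int slow_blocking_op(struct obj *o)
    {
            return 0;
    }

    /* Caller must hold o->lock; returns with o->lock still held. */
    static int do_join(struct obj *o)
    {
            int ret;

            spin_unlock_irq(&o->lock);  /* never sleep under a spinlock */
            ret = slow_blocking_op(o);
            spin_lock_irq(&o->lock);

            if (!o->still_valid)        /* state may have changed while unlocked */
                    return -EAGAIN;

            return ret;
    }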
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 6727954ab74b..e8a84d12b7ff 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -1207,7 +1207,6 @@ static void xpad_led_disconnect(struct usb_xpad *xpad)
1207#else 1207#else
1208static int xpad_led_probe(struct usb_xpad *xpad) { return 0; } 1208static int xpad_led_probe(struct usb_xpad *xpad) { return 0; }
1209static void xpad_led_disconnect(struct usb_xpad *xpad) { } 1209static void xpad_led_disconnect(struct usb_xpad *xpad) { }
1210static void xpad_identify_controller(struct usb_xpad *xpad) { }
1211#endif 1210#endif
1212 1211
1213static int xpad_start_input(struct usb_xpad *xpad) 1212static int xpad_start_input(struct usb_xpad *xpad)
diff --git a/drivers/input/keyboard/adp5589-keys.c b/drivers/input/keyboard/adp5589-keys.c
index 4d446d5085aa..c01a1d648f9f 100644
--- a/drivers/input/keyboard/adp5589-keys.c
+++ b/drivers/input/keyboard/adp5589-keys.c
@@ -235,7 +235,7 @@ struct adp5589_kpad {
235 unsigned short gpimapsize; 235 unsigned short gpimapsize;
236 unsigned extend_cfg; 236 unsigned extend_cfg;
237 bool is_adp5585; 237 bool is_adp5585;
238 bool adp5585_support_row5; 238 bool support_row5;
239#ifdef CONFIG_GPIOLIB 239#ifdef CONFIG_GPIOLIB
240 unsigned char gpiomap[ADP5589_MAXGPIO]; 240 unsigned char gpiomap[ADP5589_MAXGPIO];
241 bool export_gpio; 241 bool export_gpio;
@@ -485,7 +485,7 @@ static int adp5589_build_gpiomap(struct adp5589_kpad *kpad,
485 if (kpad->extend_cfg & C4_EXTEND_CFG) 485 if (kpad->extend_cfg & C4_EXTEND_CFG)
486 pin_used[kpad->var->c4_extend_cfg] = true; 486 pin_used[kpad->var->c4_extend_cfg] = true;
487 487
488 if (!kpad->adp5585_support_row5) 488 if (!kpad->support_row5)
489 pin_used[5] = true; 489 pin_used[5] = true;
490 490
491 for (i = 0; i < kpad->var->maxgpio; i++) 491 for (i = 0; i < kpad->var->maxgpio; i++)
@@ -884,12 +884,13 @@ static int adp5589_probe(struct i2c_client *client,
884 884
885 switch (id->driver_data) { 885 switch (id->driver_data) {
886 case ADP5585_02: 886 case ADP5585_02:
887 kpad->adp5585_support_row5 = true; 887 kpad->support_row5 = true;
888 case ADP5585_01: 888 case ADP5585_01:
889 kpad->is_adp5585 = true; 889 kpad->is_adp5585 = true;
890 kpad->var = &const_adp5585; 890 kpad->var = &const_adp5585;
891 break; 891 break;
892 case ADP5589: 892 case ADP5589:
893 kpad->support_row5 = true;
893 kpad->var = &const_adp5589; 894 kpad->var = &const_adp5589;
894 break; 895 break;
895 } 896 }
diff --git a/drivers/input/keyboard/cap11xx.c b/drivers/input/keyboard/cap11xx.c
index 378db10001df..4401be225d64 100644
--- a/drivers/input/keyboard/cap11xx.c
+++ b/drivers/input/keyboard/cap11xx.c
@@ -304,8 +304,10 @@ static int cap11xx_init_leds(struct device *dev,
304 led->cdev.brightness = LED_OFF; 304 led->cdev.brightness = LED_OFF;
305 305
306 error = of_property_read_u32(child, "reg", &reg); 306 error = of_property_read_u32(child, "reg", &reg);
307 if (error != 0 || reg >= num_leds) 307 if (error != 0 || reg >= num_leds) {
308 of_node_put(child);
308 return -EINVAL; 309 return -EINVAL;
310 }
309 311
310 led->reg = reg; 312 led->reg = reg;
311 led->priv = priv; 313 led->priv = priv;
@@ -313,8 +315,10 @@ static int cap11xx_init_leds(struct device *dev,
313 INIT_WORK(&led->work, cap11xx_led_work); 315 INIT_WORK(&led->work, cap11xx_led_work);
314 316
315 error = devm_led_classdev_register(dev, &led->cdev); 317 error = devm_led_classdev_register(dev, &led->cdev);
316 if (error) 318 if (error) {
319 of_node_put(child);
317 return error; 320 return error;
321 }
318 322
319 priv->num_leds++; 323 priv->num_leds++;
320 led++; 324 led++;
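The cap11xx.c hunks above plug a device-node refcount leak: for_each_child_of_node() takes a reference on each child and only drops it when the loop advances, so any early return from inside the loop must call of_node_put() itself. A short sketch of the rule (the property name is just an example):

    #include <linux/of.h>
    #include <linux/types.h>
    #include <linux/errno.h>

    /* Read a "reg" property from every child; stop at the first bad one. */
    static int parse_children(struct device_node *parent)
    {
            struct device_node *child;
            u32 reg;

            for_each_child_of_node(parent, child) {
                    if (of_property_read_u32(child, "reg", &reg)) {
                            of_node_put(child); /* drop the iterator's reference */
                            return -EINVAL;
                    }
                    /* ... use reg ... */
            }

            return 0;
    }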
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index d6d16fa78281..1f2337abcf2f 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -733,7 +733,7 @@ config INPUT_XEN_KBDDEV_FRONTEND
733 module will be called xen-kbdfront. 733 module will be called xen-kbdfront.
734 734
735config INPUT_SIRFSOC_ONKEY 735config INPUT_SIRFSOC_ONKEY
736 bool "CSR SiRFSoC power on/off/suspend key support" 736 tristate "CSR SiRFSoC power on/off/suspend key support"
737 depends on ARCH_SIRF && OF 737 depends on ARCH_SIRF && OF
738 default y 738 default y
739 help 739 help
diff --git a/drivers/input/misc/sirfsoc-onkey.c b/drivers/input/misc/sirfsoc-onkey.c
index 9d5b89befe6f..ed7237f19539 100644
--- a/drivers/input/misc/sirfsoc-onkey.c
+++ b/drivers/input/misc/sirfsoc-onkey.c
@@ -101,7 +101,7 @@ static void sirfsoc_pwrc_close(struct input_dev *input)
101static const struct of_device_id sirfsoc_pwrc_of_match[] = { 101static const struct of_device_id sirfsoc_pwrc_of_match[] = {
102 { .compatible = "sirf,prima2-pwrc" }, 102 { .compatible = "sirf,prima2-pwrc" },
103 {}, 103 {},
104} 104};
105MODULE_DEVICE_TABLE(of, sirfsoc_pwrc_of_match); 105MODULE_DEVICE_TABLE(of, sirfsoc_pwrc_of_match);
106 106
107static int sirfsoc_pwrc_probe(struct platform_device *pdev) 107static int sirfsoc_pwrc_probe(struct platform_device *pdev)
diff --git a/drivers/input/mouse/vmmouse.c b/drivers/input/mouse/vmmouse.c
index e272f06258ce..a3f0f5a47490 100644
--- a/drivers/input/mouse/vmmouse.c
+++ b/drivers/input/mouse/vmmouse.c
@@ -458,8 +458,6 @@ int vmmouse_init(struct psmouse *psmouse)
458 priv->abs_dev = abs_dev; 458 priv->abs_dev = abs_dev;
459 psmouse->private = priv; 459 psmouse->private = priv;
460 460
461 input_set_capability(rel_dev, EV_REL, REL_WHEEL);
462
463 /* Set up and register absolute device */ 461 /* Set up and register absolute device */
464 snprintf(priv->phys, sizeof(priv->phys), "%s/input1", 462 snprintf(priv->phys, sizeof(priv->phys), "%s/input1",
465 psmouse->ps2dev.serio->phys); 463 psmouse->ps2dev.serio->phys);
@@ -475,10 +473,6 @@ int vmmouse_init(struct psmouse *psmouse)
475 abs_dev->id.version = psmouse->model; 473 abs_dev->id.version = psmouse->model;
476 abs_dev->dev.parent = &psmouse->ps2dev.serio->dev; 474 abs_dev->dev.parent = &psmouse->ps2dev.serio->dev;
477 475
478 error = input_register_device(priv->abs_dev);
479 if (error)
480 goto init_fail;
481
482 /* Set absolute device capabilities */ 476 /* Set absolute device capabilities */
483 input_set_capability(abs_dev, EV_KEY, BTN_LEFT); 477 input_set_capability(abs_dev, EV_KEY, BTN_LEFT);
484 input_set_capability(abs_dev, EV_KEY, BTN_RIGHT); 478 input_set_capability(abs_dev, EV_KEY, BTN_RIGHT);
@@ -488,6 +482,13 @@ int vmmouse_init(struct psmouse *psmouse)
488 input_set_abs_params(abs_dev, ABS_X, 0, VMMOUSE_MAX_X, 0, 0); 482 input_set_abs_params(abs_dev, ABS_X, 0, VMMOUSE_MAX_X, 0, 0);
489 input_set_abs_params(abs_dev, ABS_Y, 0, VMMOUSE_MAX_Y, 0, 0); 483 input_set_abs_params(abs_dev, ABS_Y, 0, VMMOUSE_MAX_Y, 0, 0);
490 484
485 error = input_register_device(priv->abs_dev);
486 if (error)
487 goto init_fail;
488
489 /* Add wheel capability to the relative device */
490 input_set_capability(rel_dev, EV_REL, REL_WHEEL);
491
491 psmouse->protocol_handler = vmmouse_process_byte; 492 psmouse->protocol_handler = vmmouse_process_byte;
492 psmouse->disconnect = vmmouse_disconnect; 493 psmouse->disconnect = vmmouse_disconnect;
493 psmouse->reconnect = vmmouse_reconnect; 494 psmouse->reconnect = vmmouse_reconnect;
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index 8f828975ab10..1ca7f551e2da 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -134,7 +134,7 @@ static void serio_find_driver(struct serio *serio)
134 int error; 134 int error;
135 135
136 error = device_attach(&serio->dev); 136 error = device_attach(&serio->dev);
137 if (error < 0) 137 if (error < 0 && error != -EPROBE_DEFER)
138 dev_warn(&serio->dev, 138 dev_warn(&serio->dev,
139 "device_attach() failed for %s (%s), error: %d\n", 139 "device_attach() failed for %s (%s), error: %d\n",
140 serio->phys, serio->name, error); 140 serio->phys, serio->name, error);
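device_attach() can legitimately come back with -EPROBE_DEFER when a dependency is not ready yet; that case is retried later and should not be logged as a failure, which is all the serio hunk changes. A small standalone sketch of the "stay quiet on probe deferral" check (the EPROBE_DEFER value is copied from the kernel's include/linux/errno.h; the attach routine is a fake for the demo):

#include <stdio.h>

#define EPROBE_DEFER 517	/* kernel-internal errno, see include/linux/errno.h */

/* Pretend attach routine: < 0 is an error, -EPROBE_DEFER means "try again later". */
static int fake_attach(int ready)
{
	return ready ? 0 : -EPROBE_DEFER;
}

int main(void)
{
	int error = fake_attach(0);

	if (error < 0 && error != -EPROBE_DEFER)
		fprintf(stderr, "attach failed, error: %d\n", error);
	else if (error == -EPROBE_DEFER)
		printf("deferred, will be retried once dependencies show up\n");
	return 0;
}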
diff --git a/drivers/input/touchscreen/colibri-vf50-ts.c b/drivers/input/touchscreen/colibri-vf50-ts.c
index 5d4903a402cc..69828d015d45 100644
--- a/drivers/input/touchscreen/colibri-vf50-ts.c
+++ b/drivers/input/touchscreen/colibri-vf50-ts.c
@@ -21,6 +21,7 @@
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/of.h>
24#include <linux/pinctrl/consumer.h> 25#include <linux/pinctrl/consumer.h>
25#include <linux/platform_device.h> 26#include <linux/platform_device.h>
26#include <linux/slab.h> 27#include <linux/slab.h>
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 0b0f8c17f3f7..23fbe382da8b 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -822,16 +822,22 @@ static void edt_ft5x06_ts_get_defaults(struct device *dev,
822 int error; 822 int error;
823 823
824 error = device_property_read_u32(dev, "threshold", &val); 824 error = device_property_read_u32(dev, "threshold", &val);
825 if (!error) 825 if (!error) {
826 reg_addr->reg_threshold = val; 826 edt_ft5x06_register_write(tsdata, reg_addr->reg_threshold, val);
827 tsdata->threshold = val;
828 }
827 829
828 error = device_property_read_u32(dev, "gain", &val); 830 error = device_property_read_u32(dev, "gain", &val);
829 if (!error) 831 if (!error) {
830 reg_addr->reg_gain = val; 832 edt_ft5x06_register_write(tsdata, reg_addr->reg_gain, val);
833 tsdata->gain = val;
834 }
831 835
832 error = device_property_read_u32(dev, "offset", &val); 836 error = device_property_read_u32(dev, "offset", &val);
833 if (!error) 837 if (!error) {
834 reg_addr->reg_offset = val; 838 edt_ft5x06_register_write(tsdata, reg_addr->reg_offset, val);
839 tsdata->offset = val;
840 }
835} 841}
836 842
837static void 843static void
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 539b0dea8034..374c129219ef 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -114,6 +114,7 @@ struct kmem_cache *amd_iommu_irq_cache;
114 114
115static void update_domain(struct protection_domain *domain); 115static void update_domain(struct protection_domain *domain);
116static int protection_domain_init(struct protection_domain *domain); 116static int protection_domain_init(struct protection_domain *domain);
117static void detach_device(struct device *dev);
117 118
118/* 119/*
119 * For dynamic growth the aperture size is split into ranges of 128MB of 120 * For dynamic growth the aperture size is split into ranges of 128MB of
@@ -384,6 +385,9 @@ static void iommu_uninit_device(struct device *dev)
384 if (!dev_data) 385 if (!dev_data)
385 return; 386 return;
386 387
388 if (dev_data->domain)
389 detach_device(dev);
390
387 iommu_device_unlink(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev, 391 iommu_device_unlink(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev,
388 dev); 392 dev);
389 393
@@ -2049,7 +2053,7 @@ static void do_attach(struct iommu_dev_data *dev_data,
2049 /* Update device table */ 2053 /* Update device table */
2050 set_dte_entry(dev_data->devid, domain, ats); 2054 set_dte_entry(dev_data->devid, domain, ats);
2051 if (alias != dev_data->devid) 2055 if (alias != dev_data->devid)
2052 set_dte_entry(dev_data->devid, domain, ats); 2056 set_dte_entry(alias, domain, ats);
2053 2057
2054 device_flush_dte(dev_data); 2058 device_flush_dte(dev_data);
2055} 2059}
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 013bdfff2d4d..bf4959f4225b 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -228,6 +228,10 @@ static int amd_iommu_enable_interrupts(void);
228static int __init iommu_go_to_state(enum iommu_init_state state); 228static int __init iommu_go_to_state(enum iommu_init_state state);
229static void init_device_table_dma(void); 229static void init_device_table_dma(void);
230 230
231static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu,
232 u8 bank, u8 cntr, u8 fxn,
233 u64 *value, bool is_write);
234
231static inline void update_last_devid(u16 devid) 235static inline void update_last_devid(u16 devid)
232{ 236{
233 if (devid > amd_iommu_last_bdf) 237 if (devid > amd_iommu_last_bdf)
@@ -1016,6 +1020,34 @@ static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
1016} 1020}
1017 1021
1018/* 1022/*
1023 * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
1024 * Workaround:
1025 * BIOS should enable ATS write permission check by setting
1026 * L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
1027 */
1028static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
1029{
1030 u32 value;
1031
1032 if ((boot_cpu_data.x86 != 0x15) ||
1033 (boot_cpu_data.x86_model < 0x30) ||
1034 (boot_cpu_data.x86_model > 0x3f))
1035 return;
1036
1037 /* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
1038 value = iommu_read_l2(iommu, 0x47);
1039
1040 if (value & BIT(0))
1041 return;
1042
1043 /* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
1044 iommu_write_l2(iommu, 0x47, value | BIT(0));
1045
1046 pr_info("AMD-Vi: Applying ATS write check workaround for IOMMU at %s\n",
1047 dev_name(&iommu->dev->dev));
1048}
1049
1050/*
1019 * This function clues the initialization function for one IOMMU 1051 * This function clues the initialization function for one IOMMU
1020 * together and also allocates the command buffer and programs the 1052 * together and also allocates the command buffer and programs the
1021 * hardware. It does NOT enable the IOMMU. This is done afterwards. 1053 * hardware. It does NOT enable the IOMMU. This is done afterwards.
@@ -1142,8 +1174,8 @@ static void init_iommu_perf_ctr(struct amd_iommu *iommu)
1142 amd_iommu_pc_present = true; 1174 amd_iommu_pc_present = true;
1143 1175
1144 /* Check if the performance counters can be written to */ 1176 /* Check if the performance counters can be written to */
1145 if ((0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val, true)) || 1177 if ((0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val, true)) ||
1146 (0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val2, false)) || 1178 (0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val2, false)) ||
1147 (val != val2)) { 1179 (val != val2)) {
1148 pr_err("AMD-Vi: Unable to write to IOMMU perf counter.\n"); 1180 pr_err("AMD-Vi: Unable to write to IOMMU perf counter.\n");
1149 amd_iommu_pc_present = false; 1181 amd_iommu_pc_present = false;
@@ -1284,6 +1316,7 @@ static int iommu_init_pci(struct amd_iommu *iommu)
1284 } 1316 }
1285 1317
1286 amd_iommu_erratum_746_workaround(iommu); 1318 amd_iommu_erratum_746_workaround(iommu);
1319 amd_iommu_ats_write_check_workaround(iommu);
1287 1320
1288 iommu->iommu_dev = iommu_device_create(&iommu->dev->dev, iommu, 1321 iommu->iommu_dev = iommu_device_create(&iommu->dev->dev, iommu,
1289 amd_iommu_groups, "ivhd%d", 1322 amd_iommu_groups, "ivhd%d",
@@ -2283,22 +2316,15 @@ u8 amd_iommu_pc_get_max_counters(u16 devid)
2283} 2316}
2284EXPORT_SYMBOL(amd_iommu_pc_get_max_counters); 2317EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
2285 2318
2286int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn, 2319static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu,
2320 u8 bank, u8 cntr, u8 fxn,
2287 u64 *value, bool is_write) 2321 u64 *value, bool is_write)
2288{ 2322{
2289 struct amd_iommu *iommu;
2290 u32 offset; 2323 u32 offset;
2291 u32 max_offset_lim; 2324 u32 max_offset_lim;
2292 2325
2293 /* Make sure the IOMMU PC resource is available */
2294 if (!amd_iommu_pc_present)
2295 return -ENODEV;
2296
2297 /* Locate the iommu associated with the device ID */
2298 iommu = amd_iommu_rlookup_table[devid];
2299
2300 /* Check for valid iommu and pc register indexing */ 2326 /* Check for valid iommu and pc register indexing */
2301 if (WARN_ON((iommu == NULL) || (fxn > 0x28) || (fxn & 7))) 2327 if (WARN_ON((fxn > 0x28) || (fxn & 7)))
2302 return -ENODEV; 2328 return -ENODEV;
2303 2329
2304 offset = (u32)(((0x40|bank) << 12) | (cntr << 8) | fxn); 2330 offset = (u32)(((0x40|bank) << 12) | (cntr << 8) | fxn);
@@ -2322,3 +2348,16 @@ int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
2322 return 0; 2348 return 0;
2323} 2349}
2324EXPORT_SYMBOL(amd_iommu_pc_get_set_reg_val); 2350EXPORT_SYMBOL(amd_iommu_pc_get_set_reg_val);
2351
2352int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
2353 u64 *value, bool is_write)
2354{
2355 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
2356
2357 /* Make sure the IOMMU PC resource is available */
2358 if (!amd_iommu_pc_present || iommu == NULL)
2359 return -ENODEV;
2360
2361 return iommu_pc_get_set_reg_val(iommu, bank, cntr, fxn,
2362 value, is_write);
2363}
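The perf-counter refactor above splits the exported amd_iommu_pc_get_set_reg_val() into a thin wrapper that checks availability and resolves the device ID to an IOMMU, plus an internal helper that only deals with register indexing, so init code that already holds an iommu pointer can skip the lookup. A standalone sketch of that wrapper/helper split under toy types (none of these names are the AMD IOMMU structures):

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_iommu { uint64_t regs[64]; };

static struct toy_iommu iommus[2];
static struct toy_iommu *rlookup[4] = { &iommus[0], &iommus[0], &iommus[1], NULL };
static bool pc_present = true;

/* Internal helper: assumes the caller already found a valid iommu. */
static int pc_get_set(struct toy_iommu *iommu, uint8_t reg, uint64_t *value, bool is_write)
{
	if (reg >= 64)
		return -ENODEV;
	if (is_write)
		iommu->regs[reg] = *value;
	else
		*value = iommu->regs[reg];
	return 0;
}

/* Public entry point: availability check plus devid -> iommu lookup. */
static int pc_get_set_by_devid(uint16_t devid, uint8_t reg, uint64_t *value, bool is_write)
{
	struct toy_iommu *iommu = devid < 4 ? rlookup[devid] : NULL;

	if (!pc_present || !iommu)
		return -ENODEV;
	return pc_get_set(iommu, reg, value, is_write);
}

int main(void)
{
	uint64_t v = 42, r = 0;

	pc_get_set_by_devid(0, 3, &v, true);
	pc_get_set_by_devid(0, 3, &r, false);
	printf("read back %llu\n", (unsigned long long)r);
	return 0;
}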
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 62a400c5ba06..8ffd7568fc91 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -329,7 +329,8 @@ static int dmar_pci_bus_notifier(struct notifier_block *nb,
329 /* Only care about add/remove events for physical functions */ 329 /* Only care about add/remove events for physical functions */
330 if (pdev->is_virtfn) 330 if (pdev->is_virtfn)
331 return NOTIFY_DONE; 331 return NOTIFY_DONE;
332 if (action != BUS_NOTIFY_ADD_DEVICE && action != BUS_NOTIFY_DEL_DEVICE) 332 if (action != BUS_NOTIFY_ADD_DEVICE &&
333 action != BUS_NOTIFY_REMOVED_DEVICE)
333 return NOTIFY_DONE; 334 return NOTIFY_DONE;
334 335
335 info = dmar_alloc_pci_notify_info(pdev, action); 336 info = dmar_alloc_pci_notify_info(pdev, action);
@@ -339,7 +340,7 @@ static int dmar_pci_bus_notifier(struct notifier_block *nb,
339 down_write(&dmar_global_lock); 340 down_write(&dmar_global_lock);
340 if (action == BUS_NOTIFY_ADD_DEVICE) 341 if (action == BUS_NOTIFY_ADD_DEVICE)
341 dmar_pci_bus_add_dev(info); 342 dmar_pci_bus_add_dev(info);
342 else if (action == BUS_NOTIFY_DEL_DEVICE) 343 else if (action == BUS_NOTIFY_REMOVED_DEVICE)
343 dmar_pci_bus_del_dev(info); 344 dmar_pci_bus_del_dev(info);
344 up_write(&dmar_global_lock); 345 up_write(&dmar_global_lock);
345 346
@@ -1353,7 +1354,7 @@ void dmar_disable_qi(struct intel_iommu *iommu)
1353 1354
1354 raw_spin_lock_irqsave(&iommu->register_lock, flags); 1355 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1355 1356
1356 sts = dmar_readq(iommu->reg + DMAR_GSTS_REG); 1357 sts = readl(iommu->reg + DMAR_GSTS_REG);
1357 if (!(sts & DMA_GSTS_QIES)) 1358 if (!(sts & DMA_GSTS_QIES))
1358 goto end; 1359 goto end;
1359 1360
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index ac7387686ddc..a2e1b7f14df2 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1489,7 +1489,7 @@ static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1489{ 1489{
1490 struct pci_dev *pdev; 1490 struct pci_dev *pdev;
1491 1491
1492 if (dev_is_pci(info->dev)) 1492 if (!dev_is_pci(info->dev))
1493 return; 1493 return;
1494 1494
1495 pdev = to_pci_dev(info->dev); 1495 pdev = to_pci_dev(info->dev);
@@ -4367,7 +4367,7 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
4367 rmrru->devices_cnt); 4367 rmrru->devices_cnt);
4368 if(ret < 0) 4368 if(ret < 0)
4369 return ret; 4369 return ret;
4370 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) { 4370 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
4371 dmar_remove_dev_scope(info, rmrr->segment, 4371 dmar_remove_dev_scope(info, rmrr->segment,
4372 rmrru->devices, rmrru->devices_cnt); 4372 rmrru->devices, rmrru->devices_cnt);
4373 } 4373 }
@@ -4387,7 +4387,7 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
4387 break; 4387 break;
4388 else if(ret < 0) 4388 else if(ret < 0)
4389 return ret; 4389 return ret;
4390 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) { 4390 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
4391 if (dmar_remove_dev_scope(info, atsr->segment, 4391 if (dmar_remove_dev_scope(info, atsr->segment,
4392 atsru->devices, atsru->devices_cnt)) 4392 atsru->devices, atsru->devices_cnt))
4393 break; 4393 break;
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index 50464833d0b8..d9939fa9b588 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -249,12 +249,30 @@ static void intel_flush_pasid_dev(struct intel_svm *svm, struct intel_svm_dev *s
249static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm) 249static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
250{ 250{
251 struct intel_svm *svm = container_of(mn, struct intel_svm, notifier); 251 struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
252 struct intel_svm_dev *sdev;
252 253
254 /* This might end up being called from exit_mmap(), *before* the page
255 * tables are cleared. And __mmu_notifier_release() will delete us from
256 * the list of notifiers so that our invalidate_range() callback doesn't
257 * get called when the page tables are cleared. So we need to protect
258 * against hardware accessing those page tables.
259 *
260 * We do it by clearing the entry in the PASID table and then flushing
261 * the IOTLB and the PASID table caches. This might upset hardware;
262 * perhaps we'll want to point the PASID to a dummy PGD (like the zero
263 * page) so that we end up taking a fault that the hardware really
264 * *has* to handle gracefully without affecting other processes.
265 */
253 svm->iommu->pasid_table[svm->pasid].val = 0; 266 svm->iommu->pasid_table[svm->pasid].val = 0;
267 wmb();
268
269 rcu_read_lock();
270 list_for_each_entry_rcu(sdev, &svm->devs, list) {
271 intel_flush_pasid_dev(svm, sdev, svm->pasid);
272 intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
273 }
274 rcu_read_unlock();
254 275
255 /* There's no need to do any flush because we can't get here if there
256 * are any devices left anyway. */
257 WARN_ON(!list_empty(&svm->devs));
258} 276}
259 277
260static const struct mmu_notifier_ops intel_mmuops = { 278static const struct mmu_notifier_ops intel_mmuops = {
@@ -379,7 +397,6 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
379 goto out; 397 goto out;
380 } 398 }
381 iommu->pasid_table[svm->pasid].val = (u64)__pa(mm->pgd) | 1; 399 iommu->pasid_table[svm->pasid].val = (u64)__pa(mm->pgd) | 1;
382 mm = NULL;
383 } else 400 } else
384 iommu->pasid_table[svm->pasid].val = (u64)__pa(init_mm.pgd) | 1 | (1ULL << 11); 401 iommu->pasid_table[svm->pasid].val = (u64)__pa(init_mm.pgd) | 1 | (1ULL << 11);
385 wmb(); 402 wmb();
@@ -442,11 +459,11 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
442 kfree_rcu(sdev, rcu); 459 kfree_rcu(sdev, rcu);
443 460
444 if (list_empty(&svm->devs)) { 461 if (list_empty(&svm->devs)) {
445 mmu_notifier_unregister(&svm->notifier, svm->mm);
446 462
447 idr_remove(&svm->iommu->pasid_idr, svm->pasid); 463 idr_remove(&svm->iommu->pasid_idr, svm->pasid);
448 if (svm->mm) 464 if (svm->mm)
449 mmput(svm->mm); 465 mmu_notifier_unregister(&svm->notifier, svm->mm);
466
450 /* We mandate that no page faults may be outstanding 467 /* We mandate that no page faults may be outstanding
451 * for the PASID when intel_svm_unbind_mm() is called. 468 * for the PASID when intel_svm_unbind_mm() is called.
452 * If that is not obeyed, subtle errors will happen. 469 * If that is not obeyed, subtle errors will happen.
@@ -507,6 +524,10 @@ static irqreturn_t prq_event_thread(int irq, void *d)
507 struct intel_svm *svm = NULL; 524 struct intel_svm *svm = NULL;
508 int head, tail, handled = 0; 525 int head, tail, handled = 0;
509 526
527 /* Clear PPR bit before reading head/tail registers, to
528 * ensure that we get a new interrupt if needed. */
529 writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG);
530
510 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK; 531 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
511 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK; 532 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
512 while (head != tail) { 533 while (head != tail) {
@@ -551,6 +572,9 @@ static irqreturn_t prq_event_thread(int irq, void *d)
551 * any faults on kernel addresses. */ 572 * any faults on kernel addresses. */
552 if (!svm->mm) 573 if (!svm->mm)
553 goto bad_req; 574 goto bad_req;
575 /* If the mm is already defunct, don't handle faults. */
576 if (!atomic_inc_not_zero(&svm->mm->mm_users))
577 goto bad_req;
554 down_read(&svm->mm->mmap_sem); 578 down_read(&svm->mm->mmap_sem);
555 vma = find_extend_vma(svm->mm, address); 579 vma = find_extend_vma(svm->mm, address);
556 if (!vma || address < vma->vm_start) 580 if (!vma || address < vma->vm_start)
@@ -567,6 +591,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
567 result = QI_RESP_SUCCESS; 591 result = QI_RESP_SUCCESS;
568 invalid: 592 invalid:
569 up_read(&svm->mm->mmap_sem); 593 up_read(&svm->mm->mmap_sem);
594 mmput(svm->mm);
570 bad_req: 595 bad_req:
571 /* Accounting for major/minor faults? */ 596 /* Accounting for major/minor faults? */
572 rcu_read_lock(); 597 rcu_read_lock();
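The page-request handler above now touches the faulting mm only if atomic_inc_not_zero(&svm->mm->mm_users) succeeds, i.e. it takes a reference only while the address space is still live, and drops it with mmput() once the fault is resolved. A standalone sketch of the "increment only if not already zero" refcount primitive using C11 atomics (the kernel uses its own atomic_t helpers rather than stdatomic):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Grab a reference only if the object still has users; mirrors the intent of
 * atomic_inc_not_zero() in the fault path. */
static bool get_ref_not_zero(atomic_int *refs)
{
	int old = atomic_load(refs);

	while (old != 0) {
		if (atomic_compare_exchange_weak(refs, &old, old + 1))
			return true;	/* reference taken */
	}
	return false;			/* object already defunct, do not touch it */
}

static void put_ref(atomic_int *refs)
{
	atomic_fetch_sub(refs, 1);
}

int main(void)
{
	atomic_int live = 1, dead = 0;

	if (get_ref_not_zero(&live)) {
		printf("handling fault against a live mm\n");
		put_ref(&live);
	}
	if (!get_ref_not_zero(&dead))
		printf("mm already gone, fail the request instead\n");
	return 0;
}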
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index c12ba4516df2..ac596928f6b4 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -629,7 +629,7 @@ static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
629 629
630 raw_spin_lock_irqsave(&iommu->register_lock, flags); 630 raw_spin_lock_irqsave(&iommu->register_lock, flags);
631 631
632 sts = dmar_readq(iommu->reg + DMAR_GSTS_REG); 632 sts = readl(iommu->reg + DMAR_GSTS_REG);
633 if (!(sts & DMA_GSTS_IRES)) 633 if (!(sts & DMA_GSTS_IRES))
634 goto end; 634 goto end;
635 635
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 8bbcbfe7695c..381ca5a37a7b 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -25,6 +25,7 @@
25#include <linux/sizes.h> 25#include <linux/sizes.h>
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/types.h> 27#include <linux/types.h>
28#include <linux/dma-mapping.h>
28 29
29#include <asm/barrier.h> 30#include <asm/barrier.h>
30 31
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 715923d5236c..fb50911b3940 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -159,6 +159,7 @@ config TB10X_IRQC
159config TS4800_IRQ 159config TS4800_IRQ
160 tristate "TS-4800 IRQ controller" 160 tristate "TS-4800 IRQ controller"
161 select IRQ_DOMAIN 161 select IRQ_DOMAIN
162 depends on HAS_IOMEM
162 help 163 help
163 Support for the TS-4800 FPGA IRQ controller 164 Support for the TS-4800 FPGA IRQ controller
164 165
diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c
index b12a5d58546f..37199b9b2cfa 100644
--- a/drivers/irqchip/irq-atmel-aic-common.c
+++ b/drivers/irqchip/irq-atmel-aic-common.c
@@ -86,7 +86,7 @@ int aic_common_set_priority(int priority, unsigned *val)
86 priority > AT91_AIC_IRQ_MAX_PRIORITY) 86 priority > AT91_AIC_IRQ_MAX_PRIORITY)
87 return -EINVAL; 87 return -EINVAL;
88 88
89 *val &= AT91_AIC_PRIOR; 89 *val &= ~AT91_AIC_PRIOR;
90 *val |= priority; 90 *val |= priority;
91 91
92 return 0; 92 return 0;
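The one-character AIC fix matters: and-ing with AT91_AIC_PRIOR keeps the old priority bits and throws away everything else, while the intent is the opposite, clear the priority field and then or in the new value. A standalone sketch of the clear-then-set read-modify-write idiom (the field mask here is made up for the demo):

#include <assert.h>
#include <stdio.h>

#define PRIOR_MASK 0x7u	/* demo priority field, bits [2:0] */

static unsigned int set_priority(unsigned int val, unsigned int prio)
{
	val &= ~PRIOR_MASK;	/* clear the field first... */
	val |= prio;		/* ...then insert the new priority */
	return val;
}

int main(void)
{
	unsigned int reg = 0xA5;	/* other configuration bits + old priority 5 */
	unsigned int out = set_priority(reg, 2);

	assert((out & PRIOR_MASK) == 2);			/* new priority landed */
	assert((out & ~PRIOR_MASK) == (reg & ~PRIOR_MASK));	/* other bits preserved */
	printf("0x%x -> 0x%x\n", reg, out);
	return 0;
}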
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index e23d1d18f9d6..43dfd15c1dd2 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -66,7 +66,10 @@ struct its_node {
66 unsigned long phys_base; 66 unsigned long phys_base;
67 struct its_cmd_block *cmd_base; 67 struct its_cmd_block *cmd_base;
68 struct its_cmd_block *cmd_write; 68 struct its_cmd_block *cmd_write;
69 void *tables[GITS_BASER_NR_REGS]; 69 struct {
70 void *base;
71 u32 order;
72 } tables[GITS_BASER_NR_REGS];
70 struct its_collection *collections; 73 struct its_collection *collections;
71 struct list_head its_device_list; 74 struct list_head its_device_list;
72 u64 flags; 75 u64 flags;
@@ -75,6 +78,9 @@ struct its_node {
75 78
76#define ITS_ITT_ALIGN SZ_256 79#define ITS_ITT_ALIGN SZ_256
77 80
81/* Convert page order to size in bytes */
82#define PAGE_ORDER_TO_SIZE(o) (PAGE_SIZE << (o))
83
78struct event_lpi_map { 84struct event_lpi_map {
79 unsigned long *lpi_map; 85 unsigned long *lpi_map;
80 u16 *col_map; 86 u16 *col_map;
@@ -597,11 +603,6 @@ static void its_unmask_irq(struct irq_data *d)
597 lpi_set_config(d, true); 603 lpi_set_config(d, true);
598} 604}
599 605
600static void its_eoi_irq(struct irq_data *d)
601{
602 gic_write_eoir(d->hwirq);
603}
604
605static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val, 606static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
606 bool force) 607 bool force)
607{ 608{
@@ -638,7 +639,7 @@ static struct irq_chip its_irq_chip = {
638 .name = "ITS", 639 .name = "ITS",
639 .irq_mask = its_mask_irq, 640 .irq_mask = its_mask_irq,
640 .irq_unmask = its_unmask_irq, 641 .irq_unmask = its_unmask_irq,
641 .irq_eoi = its_eoi_irq, 642 .irq_eoi = irq_chip_eoi_parent,
642 .irq_set_affinity = its_set_affinity, 643 .irq_set_affinity = its_set_affinity,
643 .irq_compose_msi_msg = its_irq_compose_msi_msg, 644 .irq_compose_msi_msg = its_irq_compose_msi_msg,
644}; 645};
@@ -807,9 +808,10 @@ static void its_free_tables(struct its_node *its)
807 int i; 808 int i;
808 809
809 for (i = 0; i < GITS_BASER_NR_REGS; i++) { 810 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
810 if (its->tables[i]) { 811 if (its->tables[i].base) {
811 free_page((unsigned long)its->tables[i]); 812 free_pages((unsigned long)its->tables[i].base,
812 its->tables[i] = NULL; 813 its->tables[i].order);
814 its->tables[i].base = NULL;
813 } 815 }
814 } 816 }
815} 817}
@@ -842,7 +844,6 @@ static int its_alloc_tables(const char *node_name, struct its_node *its)
842 u64 type = GITS_BASER_TYPE(val); 844 u64 type = GITS_BASER_TYPE(val);
843 u64 entry_size = GITS_BASER_ENTRY_SIZE(val); 845 u64 entry_size = GITS_BASER_ENTRY_SIZE(val);
844 int order = get_order(psz); 846 int order = get_order(psz);
845 int alloc_size;
846 int alloc_pages; 847 int alloc_pages;
847 u64 tmp; 848 u64 tmp;
848 void *base; 849 void *base;
@@ -874,8 +875,8 @@ static int its_alloc_tables(const char *node_name, struct its_node *its)
874 } 875 }
875 } 876 }
876 877
877 alloc_size = (1 << order) * PAGE_SIZE; 878retry_alloc_baser:
878 alloc_pages = (alloc_size / psz); 879 alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
879 if (alloc_pages > GITS_BASER_PAGES_MAX) { 880 if (alloc_pages > GITS_BASER_PAGES_MAX) {
880 alloc_pages = GITS_BASER_PAGES_MAX; 881 alloc_pages = GITS_BASER_PAGES_MAX;
881 order = get_order(GITS_BASER_PAGES_MAX * psz); 882 order = get_order(GITS_BASER_PAGES_MAX * psz);
@@ -889,7 +890,8 @@ static int its_alloc_tables(const char *node_name, struct its_node *its)
889 goto out_free; 890 goto out_free;
890 } 891 }
891 892
892 its->tables[i] = base; 893 its->tables[i].base = base;
894 its->tables[i].order = order;
893 895
894retry_baser: 896retry_baser:
895 val = (virt_to_phys(base) | 897 val = (virt_to_phys(base) |
@@ -927,7 +929,7 @@ retry_baser:
927 shr = tmp & GITS_BASER_SHAREABILITY_MASK; 929 shr = tmp & GITS_BASER_SHAREABILITY_MASK;
928 if (!shr) { 930 if (!shr) {
929 cache = GITS_BASER_nC; 931 cache = GITS_BASER_nC;
930 __flush_dcache_area(base, alloc_size); 932 __flush_dcache_area(base, PAGE_ORDER_TO_SIZE(order));
931 } 933 }
932 goto retry_baser; 934 goto retry_baser;
933 } 935 }
@@ -938,13 +940,16 @@ retry_baser:
938 * size and retry. If we reach 4K, then 940 * size and retry. If we reach 4K, then
939 * something is horribly wrong... 941 * something is horribly wrong...
940 */ 942 */
943 free_pages((unsigned long)base, order);
944 its->tables[i].base = NULL;
945
941 switch (psz) { 946 switch (psz) {
942 case SZ_16K: 947 case SZ_16K:
943 psz = SZ_4K; 948 psz = SZ_4K;
944 goto retry_baser; 949 goto retry_alloc_baser;
945 case SZ_64K: 950 case SZ_64K:
946 psz = SZ_16K; 951 psz = SZ_16K;
947 goto retry_baser; 952 goto retry_alloc_baser;
948 } 953 }
949 } 954 }
950 955
@@ -957,7 +962,7 @@ retry_baser:
957 } 962 }
958 963
959 pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n", 964 pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n",
960 (int)(alloc_size / entry_size), 965 (int)(PAGE_ORDER_TO_SIZE(order) / entry_size),
961 its_base_type_string[type], 966 its_base_type_string[type],
962 (unsigned long)virt_to_phys(base), 967 (unsigned long)virt_to_phys(base),
963 psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT); 968 psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
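The ITS hunk stores the allocation order next to each table base so that its_free_tables() and the shrink-and-retry path can hand the same order back to free_pages(), and PAGE_ORDER_TO_SIZE() replaces the open-coded (1 << order) * PAGE_SIZE. A standalone sketch of keeping base and order together, with plain malloc standing in for the page allocator (DEMO_PAGE_SIZE and the struct are invented for the example):

#include <stdio.h>
#include <stdlib.h>

#define DEMO_PAGE_SIZE 4096UL
#define PAGE_ORDER_TO_SIZE(o) (DEMO_PAGE_SIZE << (o))	/* order -> bytes, as in the patch */

struct table {
	void *base;
	unsigned int order;	/* remembered so the free side matches the allocation */
};

static int alloc_table(struct table *t, unsigned int order)
{
	t->base = malloc(PAGE_ORDER_TO_SIZE(order));
	if (!t->base)
		return -1;
	t->order = order;
	return 0;
}

static void free_table(struct table *t)
{
	if (t->base) {
		/* With the real page allocator this would be free_pages(base, order). */
		free(t->base);
		t->base = NULL;
	}
}

int main(void)
{
	struct table t;

	if (!alloc_table(&t, 4)) {
		printf("allocated %lu bytes (order 4)\n", PAGE_ORDER_TO_SIZE(4));
		free_table(&t);
	}
	return 0;
}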
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 911758c056c1..8f9ebf714e2b 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -384,9 +384,6 @@ static struct irq_chip gic_chip = {
384 .irq_unmask = gic_unmask_irq, 384 .irq_unmask = gic_unmask_irq,
385 .irq_eoi = gic_eoi_irq, 385 .irq_eoi = gic_eoi_irq,
386 .irq_set_type = gic_set_type, 386 .irq_set_type = gic_set_type,
387#ifdef CONFIG_SMP
388 .irq_set_affinity = gic_set_affinity,
389#endif
390 .irq_get_irqchip_state = gic_irq_get_irqchip_state, 387 .irq_get_irqchip_state = gic_irq_get_irqchip_state,
391 .irq_set_irqchip_state = gic_irq_set_irqchip_state, 388 .irq_set_irqchip_state = gic_irq_set_irqchip_state,
392 .flags = IRQCHIP_SET_TYPE_MASKED | 389 .flags = IRQCHIP_SET_TYPE_MASKED |
@@ -400,9 +397,6 @@ static struct irq_chip gic_eoimode1_chip = {
400 .irq_unmask = gic_unmask_irq, 397 .irq_unmask = gic_unmask_irq,
401 .irq_eoi = gic_eoimode1_eoi_irq, 398 .irq_eoi = gic_eoimode1_eoi_irq,
402 .irq_set_type = gic_set_type, 399 .irq_set_type = gic_set_type,
403#ifdef CONFIG_SMP
404 .irq_set_affinity = gic_set_affinity,
405#endif
406 .irq_get_irqchip_state = gic_irq_get_irqchip_state, 400 .irq_get_irqchip_state = gic_irq_get_irqchip_state,
407 .irq_set_irqchip_state = gic_irq_set_irqchip_state, 401 .irq_set_irqchip_state = gic_irq_set_irqchip_state,
408 .irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity, 402 .irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity,
@@ -443,7 +437,7 @@ static void gic_cpu_if_up(struct gic_chip_data *gic)
443 u32 bypass = 0; 437 u32 bypass = 0;
444 u32 mode = 0; 438 u32 mode = 0;
445 439
446 if (static_key_true(&supports_deactivate)) 440 if (gic == &gic_data[0] && static_key_true(&supports_deactivate))
447 mode = GIC_CPU_CTRL_EOImodeNS; 441 mode = GIC_CPU_CTRL_EOImodeNS;
448 442
449 /* 443 /*
@@ -1039,6 +1033,11 @@ static void __init __gic_init_bases(unsigned int gic_nr, int irq_start,
1039 gic->chip.name = kasprintf(GFP_KERNEL, "GIC-%d", gic_nr); 1033 gic->chip.name = kasprintf(GFP_KERNEL, "GIC-%d", gic_nr);
1040 } 1034 }
1041 1035
1036#ifdef CONFIG_SMP
1037 if (gic_nr == 0)
1038 gic->chip.irq_set_affinity = gic_set_affinity;
1039#endif
1040
1042#ifdef CONFIG_GIC_NON_BANKED 1041#ifdef CONFIG_GIC_NON_BANKED
1043 if (percpu_offset) { /* Frankein-GIC without banked registers... */ 1042 if (percpu_offset) { /* Frankein-GIC without banked registers... */
1044 unsigned int cpu; 1043 unsigned int cpu;
diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
index c22e2d40cb30..efe50845939d 100644
--- a/drivers/irqchip/irq-mxs.c
+++ b/drivers/irqchip/irq-mxs.c
@@ -241,6 +241,7 @@ static int __init asm9260_of_init(struct device_node *np,
241 writel(0, icoll_priv.intr + i); 241 writel(0, icoll_priv.intr + i);
242 242
243 icoll_add_domain(np, ASM9260_NUM_IRQS); 243 icoll_add_domain(np, ASM9260_NUM_IRQS);
244 set_handle_irq(icoll_handle_irq);
244 245
245 return 0; 246 return 0;
246} 247}
diff --git a/drivers/irqchip/irq-s3c24xx.c b/drivers/irqchip/irq-s3c24xx.c
index c71914e8f596..5dc5a760c723 100644
--- a/drivers/irqchip/irq-s3c24xx.c
+++ b/drivers/irqchip/irq-s3c24xx.c
@@ -605,7 +605,7 @@ err:
605 return ERR_PTR(ret); 605 return ERR_PTR(ret);
606} 606}
607 607
608static struct s3c_irq_data init_eint[32] = { 608static struct s3c_irq_data __maybe_unused init_eint[32] = {
609 { .type = S3C_IRQTYPE_NONE, }, /* reserved */ 609 { .type = S3C_IRQTYPE_NONE, }, /* reserved */
610 { .type = S3C_IRQTYPE_NONE, }, /* reserved */ 610 { .type = S3C_IRQTYPE_NONE, }, /* reserved */
611 { .type = S3C_IRQTYPE_NONE, }, /* reserved */ 611 { .type = S3C_IRQTYPE_NONE, }, /* reserved */
diff --git a/drivers/irqchip/irq-sun4i.c b/drivers/irqchip/irq-sun4i.c
index 0704362f4c82..376b28074e0d 100644
--- a/drivers/irqchip/irq-sun4i.c
+++ b/drivers/irqchip/irq-sun4i.c
@@ -22,7 +22,6 @@
22#include <linux/of_irq.h> 22#include <linux/of_irq.h>
23 23
24#include <asm/exception.h> 24#include <asm/exception.h>
25#include <asm/mach/irq.h>
26 25
27#define SUN4I_IRQ_VECTOR_REG 0x00 26#define SUN4I_IRQ_VECTOR_REG 0x00
28#define SUN4I_IRQ_PROTECTION_REG 0x08 27#define SUN4I_IRQ_PROTECTION_REG 0x08
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
index 2a506fe0c8a4..d1f8ab915b15 100644
--- a/drivers/isdn/gigaset/ser-gigaset.c
+++ b/drivers/isdn/gigaset/ser-gigaset.c
@@ -373,13 +373,7 @@ static void gigaset_freecshw(struct cardstate *cs)
373 373
374static void gigaset_device_release(struct device *dev) 374static void gigaset_device_release(struct device *dev)
375{ 375{
376 struct cardstate *cs = dev_get_drvdata(dev); 376 kfree(container_of(dev, struct ser_cardstate, dev.dev));
377
378 if (!cs)
379 return;
380 dev_set_drvdata(dev, NULL);
381 kfree(cs->hw.ser);
382 cs->hw.ser = NULL;
383} 377}
384 378
385/* 379/*
@@ -408,7 +402,6 @@ static int gigaset_initcshw(struct cardstate *cs)
408 cs->hw.ser = NULL; 402 cs->hw.ser = NULL;
409 return rc; 403 return rc;
410 } 404 }
411 dev_set_drvdata(&cs->hw.ser->dev.dev, cs);
412 405
413 tasklet_init(&cs->write_tasklet, 406 tasklet_init(&cs->write_tasklet,
414 gigaset_modem_fill, (unsigned long) cs); 407 gigaset_modem_fill, (unsigned long) cs);
diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c
index 8e2944784e00..afde4edef9ae 100644
--- a/drivers/isdn/hardware/mISDN/netjet.c
+++ b/drivers/isdn/hardware/mISDN/netjet.c
@@ -392,7 +392,7 @@ read_dma(struct tiger_ch *bc, u32 idx, int cnt)
392 } 392 }
393 stat = bchannel_get_rxbuf(&bc->bch, cnt); 393 stat = bchannel_get_rxbuf(&bc->bch, cnt);
394 /* only transparent use the count here, HDLC overun is detected later */ 394 /* only transparent use the count here, HDLC overun is detected later */
395 if (stat == ENOMEM) { 395 if (stat == -ENOMEM) {
396 pr_warning("%s.B%d: No memory for %d bytes\n", 396 pr_warning("%s.B%d: No memory for %d bytes\n",
397 card->name, bc->bch.nr, cnt); 397 card->name, bc->bch.nr, cnt);
398 return; 398 return;
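bchannel_get_rxbuf() reports failure with a negative errno, so comparing against the positive ENOMEM never matched and the out-of-memory branch was dead code; the netjet fix flips the sign. A standalone reminder of the convention (the helper is a toy, not the mISDN API):

#include <errno.h>
#include <stdio.h>

/* Kernel-style helper: returns a byte count on success, -errno on failure. */
static int get_rxbuf(int have_memory, int cnt)
{
	return have_memory ? cnt : -ENOMEM;
}

int main(void)
{
	int stat = get_rxbuf(0, 128);

	if (stat == ENOMEM)	/* wrong: stat is negative, this never fires */
		printf("unreachable\n");
	if (stat == -ENOMEM)	/* right: matches the returned -errno */
		printf("no memory for 128 bytes\n");
	return 0;
}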
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 33224cb91c5b..9f6acd5d1d2e 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -572,11 +572,13 @@ int nvm_register(struct request_queue *q, char *disk_name,
572 } 572 }
573 } 573 }
574 574
575 ret = nvm_get_sysblock(dev, &dev->sb); 575 if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
576 if (!ret) 576 ret = nvm_get_sysblock(dev, &dev->sb);
577 pr_err("nvm: device not initialized.\n"); 577 if (!ret)
578 else if (ret < 0) 578 pr_err("nvm: device not initialized.\n");
579 pr_err("nvm: err (%d) on device initialization\n", ret); 579 else if (ret < 0)
580 pr_err("nvm: err (%d) on device initialization\n", ret);
581 }
580 582
581 /* register device with a supported media manager */ 583 /* register device with a supported media manager */
582 down_write(&nvm_lock); 584 down_write(&nvm_lock);
@@ -1055,9 +1057,11 @@ static long __nvm_ioctl_dev_init(struct nvm_ioctl_dev_init *init)
1055 strncpy(info.mmtype, init->mmtype, NVM_MMTYPE_LEN); 1057 strncpy(info.mmtype, init->mmtype, NVM_MMTYPE_LEN);
1056 info.fs_ppa.ppa = -1; 1058 info.fs_ppa.ppa = -1;
1057 1059
1058 ret = nvm_init_sysblock(dev, &info); 1060 if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
1059 if (ret) 1061 ret = nvm_init_sysblock(dev, &info);
1060 return ret; 1062 if (ret)
1063 return ret;
1064 }
1061 1065
1062 memcpy(&dev->sb, &info, sizeof(struct nvm_sb_info)); 1066 memcpy(&dev->sb, &info, sizeof(struct nvm_sb_info));
1063 1067
@@ -1117,7 +1121,10 @@ static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
1117 dev->mt = NULL; 1121 dev->mt = NULL;
1118 } 1122 }
1119 1123
1120 return nvm_dev_factory(dev, fact.flags); 1124 if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT)
1125 return nvm_dev_factory(dev, fact.flags);
1126
1127 return 0;
1121} 1128}
1122 1129
1123static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg) 1130static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index d8c75958ced3..307db1ea22de 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -300,8 +300,10 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
300 } 300 }
301 301
302 page = mempool_alloc(rrpc->page_pool, GFP_NOIO); 302 page = mempool_alloc(rrpc->page_pool, GFP_NOIO);
303 if (!page) 303 if (!page) {
304 bio_put(bio);
304 return -ENOMEM; 305 return -ENOMEM;
306 }
305 307
306 while ((slot = find_first_zero_bit(rblk->invalid_pages, 308 while ((slot = find_first_zero_bit(rblk->invalid_pages,
307 nr_pgs_per_blk)) < nr_pgs_per_blk) { 309 nr_pgs_per_blk)) < nr_pgs_per_blk) {
diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
index ef13ac7700c8..f7b37336353f 100644
--- a/drivers/lightnvm/rrpc.h
+++ b/drivers/lightnvm/rrpc.h
@@ -174,8 +174,7 @@ static inline sector_t rrpc_get_sector(sector_t laddr)
174static inline int request_intersects(struct rrpc_inflight_rq *r, 174static inline int request_intersects(struct rrpc_inflight_rq *r,
175 sector_t laddr_start, sector_t laddr_end) 175 sector_t laddr_start, sector_t laddr_end)
176{ 176{
177 return (laddr_end >= r->l_start && laddr_end <= r->l_end) && 177 return (laddr_end >= r->l_start) && (laddr_start <= r->l_end);
178 (laddr_start >= r->l_start && laddr_start <= r->l_end);
179} 178}
180 179
181static int __rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr, 180static int __rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
@@ -184,6 +183,8 @@ static int __rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
184 sector_t laddr_end = laddr + pages - 1; 183 sector_t laddr_end = laddr + pages - 1;
185 struct rrpc_inflight_rq *rtmp; 184 struct rrpc_inflight_rq *rtmp;
186 185
186 WARN_ON(irqs_disabled());
187
187 spin_lock_irq(&rrpc->inflights.lock); 188 spin_lock_irq(&rrpc->inflights.lock);
188 list_for_each_entry(rtmp, &rrpc->inflights.reqs, list) { 189 list_for_each_entry(rtmp, &rrpc->inflights.reqs, list) {
189 if (unlikely(request_intersects(rtmp, laddr, laddr_end))) { 190 if (unlikely(request_intersects(rtmp, laddr, laddr_end))) {
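The old request_intersects() required both ends of the new request to fall inside the in-flight range, so partially overlapping requests slipped past the lock. Two closed intervals [a, b] and [c, d] overlap iff b >= c and a <= d, which is exactly what the replacement checks. A standalone test of the corrected predicate:

#include <assert.h>
#include <stdio.h>

typedef unsigned long long sector_t;

/* Closed intervals [l_start, l_end] and [laddr_start, laddr_end] overlap
 * iff each one starts no later than the other one ends. */
static int request_intersects(sector_t l_start, sector_t l_end,
			      sector_t laddr_start, sector_t laddr_end)
{
	return (laddr_end >= l_start) && (laddr_start <= l_end);
}

int main(void)
{
	/* Partial overlap: new request [5, 15] vs in-flight [10, 20]. */
	assert(request_intersects(10, 20, 5, 15));
	/* Containment and disjoint cases. */
	assert(request_intersects(10, 20, 12, 14));
	assert(!request_intersects(10, 20, 21, 30));
	printf("overlap predicate ok\n");
	return 0;
}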
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 546d05f4358a..b2bbe8659bed 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -81,6 +81,7 @@ config STI_MBOX
81config MAILBOX_TEST 81config MAILBOX_TEST
82 tristate "Mailbox Test Client" 82 tristate "Mailbox Test Client"
83 depends on OF 83 depends on OF
84 depends on HAS_IOMEM
84 help 85 help
85 Test client to help with testing new Controller driver 86 Test client to help with testing new Controller driver
86 implementations. 87 implementations.
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index 45d85aea9955..8f779a1ec99c 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -81,16 +81,10 @@ static struct mbox_controller pcc_mbox_ctrl = {};
81 */ 81 */
82static struct mbox_chan *get_pcc_channel(int id) 82static struct mbox_chan *get_pcc_channel(int id)
83{ 83{
84 struct mbox_chan *pcc_chan;
85
86 if (id < 0 || id > pcc_mbox_ctrl.num_chans) 84 if (id < 0 || id > pcc_mbox_ctrl.num_chans)
87 return ERR_PTR(-ENOENT); 85 return ERR_PTR(-ENOENT);
88 86
89 pcc_chan = (struct mbox_chan *) 87 return &pcc_mbox_channels[id];
90 (unsigned long) pcc_mbox_channels +
91 (id * sizeof(*pcc_chan));
92
93 return pcc_chan;
94} 88}
95 89
96/** 90/**
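get_pcc_channel() used to cast the channel array to a pointer type and then add id * sizeof(*pcc_chan); because that addition happens in pointer arithmetic, the byte count appears to get scaled by the element size a second time, which is why the fix collapses the whole thing to &pcc_mbox_channels[id]. A standalone illustration of the two spellings (toy struct, not the mailbox types):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct chan { int id; void *priv; };

static struct chan channels[64];

int main(void)
{
	int id = 3;

	/* Cast-then-add: the sizeof() offset is scaled again by pointer arithmetic. */
	struct chan *scaled_twice = (struct chan *)(uintptr_t)channels +
				    id * sizeof(struct chan);
	struct chan *by_index = &channels[id];

	assert(scaled_twice != by_index);
	assert(scaled_twice == &channels[id * sizeof(struct chan)]);
	printf("&channels[%d] is the element the caller actually wants\n", id);
	return 0;
}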
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 4f22e919787a..d80cce499a56 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -210,10 +210,6 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
210 struct block_device *bdev; 210 struct block_device *bdev;
211 struct mddev *mddev = bitmap->mddev; 211 struct mddev *mddev = bitmap->mddev;
212 struct bitmap_storage *store = &bitmap->storage; 212 struct bitmap_storage *store = &bitmap->storage;
213 int node_offset = 0;
214
215 if (mddev_is_clustered(bitmap->mddev))
216 node_offset = bitmap->cluster_slot * store->file_pages;
217 213
218 while ((rdev = next_active_rdev(rdev, mddev)) != NULL) { 214 while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
219 int size = PAGE_SIZE; 215 int size = PAGE_SIZE;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 5df40480228b..dd834927bc66 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1191,6 +1191,8 @@ static void dm_unprep_request(struct request *rq)
1191 1191
1192 if (clone) 1192 if (clone)
1193 free_rq_clone(clone); 1193 free_rq_clone(clone);
1194 else if (!tio->md->queue->mq_ops)
1195 free_rq_tio(tio);
1194} 1196}
1195 1197
1196/* 1198/*
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 4a8e15058e8b..685aa2d77e25 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -170,7 +170,7 @@ static void add_sector(struct faulty_conf *conf, sector_t start, int mode)
170 conf->nfaults = n+1; 170 conf->nfaults = n+1;
171} 171}
172 172
173static void make_request(struct mddev *mddev, struct bio *bio) 173static void faulty_make_request(struct mddev *mddev, struct bio *bio)
174{ 174{
175 struct faulty_conf *conf = mddev->private; 175 struct faulty_conf *conf = mddev->private;
176 int failit = 0; 176 int failit = 0;
@@ -226,7 +226,7 @@ static void make_request(struct mddev *mddev, struct bio *bio)
226 generic_make_request(bio); 226 generic_make_request(bio);
227} 227}
228 228
229static void status(struct seq_file *seq, struct mddev *mddev) 229static void faulty_status(struct seq_file *seq, struct mddev *mddev)
230{ 230{
231 struct faulty_conf *conf = mddev->private; 231 struct faulty_conf *conf = mddev->private;
232 int n; 232 int n;
@@ -259,7 +259,7 @@ static void status(struct seq_file *seq, struct mddev *mddev)
259} 259}
260 260
261 261
262static int reshape(struct mddev *mddev) 262static int faulty_reshape(struct mddev *mddev)
263{ 263{
264 int mode = mddev->new_layout & ModeMask; 264 int mode = mddev->new_layout & ModeMask;
265 int count = mddev->new_layout >> ModeShift; 265 int count = mddev->new_layout >> ModeShift;
@@ -299,7 +299,7 @@ static sector_t faulty_size(struct mddev *mddev, sector_t sectors, int raid_disk
299 return sectors; 299 return sectors;
300} 300}
301 301
302static int run(struct mddev *mddev) 302static int faulty_run(struct mddev *mddev)
303{ 303{
304 struct md_rdev *rdev; 304 struct md_rdev *rdev;
305 int i; 305 int i;
@@ -327,7 +327,7 @@ static int run(struct mddev *mddev)
327 md_set_array_sectors(mddev, faulty_size(mddev, 0, 0)); 327 md_set_array_sectors(mddev, faulty_size(mddev, 0, 0));
328 mddev->private = conf; 328 mddev->private = conf;
329 329
330 reshape(mddev); 330 faulty_reshape(mddev);
331 331
332 return 0; 332 return 0;
333} 333}
@@ -344,11 +344,11 @@ static struct md_personality faulty_personality =
344 .name = "faulty", 344 .name = "faulty",
345 .level = LEVEL_FAULTY, 345 .level = LEVEL_FAULTY,
346 .owner = THIS_MODULE, 346 .owner = THIS_MODULE,
347 .make_request = make_request, 347 .make_request = faulty_make_request,
348 .run = run, 348 .run = faulty_run,
349 .free = faulty_free, 349 .free = faulty_free,
350 .status = status, 350 .status = faulty_status,
351 .check_reshape = reshape, 351 .check_reshape = faulty_reshape,
352 .size = faulty_size, 352 .size = faulty_size,
353}; 353};
354 354
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 0ded8e97751d..dd97d4245822 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -293,6 +293,7 @@ static void recover_bitmaps(struct md_thread *thread)
293dlm_unlock: 293dlm_unlock:
294 dlm_unlock_sync(bm_lockres); 294 dlm_unlock_sync(bm_lockres);
295clear_bit: 295clear_bit:
296 lockres_free(bm_lockres);
296 clear_bit(slot, &cinfo->recovery_map); 297 clear_bit(slot, &cinfo->recovery_map);
297 } 298 }
298} 299}
@@ -682,8 +683,10 @@ static int gather_all_resync_info(struct mddev *mddev, int total_slots)
682 bm_lockres = lockres_init(mddev, str, NULL, 1); 683 bm_lockres = lockres_init(mddev, str, NULL, 1);
683 if (!bm_lockres) 684 if (!bm_lockres)
684 return -ENOMEM; 685 return -ENOMEM;
685 if (i == (cinfo->slot_number - 1)) 686 if (i == (cinfo->slot_number - 1)) {
687 lockres_free(bm_lockres);
686 continue; 688 continue;
689 }
687 690
688 bm_lockres->flags |= DLM_LKF_NOQUEUE; 691 bm_lockres->flags |= DLM_LKF_NOQUEUE;
689 ret = dlm_lock_sync(bm_lockres, DLM_LOCK_PW); 692 ret = dlm_lock_sync(bm_lockres, DLM_LOCK_PW);
@@ -858,6 +861,7 @@ static int leave(struct mddev *mddev)
858 lockres_free(cinfo->token_lockres); 861 lockres_free(cinfo->token_lockres);
859 lockres_free(cinfo->ack_lockres); 862 lockres_free(cinfo->ack_lockres);
860 lockres_free(cinfo->no_new_dev_lockres); 863 lockres_free(cinfo->no_new_dev_lockres);
864 lockres_free(cinfo->resync_lockres);
861 lockres_free(cinfo->bitmap_lockres); 865 lockres_free(cinfo->bitmap_lockres);
862 unlock_all_bitmaps(mddev); 866 unlock_all_bitmaps(mddev);
863 dlm_release_lockspace(cinfo->lockspace, 2); 867 dlm_release_lockspace(cinfo->lockspace, 2);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index c4b913409226..4e3843f7d245 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1044,7 +1044,7 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
1044 kfree(plug); 1044 kfree(plug);
1045} 1045}
1046 1046
1047static void make_request(struct mddev *mddev, struct bio * bio) 1047static void raid1_make_request(struct mddev *mddev, struct bio * bio)
1048{ 1048{
1049 struct r1conf *conf = mddev->private; 1049 struct r1conf *conf = mddev->private;
1050 struct raid1_info *mirror; 1050 struct raid1_info *mirror;
@@ -1422,7 +1422,7 @@ read_again:
1422 wake_up(&conf->wait_barrier); 1422 wake_up(&conf->wait_barrier);
1423} 1423}
1424 1424
1425static void status(struct seq_file *seq, struct mddev *mddev) 1425static void raid1_status(struct seq_file *seq, struct mddev *mddev)
1426{ 1426{
1427 struct r1conf *conf = mddev->private; 1427 struct r1conf *conf = mddev->private;
1428 int i; 1428 int i;
@@ -1439,7 +1439,7 @@ static void status(struct seq_file *seq, struct mddev *mddev)
1439 seq_printf(seq, "]"); 1439 seq_printf(seq, "]");
1440} 1440}
1441 1441
1442static void error(struct mddev *mddev, struct md_rdev *rdev) 1442static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
1443{ 1443{
1444 char b[BDEVNAME_SIZE]; 1444 char b[BDEVNAME_SIZE];
1445 struct r1conf *conf = mddev->private; 1445 struct r1conf *conf = mddev->private;
@@ -2472,7 +2472,8 @@ static int init_resync(struct r1conf *conf)
2472 * that can be installed to exclude normal IO requests. 2472 * that can be installed to exclude normal IO requests.
2473 */ 2473 */
2474 2474
2475static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped) 2475static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
2476 int *skipped)
2476{ 2477{
2477 struct r1conf *conf = mddev->private; 2478 struct r1conf *conf = mddev->private;
2478 struct r1bio *r1_bio; 2479 struct r1bio *r1_bio;
@@ -2890,7 +2891,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
2890} 2891}
2891 2892
2892static void raid1_free(struct mddev *mddev, void *priv); 2893static void raid1_free(struct mddev *mddev, void *priv);
2893static int run(struct mddev *mddev) 2894static int raid1_run(struct mddev *mddev)
2894{ 2895{
2895 struct r1conf *conf; 2896 struct r1conf *conf;
2896 int i; 2897 int i;
@@ -3170,15 +3171,15 @@ static struct md_personality raid1_personality =
3170 .name = "raid1", 3171 .name = "raid1",
3171 .level = 1, 3172 .level = 1,
3172 .owner = THIS_MODULE, 3173 .owner = THIS_MODULE,
3173 .make_request = make_request, 3174 .make_request = raid1_make_request,
3174 .run = run, 3175 .run = raid1_run,
3175 .free = raid1_free, 3176 .free = raid1_free,
3176 .status = status, 3177 .status = raid1_status,
3177 .error_handler = error, 3178 .error_handler = raid1_error,
3178 .hot_add_disk = raid1_add_disk, 3179 .hot_add_disk = raid1_add_disk,
3179 .hot_remove_disk= raid1_remove_disk, 3180 .hot_remove_disk= raid1_remove_disk,
3180 .spare_active = raid1_spare_active, 3181 .spare_active = raid1_spare_active,
3181 .sync_request = sync_request, 3182 .sync_request = raid1_sync_request,
3182 .resize = raid1_resize, 3183 .resize = raid1_resize,
3183 .size = raid1_size, 3184 .size = raid1_size,
3184 .check_reshape = raid1_reshape, 3185 .check_reshape = raid1_reshape,
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index ce959b4ae4df..1c1447dd3417 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1442,7 +1442,7 @@ retry_write:
1442 one_write_done(r10_bio); 1442 one_write_done(r10_bio);
1443} 1443}
1444 1444
1445static void make_request(struct mddev *mddev, struct bio *bio) 1445static void raid10_make_request(struct mddev *mddev, struct bio *bio)
1446{ 1446{
1447 struct r10conf *conf = mddev->private; 1447 struct r10conf *conf = mddev->private;
1448 sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask); 1448 sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
@@ -1484,7 +1484,7 @@ static void make_request(struct mddev *mddev, struct bio *bio)
1484 wake_up(&conf->wait_barrier); 1484 wake_up(&conf->wait_barrier);
1485} 1485}
1486 1486
1487static void status(struct seq_file *seq, struct mddev *mddev) 1487static void raid10_status(struct seq_file *seq, struct mddev *mddev)
1488{ 1488{
1489 struct r10conf *conf = mddev->private; 1489 struct r10conf *conf = mddev->private;
1490 int i; 1490 int i;
@@ -1562,7 +1562,7 @@ static int enough(struct r10conf *conf, int ignore)
1562 _enough(conf, 1, ignore); 1562 _enough(conf, 1, ignore);
1563} 1563}
1564 1564
1565static void error(struct mddev *mddev, struct md_rdev *rdev) 1565static void raid10_error(struct mddev *mddev, struct md_rdev *rdev)
1566{ 1566{
1567 char b[BDEVNAME_SIZE]; 1567 char b[BDEVNAME_SIZE];
1568 struct r10conf *conf = mddev->private; 1568 struct r10conf *conf = mddev->private;
@@ -2802,7 +2802,7 @@ static int init_resync(struct r10conf *conf)
2802 * 2802 *
2803 */ 2803 */
2804 2804
2805static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, 2805static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
2806 int *skipped) 2806 int *skipped)
2807{ 2807{
2808 struct r10conf *conf = mddev->private; 2808 struct r10conf *conf = mddev->private;
@@ -3523,7 +3523,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
3523 return ERR_PTR(err); 3523 return ERR_PTR(err);
3524} 3524}
3525 3525
3526static int run(struct mddev *mddev) 3526static int raid10_run(struct mddev *mddev)
3527{ 3527{
3528 struct r10conf *conf; 3528 struct r10conf *conf;
3529 int i, disk_idx, chunk_size; 3529 int i, disk_idx, chunk_size;
@@ -4617,15 +4617,15 @@ static struct md_personality raid10_personality =
4617 .name = "raid10", 4617 .name = "raid10",
4618 .level = 10, 4618 .level = 10,
4619 .owner = THIS_MODULE, 4619 .owner = THIS_MODULE,
4620 .make_request = make_request, 4620 .make_request = raid10_make_request,
4621 .run = run, 4621 .run = raid10_run,
4622 .free = raid10_free, 4622 .free = raid10_free,
4623 .status = status, 4623 .status = raid10_status,
4624 .error_handler = error, 4624 .error_handler = raid10_error,
4625 .hot_add_disk = raid10_add_disk, 4625 .hot_add_disk = raid10_add_disk,
4626 .hot_remove_disk= raid10_remove_disk, 4626 .hot_remove_disk= raid10_remove_disk,
4627 .spare_active = raid10_spare_active, 4627 .spare_active = raid10_spare_active,
4628 .sync_request = sync_request, 4628 .sync_request = raid10_sync_request,
4629 .quiesce = raid10_quiesce, 4629 .quiesce = raid10_quiesce,
4630 .size = raid10_size, 4630 .size = raid10_size,
4631 .resize = raid10_resize, 4631 .resize = raid10_resize,
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index a086014dcd49..b4f02c9959f2 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2496,7 +2496,7 @@ static void raid5_build_block(struct stripe_head *sh, int i, int previous)
2496 dev->sector = raid5_compute_blocknr(sh, i, previous); 2496 dev->sector = raid5_compute_blocknr(sh, i, previous);
2497} 2497}
2498 2498
2499static void error(struct mddev *mddev, struct md_rdev *rdev) 2499static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
2500{ 2500{
2501 char b[BDEVNAME_SIZE]; 2501 char b[BDEVNAME_SIZE];
2502 struct r5conf *conf = mddev->private; 2502 struct r5conf *conf = mddev->private;
@@ -2958,7 +2958,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
2958 * If several bio share a stripe. The bio bi_phys_segments acts as a 2958 * If several bio share a stripe. The bio bi_phys_segments acts as a
2959 * reference count to avoid race. The reference count should already be 2959 * reference count to avoid race. The reference count should already be
2960 * increased before this function is called (for example, in 2960 * increased before this function is called (for example, in
2961 * make_request()), so other bio sharing this stripe will not free the 2961 * raid5_make_request()), so other bio sharing this stripe will not free the
2962 * stripe. If a stripe is owned by one stripe, the stripe lock will 2962 * stripe. If a stripe is owned by one stripe, the stripe lock will
2963 * protect it. 2963 * protect it.
2964 */ 2964 */
@@ -5135,7 +5135,7 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
5135 } 5135 }
5136} 5136}
5137 5137
5138static void make_request(struct mddev *mddev, struct bio * bi) 5138static void raid5_make_request(struct mddev *mddev, struct bio * bi)
5139{ 5139{
5140 struct r5conf *conf = mddev->private; 5140 struct r5conf *conf = mddev->private;
5141 int dd_idx; 5141 int dd_idx;
@@ -5225,7 +5225,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
5225 new_sector = raid5_compute_sector(conf, logical_sector, 5225 new_sector = raid5_compute_sector(conf, logical_sector,
5226 previous, 5226 previous,
5227 &dd_idx, NULL); 5227 &dd_idx, NULL);
5228 pr_debug("raid456: make_request, sector %llu logical %llu\n", 5228 pr_debug("raid456: raid5_make_request, sector %llu logical %llu\n",
5229 (unsigned long long)new_sector, 5229 (unsigned long long)new_sector,
5230 (unsigned long long)logical_sector); 5230 (unsigned long long)logical_sector);
5231 5231
@@ -5575,7 +5575,8 @@ ret:
5575 return retn; 5575 return retn;
5576} 5576}
5577 5577
5578static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped) 5578static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_nr,
5579 int *skipped)
5579{ 5580{
5580 struct r5conf *conf = mddev->private; 5581 struct r5conf *conf = mddev->private;
5581 struct stripe_head *sh; 5582 struct stripe_head *sh;
@@ -6674,7 +6675,7 @@ static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded
6674 return 0; 6675 return 0;
6675} 6676}
6676 6677
6677static int run(struct mddev *mddev) 6678static int raid5_run(struct mddev *mddev)
6678{ 6679{
6679 struct r5conf *conf; 6680 struct r5conf *conf;
6680 int working_disks = 0; 6681 int working_disks = 0;
@@ -7048,7 +7049,7 @@ static void raid5_free(struct mddev *mddev, void *priv)
7048 mddev->to_remove = &raid5_attrs_group; 7049 mddev->to_remove = &raid5_attrs_group;
7049} 7050}
7050 7051
7051static void status(struct seq_file *seq, struct mddev *mddev) 7052static void raid5_status(struct seq_file *seq, struct mddev *mddev)
7052{ 7053{
7053 struct r5conf *conf = mddev->private; 7054 struct r5conf *conf = mddev->private;
7054 int i; 7055 int i;
@@ -7864,15 +7865,15 @@ static struct md_personality raid6_personality =
7864 .name = "raid6", 7865 .name = "raid6",
7865 .level = 6, 7866 .level = 6,
7866 .owner = THIS_MODULE, 7867 .owner = THIS_MODULE,
7867 .make_request = make_request, 7868 .make_request = raid5_make_request,
7868 .run = run, 7869 .run = raid5_run,
7869 .free = raid5_free, 7870 .free = raid5_free,
7870 .status = status, 7871 .status = raid5_status,
7871 .error_handler = error, 7872 .error_handler = raid5_error,
7872 .hot_add_disk = raid5_add_disk, 7873 .hot_add_disk = raid5_add_disk,
7873 .hot_remove_disk= raid5_remove_disk, 7874 .hot_remove_disk= raid5_remove_disk,
7874 .spare_active = raid5_spare_active, 7875 .spare_active = raid5_spare_active,
7875 .sync_request = sync_request, 7876 .sync_request = raid5_sync_request,
7876 .resize = raid5_resize, 7877 .resize = raid5_resize,
7877 .size = raid5_size, 7878 .size = raid5_size,
7878 .check_reshape = raid6_check_reshape, 7879 .check_reshape = raid6_check_reshape,
@@ -7887,15 +7888,15 @@ static struct md_personality raid5_personality =
7887 .name = "raid5", 7888 .name = "raid5",
7888 .level = 5, 7889 .level = 5,
7889 .owner = THIS_MODULE, 7890 .owner = THIS_MODULE,
7890 .make_request = make_request, 7891 .make_request = raid5_make_request,
7891 .run = run, 7892 .run = raid5_run,
7892 .free = raid5_free, 7893 .free = raid5_free,
7893 .status = status, 7894 .status = raid5_status,
7894 .error_handler = error, 7895 .error_handler = raid5_error,
7895 .hot_add_disk = raid5_add_disk, 7896 .hot_add_disk = raid5_add_disk,
7896 .hot_remove_disk= raid5_remove_disk, 7897 .hot_remove_disk= raid5_remove_disk,
7897 .spare_active = raid5_spare_active, 7898 .spare_active = raid5_spare_active,
7898 .sync_request = sync_request, 7899 .sync_request = raid5_sync_request,
7899 .resize = raid5_resize, 7900 .resize = raid5_resize,
7900 .size = raid5_size, 7901 .size = raid5_size,
7901 .check_reshape = raid5_check_reshape, 7902 .check_reshape = raid5_check_reshape,
@@ -7911,15 +7912,15 @@ static struct md_personality raid4_personality =
7911 .name = "raid4", 7912 .name = "raid4",
7912 .level = 4, 7913 .level = 4,
7913 .owner = THIS_MODULE, 7914 .owner = THIS_MODULE,
7914 .make_request = make_request, 7915 .make_request = raid5_make_request,
7915 .run = run, 7916 .run = raid5_run,
7916 .free = raid5_free, 7917 .free = raid5_free,
7917 .status = status, 7918 .status = raid5_status,
7918 .error_handler = error, 7919 .error_handler = raid5_error,
7919 .hot_add_disk = raid5_add_disk, 7920 .hot_add_disk = raid5_add_disk,
7920 .hot_remove_disk= raid5_remove_disk, 7921 .hot_remove_disk= raid5_remove_disk,
7921 .spare_active = raid5_spare_active, 7922 .spare_active = raid5_spare_active,
7922 .sync_request = sync_request, 7923 .sync_request = raid5_sync_request,
7923 .resize = raid5_resize, 7924 .resize = raid5_resize,
7924 .size = raid5_size, 7925 .size = raid5_size,
7925 .check_reshape = raid5_check_reshape, 7926 .check_reshape = raid5_check_reshape,
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index e6e4bacb09ee..12099b09a9a7 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -2048,6 +2048,7 @@ int db8500_prcmu_config_hotmon(u8 low, u8 high)
2048 2048
2049 return 0; 2049 return 0;
2050} 2050}
2051EXPORT_SYMBOL_GPL(db8500_prcmu_config_hotmon);
2051 2052
2052static int config_hot_period(u16 val) 2053static int config_hot_period(u16 val)
2053{ 2054{
@@ -2074,11 +2075,13 @@ int db8500_prcmu_start_temp_sense(u16 cycles32k)
2074 2075
2075 return config_hot_period(cycles32k); 2076 return config_hot_period(cycles32k);
2076} 2077}
2078EXPORT_SYMBOL_GPL(db8500_prcmu_start_temp_sense);
2077 2079
2078int db8500_prcmu_stop_temp_sense(void) 2080int db8500_prcmu_stop_temp_sense(void)
2079{ 2081{
2080 return config_hot_period(0xFFFF); 2082 return config_hot_period(0xFFFF);
2081} 2083}
2084EXPORT_SYMBOL_GPL(db8500_prcmu_stop_temp_sense);
2082 2085
2083static int prcmu_a9wdog(u8 cmd, u8 d0, u8 d1, u8 d2, u8 d3) 2086static int prcmu_a9wdog(u8 cmd, u8 d0, u8 d1, u8 d2, u8 d3)
2084{ 2087{
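
Note: the hunk above only adds EXPORT_SYMBOL_GPL() markers so the PRCMU thermal helpers become callable from a separately built GPL module. A minimal sketch of that pattern, with a hypothetical helper name (not the driver's), assuming a kernel build environment:

    /* Sketch only: exporting a helper so other GPL-licensed modules can call it. */
    #include <linux/module.h>
    #include <linux/types.h>

    /* example_config_hotmon() is a hypothetical stand-in, not the PRCMU API. */
    int example_config_hotmon(u8 low, u8 high)
    {
            /* ... issue the request to firmware here ... */
            return 0;
    }
    /* Without this line the symbol stays invisible to modular users. */
    EXPORT_SYMBOL_GPL(example_config_hotmon);

    MODULE_LICENSE("GPL");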
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 4c1903f781fc..0c6c17a1c59e 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -415,7 +415,7 @@ static int cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
415 delta = mftb() - psl_tb; 415 delta = mftb() - psl_tb;
416 if (delta < 0) 416 if (delta < 0)
417 delta = -delta; 417 delta = -delta;
418 } while (cputime_to_usecs(delta) > 16); 418 } while (tb_to_ns(delta) > 16000);
419 419
420 return 0; 420 return 0;
421} 421}
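
Note: the cxl change compares the timebase delta in nanoseconds (tb_to_ns(delta) > 16000) instead of misusing cputime_to_usecs(). The conversion is just ticks scaled by the timebase frequency; a standalone sketch (the 512 MHz frequency is an assumed example, not taken from the driver):

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ULL

    /* Convert timebase ticks to nanoseconds for a given timebase frequency.
     * For very large tick counts a wider intermediate type would be needed. */
    static uint64_t ticks_to_ns(uint64_t ticks, uint64_t tb_freq_hz)
    {
            return ticks * NSEC_PER_SEC / tb_freq_hz;
    }

    int main(void)
    {
            uint64_t tb_freq = 512000000ULL;   /* assumed 512 MHz timebase */
            uint64_t delta = 9000;             /* example tick delta */

            printf("delta = %llu ns (limit 16000 ns)\n",
                   (unsigned long long)ticks_to_ns(delta, tb_freq));
            return 0;
    }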
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 677d0362f334..80f9afcb1382 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -458,7 +458,11 @@ static int mei_ioctl_client_notify_request(struct file *file, u32 request)
458{ 458{
459 struct mei_cl *cl = file->private_data; 459 struct mei_cl *cl = file->private_data;
460 460
461 return mei_cl_notify_request(cl, file, request); 461 if (request != MEI_HBM_NOTIFICATION_START &&
462 request != MEI_HBM_NOTIFICATION_STOP)
463 return -EINVAL;
464
465 return mei_cl_notify_request(cl, file, (u8)request);
462} 466}
463 467
464/** 468/**
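
Note: the MEI change rejects any notification request other than start/stop before narrowing the u32 from user space to the u8 the client layer expects. The same validate-then-narrow pattern in a standalone sketch (the constant values are illustrative, not the real MEI_HBM_NOTIFICATION_* ones):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative values; the real HBM constants differ. */
    #define NOTIFICATION_STOP  0u
    #define NOTIFICATION_START 1u

    static int notify_request(uint32_t request)
    {
            /* Reject anything but the two known commands before truncating to u8. */
            if (request != NOTIFICATION_START && request != NOTIFICATION_STOP)
                    return -EINVAL;

            uint8_t cmd = (uint8_t)request;   /* now provably 0 or 1 */
            printf("dispatching command %u\n", cmd);
            return 0;
    }

    int main(void)
    {
            printf("%d\n", notify_request(1));       /* ok */
            printf("%d\n", notify_request(0x100));   /* -EINVAL, would have truncated to 0 */
            return 0;
    }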
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 5914263090fc..fe207e542032 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -47,13 +47,10 @@
47#include "queue.h" 47#include "queue.h"
48 48
49MODULE_ALIAS("mmc:block"); 49MODULE_ALIAS("mmc:block");
50
51#ifdef KERNEL
52#ifdef MODULE_PARAM_PREFIX 50#ifdef MODULE_PARAM_PREFIX
53#undef MODULE_PARAM_PREFIX 51#undef MODULE_PARAM_PREFIX
54#endif 52#endif
55#define MODULE_PARAM_PREFIX "mmcblk." 53#define MODULE_PARAM_PREFIX "mmcblk."
56#endif
57 54
58#define INAND_CMD38_ARG_EXT_CSD 113 55#define INAND_CMD38_ARG_EXT_CSD 113
59#define INAND_CMD38_ARG_ERASE 0x00 56#define INAND_CMD38_ARG_ERASE 0x00
@@ -655,8 +652,10 @@ static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
655 } 652 }
656 653
657 md = mmc_blk_get(bdev->bd_disk); 654 md = mmc_blk_get(bdev->bd_disk);
658 if (!md) 655 if (!md) {
656 err = -EINVAL;
659 goto cmd_err; 657 goto cmd_err;
658 }
660 659
661 card = md->queue.card; 660 card = md->queue.card;
662 if (IS_ERR(card)) { 661 if (IS_ERR(card)) {
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 1c1b45ef3faf..3446097a43c0 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -925,6 +925,10 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
925 925
926 dma_addr = dma_map_page(dma_dev, sg_page(sg), 0, 926 dma_addr = dma_map_page(dma_dev, sg_page(sg), 0,
927 PAGE_SIZE, dir); 927 PAGE_SIZE, dir);
928 if (dma_mapping_error(dma_dev, dma_addr)) {
929 data->error = -EFAULT;
930 break;
931 }
928 if (direction == DMA_TO_DEVICE) 932 if (direction == DMA_TO_DEVICE)
929 t->tx_dma = dma_addr + sg->offset; 933 t->tx_dma = dma_addr + sg->offset;
930 else 934 else
@@ -1393,10 +1397,12 @@ static int mmc_spi_probe(struct spi_device *spi)
1393 host->dma_dev = dev; 1397 host->dma_dev = dev;
1394 host->ones_dma = dma_map_single(dev, ones, 1398 host->ones_dma = dma_map_single(dev, ones,
1395 MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE); 1399 MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE);
1400 if (dma_mapping_error(dev, host->ones_dma))
1401 goto fail_ones_dma;
1396 host->data_dma = dma_map_single(dev, host->data, 1402 host->data_dma = dma_map_single(dev, host->data,
1397 sizeof(*host->data), DMA_BIDIRECTIONAL); 1403 sizeof(*host->data), DMA_BIDIRECTIONAL);
1398 1404 if (dma_mapping_error(dev, host->data_dma))
1399 /* REVISIT in theory those map operations can fail... */ 1405 goto fail_data_dma;
1400 1406
1401 dma_sync_single_for_cpu(host->dma_dev, 1407 dma_sync_single_for_cpu(host->dma_dev,
1402 host->data_dma, sizeof(*host->data), 1408 host->data_dma, sizeof(*host->data),
@@ -1462,6 +1468,11 @@ fail_glue_init:
1462 if (host->dma_dev) 1468 if (host->dma_dev)
1463 dma_unmap_single(host->dma_dev, host->data_dma, 1469 dma_unmap_single(host->dma_dev, host->data_dma,
1464 sizeof(*host->data), DMA_BIDIRECTIONAL); 1470 sizeof(*host->data), DMA_BIDIRECTIONAL);
1471fail_data_dma:
1472 if (host->dma_dev)
1473 dma_unmap_single(host->dma_dev, host->ones_dma,
1474 MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE);
1475fail_ones_dma:
1465 kfree(host->data); 1476 kfree(host->data);
1466 1477
1467fail_nobuf1: 1478fail_nobuf1:
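
Note: both mappings in mmc_spi_probe() are now checked with dma_mapping_error(), and failures unwind in reverse order through dedicated labels. A kernel-style sketch of that pattern (the function and parameters are hypothetical and it only compiles inside a kernel build):

    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    /* Hypothetical example: map two buffers, undoing only what succeeded. */
    static int example_map_two(struct device *dev, void *a, size_t alen,
                               void *b, size_t blen,
                               dma_addr_t *da, dma_addr_t *db)
    {
            *da = dma_map_single(dev, a, alen, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, *da))
                    return -ENOMEM;

            *db = dma_map_single(dev, b, blen, DMA_BIDIRECTIONAL);
            if (dma_mapping_error(dev, *db))
                    goto unmap_a;

            return 0;

    unmap_a:
            /* Unwind in reverse order of acquisition. */
            dma_unmap_single(dev, *da, alen, DMA_TO_DEVICE);
            return -ENOMEM;
    }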
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index b6639ea0bf18..f6e4d9718035 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -2232,6 +2232,7 @@ err_irq:
2232 dma_release_channel(host->tx_chan); 2232 dma_release_channel(host->tx_chan);
2233 if (host->rx_chan) 2233 if (host->rx_chan)
2234 dma_release_channel(host->rx_chan); 2234 dma_release_channel(host->rx_chan);
2235 pm_runtime_dont_use_autosuspend(host->dev);
2235 pm_runtime_put_sync(host->dev); 2236 pm_runtime_put_sync(host->dev);
2236 pm_runtime_disable(host->dev); 2237 pm_runtime_disable(host->dev);
2237 if (host->dbclk) 2238 if (host->dbclk)
@@ -2253,6 +2254,7 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
2253 dma_release_channel(host->tx_chan); 2254 dma_release_channel(host->tx_chan);
2254 dma_release_channel(host->rx_chan); 2255 dma_release_channel(host->rx_chan);
2255 2256
2257 pm_runtime_dont_use_autosuspend(host->dev);
2256 pm_runtime_put_sync(host->dev); 2258 pm_runtime_put_sync(host->dev);
2257 pm_runtime_disable(host->dev); 2259 pm_runtime_disable(host->dev);
2258 device_init_wakeup(&pdev->dev, false); 2260 device_init_wakeup(&pdev->dev, false);
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index ce08896b9d69..da824772bbb4 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -86,7 +86,7 @@ struct pxamci_host {
86static inline void pxamci_init_ocr(struct pxamci_host *host) 86static inline void pxamci_init_ocr(struct pxamci_host *host)
87{ 87{
88#ifdef CONFIG_REGULATOR 88#ifdef CONFIG_REGULATOR
89 host->vcc = regulator_get_optional(mmc_dev(host->mmc), "vmmc"); 89 host->vcc = devm_regulator_get_optional(mmc_dev(host->mmc), "vmmc");
90 90
91 if (IS_ERR(host->vcc)) 91 if (IS_ERR(host->vcc))
92 host->vcc = NULL; 92 host->vcc = NULL;
@@ -654,12 +654,8 @@ static int pxamci_probe(struct platform_device *pdev)
654 654
655 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 655 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
656 irq = platform_get_irq(pdev, 0); 656 irq = platform_get_irq(pdev, 0);
657 if (!r || irq < 0) 657 if (irq < 0)
658 return -ENXIO; 658 return irq;
659
660 r = request_mem_region(r->start, SZ_4K, DRIVER_NAME);
661 if (!r)
662 return -EBUSY;
663 659
664 mmc = mmc_alloc_host(sizeof(struct pxamci_host), &pdev->dev); 660 mmc = mmc_alloc_host(sizeof(struct pxamci_host), &pdev->dev);
665 if (!mmc) { 661 if (!mmc) {
@@ -695,7 +691,7 @@ static int pxamci_probe(struct platform_device *pdev)
695 host->pdata = pdev->dev.platform_data; 691 host->pdata = pdev->dev.platform_data;
696 host->clkrt = CLKRT_OFF; 692 host->clkrt = CLKRT_OFF;
697 693
698 host->clk = clk_get(&pdev->dev, NULL); 694 host->clk = devm_clk_get(&pdev->dev, NULL);
699 if (IS_ERR(host->clk)) { 695 if (IS_ERR(host->clk)) {
700 ret = PTR_ERR(host->clk); 696 ret = PTR_ERR(host->clk);
701 host->clk = NULL; 697 host->clk = NULL;
@@ -727,9 +723,9 @@ static int pxamci_probe(struct platform_device *pdev)
727 host->irq = irq; 723 host->irq = irq;
728 host->imask = MMC_I_MASK_ALL; 724 host->imask = MMC_I_MASK_ALL;
729 725
730 host->base = ioremap(r->start, SZ_4K); 726 host->base = devm_ioremap_resource(&pdev->dev, r);
731 if (!host->base) { 727 if (IS_ERR(host->base)) {
732 ret = -ENOMEM; 728 ret = PTR_ERR(host->base);
733 goto out; 729 goto out;
734 } 730 }
735 731
@@ -742,7 +738,8 @@ static int pxamci_probe(struct platform_device *pdev)
742 writel(64, host->base + MMC_RESTO); 738 writel(64, host->base + MMC_RESTO);
743 writel(host->imask, host->base + MMC_I_MASK); 739 writel(host->imask, host->base + MMC_I_MASK);
744 740
745 ret = request_irq(host->irq, pxamci_irq, 0, DRIVER_NAME, host); 741 ret = devm_request_irq(&pdev->dev, host->irq, pxamci_irq, 0,
742 DRIVER_NAME, host);
746 if (ret) 743 if (ret)
747 goto out; 744 goto out;
748 745
@@ -804,7 +801,7 @@ static int pxamci_probe(struct platform_device *pdev)
804 dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro); 801 dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro);
805 goto out; 802 goto out;
806 } else { 803 } else {
807 mmc->caps |= host->pdata->gpio_card_ro_invert ? 804 mmc->caps2 |= host->pdata->gpio_card_ro_invert ?
808 0 : MMC_CAP2_RO_ACTIVE_HIGH; 805 0 : MMC_CAP2_RO_ACTIVE_HIGH;
809 } 806 }
810 807
@@ -833,14 +830,9 @@ out:
833 dma_release_channel(host->dma_chan_rx); 830 dma_release_channel(host->dma_chan_rx);
834 if (host->dma_chan_tx) 831 if (host->dma_chan_tx)
835 dma_release_channel(host->dma_chan_tx); 832 dma_release_channel(host->dma_chan_tx);
836 if (host->base)
837 iounmap(host->base);
838 if (host->clk)
839 clk_put(host->clk);
840 } 833 }
841 if (mmc) 834 if (mmc)
842 mmc_free_host(mmc); 835 mmc_free_host(mmc);
843 release_resource(r);
844 return ret; 836 return ret;
845} 837}
846 838
@@ -859,9 +851,6 @@ static int pxamci_remove(struct platform_device *pdev)
859 gpio_ro = host->pdata->gpio_card_ro; 851 gpio_ro = host->pdata->gpio_card_ro;
860 gpio_power = host->pdata->gpio_power; 852 gpio_power = host->pdata->gpio_power;
861 } 853 }
862 if (host->vcc)
863 regulator_put(host->vcc);
864
865 if (host->pdata && host->pdata->exit) 854 if (host->pdata && host->pdata->exit)
866 host->pdata->exit(&pdev->dev, mmc); 855 host->pdata->exit(&pdev->dev, mmc);
867 856
@@ -870,16 +859,10 @@ static int pxamci_remove(struct platform_device *pdev)
870 END_CMD_RES|PRG_DONE|DATA_TRAN_DONE, 859 END_CMD_RES|PRG_DONE|DATA_TRAN_DONE,
871 host->base + MMC_I_MASK); 860 host->base + MMC_I_MASK);
872 861
873 free_irq(host->irq, host);
874 dmaengine_terminate_all(host->dma_chan_rx); 862 dmaengine_terminate_all(host->dma_chan_rx);
875 dmaengine_terminate_all(host->dma_chan_tx); 863 dmaengine_terminate_all(host->dma_chan_tx);
876 dma_release_channel(host->dma_chan_rx); 864 dma_release_channel(host->dma_chan_rx);
877 dma_release_channel(host->dma_chan_tx); 865 dma_release_channel(host->dma_chan_tx);
878 iounmap(host->base);
879
880 clk_put(host->clk);
881
882 release_resource(host->res);
883 866
884 mmc_free_host(mmc); 867 mmc_free_host(mmc);
885 } 868 }
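
Note: the pxamci probe path moves to managed (devm_*) resources, which is why the explicit iounmap()/clk_put()/free_irq()/release_resource() calls disappear from the error and remove paths. A condensed sketch of that probe style (struct, handler and names are hypothetical; kernel build assumed):

    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/interrupt.h>
    #include <linux/io.h>
    #include <linux/platform_device.h>
    #include <linux/slab.h>

    struct example_host { void __iomem *base; struct clk *clk; int irq; };

    static irqreturn_t example_irq(int irq, void *data) { return IRQ_HANDLED; }

    static int example_probe(struct platform_device *pdev)
    {
            struct example_host *host;
            struct resource *r;

            host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
            if (!host)
                    return -ENOMEM;

            host->irq = platform_get_irq(pdev, 0);
            if (host->irq < 0)
                    return host->irq;          /* propagate the real error code */

            r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            host->base = devm_ioremap_resource(&pdev->dev, r);
            if (IS_ERR(host->base))
                    return PTR_ERR(host->base);

            host->clk = devm_clk_get(&pdev->dev, NULL);
            if (IS_ERR(host->clk))
                    return PTR_ERR(host->clk);

            /* Everything above is released automatically on failure or removal. */
            return devm_request_irq(&pdev->dev, host->irq, example_irq, 0,
                                    dev_name(&pdev->dev), host);
    }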
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index f6047fc94062..a5cda926d38e 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -146,6 +146,33 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = {
146 .ops = &sdhci_acpi_ops_int, 146 .ops = &sdhci_acpi_ops_int,
147}; 147};
148 148
149static int bxt_get_cd(struct mmc_host *mmc)
150{
151 int gpio_cd = mmc_gpio_get_cd(mmc);
152 struct sdhci_host *host = mmc_priv(mmc);
153 unsigned long flags;
154 int ret = 0;
155
156 if (!gpio_cd)
157 return 0;
158
159 pm_runtime_get_sync(mmc->parent);
160
161 spin_lock_irqsave(&host->lock, flags);
162
163 if (host->flags & SDHCI_DEVICE_DEAD)
164 goto out;
165
166 ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
167out:
168 spin_unlock_irqrestore(&host->lock, flags);
169
170 pm_runtime_mark_last_busy(mmc->parent);
171 pm_runtime_put_autosuspend(mmc->parent);
172
173 return ret;
174}
175
149static int sdhci_acpi_emmc_probe_slot(struct platform_device *pdev, 176static int sdhci_acpi_emmc_probe_slot(struct platform_device *pdev,
150 const char *hid, const char *uid) 177 const char *hid, const char *uid)
151{ 178{
@@ -196,6 +223,9 @@ static int sdhci_acpi_sd_probe_slot(struct platform_device *pdev,
196 223
197 /* Platform specific code during sd probe slot goes here */ 224 /* Platform specific code during sd probe slot goes here */
198 225
226 if (hid && !strcmp(hid, "80865ACA"))
227 host->mmc_host_ops.get_cd = bxt_get_cd;
228
199 return 0; 229 return 0;
200} 230}
201 231
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index 7e7d8f0c9438..9cb86fb25976 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -217,6 +217,7 @@ static int sdhci_at91_probe(struct platform_device *pdev)
217pm_runtime_disable: 217pm_runtime_disable:
218 pm_runtime_disable(&pdev->dev); 218 pm_runtime_disable(&pdev->dev);
219 pm_runtime_set_suspended(&pdev->dev); 219 pm_runtime_set_suspended(&pdev->dev);
220 pm_runtime_put_noidle(&pdev->dev);
220clocks_disable_unprepare: 221clocks_disable_unprepare:
221 clk_disable_unprepare(priv->gck); 222 clk_disable_unprepare(priv->gck);
222 clk_disable_unprepare(priv->mainck); 223 clk_disable_unprepare(priv->mainck);
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index cc851b065d0a..df3b8eced8c4 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -330,6 +330,33 @@ static void spt_read_drive_strength(struct sdhci_host *host)
330 sdhci_pci_spt_drive_strength = 0x10 | ((val >> 12) & 0xf); 330 sdhci_pci_spt_drive_strength = 0x10 | ((val >> 12) & 0xf);
331} 331}
332 332
333static int bxt_get_cd(struct mmc_host *mmc)
334{
335 int gpio_cd = mmc_gpio_get_cd(mmc);
336 struct sdhci_host *host = mmc_priv(mmc);
337 unsigned long flags;
338 int ret = 0;
339
340 if (!gpio_cd)
341 return 0;
342
343 pm_runtime_get_sync(mmc->parent);
344
345 spin_lock_irqsave(&host->lock, flags);
346
347 if (host->flags & SDHCI_DEVICE_DEAD)
348 goto out;
349
350 ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
351out:
352 spin_unlock_irqrestore(&host->lock, flags);
353
354 pm_runtime_mark_last_busy(mmc->parent);
355 pm_runtime_put_autosuspend(mmc->parent);
356
357 return ret;
358}
359
333static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot) 360static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
334{ 361{
335 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE | 362 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
@@ -362,6 +389,10 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
362 slot->cd_con_id = NULL; 389 slot->cd_con_id = NULL;
363 slot->cd_idx = 0; 390 slot->cd_idx = 0;
364 slot->cd_override_level = true; 391 slot->cd_override_level = true;
392 if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD ||
393 slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD)
394 slot->host->mmc_host_ops.get_cd = bxt_get_cd;
395
365 return 0; 396 return 0;
366} 397}
367 398
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index d622435d1bcc..add9fdfd1d8f 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1360,7 +1360,7 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1360 sdhci_runtime_pm_get(host); 1360 sdhci_runtime_pm_get(host);
1361 1361
1362 /* Firstly check card presence */ 1362 /* Firstly check card presence */
1363 present = sdhci_do_get_cd(host); 1363 present = mmc->ops->get_cd(mmc);
1364 1364
1365 spin_lock_irqsave(&host->lock, flags); 1365 spin_lock_irqsave(&host->lock, flags);
1366 1366
@@ -2849,6 +2849,8 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev,
2849 2849
2850 host = mmc_priv(mmc); 2850 host = mmc_priv(mmc);
2851 host->mmc = mmc; 2851 host->mmc = mmc;
2852 host->mmc_host_ops = sdhci_ops;
2853 mmc->ops = &host->mmc_host_ops;
2852 2854
2853 return host; 2855 return host;
2854} 2856}
@@ -3037,7 +3039,6 @@ int sdhci_add_host(struct sdhci_host *host)
3037 /* 3039 /*
3038 * Set host parameters. 3040 * Set host parameters.
3039 */ 3041 */
3040 mmc->ops = &sdhci_ops;
3041 max_clk = host->max_clk; 3042 max_clk = host->max_clk;
3042 3043
3043 if (host->ops->get_min_clock) 3044 if (host->ops->get_min_clock)
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 7654ae5d2b4e..0115e9907bf8 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -430,6 +430,7 @@ struct sdhci_host {
430 430
431 /* Internal data */ 431 /* Internal data */
432 struct mmc_host *mmc; /* MMC structure */ 432 struct mmc_host *mmc; /* MMC structure */
433 struct mmc_host_ops mmc_host_ops; /* MMC host ops */
433 u64 dma_mask; /* custom DMA mask */ 434 u64 dma_mask; /* custom DMA mask */
434 435
435#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE) 436#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
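
Note: sdhci now keeps a per-host copy of the default mmc_host_ops (host->mmc_host_ops = sdhci_ops) so individual controllers, like the Broxton slots above, can override just .get_cd without patching the shared static table. The idea in a standalone sketch (all names are illustrative):

    #include <stdio.h>

    struct host_ops {
            int (*get_cd)(void *host);     /* card-detect callback */
            int (*request)(void *host);    /* other callbacks elided */
    };

    static int default_get_cd(void *host)  { return 1; }
    static int default_request(void *host) { return 0; }

    /* Shared, read-only default table. */
    static const struct host_ops default_ops = {
            .get_cd  = default_get_cd,
            .request = default_request,
    };

    static int quirky_get_cd(void *host)   { return 0; /* platform-specific logic */ }

    struct host {
            struct host_ops ops;           /* private, per-instance copy */
    };

    int main(void)
    {
            struct host a = { .ops = default_ops };
            struct host b = { .ops = default_ops };

            /* Only this instance gets the override; the default table is untouched. */
            b.ops.get_cd = quirky_get_cd;

            printf("a: %d, b: %d\n", a.ops.get_cd(&a), b.ops.get_cd(&b));
            return 0;
    }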
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 1ca8a1359cbc..6234eab38ff3 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -445,7 +445,7 @@ static void sh_mmcif_request_dma(struct sh_mmcif_host *host)
445 pdata->slave_id_rx); 445 pdata->slave_id_rx);
446 } else { 446 } else {
447 host->chan_tx = dma_request_slave_channel(dev, "tx"); 447 host->chan_tx = dma_request_slave_channel(dev, "tx");
448 host->chan_tx = dma_request_slave_channel(dev, "rx"); 448 host->chan_rx = dma_request_slave_channel(dev, "rx");
449 } 449 }
450 dev_dbg(dev, "%s: got channel TX %p RX %p\n", __func__, host->chan_tx, 450 dev_dbg(dev, "%s: got channel TX %p RX %p\n", __func__, host->chan_tx,
451 host->chan_rx); 451 host->chan_rx);
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
index 2a1b6e037e1a..0134ba32a057 100644
--- a/drivers/mtd/ubi/upd.c
+++ b/drivers/mtd/ubi/upd.c
@@ -193,7 +193,7 @@ int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
193 vol->changing_leb = 1; 193 vol->changing_leb = 1;
194 vol->ch_lnum = req->lnum; 194 vol->ch_lnum = req->lnum;
195 195
196 vol->upd_buf = vmalloc(req->bytes); 196 vol->upd_buf = vmalloc(ALIGN((int)req->bytes, ubi->min_io_size));
197 if (!vol->upd_buf) 197 if (!vol->upd_buf)
198 return -ENOMEM; 198 return -ENOMEM;
199 199
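
Note: the UBI fix rounds the LEB-change buffer up to the device's minimal I/O unit so the last, partially filled page can be written without overrunning the allocation. The rounding is the usual power-of-two alignment; a standalone sketch (sizes are example values):

    #include <stdio.h>
    #include <stdlib.h>

    /* Round x up to a multiple of a, where a is a power of two. */
    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

    int main(void)
    {
            size_t req_bytes   = 5000;   /* requested LEB change size */
            size_t min_io_size = 2048;   /* e.g. NAND page size */
            size_t buf_size    = ALIGN_UP(req_bytes, min_io_size);

            printf("allocating %zu bytes for a %zu byte request\n",
                   buf_size, req_bytes);   /* prints 6144 for 5000 */

            void *buf = malloc(buf_size);
            free(buf);
            return 0;
    }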
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 56b560558884..b7f1a9919033 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -214,6 +214,8 @@ static void bond_uninit(struct net_device *bond_dev);
214static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev, 214static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
215 struct rtnl_link_stats64 *stats); 215 struct rtnl_link_stats64 *stats);
216static void bond_slave_arr_handler(struct work_struct *work); 216static void bond_slave_arr_handler(struct work_struct *work);
217static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
218 int mod);
217 219
218/*---------------------------- General routines -----------------------------*/ 220/*---------------------------- General routines -----------------------------*/
219 221
@@ -2127,6 +2129,7 @@ static void bond_miimon_commit(struct bonding *bond)
2127 continue; 2129 continue;
2128 2130
2129 case BOND_LINK_UP: 2131 case BOND_LINK_UP:
2132 bond_update_speed_duplex(slave);
2130 bond_set_slave_link_state(slave, BOND_LINK_UP, 2133 bond_set_slave_link_state(slave, BOND_LINK_UP,
2131 BOND_SLAVE_NOTIFY_NOW); 2134 BOND_SLAVE_NOTIFY_NOW);
2132 slave->last_link_up = jiffies; 2135 slave->last_link_up = jiffies;
@@ -2459,7 +2462,7 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
2459 struct slave *slave) 2462 struct slave *slave)
2460{ 2463{
2461 struct arphdr *arp = (struct arphdr *)skb->data; 2464 struct arphdr *arp = (struct arphdr *)skb->data;
2462 struct slave *curr_active_slave; 2465 struct slave *curr_active_slave, *curr_arp_slave;
2463 unsigned char *arp_ptr; 2466 unsigned char *arp_ptr;
2464 __be32 sip, tip; 2467 __be32 sip, tip;
2465 int alen, is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP); 2468 int alen, is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
@@ -2506,26 +2509,41 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
2506 &sip, &tip); 2509 &sip, &tip);
2507 2510
2508 curr_active_slave = rcu_dereference(bond->curr_active_slave); 2511 curr_active_slave = rcu_dereference(bond->curr_active_slave);
2512 curr_arp_slave = rcu_dereference(bond->current_arp_slave);
2509 2513
2510 /* Backup slaves won't see the ARP reply, but do come through 2514 /* We 'trust' the received ARP enough to validate it if:
2511 * here for each ARP probe (so we swap the sip/tip to validate 2515 *
2512 * the probe). In a "redundant switch, common router" type of 2516 * (a) the slave receiving the ARP is active (which includes the
2513 * configuration, the ARP probe will (hopefully) travel from 2517 * current ARP slave, if any), or
2514 * the active, through one switch, the router, then the other 2518 *
2515 * switch before reaching the backup. 2519 * (b) the receiving slave isn't active, but there is a currently
2520 * active slave and it received valid arp reply(s) after it became
2521 * the currently active slave, or
2522 *
2523 * (c) there is an ARP slave that sent an ARP during the prior ARP
2524 * interval, and we receive an ARP reply on any slave. We accept
2525 * these because switch FDB update delays may deliver the ARP
2526 * reply to a slave other than the sender of the ARP request.
2516 * 2527 *
2517 * We 'trust' the arp requests if there is an active slave and 2528 * Note: for (b), backup slaves are receiving the broadcast ARP
2518 * it received valid arp reply(s) after it became active. This 2529 * request, not a reply. This request passes from the sending
2519 * is done to avoid endless looping when we can't reach the 2530 * slave through the L2 switch(es) to the receiving slave. Since
2531 * this is checking the request, sip/tip are swapped for
2532 * validation.
2533 *
2534 * This is done to avoid endless looping when we can't reach the
2520 * arp_ip_target and fool ourselves with our own arp requests. 2535 * arp_ip_target and fool ourselves with our own arp requests.
2521 */ 2536 */
2522
2523 if (bond_is_active_slave(slave)) 2537 if (bond_is_active_slave(slave))
2524 bond_validate_arp(bond, slave, sip, tip); 2538 bond_validate_arp(bond, slave, sip, tip);
2525 else if (curr_active_slave && 2539 else if (curr_active_slave &&
2526 time_after(slave_last_rx(bond, curr_active_slave), 2540 time_after(slave_last_rx(bond, curr_active_slave),
2527 curr_active_slave->last_link_up)) 2541 curr_active_slave->last_link_up))
2528 bond_validate_arp(bond, slave, tip, sip); 2542 bond_validate_arp(bond, slave, tip, sip);
2543 else if (curr_arp_slave && (arp->ar_op == htons(ARPOP_REPLY)) &&
2544 bond_time_in_interval(bond,
2545 dev_trans_start(curr_arp_slave->dev), 1))
2546 bond_validate_arp(bond, slave, sip, tip);
2529 2547
2530out_unlock: 2548out_unlock:
2531 if (arp != (struct arphdr *)skb->data) 2549 if (arp != (struct arphdr *)skb->data)
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index 575790e8a75a..74a7dfecee27 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -843,7 +843,7 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
843 if (clear_intf) 843 if (clear_intf)
844 mcp251x_write_bits(spi, CANINTF, clear_intf, 0x00); 844 mcp251x_write_bits(spi, CANINTF, clear_intf, 0x00);
845 845
846 if (eflag) 846 if (eflag & (EFLG_RX0OVR | EFLG_RX1OVR))
847 mcp251x_write_bits(spi, EFLG, eflag, 0x00); 847 mcp251x_write_bits(spi, EFLG, eflag, 0x00);
848 848
849 /* Update can state */ 849 /* Update can state */
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index fc5b75675cd8..eb7192fab593 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -117,6 +117,9 @@ MODULE_LICENSE("GPL v2");
117 */ 117 */
118#define EMS_USB_ARM7_CLOCK 8000000 118#define EMS_USB_ARM7_CLOCK 8000000
119 119
120#define CPC_TX_QUEUE_TRIGGER_LOW 25
121#define CPC_TX_QUEUE_TRIGGER_HIGH 35
122
120/* 123/*
121 * CAN-Message representation in a CPC_MSG. Message object type is 124 * CAN-Message representation in a CPC_MSG. Message object type is
122 * CPC_MSG_TYPE_CAN_FRAME or CPC_MSG_TYPE_RTR_FRAME or 125 * CPC_MSG_TYPE_CAN_FRAME or CPC_MSG_TYPE_RTR_FRAME or
@@ -278,6 +281,11 @@ static void ems_usb_read_interrupt_callback(struct urb *urb)
278 switch (urb->status) { 281 switch (urb->status) {
279 case 0: 282 case 0:
280 dev->free_slots = dev->intr_in_buffer[1]; 283 dev->free_slots = dev->intr_in_buffer[1];
284 if(dev->free_slots > CPC_TX_QUEUE_TRIGGER_HIGH){
285 if (netif_queue_stopped(netdev)){
286 netif_wake_queue(netdev);
287 }
288 }
281 break; 289 break;
282 290
283 case -ECONNRESET: /* unlink */ 291 case -ECONNRESET: /* unlink */
@@ -526,8 +534,6 @@ static void ems_usb_write_bulk_callback(struct urb *urb)
526 /* Release context */ 534 /* Release context */
527 context->echo_index = MAX_TX_URBS; 535 context->echo_index = MAX_TX_URBS;
528 536
529 if (netif_queue_stopped(netdev))
530 netif_wake_queue(netdev);
531} 537}
532 538
533/* 539/*
@@ -587,7 +593,7 @@ static int ems_usb_start(struct ems_usb *dev)
587 int err, i; 593 int err, i;
588 594
589 dev->intr_in_buffer[0] = 0; 595 dev->intr_in_buffer[0] = 0;
590 dev->free_slots = 15; /* initial size */ 596 dev->free_slots = 50; /* initial size */
591 597
592 for (i = 0; i < MAX_RX_URBS; i++) { 598 for (i = 0; i < MAX_RX_URBS; i++) {
593 struct urb *urb = NULL; 599 struct urb *urb = NULL;
@@ -835,7 +841,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
835 841
836 /* Slow down tx path */ 842 /* Slow down tx path */
837 if (atomic_read(&dev->active_tx_urbs) >= MAX_TX_URBS || 843 if (atomic_read(&dev->active_tx_urbs) >= MAX_TX_URBS ||
838 dev->free_slots < 5) { 844 dev->free_slots < CPC_TX_QUEUE_TRIGGER_LOW) {
839 netif_stop_queue(netdev); 845 netif_stop_queue(netdev);
840 } 846 }
841 } 847 }
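
Note: the EMS USB driver now uses two watermarks: the TX queue stops when the adapter reports fewer than CPC_TX_QUEUE_TRIGGER_LOW free slots and is only restarted once the count climbs back above CPC_TX_QUEUE_TRIGGER_HIGH, which avoids toggling the queue on every frame. A standalone sketch of that hysteresis (threshold values copied from the hunk; the queue model is hypothetical):

    #include <stdbool.h>
    #include <stdio.h>

    #define TRIGGER_LOW  25
    #define TRIGGER_HIGH 35

    static bool queue_stopped;

    /* Transmit path: stop the queue when the device is nearly full. */
    static void on_transmit(int free_slots)
    {
            if (free_slots < TRIGGER_LOW && !queue_stopped) {
                    queue_stopped = true;
                    printf("free=%d -> stop queue\n", free_slots);
            }
    }

    /* Status path: wake the queue only once well above the low mark. */
    static void on_status(int free_slots)
    {
            if (free_slots > TRIGGER_HIGH && queue_stopped) {
                    queue_stopped = false;
                    printf("free=%d -> wake queue\n", free_slots);
            }
    }

    int main(void)
    {
            on_transmit(24);   /* stops */
            on_status(30);     /* still stopped: between the watermarks */
            on_status(40);     /* wakes */
            return 0;
    }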
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 5eee62badf45..cbc99d5649af 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -826,9 +826,8 @@ static struct gs_can *gs_make_candev(unsigned int channel, struct usb_interface
826static void gs_destroy_candev(struct gs_can *dev) 826static void gs_destroy_candev(struct gs_can *dev)
827{ 827{
828 unregister_candev(dev->netdev); 828 unregister_candev(dev->netdev);
829 free_candev(dev->netdev);
830 usb_kill_anchored_urbs(&dev->tx_submitted); 829 usb_kill_anchored_urbs(&dev->tx_submitted);
831 kfree(dev); 830 free_candev(dev->netdev);
832} 831}
833 832
834static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) 833static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
@@ -913,12 +912,15 @@ static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *
913 for (i = 0; i < icount; i++) { 912 for (i = 0; i < icount; i++) {
914 dev->canch[i] = gs_make_candev(i, intf); 913 dev->canch[i] = gs_make_candev(i, intf);
915 if (IS_ERR_OR_NULL(dev->canch[i])) { 914 if (IS_ERR_OR_NULL(dev->canch[i])) {
915 /* save error code to return later */
916 rc = PTR_ERR(dev->canch[i]);
917
916 /* on failure destroy previously created candevs */ 918 /* on failure destroy previously created candevs */
917 icount = i; 919 icount = i;
918 for (i = 0; i < icount; i++) { 920 for (i = 0; i < icount; i++)
919 gs_destroy_candev(dev->canch[i]); 921 gs_destroy_candev(dev->canch[i]);
920 dev->canch[i] = NULL; 922
921 } 923 usb_kill_anchored_urbs(&dev->rx_submitted);
922 kfree(dev); 924 kfree(dev);
923 return rc; 925 return rc;
924 } 926 }
@@ -939,16 +941,12 @@ static void gs_usb_disconnect(struct usb_interface *intf)
939 return; 941 return;
940 } 942 }
941 943
942 for (i = 0; i < GS_MAX_INTF; i++) { 944 for (i = 0; i < GS_MAX_INTF; i++)
943 struct gs_can *can = dev->canch[i]; 945 if (dev->canch[i])
944 946 gs_destroy_candev(dev->canch[i]);
945 if (!can)
946 continue;
947
948 gs_destroy_candev(can);
949 }
950 947
951 usb_kill_anchored_urbs(&dev->rx_submitted); 948 usb_kill_anchored_urbs(&dev->rx_submitted);
949 kfree(dev);
952} 950}
953 951
954static const struct usb_device_id gs_usb_table[] = { 952static const struct usb_device_id gs_usb_table[] = {
diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c
index cc6c54553418..a47f52f44b0d 100644
--- a/drivers/net/dsa/mv88e6352.c
+++ b/drivers/net/dsa/mv88e6352.c
@@ -25,6 +25,7 @@
25static const struct mv88e6xxx_switch_id mv88e6352_table[] = { 25static const struct mv88e6xxx_switch_id mv88e6352_table[] = {
26 { PORT_SWITCH_ID_6172, "Marvell 88E6172" }, 26 { PORT_SWITCH_ID_6172, "Marvell 88E6172" },
27 { PORT_SWITCH_ID_6176, "Marvell 88E6176" }, 27 { PORT_SWITCH_ID_6176, "Marvell 88E6176" },
28 { PORT_SWITCH_ID_6240, "Marvell 88E6240" },
28 { PORT_SWITCH_ID_6320, "Marvell 88E6320" }, 29 { PORT_SWITCH_ID_6320, "Marvell 88E6320" },
29 { PORT_SWITCH_ID_6320_A1, "Marvell 88E6320 (A1)" }, 30 { PORT_SWITCH_ID_6320_A1, "Marvell 88E6320 (A1)" },
30 { PORT_SWITCH_ID_6320_A2, "Marvell 88e6320 (A2)" }, 31 { PORT_SWITCH_ID_6320_A2, "Marvell 88e6320 (A2)" },
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index 9fe33fc3c2b9..512c8c0be1b4 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -1532,7 +1532,7 @@ int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
1532 1532
1533 /* no PVID with ranges, otherwise it's a bug */ 1533 /* no PVID with ranges, otherwise it's a bug */
1534 if (pvid) 1534 if (pvid)
1535 err = _mv88e6xxx_port_pvid_set(ds, port, vid); 1535 err = _mv88e6xxx_port_pvid_set(ds, port, vlan->vid_end);
1536unlock: 1536unlock:
1537 mutex_unlock(&ps->smi_mutex); 1537 mutex_unlock(&ps->smi_mutex);
1538 1538
@@ -1555,7 +1555,7 @@ static int _mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid)
1555 1555
1556 if (vlan.vid != vid || !vlan.valid || 1556 if (vlan.vid != vid || !vlan.valid ||
1557 vlan.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) 1557 vlan.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
1558 return -ENOENT; 1558 return -EOPNOTSUPP;
1559 1559
1560 vlan.data[port] = GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER; 1560 vlan.data[port] = GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
1561 1561
@@ -1582,6 +1582,7 @@ int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
1582 const struct switchdev_obj_port_vlan *vlan) 1582 const struct switchdev_obj_port_vlan *vlan)
1583{ 1583{
1584 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 1584 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1585 const u16 defpvid = 4000 + ds->index * DSA_MAX_PORTS + port;
1585 u16 pvid, vid; 1586 u16 pvid, vid;
1586 int err = 0; 1587 int err = 0;
1587 1588
@@ -1597,7 +1598,8 @@ int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
1597 goto unlock; 1598 goto unlock;
1598 1599
1599 if (vid == pvid) { 1600 if (vid == pvid) {
1600 err = _mv88e6xxx_port_pvid_set(ds, port, 0); 1601 /* restore reserved VLAN ID */
1602 err = _mv88e6xxx_port_pvid_set(ds, port, defpvid);
1601 if (err) 1603 if (err)
1602 goto unlock; 1604 goto unlock;
1603 } 1605 }
@@ -1889,26 +1891,20 @@ unlock:
1889 1891
1890int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, u32 members) 1892int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, u32 members)
1891{ 1893{
1892 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 1894 return 0;
1893 const u16 pvid = 4000 + ds->index * DSA_MAX_PORTS + port;
1894 int err;
1895
1896 /* The port joined a bridge, so leave its reserved VLAN */
1897 mutex_lock(&ps->smi_mutex);
1898 err = _mv88e6xxx_port_vlan_del(ds, port, pvid);
1899 if (!err)
1900 err = _mv88e6xxx_port_pvid_set(ds, port, 0);
1901 mutex_unlock(&ps->smi_mutex);
1902 return err;
1903} 1895}
1904 1896
1905int mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port, u32 members) 1897int mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port, u32 members)
1906{ 1898{
1899 return 0;
1900}
1901
1902static int mv88e6xxx_setup_port_default_vlan(struct dsa_switch *ds, int port)
1903{
1907 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 1904 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1908 const u16 pvid = 4000 + ds->index * DSA_MAX_PORTS + port; 1905 const u16 pvid = 4000 + ds->index * DSA_MAX_PORTS + port;
1909 int err; 1906 int err;
1910 1907
1911 /* The port left the bridge, so join its reserved VLAN */
1912 mutex_lock(&ps->smi_mutex); 1908 mutex_lock(&ps->smi_mutex);
1913 err = _mv88e6xxx_port_vlan_add(ds, port, pvid, true); 1909 err = _mv88e6xxx_port_vlan_add(ds, port, pvid, true);
1914 if (!err) 1910 if (!err)
@@ -2163,7 +2159,8 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
2163 * database, and allow every port to egress frames on all other ports. 2159 * database, and allow every port to egress frames on all other ports.
2164 */ 2160 */
2165 reg = BIT(ps->num_ports) - 1; /* all ports */ 2161 reg = BIT(ps->num_ports) - 1; /* all ports */
2166 ret = _mv88e6xxx_port_vlan_map_set(ds, port, reg & ~port); 2162 reg &= ~BIT(port); /* except itself */
2163 ret = _mv88e6xxx_port_vlan_map_set(ds, port, reg);
2167 if (ret) 2164 if (ret)
2168 goto abort; 2165 goto abort;
2169 2166
@@ -2191,8 +2188,7 @@ int mv88e6xxx_setup_ports(struct dsa_switch *ds)
2191 if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i)) 2188 if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i))
2192 continue; 2189 continue;
2193 2190
2194 /* setup the unbridged state */ 2191 ret = mv88e6xxx_setup_port_default_vlan(ds, i);
2195 ret = mv88e6xxx_port_bridge_leave(ds, i, 0);
2196 if (ret < 0) 2192 if (ret < 0)
2197 return ret; 2193 return ret;
2198 } 2194 }
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 79e1a0282163..17b2126075e0 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -2461,7 +2461,7 @@ boomerang_interrupt(int irq, void *dev_id)
2461 int i; 2461 int i;
2462 pci_unmap_single(VORTEX_PCI(vp), 2462 pci_unmap_single(VORTEX_PCI(vp),
2463 le32_to_cpu(vp->tx_ring[entry].frag[0].addr), 2463 le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
2464 le32_to_cpu(vp->tx_ring[entry].frag[0].length), 2464 le32_to_cpu(vp->tx_ring[entry].frag[0].length)&0xFFF,
2465 PCI_DMA_TODEVICE); 2465 PCI_DMA_TODEVICE);
2466 2466
2467 for (i=1; i<=skb_shinfo(skb)->nr_frags; i++) 2467 for (i=1; i<=skb_shinfo(skb)->nr_frags; i++)
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
index 2777289a26c0..2f79d29f17f2 100644
--- a/drivers/net/ethernet/8390/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -1501,6 +1501,7 @@ static const struct pcmcia_device_id pcnet_ids[] = {
1501 PCMCIA_DEVICE_MANF_CARD(0x026f, 0x030a), 1501 PCMCIA_DEVICE_MANF_CARD(0x026f, 0x030a),
1502 PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1103), 1502 PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1103),
1503 PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1121), 1503 PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1121),
1504 PCMCIA_DEVICE_MANF_CARD(0xc001, 0x0009),
1504 PCMCIA_DEVICE_PROD_ID12("2408LAN", "Ethernet", 0x352fff7f, 0x00b2e941), 1505 PCMCIA_DEVICE_PROD_ID12("2408LAN", "Ethernet", 0x352fff7f, 0x00b2e941),
1505 PCMCIA_DEVICE_PROD_ID1234("Socket", "CF 10/100 Ethernet Card", "Revision B", "05/11/06", 0xb38bcc2e, 0x4de88352, 0xeaca6c8d, 0x7e57c22e), 1506 PCMCIA_DEVICE_PROD_ID1234("Socket", "CF 10/100 Ethernet Card", "Revision B", "05/11/06", 0xb38bcc2e, 0x4de88352, 0xeaca6c8d, 0x7e57c22e),
1506 PCMCIA_DEVICE_PROD_ID123("Cardwell", "PCMCIA", "ETHERNET", 0x9533672e, 0x281f1c5d, 0x3ff7175b), 1507 PCMCIA_DEVICE_PROD_ID123("Cardwell", "PCMCIA", "ETHERNET", 0x9533672e, 0x281f1c5d, 0x3ff7175b),
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index 3f3bcbea15bd..0907ab6ff309 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -2380,7 +2380,7 @@ static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
2380 sizeof(u32), 2380 sizeof(u32),
2381 &tx_ring->tx_status_pa, 2381 &tx_ring->tx_status_pa,
2382 GFP_KERNEL); 2382 GFP_KERNEL);
2383 if (!tx_ring->tx_status_pa) { 2383 if (!tx_ring->tx_status) {
2384 dev_err(&adapter->pdev->dev, 2384 dev_err(&adapter->pdev->dev,
2385 "Cannot alloc memory for Tx status block\n"); 2385 "Cannot alloc memory for Tx status block\n");
2386 return -ENOMEM; 2386 return -ENOMEM;
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 17472851674f..f749e4d389eb 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -193,7 +193,6 @@ static void altera_tse_mdio_destroy(struct net_device *dev)
193 priv->mdio->id); 193 priv->mdio->id);
194 194
195 mdiobus_unregister(priv->mdio); 195 mdiobus_unregister(priv->mdio);
196 kfree(priv->mdio->irq);
197 mdiobus_free(priv->mdio); 196 mdiobus_free(priv->mdio);
198 priv->mdio = NULL; 197 priv->mdio = NULL;
199} 198}
diff --git a/drivers/net/ethernet/amd/am79c961a.c b/drivers/net/ethernet/amd/am79c961a.c
index 87e727b921dc..fcdf5dda448f 100644
--- a/drivers/net/ethernet/amd/am79c961a.c
+++ b/drivers/net/ethernet/amd/am79c961a.c
@@ -50,8 +50,8 @@ static const char version[] =
50static void write_rreg(u_long base, u_int reg, u_int val) 50static void write_rreg(u_long base, u_int reg, u_int val)
51{ 51{
52 asm volatile( 52 asm volatile(
53 "str%?h %1, [%2] @ NET_RAP\n\t" 53 "strh %1, [%2] @ NET_RAP\n\t"
54 "str%?h %0, [%2, #-4] @ NET_RDP" 54 "strh %0, [%2, #-4] @ NET_RDP"
55 : 55 :
56 : "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464)); 56 : "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464));
57} 57}
@@ -60,8 +60,8 @@ static inline unsigned short read_rreg(u_long base_addr, u_int reg)
60{ 60{
61 unsigned short v; 61 unsigned short v;
62 asm volatile( 62 asm volatile(
63 "str%?h %1, [%2] @ NET_RAP\n\t" 63 "strh %1, [%2] @ NET_RAP\n\t"
64 "ldr%?h %0, [%2, #-4] @ NET_RDP" 64 "ldrh %0, [%2, #-4] @ NET_RDP"
65 : "=r" (v) 65 : "=r" (v)
66 : "r" (reg), "r" (ISAIO_BASE + 0x0464)); 66 : "r" (reg), "r" (ISAIO_BASE + 0x0464));
67 return v; 67 return v;
@@ -70,8 +70,8 @@ static inline unsigned short read_rreg(u_long base_addr, u_int reg)
70static inline void write_ireg(u_long base, u_int reg, u_int val) 70static inline void write_ireg(u_long base, u_int reg, u_int val)
71{ 71{
72 asm volatile( 72 asm volatile(
73 "str%?h %1, [%2] @ NET_RAP\n\t" 73 "strh %1, [%2] @ NET_RAP\n\t"
74 "str%?h %0, [%2, #8] @ NET_IDP" 74 "strh %0, [%2, #8] @ NET_IDP"
75 : 75 :
76 : "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464)); 76 : "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464));
77} 77}
@@ -80,8 +80,8 @@ static inline unsigned short read_ireg(u_long base_addr, u_int reg)
80{ 80{
81 u_short v; 81 u_short v;
82 asm volatile( 82 asm volatile(
83 "str%?h %1, [%2] @ NAT_RAP\n\t" 83 "strh %1, [%2] @ NAT_RAP\n\t"
84 "ldr%?h %0, [%2, #8] @ NET_IDP\n\t" 84 "ldrh %0, [%2, #8] @ NET_IDP\n\t"
85 : "=r" (v) 85 : "=r" (v)
86 : "r" (reg), "r" (ISAIO_BASE + 0x0464)); 86 : "r" (reg), "r" (ISAIO_BASE + 0x0464));
87 return v; 87 return v;
@@ -96,7 +96,7 @@ am_writebuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigne
96 offset = ISAMEM_BASE + (offset << 1); 96 offset = ISAMEM_BASE + (offset << 1);
97 length = (length + 1) & ~1; 97 length = (length + 1) & ~1;
98 if ((int)buf & 2) { 98 if ((int)buf & 2) {
99 asm volatile("str%?h %2, [%0], #4" 99 asm volatile("strh %2, [%0], #4"
100 : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8))); 100 : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8)));
101 buf += 2; 101 buf += 2;
102 length -= 2; 102 length -= 2;
@@ -104,20 +104,20 @@ am_writebuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigne
104 while (length > 8) { 104 while (length > 8) {
105 register unsigned int tmp asm("r2"), tmp2 asm("r3"); 105 register unsigned int tmp asm("r2"), tmp2 asm("r3");
106 asm volatile( 106 asm volatile(
107 "ldm%?ia %0!, {%1, %2}" 107 "ldmia %0!, {%1, %2}"
108 : "+r" (buf), "=&r" (tmp), "=&r" (tmp2)); 108 : "+r" (buf), "=&r" (tmp), "=&r" (tmp2));
109 length -= 8; 109 length -= 8;
110 asm volatile( 110 asm volatile(
111 "str%?h %1, [%0], #4\n\t" 111 "strh %1, [%0], #4\n\t"
112 "mov%? %1, %1, lsr #16\n\t" 112 "mov %1, %1, lsr #16\n\t"
113 "str%?h %1, [%0], #4\n\t" 113 "strh %1, [%0], #4\n\t"
114 "str%?h %2, [%0], #4\n\t" 114 "strh %2, [%0], #4\n\t"
115 "mov%? %2, %2, lsr #16\n\t" 115 "mov %2, %2, lsr #16\n\t"
116 "str%?h %2, [%0], #4" 116 "strh %2, [%0], #4"
117 : "+r" (offset), "=&r" (tmp), "=&r" (tmp2)); 117 : "+r" (offset), "=&r" (tmp), "=&r" (tmp2));
118 } 118 }
119 while (length > 0) { 119 while (length > 0) {
120 asm volatile("str%?h %2, [%0], #4" 120 asm volatile("strh %2, [%0], #4"
121 : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8))); 121 : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8)));
122 buf += 2; 122 buf += 2;
123 length -= 2; 123 length -= 2;
@@ -132,23 +132,23 @@ am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned
132 if ((int)buf & 2) { 132 if ((int)buf & 2) {
133 unsigned int tmp; 133 unsigned int tmp;
134 asm volatile( 134 asm volatile(
135 "ldr%?h %2, [%0], #4\n\t" 135 "ldrh %2, [%0], #4\n\t"
136 "str%?b %2, [%1], #1\n\t" 136 "strb %2, [%1], #1\n\t"
137 "mov%? %2, %2, lsr #8\n\t" 137 "mov %2, %2, lsr #8\n\t"
138 "str%?b %2, [%1], #1" 138 "strb %2, [%1], #1"
139 : "=&r" (offset), "=&r" (buf), "=r" (tmp): "0" (offset), "1" (buf)); 139 : "=&r" (offset), "=&r" (buf), "=r" (tmp): "0" (offset), "1" (buf));
140 length -= 2; 140 length -= 2;
141 } 141 }
142 while (length > 8) { 142 while (length > 8) {
143 register unsigned int tmp asm("r2"), tmp2 asm("r3"), tmp3; 143 register unsigned int tmp asm("r2"), tmp2 asm("r3"), tmp3;
144 asm volatile( 144 asm volatile(
145 "ldr%?h %2, [%0], #4\n\t" 145 "ldrh %2, [%0], #4\n\t"
146 "ldr%?h %4, [%0], #4\n\t" 146 "ldrh %4, [%0], #4\n\t"
147 "ldr%?h %3, [%0], #4\n\t" 147 "ldrh %3, [%0], #4\n\t"
148 "orr%? %2, %2, %4, lsl #16\n\t" 148 "orr %2, %2, %4, lsl #16\n\t"
149 "ldr%?h %4, [%0], #4\n\t" 149 "ldrh %4, [%0], #4\n\t"
150 "orr%? %3, %3, %4, lsl #16\n\t" 150 "orr %3, %3, %4, lsl #16\n\t"
151 "stm%?ia %1!, {%2, %3}" 151 "stmia %1!, {%2, %3}"
152 : "=&r" (offset), "=&r" (buf), "=r" (tmp), "=r" (tmp2), "=r" (tmp3) 152 : "=&r" (offset), "=&r" (buf), "=r" (tmp), "=r" (tmp2), "=r" (tmp3)
153 : "0" (offset), "1" (buf)); 153 : "0" (offset), "1" (buf));
154 length -= 8; 154 length -= 8;
@@ -156,10 +156,10 @@ am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned
156 while (length > 0) { 156 while (length > 0) {
157 unsigned int tmp; 157 unsigned int tmp;
158 asm volatile( 158 asm volatile(
159 "ldr%?h %2, [%0], #4\n\t" 159 "ldrh %2, [%0], #4\n\t"
160 "str%?b %2, [%1], #1\n\t" 160 "strb %2, [%1], #1\n\t"
161 "mov%? %2, %2, lsr #8\n\t" 161 "mov %2, %2, lsr #8\n\t"
162 "str%?b %2, [%1], #1" 162 "strb %2, [%1], #1"
163 : "=&r" (offset), "=&r" (buf), "=r" (tmp) : "0" (offset), "1" (buf)); 163 : "=&r" (offset), "=&r" (buf), "=r" (tmp) : "0" (offset), "1" (buf));
164 length -= 2; 164 length -= 2;
165 } 165 }
diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
index 256f590f6bb1..3a7ebfdda57d 100644
--- a/drivers/net/ethernet/amd/lance.c
+++ b/drivers/net/ethernet/amd/lance.c
@@ -547,8 +547,8 @@ static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int
547 /* Make certain the data structures used by the LANCE are aligned and DMAble. */ 547 /* Make certain the data structures used by the LANCE are aligned and DMAble. */
548 548
549 lp = kzalloc(sizeof(*lp), GFP_DMA | GFP_KERNEL); 549 lp = kzalloc(sizeof(*lp), GFP_DMA | GFP_KERNEL);
550 if(lp==NULL) 550 if (!lp)
551 return -ENODEV; 551 return -ENOMEM;
552 if (lance_debug > 6) printk(" (#0x%05lx)", (unsigned long)lp); 552 if (lance_debug > 6) printk(" (#0x%05lx)", (unsigned long)lp);
553 dev->ml_priv = lp; 553 dev->ml_priv = lp;
554 lp->name = chipname; 554 lp->name = chipname;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index a4799c1fc7d4..5eb9b20c0eea 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -628,6 +628,7 @@ static int xgene_enet_register_irq(struct net_device *ndev)
628 int ret; 628 int ret;
629 629
630 ring = pdata->rx_ring; 630 ring = pdata->rx_ring;
631 irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
631 ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq, 632 ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
632 IRQF_SHARED, ring->irq_name, ring); 633 IRQF_SHARED, ring->irq_name, ring);
633 if (ret) 634 if (ret)
@@ -635,6 +636,7 @@ static int xgene_enet_register_irq(struct net_device *ndev)
635 636
636 if (pdata->cq_cnt) { 637 if (pdata->cq_cnt) {
637 ring = pdata->tx_ring->cp_ring; 638 ring = pdata->tx_ring->cp_ring;
639 irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
638 ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq, 640 ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
639 IRQF_SHARED, ring->irq_name, ring); 641 IRQF_SHARED, ring->irq_name, ring);
640 if (ret) { 642 if (ret) {
@@ -649,15 +651,19 @@ static int xgene_enet_register_irq(struct net_device *ndev)
649static void xgene_enet_free_irq(struct net_device *ndev) 651static void xgene_enet_free_irq(struct net_device *ndev)
650{ 652{
651 struct xgene_enet_pdata *pdata; 653 struct xgene_enet_pdata *pdata;
654 struct xgene_enet_desc_ring *ring;
652 struct device *dev; 655 struct device *dev;
653 656
654 pdata = netdev_priv(ndev); 657 pdata = netdev_priv(ndev);
655 dev = ndev_to_dev(ndev); 658 dev = ndev_to_dev(ndev);
656 devm_free_irq(dev, pdata->rx_ring->irq, pdata->rx_ring); 659 ring = pdata->rx_ring;
660 irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
661 devm_free_irq(dev, ring->irq, ring);
657 662
658 if (pdata->cq_cnt) { 663 if (pdata->cq_cnt) {
659 devm_free_irq(dev, pdata->tx_ring->cp_ring->irq, 664 ring = pdata->tx_ring->cp_ring;
660 pdata->tx_ring->cp_ring); 665 irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
666 devm_free_irq(dev, ring->irq, ring);
661 } 667 }
662} 668}
663 669
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index 70d5b62c125a..248dfc40a761 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -25,6 +25,7 @@
25#include <linux/acpi.h> 25#include <linux/acpi.h>
26#include <linux/clk.h> 26#include <linux/clk.h>
27#include <linux/efi.h> 27#include <linux/efi.h>
28#include <linux/irq.h>
28#include <linux/io.h> 29#include <linux/io.h>
29#include <linux/of_platform.h> 30#include <linux/of_platform.h>
30#include <linux/of_net.h> 31#include <linux/of_net.h>
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index abe1eabc0171..6446af1403f7 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -163,7 +163,7 @@ static void arc_emac_tx_clean(struct net_device *ndev)
163 struct sk_buff *skb = tx_buff->skb; 163 struct sk_buff *skb = tx_buff->skb;
164 unsigned int info = le32_to_cpu(txbd->info); 164 unsigned int info = le32_to_cpu(txbd->info);
165 165
166 if ((info & FOR_EMAC) || !txbd->data) 166 if ((info & FOR_EMAC) || !txbd->data || !skb)
167 break; 167 break;
168 168
169 if (unlikely(info & (DROP | DEFR | LTCL | UFLO))) { 169 if (unlikely(info & (DROP | DEFR | LTCL | UFLO))) {
@@ -191,6 +191,7 @@ static void arc_emac_tx_clean(struct net_device *ndev)
191 191
192 txbd->data = 0; 192 txbd->data = 0;
193 txbd->info = 0; 193 txbd->info = 0;
194 tx_buff->skb = NULL;
194 195
195 *txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM; 196 *txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM;
196 } 197 }
@@ -446,6 +447,9 @@ static int arc_emac_open(struct net_device *ndev)
446 *last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM; 447 *last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM;
447 } 448 }
448 449
450 priv->txbd_curr = 0;
451 priv->txbd_dirty = 0;
452
449 /* Clean Tx BD's */ 453 /* Clean Tx BD's */
450 memset(priv->txbd, 0, TX_RING_SZ); 454 memset(priv->txbd, 0, TX_RING_SZ);
451 455
@@ -514,6 +518,64 @@ static void arc_emac_set_rx_mode(struct net_device *ndev)
514} 518}
515 519
516/** 520/**
521 * arc_free_tx_queue - free skb from tx queue
522 * @ndev: Pointer to the network device.
523 *
524 * This function must be called while EMAC disable
525 */
526static void arc_free_tx_queue(struct net_device *ndev)
527{
528 struct arc_emac_priv *priv = netdev_priv(ndev);
529 unsigned int i;
530
531 for (i = 0; i < TX_BD_NUM; i++) {
532 struct arc_emac_bd *txbd = &priv->txbd[i];
533 struct buffer_state *tx_buff = &priv->tx_buff[i];
534
535 if (tx_buff->skb) {
536 dma_unmap_single(&ndev->dev, dma_unmap_addr(tx_buff, addr),
537 dma_unmap_len(tx_buff, len), DMA_TO_DEVICE);
538
539 /* return the sk_buff to system */
540 dev_kfree_skb_irq(tx_buff->skb);
541 }
542
543 txbd->info = 0;
544 txbd->data = 0;
545 tx_buff->skb = NULL;
546 }
547}
548
549/**
550 * arc_free_rx_queue - free skb from rx queue
551 * @ndev: Pointer to the network device.
552 *
553 * This function must be called while EMAC disable
554 */
555static void arc_free_rx_queue(struct net_device *ndev)
556{
557 struct arc_emac_priv *priv = netdev_priv(ndev);
558 unsigned int i;
559
560 for (i = 0; i < RX_BD_NUM; i++) {
561 struct arc_emac_bd *rxbd = &priv->rxbd[i];
562 struct buffer_state *rx_buff = &priv->rx_buff[i];
563
564 if (rx_buff->skb) {
565 dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
566 dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);
567
568 /* return the sk_buff to system */
569 dev_kfree_skb_irq(rx_buff->skb);
570 }
571
572 rxbd->info = 0;
573 rxbd->data = 0;
574 rx_buff->skb = NULL;
575 }
576}
577
578/**
517 * arc_emac_stop - Close the network device. 579 * arc_emac_stop - Close the network device.
518 * @ndev: Pointer to the network device. 580 * @ndev: Pointer to the network device.
519 * 581 *
@@ -534,6 +596,10 @@ static int arc_emac_stop(struct net_device *ndev)
534 /* Disable EMAC */ 596 /* Disable EMAC */
535 arc_reg_clr(priv, R_CTRL, EN_MASK); 597 arc_reg_clr(priv, R_CTRL, EN_MASK);
536 598
599 /* Return the sk_buff to system */
600 arc_free_tx_queue(ndev);
601 arc_free_rx_queue(ndev);
602
537 return 0; 603 return 0;
538} 604}
539 605
@@ -610,7 +676,6 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
610 dma_unmap_addr_set(&priv->tx_buff[*txbd_curr], addr, addr); 676 dma_unmap_addr_set(&priv->tx_buff[*txbd_curr], addr, addr);
611 dma_unmap_len_set(&priv->tx_buff[*txbd_curr], len, len); 677 dma_unmap_len_set(&priv->tx_buff[*txbd_curr], len, len);
612 678
613 priv->tx_buff[*txbd_curr].skb = skb;
614 priv->txbd[*txbd_curr].data = cpu_to_le32(addr); 679 priv->txbd[*txbd_curr].data = cpu_to_le32(addr);
615 680
616 /* Make sure pointer to data buffer is set */ 681 /* Make sure pointer to data buffer is set */
@@ -620,6 +685,11 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
620 685
621 *info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len); 686 *info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len);
622 687
688 /* Make sure info word is set */
689 wmb();
690
691 priv->tx_buff[*txbd_curr].skb = skb;
692
623 /* Increment index to point to the next BD */ 693 /* Increment index to point to the next BD */
624 *txbd_curr = (*txbd_curr + 1) % TX_BD_NUM; 694 *txbd_curr = (*txbd_curr + 1) % TX_BD_NUM;
625 695
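
Note: the arc_emac TX fix writes the descriptor's data and info words first, issues a write barrier, and only then records the skb pointer, so the cleanup path (which bails out on a NULL skb) can never observe a half-initialised descriptor. The kernel uses wmb(); a standalone analogue using C11 release/acquire ordering (all names are illustrative, not the driver's):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    struct descriptor {
            uint32_t data;               /* buffer address, etc. */
            uint32_t info;               /* length/ownership bits */
            _Atomic(void *) skb;         /* published last; NULL means "not ready" */
    };

    static void producer(struct descriptor *d, void *pkt)
    {
            d->data = 0x1000;
            d->info = 0x80000000u | 64;  /* "owned by hardware", length 64 */
            /* Release store: the writes above are visible before skb is. */
            atomic_store_explicit(&d->skb, pkt, memory_order_release);
    }

    static void consumer(struct descriptor *d)
    {
            void *pkt = atomic_load_explicit(&d->skb, memory_order_acquire);
            if (!pkt)
                    return;              /* descriptor not fully published yet */
            printf("cleaning packet %p, info=%#x\n", pkt, d->info);
    }

    int main(void)
    {
            static struct descriptor d;
            int payload = 42;

            consumer(&d);                /* nothing to do yet */
            producer(&d, &payload);
            consumer(&d);
            return 0;
    }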
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
index ecc4a334c507..08a23e6b60e9 100644
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -302,7 +302,7 @@ static int nb8800_poll(struct napi_struct *napi, int budget)
302 nb8800_tx_done(dev); 302 nb8800_tx_done(dev);
303 303
304again: 304again:
305 while (work < budget) { 305 do {
306 struct nb8800_rx_buf *rxb; 306 struct nb8800_rx_buf *rxb;
307 unsigned int len; 307 unsigned int len;
308 308
@@ -330,7 +330,7 @@ again:
330 rxd->report = 0; 330 rxd->report = 0;
331 last = next; 331 last = next;
332 work++; 332 work++;
333 } 333 } while (work < budget);
334 334
335 if (work) { 335 if (work) {
336 priv->rx_descs[last].desc.config |= DESC_EOC; 336 priv->rx_descs[last].desc.config |= DESC_EOC;
@@ -1460,7 +1460,19 @@ static int nb8800_probe(struct platform_device *pdev)
1460 goto err_disable_clk; 1460 goto err_disable_clk;
1461 } 1461 }
1462 1462
1463 priv->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); 1463 if (of_phy_is_fixed_link(pdev->dev.of_node)) {
1464 ret = of_phy_register_fixed_link(pdev->dev.of_node);
1465 if (ret < 0) {
1466 dev_err(&pdev->dev, "bad fixed-link spec\n");
1467 goto err_free_bus;
1468 }
1469 priv->phy_node = of_node_get(pdev->dev.of_node);
1470 }
1471
1472 if (!priv->phy_node)
1473 priv->phy_node = of_parse_phandle(pdev->dev.of_node,
1474 "phy-handle", 0);
1475
1464 if (!priv->phy_node) { 1476 if (!priv->phy_node) {
1465 dev_err(&pdev->dev, "no PHY specified\n"); 1477 dev_err(&pdev->dev, "no PHY specified\n");
1466 ret = -ENODEV; 1478 ret = -ENODEV;
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 8550df189ceb..19f7cd02e085 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -151,8 +151,11 @@ config BNX2X_VXLAN
151 151
152config BGMAC 152config BGMAC
153 tristate "BCMA bus GBit core support" 153 tristate "BCMA bus GBit core support"
154 depends on BCMA_HOST_SOC && HAS_DMA && (BCM47XX || ARCH_BCM_5301X) 154 depends on BCMA && BCMA_HOST_SOC
155 depends on HAS_DMA
156 depends on BCM47XX || ARCH_BCM_5301X || COMPILE_TEST
155 select PHYLIB 157 select PHYLIB
158 select FIXED_PHY
156 ---help--- 159 ---help---
157 This driver supports GBit MAC and BCM4706 GBit MAC cores on BCMA bus. 160 This driver supports GBit MAC and BCM4706 GBit MAC cores on BCMA bus.
158 They can be found on BCM47xx SoCs and provide gigabit ethernet. 161 They can be found on BCM47xx SoCs and provide gigabit ethernet.
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 27aa0802d87d..91874d24fd56 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -4896,9 +4896,9 @@ struct c2s_pri_trans_table_entry {
4896 * cfc delete event data 4896 * cfc delete event data
4897 */ 4897 */
4898struct cfc_del_event_data { 4898struct cfc_del_event_data {
4899 u32 cid; 4899 __le32 cid;
4900 u32 reserved0; 4900 __le32 reserved0;
4901 u32 reserved1; 4901 __le32 reserved1;
4902}; 4902};
4903 4903
4904 4904
@@ -5114,15 +5114,9 @@ struct vf_pf_channel_zone_trigger {
5114 * zone that triggers the in-bound interrupt 5114 * zone that triggers the in-bound interrupt
5115 */ 5115 */
5116struct trigger_vf_zone { 5116struct trigger_vf_zone {
5117#if defined(__BIG_ENDIAN)
5118 u16 reserved1;
5119 u8 reserved0;
5120 struct vf_pf_channel_zone_trigger vf_pf_channel;
5121#elif defined(__LITTLE_ENDIAN)
5122 struct vf_pf_channel_zone_trigger vf_pf_channel; 5117 struct vf_pf_channel_zone_trigger vf_pf_channel;
5123 u8 reserved0; 5118 u8 reserved0;
5124 u16 reserved1; 5119 u16 reserved1;
5125#endif
5126 u32 reserved2; 5120 u32 reserved2;
5127}; 5121};
5128 5122
@@ -5207,9 +5201,9 @@ struct e2_integ_data {
5207 * set mac event data 5201 * set mac event data
5208 */ 5202 */
5209struct eth_event_data { 5203struct eth_event_data {
5210 u32 echo; 5204 __le32 echo;
5211 u32 reserved0; 5205 __le32 reserved0;
5212 u32 reserved1; 5206 __le32 reserved1;
5213}; 5207};
5214 5208
5215 5209
@@ -5219,9 +5213,9 @@ struct eth_event_data {
5219struct vf_pf_event_data { 5213struct vf_pf_event_data {
5220 u8 vf_id; 5214 u8 vf_id;
5221 u8 reserved0; 5215 u8 reserved0;
5222 u16 reserved1; 5216 __le16 reserved1;
5223 u32 msg_addr_lo; 5217 __le32 msg_addr_lo;
5224 u32 msg_addr_hi; 5218 __le32 msg_addr_hi;
5225}; 5219};
5226 5220
5227/* 5221/*
@@ -5230,9 +5224,9 @@ struct vf_pf_event_data {
5230struct vf_flr_event_data { 5224struct vf_flr_event_data {
5231 u8 vf_id; 5225 u8 vf_id;
5232 u8 reserved0; 5226 u8 reserved0;
5233 u16 reserved1; 5227 __le16 reserved1;
5234 u32 reserved2; 5228 __le32 reserved2;
5235 u32 reserved3; 5229 __le32 reserved3;
5236}; 5230};
5237 5231
5238/* 5232/*
@@ -5241,9 +5235,9 @@ struct vf_flr_event_data {
5241struct malicious_vf_event_data { 5235struct malicious_vf_event_data {
5242 u8 vf_id; 5236 u8 vf_id;
5243 u8 err_id; 5237 u8 err_id;
5244 u16 reserved1; 5238 __le16 reserved1;
5245 u32 reserved2; 5239 __le32 reserved2;
5246 u32 reserved3; 5240 __le32 reserved3;
5247}; 5241};
5248 5242
5249/* 5243/*
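
The re-typed fields above mark data written by the firmware as little-endian, so the host must convert explicitly before using the values. A minimal userspace sketch of that conversion, assuming a portable get_le32() helper (an illustrative function, not a kernel API):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* portable little-endian read; illustrative, not a kernel helper */
static uint32_t get_le32(const uint8_t *p)
{
        return (uint32_t)p[0] |
               ((uint32_t)p[1] << 8) |
               ((uint32_t)p[2] << 16) |
               ((uint32_t)p[3] << 24);
}

int main(void)
{
        /* a "cid" of 0x00012345 as the firmware lays it out in memory */
        uint8_t raw[4] = { 0x45, 0x23, 0x01, 0x00 };

        printf("cid = 0x%08" PRIx32 "\n", get_le32(raw));  /* same on any host */
        return 0;
}
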
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index d946bba43726..1fb80100e5e7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -6185,26 +6185,80 @@ static int bnx2x_format_ver(u32 num, u8 *str, u16 *len)
6185 shift -= 4; 6185 shift -= 4;
6186 digit = ((num & mask) >> shift); 6186 digit = ((num & mask) >> shift);
6187 if (digit == 0 && remove_leading_zeros) { 6187 if (digit == 0 && remove_leading_zeros) {
6188 mask = mask >> 4; 6188 *str_ptr = '0';
6189 continue; 6189 } else {
6190 } else if (digit < 0xa) 6190 if (digit < 0xa)
6191 *str_ptr = digit + '0'; 6191 *str_ptr = digit + '0';
6192 else 6192 else
6193 *str_ptr = digit - 0xa + 'a'; 6193 *str_ptr = digit - 0xa + 'a';
6194 remove_leading_zeros = 0; 6194
6195 str_ptr++; 6195 remove_leading_zeros = 0;
6196 (*len)--; 6196 str_ptr++;
6197 (*len)--;
6198 }
6197 mask = mask >> 4; 6199 mask = mask >> 4;
6198 if (shift == 4*4) { 6200 if (shift == 4*4) {
6201 if (remove_leading_zeros) {
6202 str_ptr++;
6203 (*len)--;
6204 }
6199 *str_ptr = '.'; 6205 *str_ptr = '.';
6200 str_ptr++; 6206 str_ptr++;
6201 (*len)--; 6207 (*len)--;
6202 remove_leading_zeros = 1; 6208 remove_leading_zeros = 1;
6203 } 6209 }
6204 } 6210 }
6211 if (remove_leading_zeros)
6212 (*len)--;
6205 return 0; 6213 return 0;
6206} 6214}
6207 6215
6216static int bnx2x_3_seq_format_ver(u32 num, u8 *str, u16 *len)
6217{
6218 u8 *str_ptr = str;
6219 u32 mask = 0x00f00000;
6220 u8 shift = 8*3;
6221 u8 digit;
6222 u8 remove_leading_zeros = 1;
6223
6224 if (*len < 10) {
 6225 		/* Need more than 10 chars for this format */
6226 *str_ptr = '\0';
6227 (*len)--;
6228 return -EINVAL;
6229 }
6230
6231 while (shift > 0) {
6232 shift -= 4;
6233 digit = ((num & mask) >> shift);
6234 if (digit == 0 && remove_leading_zeros) {
6235 *str_ptr = '0';
6236 } else {
6237 if (digit < 0xa)
6238 *str_ptr = digit + '0';
6239 else
6240 *str_ptr = digit - 0xa + 'a';
6241
6242 remove_leading_zeros = 0;
6243 str_ptr++;
6244 (*len)--;
6245 }
6246 mask = mask >> 4;
6247 if ((shift == 4*4) || (shift == 4*2)) {
6248 if (remove_leading_zeros) {
6249 str_ptr++;
6250 (*len)--;
6251 }
6252 *str_ptr = '.';
6253 str_ptr++;
6254 (*len)--;
6255 remove_leading_zeros = 1;
6256 }
6257 }
6258 if (remove_leading_zeros)
6259 (*len)--;
6260 return 0;
6261}
6208 6262
6209static int bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len) 6263static int bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len)
6210{ 6264{
@@ -9677,8 +9731,9 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
9677 9731
9678 if (bnx2x_is_8483x_8485x(phy)) { 9732 if (bnx2x_is_8483x_8485x(phy)) {
9679 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1); 9733 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1);
9680 bnx2x_save_spirom_version(bp, port, fw_ver1 & 0xfff, 9734 if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858)
9681 phy->ver_addr); 9735 fw_ver1 &= 0xfff;
9736 bnx2x_save_spirom_version(bp, port, fw_ver1, phy->ver_addr);
9682 } else { 9737 } else {
9683 /* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */ 9738 /* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */
9684 /* (1) set reg 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */ 9739 /* (1) set reg 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
@@ -9732,16 +9787,32 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
9732static void bnx2x_848xx_set_led(struct bnx2x *bp, 9787static void bnx2x_848xx_set_led(struct bnx2x *bp,
9733 struct bnx2x_phy *phy) 9788 struct bnx2x_phy *phy)
9734{ 9789{
9735 u16 val, offset, i; 9790 u16 val, led3_blink_rate, offset, i;
9736 static struct bnx2x_reg_set reg_set[] = { 9791 static struct bnx2x_reg_set reg_set[] = {
9737 {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED1_MASK, 0x0080}, 9792 {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED1_MASK, 0x0080},
9738 {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED2_MASK, 0x0018}, 9793 {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED2_MASK, 0x0018},
9739 {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED3_MASK, 0x0006}, 9794 {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED3_MASK, 0x0006},
9740 {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED3_BLINK, 0x0000},
9741 {MDIO_PMA_DEVAD, MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH, 9795 {MDIO_PMA_DEVAD, MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH,
9742 MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ}, 9796 MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ},
9743 {MDIO_AN_DEVAD, 0xFFFB, 0xFFFD} 9797 {MDIO_AN_DEVAD, 0xFFFB, 0xFFFD}
9744 }; 9798 };
9799
9800 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) {
9801 /* Set LED5 source */
9802 bnx2x_cl45_write(bp, phy,
9803 MDIO_PMA_DEVAD,
9804 MDIO_PMA_REG_8481_LED5_MASK,
9805 0x90);
9806 led3_blink_rate = 0x000f;
9807 } else {
9808 led3_blink_rate = 0x0000;
9809 }
9810 /* Set LED3 BLINK */
9811 bnx2x_cl45_write(bp, phy,
9812 MDIO_PMA_DEVAD,
9813 MDIO_PMA_REG_8481_LED3_BLINK,
9814 led3_blink_rate);
9815
9745 /* PHYC_CTL_LED_CTL */ 9816 /* PHYC_CTL_LED_CTL */
9746 bnx2x_cl45_read(bp, phy, 9817 bnx2x_cl45_read(bp, phy,
9747 MDIO_PMA_DEVAD, 9818 MDIO_PMA_DEVAD,
@@ -9749,6 +9820,9 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp,
9749 val &= 0xFE00; 9820 val &= 0xFE00;
9750 val |= 0x0092; 9821 val |= 0x0092;
9751 9822
9823 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858)
9824 val |= 2 << 12; /* LED5 ON based on source */
9825
9752 bnx2x_cl45_write(bp, phy, 9826 bnx2x_cl45_write(bp, phy,
9753 MDIO_PMA_DEVAD, 9827 MDIO_PMA_DEVAD,
9754 MDIO_PMA_REG_8481_LINK_SIGNAL, val); 9828 MDIO_PMA_REG_8481_LINK_SIGNAL, val);
@@ -9762,10 +9836,17 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp,
9762 else 9836 else
9763 offset = MDIO_PMA_REG_84823_CTL_LED_CTL_1; 9837 offset = MDIO_PMA_REG_84823_CTL_LED_CTL_1;
9764 9838
9765 /* stretch_en for LED3*/ 9839 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858)
9840 val = MDIO_PMA_REG_84858_ALLOW_GPHY_ACT |
9841 MDIO_PMA_REG_84823_LED3_STRETCH_EN;
9842 else
9843 val = MDIO_PMA_REG_84823_LED3_STRETCH_EN;
9844
9845 /* stretch_en for LEDs */
9766 bnx2x_cl45_read_or_write(bp, phy, 9846 bnx2x_cl45_read_or_write(bp, phy,
9767 MDIO_PMA_DEVAD, offset, 9847 MDIO_PMA_DEVAD,
9768 MDIO_PMA_REG_84823_LED3_STRETCH_EN); 9848 offset,
9849 val);
9769} 9850}
9770 9851
9771static void bnx2x_848xx_specific_func(struct bnx2x_phy *phy, 9852static void bnx2x_848xx_specific_func(struct bnx2x_phy *phy,
@@ -9775,7 +9856,7 @@ static void bnx2x_848xx_specific_func(struct bnx2x_phy *phy,
9775 struct bnx2x *bp = params->bp; 9856 struct bnx2x *bp = params->bp;
9776 switch (action) { 9857 switch (action) {
9777 case PHY_INIT: 9858 case PHY_INIT:
9778 if (!bnx2x_is_8483x_8485x(phy)) { 9859 if (bnx2x_is_8483x_8485x(phy)) {
9779 /* Save spirom version */ 9860 /* Save spirom version */
9780 bnx2x_save_848xx_spirom_version(phy, bp, params->port); 9861 bnx2x_save_848xx_spirom_version(phy, bp, params->port);
9781 } 9862 }
@@ -10036,15 +10117,20 @@ static int bnx2x_84858_cmd_hdlr(struct bnx2x_phy *phy,
10036 10117
10037static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy, 10118static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
10038 struct link_params *params, u16 fw_cmd, 10119 struct link_params *params, u16 fw_cmd,
10039 u16 cmd_args[], int argc) 10120 u16 cmd_args[], int argc, int process)
10040{ 10121{
10041 int idx; 10122 int idx;
10042 u16 val; 10123 u16 val;
10043 struct bnx2x *bp = params->bp; 10124 struct bnx2x *bp = params->bp;
10044 /* Write CMD_OPEN_OVERRIDE to STATUS reg */ 10125 int rc = 0;
10045 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, 10126
10046 MDIO_848xx_CMD_HDLR_STATUS, 10127 if (process == PHY84833_MB_PROCESS2) {
10047 PHY84833_STATUS_CMD_OPEN_OVERRIDE); 10128 /* Write CMD_OPEN_OVERRIDE to STATUS reg */
10129 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
10130 MDIO_848xx_CMD_HDLR_STATUS,
10131 PHY84833_STATUS_CMD_OPEN_OVERRIDE);
10132 }
10133
10048 for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) { 10134 for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
10049 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 10135 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
10050 MDIO_848xx_CMD_HDLR_STATUS, &val); 10136 MDIO_848xx_CMD_HDLR_STATUS, &val);
@@ -10054,15 +10140,27 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
10054 } 10140 }
10055 if (idx >= PHY848xx_CMDHDLR_WAIT) { 10141 if (idx >= PHY848xx_CMDHDLR_WAIT) {
10056 DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n"); 10142 DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n");
10143 /* if the status is CMD_COMPLETE_PASS or CMD_COMPLETE_ERROR
10144 * clear the status to CMD_CLEAR_COMPLETE
10145 */
10146 if (val == PHY84833_STATUS_CMD_COMPLETE_PASS ||
10147 val == PHY84833_STATUS_CMD_COMPLETE_ERROR) {
10148 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
10149 MDIO_848xx_CMD_HDLR_STATUS,
10150 PHY84833_STATUS_CMD_CLEAR_COMPLETE);
10151 }
10057 return -EINVAL; 10152 return -EINVAL;
10058 } 10153 }
10059 10154 if (process == PHY84833_MB_PROCESS1 ||
10060 /* Prepare argument(s) and issue command */ 10155 process == PHY84833_MB_PROCESS2) {
10061 for (idx = 0; idx < argc; idx++) { 10156 /* Prepare argument(s) */
10062 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, 10157 for (idx = 0; idx < argc; idx++) {
10063 MDIO_848xx_CMD_HDLR_DATA1 + idx, 10158 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
10064 cmd_args[idx]); 10159 MDIO_848xx_CMD_HDLR_DATA1 + idx,
10160 cmd_args[idx]);
10161 }
10065 } 10162 }
10163
10066 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, 10164 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
10067 MDIO_848xx_CMD_HDLR_COMMAND, fw_cmd); 10165 MDIO_848xx_CMD_HDLR_COMMAND, fw_cmd);
10068 for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) { 10166 for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
@@ -10076,24 +10174,30 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
10076 if ((idx >= PHY848xx_CMDHDLR_WAIT) || 10174 if ((idx >= PHY848xx_CMDHDLR_WAIT) ||
10077 (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) { 10175 (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) {
10078 DP(NETIF_MSG_LINK, "FW cmd failed.\n"); 10176 DP(NETIF_MSG_LINK, "FW cmd failed.\n");
10079 return -EINVAL; 10177 rc = -EINVAL;
10080 } 10178 }
10081 /* Gather returning data */ 10179 if (process == PHY84833_MB_PROCESS3 && rc == 0) {
10082 for (idx = 0; idx < argc; idx++) { 10180 /* Gather returning data */
10083 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 10181 for (idx = 0; idx < argc; idx++) {
10084 MDIO_848xx_CMD_HDLR_DATA1 + idx, 10182 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
10085 &cmd_args[idx]); 10183 MDIO_848xx_CMD_HDLR_DATA1 + idx,
10184 &cmd_args[idx]);
10185 }
10086 } 10186 }
10087 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, 10187 if (val == PHY84833_STATUS_CMD_COMPLETE_ERROR ||
10088 MDIO_848xx_CMD_HDLR_STATUS, 10188 val == PHY84833_STATUS_CMD_COMPLETE_PASS) {
10089 PHY84833_STATUS_CMD_CLEAR_COMPLETE); 10189 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
10090 return 0; 10190 MDIO_848xx_CMD_HDLR_STATUS,
10191 PHY84833_STATUS_CMD_CLEAR_COMPLETE);
10192 }
10193 return rc;
10091} 10194}
10092 10195
10093static int bnx2x_848xx_cmd_hdlr(struct bnx2x_phy *phy, 10196static int bnx2x_848xx_cmd_hdlr(struct bnx2x_phy *phy,
10094 struct link_params *params, 10197 struct link_params *params,
10095 u16 fw_cmd, 10198 u16 fw_cmd,
10096 u16 cmd_args[], int argc) 10199 u16 cmd_args[], int argc,
10200 int process)
10097{ 10201{
10098 struct bnx2x *bp = params->bp; 10202 struct bnx2x *bp = params->bp;
10099 10203
@@ -10106,7 +10210,7 @@ static int bnx2x_848xx_cmd_hdlr(struct bnx2x_phy *phy,
10106 argc); 10210 argc);
10107 } else { 10211 } else {
10108 return bnx2x_84833_cmd_hdlr(phy, params, fw_cmd, cmd_args, 10212 return bnx2x_84833_cmd_hdlr(phy, params, fw_cmd, cmd_args,
10109 argc); 10213 argc, process);
10110 } 10214 }
10111} 10215}
10112 10216
@@ -10133,7 +10237,7 @@ static int bnx2x_848xx_pair_swap_cfg(struct bnx2x_phy *phy,
10133 10237
10134 status = bnx2x_848xx_cmd_hdlr(phy, params, 10238 status = bnx2x_848xx_cmd_hdlr(phy, params,
10135 PHY848xx_CMD_SET_PAIR_SWAP, data, 10239 PHY848xx_CMD_SET_PAIR_SWAP, data,
10136 PHY848xx_CMDHDLR_MAX_ARGS); 10240 2, PHY84833_MB_PROCESS2);
10137 if (status == 0) 10241 if (status == 0)
10138 DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data[1]); 10242 DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data[1]);
10139 10243
@@ -10222,8 +10326,8 @@ static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy,
10222 DP(NETIF_MSG_LINK, "Don't Advertise 10GBase-T EEE\n"); 10326 DP(NETIF_MSG_LINK, "Don't Advertise 10GBase-T EEE\n");
10223 10327
10224 /* Prevent Phy from working in EEE and advertising it */ 10328 /* Prevent Phy from working in EEE and advertising it */
10225 rc = bnx2x_848xx_cmd_hdlr(phy, params, 10329 rc = bnx2x_848xx_cmd_hdlr(phy, params, PHY848xx_CMD_SET_EEE_MODE,
10226 PHY848xx_CMD_SET_EEE_MODE, &cmd_args, 1); 10330 &cmd_args, 1, PHY84833_MB_PROCESS1);
10227 if (rc) { 10331 if (rc) {
10228 DP(NETIF_MSG_LINK, "EEE disable failed.\n"); 10332 DP(NETIF_MSG_LINK, "EEE disable failed.\n");
10229 return rc; 10333 return rc;
@@ -10240,8 +10344,8 @@ static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy,
10240 struct bnx2x *bp = params->bp; 10344 struct bnx2x *bp = params->bp;
10241 u16 cmd_args = 1; 10345 u16 cmd_args = 1;
10242 10346
10243 rc = bnx2x_848xx_cmd_hdlr(phy, params, 10347 rc = bnx2x_848xx_cmd_hdlr(phy, params, PHY848xx_CMD_SET_EEE_MODE,
10244 PHY848xx_CMD_SET_EEE_MODE, &cmd_args, 1); 10348 &cmd_args, 1, PHY84833_MB_PROCESS1);
10245 if (rc) { 10349 if (rc) {
10246 DP(NETIF_MSG_LINK, "EEE enable failed.\n"); 10350 DP(NETIF_MSG_LINK, "EEE enable failed.\n");
10247 return rc; 10351 return rc;
@@ -10362,7 +10466,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
10362 cmd_args[3] = PHY84833_CONSTANT_LATENCY; 10466 cmd_args[3] = PHY84833_CONSTANT_LATENCY;
10363 rc = bnx2x_848xx_cmd_hdlr(phy, params, 10467 rc = bnx2x_848xx_cmd_hdlr(phy, params,
10364 PHY848xx_CMD_SET_EEE_MODE, cmd_args, 10468 PHY848xx_CMD_SET_EEE_MODE, cmd_args,
10365 PHY848xx_CMDHDLR_MAX_ARGS); 10469 4, PHY84833_MB_PROCESS1);
10366 if (rc) 10470 if (rc)
10367 DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n"); 10471 DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n");
10368 } 10472 }
@@ -10416,6 +10520,32 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
10416 vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK; 10520 vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK;
10417 } 10521 }
10418 10522
10523 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
10524 /* Additional settings for jumbo packets in 1000BASE-T mode */
10525 /* Allow rx extended length */
10526 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
10527 MDIO_AN_REG_8481_AUX_CTRL, &val);
10528 val |= 0x4000;
10529 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
10530 MDIO_AN_REG_8481_AUX_CTRL, val);
10531 /* TX FIFO Elasticity LSB */
10532 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
10533 MDIO_AN_REG_8481_1G_100T_EXT_CTRL, &val);
10534 val |= 0x1;
10535 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
10536 MDIO_AN_REG_8481_1G_100T_EXT_CTRL, val);
10537 /* TX FIFO Elasticity MSB */
10538 /* Enable expansion register 0x46 (Pattern Generator status) */
10539 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
10540 MDIO_AN_REG_8481_EXPANSION_REG_ACCESS, 0xf46);
10541
10542 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
10543 MDIO_AN_REG_8481_EXPANSION_REG_RD_RW, &val);
10544 val |= 0x4000;
10545 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
10546 MDIO_AN_REG_8481_EXPANSION_REG_RD_RW, val);
10547 }
10548
10419 if (bnx2x_is_8483x_8485x(phy)) { 10549 if (bnx2x_is_8483x_8485x(phy)) {
10420 /* Bring PHY out of super isolate mode as the final step. */ 10550 /* Bring PHY out of super isolate mode as the final step. */
10421 bnx2x_cl45_read_and_write(bp, phy, 10551 bnx2x_cl45_read_and_write(bp, phy,
@@ -10555,6 +10685,17 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
10555 return link_up; 10685 return link_up;
10556} 10686}
10557 10687
10688static int bnx2x_8485x_format_ver(u32 raw_ver, u8 *str, u16 *len)
10689{
10690 int status = 0;
10691 u32 num;
10692
10693 num = ((raw_ver & 0xF80) >> 7) << 16 | ((raw_ver & 0x7F) << 8) |
10694 ((raw_ver & 0xF000) >> 12);
10695 status = bnx2x_3_seq_format_ver(num, str, len);
10696 return status;
10697}
10698
10558static int bnx2x_848xx_format_ver(u32 raw_ver, u8 *str, u16 *len) 10699static int bnx2x_848xx_format_ver(u32 raw_ver, u8 *str, u16 *len)
10559{ 10700{
10560 int status = 0; 10701 int status = 0;
@@ -10651,10 +10792,25 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
10651 0x0); 10792 0x0);
10652 10793
10653 } else { 10794 } else {
10795 /* LED 1 OFF */
10654 bnx2x_cl45_write(bp, phy, 10796 bnx2x_cl45_write(bp, phy,
10655 MDIO_PMA_DEVAD, 10797 MDIO_PMA_DEVAD,
10656 MDIO_PMA_REG_8481_LED1_MASK, 10798 MDIO_PMA_REG_8481_LED1_MASK,
10657 0x0); 10799 0x0);
10800
10801 if (phy->type ==
10802 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) {
10803 /* LED 2 OFF */
10804 bnx2x_cl45_write(bp, phy,
10805 MDIO_PMA_DEVAD,
10806 MDIO_PMA_REG_8481_LED2_MASK,
10807 0x0);
10808 /* LED 3 OFF */
10809 bnx2x_cl45_write(bp, phy,
10810 MDIO_PMA_DEVAD,
10811 MDIO_PMA_REG_8481_LED3_MASK,
10812 0x0);
10813 }
10658 } 10814 }
10659 break; 10815 break;
10660 case LED_MODE_FRONT_PANEL_OFF: 10816 case LED_MODE_FRONT_PANEL_OFF:
@@ -10713,6 +10869,19 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
10713 MDIO_PMA_REG_8481_SIGNAL_MASK, 10869 MDIO_PMA_REG_8481_SIGNAL_MASK,
10714 0x0); 10870 0x0);
10715 } 10871 }
10872 if (phy->type ==
10873 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) {
10874 /* LED 2 OFF */
10875 bnx2x_cl45_write(bp, phy,
10876 MDIO_PMA_DEVAD,
10877 MDIO_PMA_REG_8481_LED2_MASK,
10878 0x0);
10879 /* LED 3 OFF */
10880 bnx2x_cl45_write(bp, phy,
10881 MDIO_PMA_DEVAD,
10882 MDIO_PMA_REG_8481_LED3_MASK,
10883 0x0);
10884 }
10716 } 10885 }
10717 break; 10886 break;
10718 case LED_MODE_ON: 10887 case LED_MODE_ON:
@@ -10776,6 +10945,25 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
10776 params->port*4, 10945 params->port*4,
10777 NIG_MASK_MI_INT); 10946 NIG_MASK_MI_INT);
10778 } 10947 }
10948 }
10949 if (phy->type ==
10950 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) {
10951 		/* Tell LED3 to stay constantly on */
10952 bnx2x_cl45_read(bp, phy,
10953 MDIO_PMA_DEVAD,
10954 MDIO_PMA_REG_8481_LINK_SIGNAL,
10955 &val);
10956 val &= ~(7<<6);
10957 val |= (2<<6); /* A83B[8:6]= 2 */
10958 bnx2x_cl45_write(bp, phy,
10959 MDIO_PMA_DEVAD,
10960 MDIO_PMA_REG_8481_LINK_SIGNAL,
10961 val);
10962 bnx2x_cl45_write(bp, phy,
10963 MDIO_PMA_DEVAD,
10964 MDIO_PMA_REG_8481_LED3_MASK,
10965 0x20);
10966 } else {
10779 bnx2x_cl45_write(bp, phy, 10967 bnx2x_cl45_write(bp, phy,
10780 MDIO_PMA_DEVAD, 10968 MDIO_PMA_DEVAD,
10781 MDIO_PMA_REG_8481_SIGNAL_MASK, 10969 MDIO_PMA_REG_8481_SIGNAL_MASK,
@@ -10854,6 +11042,17 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
10854 MDIO_PMA_REG_8481_LINK_SIGNAL, 11042 MDIO_PMA_REG_8481_LINK_SIGNAL,
10855 val); 11043 val);
10856 if (phy->type == 11044 if (phy->type ==
11045 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) {
11046 bnx2x_cl45_write(bp, phy,
11047 MDIO_PMA_DEVAD,
11048 MDIO_PMA_REG_8481_LED2_MASK,
11049 0x18);
11050 bnx2x_cl45_write(bp, phy,
11051 MDIO_PMA_DEVAD,
11052 MDIO_PMA_REG_8481_LED3_MASK,
11053 0x06);
11054 }
11055 if (phy->type ==
10857 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) { 11056 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) {
10858 /* Restore LED4 source to external link, 11057 /* Restore LED4 source to external link,
10859 * and re-enable interrupts. 11058 * and re-enable interrupts.
@@ -11982,7 +12181,7 @@ static const struct bnx2x_phy phy_84858 = {
11982 .read_status = (read_status_t)bnx2x_848xx_read_status, 12181 .read_status = (read_status_t)bnx2x_848xx_read_status,
11983 .link_reset = (link_reset_t)bnx2x_848x3_link_reset, 12182 .link_reset = (link_reset_t)bnx2x_848x3_link_reset,
11984 .config_loopback = (config_loopback_t)NULL, 12183 .config_loopback = (config_loopback_t)NULL,
11985 .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver, 12184 .format_fw_ver = (format_fw_ver_t)bnx2x_8485x_format_ver,
11986 .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy, 12185 .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy,
11987 .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led, 12186 .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
11988 .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func 12187 .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
@@ -13807,8 +14006,10 @@ void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
13807 if (CHIP_IS_E3(bp)) { 14006 if (CHIP_IS_E3(bp)) {
13808 struct bnx2x_phy *phy = &params->phy[INT_PHY]; 14007 struct bnx2x_phy *phy = &params->phy[INT_PHY];
13809 bnx2x_set_aer_mmd(params, phy); 14008 bnx2x_set_aer_mmd(params, phy);
13810 if ((phy->supported & SUPPORTED_20000baseKR2_Full) && 14009 if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
13811 (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) 14010 (phy->speed_cap_mask &
14011 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) ||
14012 (phy->req_line_speed == SPEED_20000))
13812 bnx2x_check_kr2_wa(params, vars, phy); 14013 bnx2x_check_kr2_wa(params, vars, phy);
13813 bnx2x_check_over_curr(params, vars); 14014 bnx2x_check_over_curr(params, vars);
13814 if (vars->rx_tx_asic_rst) 14015 if (vars->rx_tx_asic_rst)
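
A minimal userspace sketch of the 84858 firmware-version handling added above: bnx2x_8485x_format_ver() repacks three bit-fields of the raw SPIROM value into one 24-bit number, which is then printed as three dot-separated hex fields. The masks and shifts below are the ones visible in the hunk; the printf formatting is an illustrative simplification of the driver's character-at-a-time string builder.

#include <stdint.h>
#include <stdio.h>

/* bit rearrangement taken from bnx2x_8485x_format_ver() above */
static uint32_t repack(uint32_t raw_ver)
{
        return (((raw_ver & 0xF80) >> 7) << 16) |
               ((raw_ver & 0x7F) << 8) |
               ((raw_ver & 0xF000) >> 12);
}

int main(void)
{
        uint32_t num = repack(0x1234);  /* illustrative raw value */

        /* each byte of num is one dotted field, printed as hex digits */
        printf("%x.%x.%x\n",
               (unsigned)((num >> 16) & 0xff),
               (unsigned)((num >> 8) & 0xff),
               (unsigned)(num & 0xff));
        return 0;
}
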
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 6c4e3a69976f..2bf9c871144f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -5280,14 +5280,14 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
5280{ 5280{
5281 unsigned long ramrod_flags = 0; 5281 unsigned long ramrod_flags = 0;
5282 int rc = 0; 5282 int rc = 0;
5283 u32 cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK; 5283 u32 echo = le32_to_cpu(elem->message.data.eth_event.echo);
5284 u32 cid = echo & BNX2X_SWCID_MASK;
5284 struct bnx2x_vlan_mac_obj *vlan_mac_obj; 5285 struct bnx2x_vlan_mac_obj *vlan_mac_obj;
5285 5286
5286 /* Always push next commands out, don't wait here */ 5287 /* Always push next commands out, don't wait here */
5287 __set_bit(RAMROD_CONT, &ramrod_flags); 5288 __set_bit(RAMROD_CONT, &ramrod_flags);
5288 5289
5289 switch (le32_to_cpu((__force __le32)elem->message.data.eth_event.echo) 5290 switch (echo >> BNX2X_SWCID_SHIFT) {
5290 >> BNX2X_SWCID_SHIFT) {
5291 case BNX2X_FILTER_MAC_PENDING: 5291 case BNX2X_FILTER_MAC_PENDING:
5292 DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n"); 5292 DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
5293 if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp))) 5293 if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp)))
@@ -5308,8 +5308,7 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
5308 bnx2x_handle_mcast_eqe(bp); 5308 bnx2x_handle_mcast_eqe(bp);
5309 return; 5309 return;
5310 default: 5310 default:
5311 BNX2X_ERR("Unsupported classification command: %d\n", 5311 BNX2X_ERR("Unsupported classification command: 0x%x\n", echo);
5312 elem->message.data.eth_event.echo);
5313 return; 5312 return;
5314 } 5313 }
5315 5314
@@ -5478,9 +5477,6 @@ static void bnx2x_eq_int(struct bnx2x *bp)
5478 goto next_spqe; 5477 goto next_spqe;
5479 } 5478 }
5480 5479
5481 /* elem CID originates from FW; actually LE */
5482 cid = SW_CID((__force __le32)
5483 elem->message.data.cfc_del_event.cid);
5484 opcode = elem->message.opcode; 5480 opcode = elem->message.opcode;
5485 5481
5486 /* handle eq element */ 5482 /* handle eq element */
@@ -5503,6 +5499,10 @@ static void bnx2x_eq_int(struct bnx2x *bp)
5503 * we may want to verify here that the bp state is 5499 * we may want to verify here that the bp state is
5504 * HALTING 5500 * HALTING
5505 */ 5501 */
5502
5503 /* elem CID originates from FW; actually LE */
5504 cid = SW_CID(elem->message.data.cfc_del_event.cid);
5505
5506 DP(BNX2X_MSG_SP, 5506 DP(BNX2X_MSG_SP,
5507 "got delete ramrod for MULTI[%d]\n", cid); 5507 "got delete ramrod for MULTI[%d]\n", cid);
5508 5508
@@ -5596,10 +5596,8 @@ static void bnx2x_eq_int(struct bnx2x *bp)
5596 BNX2X_STATE_OPENING_WAIT4_PORT): 5596 BNX2X_STATE_OPENING_WAIT4_PORT):
5597 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | 5597 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
5598 BNX2X_STATE_CLOSING_WAIT4_HALT): 5598 BNX2X_STATE_CLOSING_WAIT4_HALT):
5599 cid = elem->message.data.eth_event.echo &
5600 BNX2X_SWCID_MASK;
5601 DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n", 5599 DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n",
5602 cid); 5600 SW_CID(elem->message.data.eth_event.echo));
5603 rss_raw->clear_pending(rss_raw); 5601 rss_raw->clear_pending(rss_raw);
5604 break; 5602 break;
5605 5603
@@ -5684,7 +5682,7 @@ static void bnx2x_sp_task(struct work_struct *work)
5684 if (status & BNX2X_DEF_SB_IDX) { 5682 if (status & BNX2X_DEF_SB_IDX) {
5685 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); 5683 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
5686 5684
5687 if (FCOE_INIT(bp) && 5685 if (FCOE_INIT(bp) &&
5688 (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { 5686 (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
5689 /* Prevent local bottom-halves from running as 5687 /* Prevent local bottom-halves from running as
5690 * we are going to change the local NAPI list. 5688 * we are going to change the local NAPI list.
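
A minimal userspace sketch of the echo handling above: the event's echo word is converted from little-endian once, then the low bits carry the software CID and the high bits carry the pending-command type. SWCID_SHIFT and SWCID_MASK below are illustrative values, not the driver's BNX2X_* constants.

#include <stdio.h>

#define SWCID_SHIFT     17                      /* illustrative split point */
#define SWCID_MASK      ((1u << SWCID_SHIFT) - 1)

int main(void)
{
        unsigned int echo = (3u << SWCID_SHIFT) | 42;   /* command 3, cid 42 */
        unsigned int cid = echo & SWCID_MASK;   /* low bits: software CID */
        unsigned int cmd = echo >> SWCID_SHIFT; /* high bits: pending command */

        printf("cmd=%u cid=%u\n", cmd, cid);
        return 0;
}
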
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 4dead49bd5cb..a43dea259b12 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -7296,6 +7296,8 @@ Theotherbitsarereservedandshouldbezero*/
7296#define MDIO_PMA_REG_84823_CTL_LED_CTL_1 0xa8e3 7296#define MDIO_PMA_REG_84823_CTL_LED_CTL_1 0xa8e3
7297#define MDIO_PMA_REG_84833_CTL_LED_CTL_1 0xa8ec 7297#define MDIO_PMA_REG_84833_CTL_LED_CTL_1 0xa8ec
7298#define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080 7298#define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080
7299/* BCM84858 only */
7300#define MDIO_PMA_REG_84858_ALLOW_GPHY_ACT 0x8000
7299 7301
7300/* BCM84833 only */ 7302/* BCM84833 only */
7301#define MDIO_84833_TOP_CFG_FW_REV 0x400f 7303#define MDIO_84833_TOP_CFG_FW_REV 0x400f
@@ -7337,6 +7339,10 @@ Theotherbitsarereservedandshouldbezero*/
7337#define PHY84833_STATUS_CMD_NOT_OPEN_FOR_CMDS 0x0040 7339#define PHY84833_STATUS_CMD_NOT_OPEN_FOR_CMDS 0x0040
7338#define PHY84833_STATUS_CMD_CLEAR_COMPLETE 0x0080 7340#define PHY84833_STATUS_CMD_CLEAR_COMPLETE 0x0080
7339#define PHY84833_STATUS_CMD_OPEN_OVERRIDE 0xa5a5 7341#define PHY84833_STATUS_CMD_OPEN_OVERRIDE 0xa5a5
7342/* Mailbox Process */
7343#define PHY84833_MB_PROCESS1 1
7344#define PHY84833_MB_PROCESS2 2
7345#define PHY84833_MB_PROCESS3 3
7340 7346
7341/* Mailbox status set used by 84858 only */ 7347/* Mailbox status set used by 84858 only */
7342#define PHY84858_STATUS_CMD_RECEIVED 0x0001 7348#define PHY84858_STATUS_CMD_RECEIVED 0x0001
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 9d027348cd09..632daff117d3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1672,11 +1672,12 @@ void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
1672{ 1672{
1673 unsigned long ramrod_flags = 0; 1673 unsigned long ramrod_flags = 0;
1674 int rc = 0; 1674 int rc = 0;
1675 u32 echo = le32_to_cpu(elem->message.data.eth_event.echo);
1675 1676
1676 /* Always push next commands out, don't wait here */ 1677 /* Always push next commands out, don't wait here */
1677 set_bit(RAMROD_CONT, &ramrod_flags); 1678 set_bit(RAMROD_CONT, &ramrod_flags);
1678 1679
1679 switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) { 1680 switch (echo >> BNX2X_SWCID_SHIFT) {
1680 case BNX2X_FILTER_MAC_PENDING: 1681 case BNX2X_FILTER_MAC_PENDING:
1681 rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem, 1682 rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem,
1682 &ramrod_flags); 1683 &ramrod_flags);
@@ -1686,8 +1687,7 @@ void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
1686 &ramrod_flags); 1687 &ramrod_flags);
1687 break; 1688 break;
1688 default: 1689 default:
1689 BNX2X_ERR("Unsupported classification command: %d\n", 1690 BNX2X_ERR("Unsupported classification command: 0x%x\n", echo);
1690 elem->message.data.eth_event.echo);
1691 return; 1691 return;
1692 } 1692 }
1693 if (rc < 0) 1693 if (rc < 0)
@@ -1747,16 +1747,14 @@ int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
1747 1747
1748 switch (opcode) { 1748 switch (opcode) {
1749 case EVENT_RING_OPCODE_CFC_DEL: 1749 case EVENT_RING_OPCODE_CFC_DEL:
1750 cid = SW_CID((__force __le32) 1750 cid = SW_CID(elem->message.data.cfc_del_event.cid);
1751 elem->message.data.cfc_del_event.cid);
1752 DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid); 1751 DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
1753 break; 1752 break;
1754 case EVENT_RING_OPCODE_CLASSIFICATION_RULES: 1753 case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
1755 case EVENT_RING_OPCODE_MULTICAST_RULES: 1754 case EVENT_RING_OPCODE_MULTICAST_RULES:
1756 case EVENT_RING_OPCODE_FILTERS_RULES: 1755 case EVENT_RING_OPCODE_FILTERS_RULES:
1757 case EVENT_RING_OPCODE_RSS_UPDATE_RULES: 1756 case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
1758 cid = (elem->message.data.eth_event.echo & 1757 cid = SW_CID(elem->message.data.eth_event.echo);
1759 BNX2X_SWCID_MASK);
1760 DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid); 1758 DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
1761 break; 1759 break;
1762 case EVENT_RING_OPCODE_VF_FLR: 1760 case EVENT_RING_OPCODE_VF_FLR:
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 1374e5394a79..bfae300cf25f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -2187,8 +2187,10 @@ void bnx2x_vf_mbx_schedule(struct bnx2x *bp,
2187 2187
2188 /* Update VFDB with current message and schedule its handling */ 2188 /* Update VFDB with current message and schedule its handling */
2189 mutex_lock(&BP_VFDB(bp)->event_mutex); 2189 mutex_lock(&BP_VFDB(bp)->event_mutex);
2190 BP_VF_MBX(bp, vf_idx)->vf_addr_hi = vfpf_event->msg_addr_hi; 2190 BP_VF_MBX(bp, vf_idx)->vf_addr_hi =
2191 BP_VF_MBX(bp, vf_idx)->vf_addr_lo = vfpf_event->msg_addr_lo; 2191 le32_to_cpu(vfpf_event->msg_addr_hi);
2192 BP_VF_MBX(bp, vf_idx)->vf_addr_lo =
2193 le32_to_cpu(vfpf_event->msg_addr_lo);
2192 BP_VFDB(bp)->event_occur |= (1ULL << vf_idx); 2194 BP_VFDB(bp)->event_occur |= (1ULL << vf_idx);
2193 mutex_unlock(&BP_VFDB(bp)->event_mutex); 2195 mutex_unlock(&BP_VFDB(bp)->event_mutex);
2194 2196
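
A minimal userspace sketch of the mailbox-address fix above: msg_addr_hi and msg_addr_lo arrive little-endian from the firmware, are converted to host order, and together form one 64-bit address. get_le32() is an illustrative portable helper, not a kernel API.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* portable little-endian read; illustrative, not a kernel helper */
static uint32_t get_le32(const uint8_t *p)
{
        return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
               ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
        uint8_t lo[4] = { 0x00, 0x10, 0x00, 0x00 };     /* 0x00001000 */
        uint8_t hi[4] = { 0x01, 0x00, 0x00, 0x00 };     /* 0x00000001 */
        uint64_t addr = ((uint64_t)get_le32(hi) << 32) | get_le32(lo);

        printf("msg addr = 0x%016" PRIx64 "\n", addr);  /* 0x0000000100001000 */
        return 0;
}
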
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index df835f5e46d8..82f191382989 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -69,7 +69,7 @@ MODULE_VERSION(DRV_MODULE_VERSION);
69#define BNXT_RX_DMA_OFFSET NET_SKB_PAD 69#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
70#define BNXT_RX_COPY_THRESH 256 70#define BNXT_RX_COPY_THRESH 256
71 71
72#define BNXT_TX_PUSH_THRESH 92 72#define BNXT_TX_PUSH_THRESH 164
73 73
74enum board_idx { 74enum board_idx {
75 BCM57301, 75 BCM57301,
@@ -223,11 +223,12 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
223 } 223 }
224 224
225 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) { 225 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
226 struct tx_push_bd *push = txr->tx_push; 226 struct tx_push_buffer *tx_push_buf = txr->tx_push;
227 struct tx_bd *tx_push = &push->txbd1; 227 struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
228 struct tx_bd_ext *tx_push1 = &push->txbd2; 228 struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
229 void *pdata = tx_push1 + 1; 229 void *pdata = tx_push_buf->data;
230 int j; 230 u64 *end;
231 int j, push_len;
231 232
232 /* Set COAL_NOW to be ready quickly for the next push */ 233 /* Set COAL_NOW to be ready quickly for the next push */
233 tx_push->tx_bd_len_flags_type = 234 tx_push->tx_bd_len_flags_type =
@@ -247,6 +248,10 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
247 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); 248 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
248 tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action); 249 tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
249 250
251 end = pdata + length;
252 end = PTR_ALIGN(end, 8) - 1;
253 *end = 0;
254
250 skb_copy_from_linear_data(skb, pdata, len); 255 skb_copy_from_linear_data(skb, pdata, len);
251 pdata += len; 256 pdata += len;
252 for (j = 0; j < last_frag; j++) { 257 for (j = 0; j < last_frag; j++) {
@@ -261,22 +266,29 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
261 pdata += skb_frag_size(frag); 266 pdata += skb_frag_size(frag);
262 } 267 }
263 268
264 memcpy(txbd, tx_push, sizeof(*txbd)); 269 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
270 txbd->tx_bd_haddr = txr->data_mapping;
265 prod = NEXT_TX(prod); 271 prod = NEXT_TX(prod);
266 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; 272 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
267 memcpy(txbd, tx_push1, sizeof(*txbd)); 273 memcpy(txbd, tx_push1, sizeof(*txbd));
268 prod = NEXT_TX(prod); 274 prod = NEXT_TX(prod);
269 push->doorbell = 275 tx_push->doorbell =
270 cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod); 276 cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
271 txr->tx_prod = prod; 277 txr->tx_prod = prod;
272 278
273 netdev_tx_sent_queue(txq, skb->len); 279 netdev_tx_sent_queue(txq, skb->len);
274 280
275 __iowrite64_copy(txr->tx_doorbell, push, 281 push_len = (length + sizeof(*tx_push) + 7) / 8;
276 (length + sizeof(*push) + 8) / 8); 282 if (push_len > 16) {
283 __iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16);
284 __iowrite64_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
285 push_len - 16);
286 } else {
287 __iowrite64_copy(txr->tx_doorbell, tx_push_buf,
288 push_len);
289 }
277 290
278 tx_buf->is_push = 1; 291 tx_buf->is_push = 1;
279
280 goto tx_done; 292 goto tx_done;
281 } 293 }
282 294
@@ -1490,10 +1502,11 @@ static void bnxt_free_tx_skbs(struct bnxt *bp)
1490 1502
1491 last = tx_buf->nr_frags; 1503 last = tx_buf->nr_frags;
1492 j += 2; 1504 j += 2;
1493 for (k = 0; k < last; k++, j = NEXT_TX(j)) { 1505 for (k = 0; k < last; k++, j++) {
1506 int ring_idx = j & bp->tx_ring_mask;
1494 skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; 1507 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
1495 1508
1496 tx_buf = &txr->tx_buf_ring[j]; 1509 tx_buf = &txr->tx_buf_ring[ring_idx];
1497 dma_unmap_page( 1510 dma_unmap_page(
1498 &pdev->dev, 1511 &pdev->dev,
1499 dma_unmap_addr(tx_buf, mapping), 1512 dma_unmap_addr(tx_buf, mapping),
@@ -1752,7 +1765,7 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
1752 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) + 1765 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
1753 bp->tx_push_thresh); 1766 bp->tx_push_thresh);
1754 1767
1755 if (push_size > 128) { 1768 if (push_size > 256) {
1756 push_size = 0; 1769 push_size = 0;
1757 bp->tx_push_thresh = 0; 1770 bp->tx_push_thresh = 0;
1758 } 1771 }
@@ -1771,7 +1784,6 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
1771 return rc; 1784 return rc;
1772 1785
1773 if (bp->tx_push_size) { 1786 if (bp->tx_push_size) {
1774 struct tx_bd *txbd;
1775 dma_addr_t mapping; 1787 dma_addr_t mapping;
1776 1788
1777 /* One pre-allocated DMA buffer to backup 1789 /* One pre-allocated DMA buffer to backup
@@ -1785,13 +1797,11 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
1785 if (!txr->tx_push) 1797 if (!txr->tx_push)
1786 return -ENOMEM; 1798 return -ENOMEM;
1787 1799
1788 txbd = &txr->tx_push->txbd1;
1789
1790 mapping = txr->tx_push_mapping + 1800 mapping = txr->tx_push_mapping +
1791 sizeof(struct tx_push_bd); 1801 sizeof(struct tx_push_bd);
1792 txbd->tx_bd_haddr = cpu_to_le64(mapping); 1802 txr->data_mapping = cpu_to_le64(mapping);
1793 1803
1794 memset(txbd + 1, 0, sizeof(struct tx_bd_ext)); 1804 memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
1795 } 1805 }
1796 ring->queue_id = bp->q_info[j].queue_id; 1806 ring->queue_id = bp->q_info[j].queue_id;
1797 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1)) 1807 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
@@ -3406,7 +3416,7 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp,
3406 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr; 3416 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
3407 u16 error_code; 3417 u16 error_code;
3408 3418
3409 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, -1, -1); 3419 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
3410 req.ring_type = ring_type; 3420 req.ring_type = ring_type;
3411 req.ring_id = cpu_to_le16(ring->fw_ring_id); 3421 req.ring_id = cpu_to_le16(ring->fw_ring_id);
3412 3422
@@ -4545,20 +4555,18 @@ static int bnxt_update_phy_setting(struct bnxt *bp)
4545 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && 4555 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
4546 link_info->force_pause_setting != link_info->req_flow_ctrl) 4556 link_info->force_pause_setting != link_info->req_flow_ctrl)
4547 update_pause = true; 4557 update_pause = true;
4548 if (link_info->req_duplex != link_info->duplex_setting)
4549 update_link = true;
4550 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 4558 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
4551 if (BNXT_AUTO_MODE(link_info->auto_mode)) 4559 if (BNXT_AUTO_MODE(link_info->auto_mode))
4552 update_link = true; 4560 update_link = true;
4553 if (link_info->req_link_speed != link_info->force_link_speed) 4561 if (link_info->req_link_speed != link_info->force_link_speed)
4554 update_link = true; 4562 update_link = true;
4563 if (link_info->req_duplex != link_info->duplex_setting)
4564 update_link = true;
4555 } else { 4565 } else {
4556 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE) 4566 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
4557 update_link = true; 4567 update_link = true;
4558 if (link_info->advertising != link_info->auto_link_speeds) 4568 if (link_info->advertising != link_info->auto_link_speeds)
4559 update_link = true; 4569 update_link = true;
4560 if (link_info->req_link_speed != link_info->auto_link_speed)
4561 update_link = true;
4562 } 4570 }
4563 4571
4564 if (update_link) 4572 if (update_link)
@@ -4635,7 +4643,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
4635 if (link_re_init) { 4643 if (link_re_init) {
4636 rc = bnxt_update_phy_setting(bp); 4644 rc = bnxt_update_phy_setting(bp);
4637 if (rc) 4645 if (rc)
4638 goto open_err; 4646 netdev_warn(bp->dev, "failed to update phy settings\n");
4639 } 4647 }
4640 4648
4641 if (irq_re_init) { 4649 if (irq_re_init) {
@@ -4653,6 +4661,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
4653 /* Enable TX queues */ 4661 /* Enable TX queues */
4654 bnxt_tx_enable(bp); 4662 bnxt_tx_enable(bp);
4655 mod_timer(&bp->timer, jiffies + bp->current_interval); 4663 mod_timer(&bp->timer, jiffies + bp->current_interval);
4664 bnxt_update_link(bp, true);
4656 4665
4657 return 0; 4666 return 0;
4658 4667
@@ -4819,8 +4828,6 @@ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
4819 4828
4820 stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts); 4829 stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
4821 4830
4822 stats->rx_dropped += le64_to_cpu(hw_stats->rx_drop_pkts);
4823
4824 stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts); 4831 stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
4825 } 4832 }
4826 4833
@@ -5671,22 +5678,16 @@ static int bnxt_probe_phy(struct bnxt *bp)
5671 } 5678 }
5672 5679
 5673 	/* initialize the ethtool setting copy with NVM settings */	 5680 	/* initialize the ethtool setting copy with NVM settings */
5674 if (BNXT_AUTO_MODE(link_info->auto_mode)) 5681 if (BNXT_AUTO_MODE(link_info->auto_mode)) {
5675 link_info->autoneg |= BNXT_AUTONEG_SPEED; 5682 link_info->autoneg = BNXT_AUTONEG_SPEED |
5676 5683 BNXT_AUTONEG_FLOW_CTRL;
5677 if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) { 5684 link_info->advertising = link_info->auto_link_speeds;
5678 if (link_info->auto_pause_setting == BNXT_LINK_PAUSE_BOTH)
5679 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
5680 link_info->req_flow_ctrl = link_info->auto_pause_setting; 5685 link_info->req_flow_ctrl = link_info->auto_pause_setting;
5681 } else if (link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) { 5686 } else {
5687 link_info->req_link_speed = link_info->force_link_speed;
5688 link_info->req_duplex = link_info->duplex_setting;
5682 link_info->req_flow_ctrl = link_info->force_pause_setting; 5689 link_info->req_flow_ctrl = link_info->force_pause_setting;
5683 } 5690 }
5684 link_info->req_duplex = link_info->duplex_setting;
5685 if (link_info->autoneg & BNXT_AUTONEG_SPEED)
5686 link_info->req_link_speed = link_info->auto_link_speed;
5687 else
5688 link_info->req_link_speed = link_info->force_link_speed;
5689 link_info->advertising = link_info->auto_link_speeds;
5690 snprintf(phy_ver, PHY_VER_STR_LEN, " ph %d.%d.%d", 5691 snprintf(phy_ver, PHY_VER_STR_LEN, " ph %d.%d.%d",
5691 link_info->phy_ver[0], 5692 link_info->phy_ver[0],
5692 link_info->phy_ver[1], 5693 link_info->phy_ver[1],
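
A minimal userspace sketch of the push-length math in the bnxt_start_xmit() hunk above: the packet bytes plus the push-BD header are rounded up to 64-bit words, and a copy longer than 16 words is split into a 16-word write followed by the remainder. PUSH_BD_WORDS and the byte count are illustrative, not the hardware's sizes.

#include <stdio.h>

#define PUSH_BD_WORDS   2       /* illustrative header size, in 64-bit words */

/* bytes of packet data plus header, rounded up to 64-bit words */
static int push_len_words(int length)
{
        return (length + PUSH_BD_WORDS * 8 + 7) / 8;
}

int main(void)
{
        int push_len = push_len_words(150);     /* illustrative packet length */

        if (push_len > 16)
                printf("copy 16 words, then %d more\n", push_len - 16);
        else
                printf("copy %d words\n", push_len);
        return 0;
}
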
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 8af3ca8efcef..2be51b332652 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -411,8 +411,8 @@ struct rx_tpa_end_cmp_ext {
411 411
412#define BNXT_NUM_TESTS(bp) 0 412#define BNXT_NUM_TESTS(bp) 0
413 413
414#define BNXT_DEFAULT_RX_RING_SIZE 1023 414#define BNXT_DEFAULT_RX_RING_SIZE 511
415#define BNXT_DEFAULT_TX_RING_SIZE 512 415#define BNXT_DEFAULT_TX_RING_SIZE 511
416 416
417#define MAX_TPA 64 417#define MAX_TPA 64
418 418
@@ -523,10 +523,16 @@ struct bnxt_ring_struct {
523 523
524struct tx_push_bd { 524struct tx_push_bd {
525 __le32 doorbell; 525 __le32 doorbell;
526 struct tx_bd txbd1; 526 __le32 tx_bd_len_flags_type;
527 u32 tx_bd_opaque;
527 struct tx_bd_ext txbd2; 528 struct tx_bd_ext txbd2;
528}; 529};
529 530
531struct tx_push_buffer {
532 struct tx_push_bd push_bd;
533 u32 data[25];
534};
535
530struct bnxt_tx_ring_info { 536struct bnxt_tx_ring_info {
531 struct bnxt_napi *bnapi; 537 struct bnxt_napi *bnapi;
532 u16 tx_prod; 538 u16 tx_prod;
@@ -538,8 +544,9 @@ struct bnxt_tx_ring_info {
538 544
539 dma_addr_t tx_desc_mapping[MAX_TX_PAGES]; 545 dma_addr_t tx_desc_mapping[MAX_TX_PAGES];
540 546
541 struct tx_push_bd *tx_push; 547 struct tx_push_buffer *tx_push;
542 dma_addr_t tx_push_mapping; 548 dma_addr_t tx_push_mapping;
549 __le64 data_mapping;
543 550
544#define BNXT_DEV_STATE_CLOSING 0x1 551#define BNXT_DEV_STATE_CLOSING 0x1
545 u32 dev_state; 552 u32 dev_state;
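
A minimal userspace sketch of the header split introduced in bnxt.h above: the doorbell/BD header lives in its own struct, and a wrapper adds a fixed inline data area, so the header and the copied packet bytes can be addressed separately. The field names and sizes below are illustrative, not the driver's exact layout.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct push_bd {                        /* doorbell + first BD; illustrative */
        uint32_t doorbell;
        uint32_t len_flags_type;
        uint32_t opaque;
        uint32_t ext[4];
};

struct push_buffer {                    /* what tx_push now points at */
        struct push_bd bd;
        uint32_t data[25];              /* inline copy of the packet bytes */
};

int main(void)
{
        printf("packet data starts at offset %zu\n",
               offsetof(struct push_buffer, data));
        return 0;
}
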
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 922b898e7a32..3238817dfd5f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -486,15 +486,8 @@ static u32 bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info)
486 speed_mask |= SUPPORTED_2500baseX_Full; 486 speed_mask |= SUPPORTED_2500baseX_Full;
487 if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB) 487 if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
488 speed_mask |= SUPPORTED_10000baseT_Full; 488 speed_mask |= SUPPORTED_10000baseT_Full;
489 /* TODO: support 25GB, 50GB with different cable type */
490 if (fw_speeds & BNXT_LINK_SPEED_MSK_20GB)
491 speed_mask |= SUPPORTED_20000baseMLD2_Full |
492 SUPPORTED_20000baseKR2_Full;
493 if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB) 489 if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
494 speed_mask |= SUPPORTED_40000baseKR4_Full | 490 speed_mask |= SUPPORTED_40000baseCR4_Full;
495 SUPPORTED_40000baseCR4_Full |
496 SUPPORTED_40000baseSR4_Full |
497 SUPPORTED_40000baseLR4_Full;
498 491
499 return speed_mask; 492 return speed_mask;
500} 493}
@@ -514,15 +507,8 @@ static u32 bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info)
514 speed_mask |= ADVERTISED_2500baseX_Full; 507 speed_mask |= ADVERTISED_2500baseX_Full;
515 if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB) 508 if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
516 speed_mask |= ADVERTISED_10000baseT_Full; 509 speed_mask |= ADVERTISED_10000baseT_Full;
517 /* TODO: how to advertise 20, 25, 40, 50GB with different cable type ?*/
518 if (fw_speeds & BNXT_LINK_SPEED_MSK_20GB)
519 speed_mask |= ADVERTISED_20000baseMLD2_Full |
520 ADVERTISED_20000baseKR2_Full;
521 if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB) 510 if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
522 speed_mask |= ADVERTISED_40000baseKR4_Full | 511 speed_mask |= ADVERTISED_40000baseCR4_Full;
523 ADVERTISED_40000baseCR4_Full |
524 ADVERTISED_40000baseSR4_Full |
525 ADVERTISED_40000baseLR4_Full;
526 return speed_mask; 512 return speed_mask;
527} 513}
528 514
@@ -557,11 +543,12 @@ static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
557 u16 ethtool_speed; 543 u16 ethtool_speed;
558 544
559 cmd->supported = bnxt_fw_to_ethtool_support_spds(link_info); 545 cmd->supported = bnxt_fw_to_ethtool_support_spds(link_info);
546 cmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
560 547
561 if (link_info->auto_link_speeds) 548 if (link_info->auto_link_speeds)
562 cmd->supported |= SUPPORTED_Autoneg; 549 cmd->supported |= SUPPORTED_Autoneg;
563 550
564 if (BNXT_AUTO_MODE(link_info->auto_mode)) { 551 if (link_info->autoneg) {
565 cmd->advertising = 552 cmd->advertising =
566 bnxt_fw_to_ethtool_advertised_spds(link_info); 553 bnxt_fw_to_ethtool_advertised_spds(link_info);
567 cmd->advertising |= ADVERTISED_Autoneg; 554 cmd->advertising |= ADVERTISED_Autoneg;
@@ -570,28 +557,16 @@ static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
570 cmd->autoneg = AUTONEG_DISABLE; 557 cmd->autoneg = AUTONEG_DISABLE;
571 cmd->advertising = 0; 558 cmd->advertising = 0;
572 } 559 }
573 if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) { 560 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) {
574 if ((link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) == 561 if ((link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) ==
575 BNXT_LINK_PAUSE_BOTH) { 562 BNXT_LINK_PAUSE_BOTH) {
576 cmd->advertising |= ADVERTISED_Pause; 563 cmd->advertising |= ADVERTISED_Pause;
577 cmd->supported |= SUPPORTED_Pause;
578 } else { 564 } else {
579 cmd->advertising |= ADVERTISED_Asym_Pause; 565 cmd->advertising |= ADVERTISED_Asym_Pause;
580 cmd->supported |= SUPPORTED_Asym_Pause;
581 if (link_info->auto_pause_setting & 566 if (link_info->auto_pause_setting &
582 BNXT_LINK_PAUSE_RX) 567 BNXT_LINK_PAUSE_RX)
583 cmd->advertising |= ADVERTISED_Pause; 568 cmd->advertising |= ADVERTISED_Pause;
584 } 569 }
585 } else if (link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) {
586 if ((link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) ==
587 BNXT_LINK_PAUSE_BOTH) {
588 cmd->supported |= SUPPORTED_Pause;
589 } else {
590 cmd->supported |= SUPPORTED_Asym_Pause;
591 if (link_info->force_pause_setting &
592 BNXT_LINK_PAUSE_RX)
593 cmd->supported |= SUPPORTED_Pause;
594 }
595 } 570 }
596 571
597 cmd->port = PORT_NONE; 572 cmd->port = PORT_NONE;
@@ -670,6 +645,9 @@ static u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
670 if (advertising & ADVERTISED_10000baseT_Full) 645 if (advertising & ADVERTISED_10000baseT_Full)
671 fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB; 646 fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB;
672 647
648 if (advertising & ADVERTISED_40000baseCR4_Full)
649 fw_speed_mask |= BNXT_LINK_SPEED_MSK_40GB;
650
673 return fw_speed_mask; 651 return fw_speed_mask;
674} 652}
675 653
@@ -729,7 +707,7 @@ static int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
729 speed = ethtool_cmd_speed(cmd); 707 speed = ethtool_cmd_speed(cmd);
730 link_info->req_link_speed = bnxt_get_fw_speed(dev, speed); 708 link_info->req_link_speed = bnxt_get_fw_speed(dev, speed);
731 link_info->req_duplex = BNXT_LINK_DUPLEX_FULL; 709 link_info->req_duplex = BNXT_LINK_DUPLEX_FULL;
732 link_info->autoneg &= ~BNXT_AUTONEG_SPEED; 710 link_info->autoneg = 0;
733 link_info->advertising = 0; 711 link_info->advertising = 0;
734 } 712 }
735 713
@@ -748,8 +726,7 @@ static void bnxt_get_pauseparam(struct net_device *dev,
748 726
749 if (BNXT_VF(bp)) 727 if (BNXT_VF(bp))
750 return; 728 return;
751 epause->autoneg = !!(link_info->auto_pause_setting & 729 epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL);
752 BNXT_LINK_PAUSE_BOTH);
753 epause->rx_pause = ((link_info->pause & BNXT_LINK_PAUSE_RX) != 0); 730 epause->rx_pause = ((link_info->pause & BNXT_LINK_PAUSE_RX) != 0);
754 epause->tx_pause = ((link_info->pause & BNXT_LINK_PAUSE_TX) != 0); 731 epause->tx_pause = ((link_info->pause & BNXT_LINK_PAUSE_TX) != 0);
755} 732}
@@ -765,6 +742,9 @@ static int bnxt_set_pauseparam(struct net_device *dev,
765 return rc; 742 return rc;
766 743
767 if (epause->autoneg) { 744 if (epause->autoneg) {
745 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
746 return -EINVAL;
747
768 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; 748 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
769 link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_BOTH; 749 link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_BOTH;
770 } else { 750 } else {
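
A minimal userspace sketch of the guard added to bnxt_set_pauseparam() above: pause autonegotiation is only accepted when speed autonegotiation is already enabled; otherwise the request is rejected. The flag values and the helper are illustrative stand-ins, not the driver's BNXT_AUTONEG_* bits.

#include <stdio.h>

#define AUTONEG_SPEED           0x1u    /* illustrative flag bits */
#define AUTONEG_FLOW_CTRL       0x2u

static int set_pause_autoneg(unsigned int *autoneg, int want_autoneg)
{
        if (want_autoneg) {
                if (!(*autoneg & AUTONEG_SPEED))
                        return -1;      /* -EINVAL in the driver */
                *autoneg |= AUTONEG_FLOW_CTRL;
        } else {
                *autoneg &= ~AUTONEG_FLOW_CTRL;
        }
        return 0;
}

int main(void)
{
        unsigned int autoneg = 0;

        printf("%d\n", set_pause_autoneg(&autoneg, 1)); /* -1: speed AN off */
        autoneg = AUTONEG_SPEED;
        printf("%d\n", set_pause_autoneg(&autoneg, 1)); /* 0: accepted */
        return 0;
}
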
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index b15a60d787c7..d7e01a74e927 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2445,8 +2445,7 @@ static void bcmgenet_irq_task(struct work_struct *work)
2445 } 2445 }
2446 2446
2447 /* Link UP/DOWN event */ 2447 /* Link UP/DOWN event */
2448 if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) && 2448 if (priv->irq0_stat & UMAC_IRQ_LINK_EVENT) {
2449 (priv->irq0_stat & UMAC_IRQ_LINK_EVENT)) {
2450 phy_mac_interrupt(priv->phydev, 2449 phy_mac_interrupt(priv->phydev,
2451 !!(priv->irq0_stat & UMAC_IRQ_LINK_UP)); 2450 !!(priv->irq0_stat & UMAC_IRQ_LINK_UP));
2452 priv->irq0_stat &= ~UMAC_IRQ_LINK_EVENT; 2451 priv->irq0_stat &= ~UMAC_IRQ_LINK_EVENT;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 0d775964b060..457c3bc8cfff 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -401,7 +401,7 @@ int bcmgenet_mii_probe(struct net_device *dev)
401 * Ethernet MAC ISRs 401 * Ethernet MAC ISRs
402 */ 402 */
403 if (priv->internal_phy) 403 if (priv->internal_phy)
404 priv->mii_bus->irq[phydev->mdio.addr] = PHY_IGNORE_INTERRUPT; 404 priv->phydev->irq = PHY_IGNORE_INTERRUPT;
405 405
406 return 0; 406 return 0;
407} 407}
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 9293675df7ba..3010080cfeee 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -7831,6 +7831,14 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7831 return ret; 7831 return ret;
7832} 7832}
7833 7833
7834static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7835{
7836 /* Check if we will never have enough descriptors,
7837 * as gso_segs can be more than current ring size
7838 */
7839 return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7840}
7841
7834static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *); 7842static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7835 7843
7836/* Use GSO to workaround all TSO packets that meet HW bug conditions 7844/* Use GSO to workaround all TSO packets that meet HW bug conditions
@@ -7934,14 +7942,19 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7934 * vlan encapsulated. 7942 * vlan encapsulated.
7935 */ 7943 */
7936 if (skb->protocol == htons(ETH_P_8021Q) || 7944 if (skb->protocol == htons(ETH_P_8021Q) ||
7937 skb->protocol == htons(ETH_P_8021AD)) 7945 skb->protocol == htons(ETH_P_8021AD)) {
7938 return tg3_tso_bug(tp, tnapi, txq, skb); 7946 if (tg3_tso_bug_gso_check(tnapi, skb))
7947 return tg3_tso_bug(tp, tnapi, txq, skb);
7948 goto drop;
7949 }
7939 7950
7940 if (!skb_is_gso_v6(skb)) { 7951 if (!skb_is_gso_v6(skb)) {
7941 if (unlikely((ETH_HLEN + hdr_len) > 80) && 7952 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7942 tg3_flag(tp, TSO_BUG)) 7953 tg3_flag(tp, TSO_BUG)) {
7943 return tg3_tso_bug(tp, tnapi, txq, skb); 7954 if (tg3_tso_bug_gso_check(tnapi, skb))
7944 7955 return tg3_tso_bug(tp, tnapi, txq, skb);
7956 goto drop;
7957 }
7945 ip_csum = iph->check; 7958 ip_csum = iph->check;
7946 ip_tot_len = iph->tot_len; 7959 ip_tot_len = iph->tot_len;
7947 iph->check = 0; 7960 iph->check = 0;
@@ -8073,7 +8086,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
8073 if (would_hit_hwbug) { 8086 if (would_hit_hwbug) {
8074 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i); 8087 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8075 8088
8076 if (mss) { 8089 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8077 /* If it's a TSO packet, do GSO instead of 8090 /* If it's a TSO packet, do GSO instead of
8078 * allocating and copying to a large linear SKB 8091 * allocating and copying to a large linear SKB
8079 */ 8092 */
@@ -12016,7 +12029,7 @@ static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
12016 int ret; 12029 int ret;
12017 u32 offset, len, b_offset, odd_len; 12030 u32 offset, len, b_offset, odd_len;
12018 u8 *buf; 12031 u8 *buf;
12019 __be32 start, end; 12032 __be32 start = 0, end;
12020 12033
12021 if (tg3_flag(tp, NO_NVRAM) || 12034 if (tg3_flag(tp, NO_NVRAM) ||
12022 eeprom->magic != TG3_EEPROM_MAGIC) 12035 eeprom->magic != TG3_EEPROM_MAGIC)
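
A minimal userspace sketch of the tg3_tso_bug_gso_check() guard above: before falling back to software GSO, the code checks that the number of segments the packet would produce can plausibly fit in the TX ring; if not, the packet is dropped instead of stalling the queue. The segment and ring counts below are illustrative.

#include <stdbool.h>
#include <stdio.h>

/* same shape as tg3_tso_bug_gso_check(): can this many GSO segments fit? */
static bool gso_fits(unsigned int gso_segs, unsigned int tx_pending)
{
        return gso_segs < tx_pending / 3;
}

int main(void)
{
        printf("%s\n", gso_fits(40, 512) ? "GSO fallback" : "drop");
        printf("%s\n", gso_fits(400, 512) ? "GSO fallback" : "drop");
        return 0;
}
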
diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
index 04b0d16b210e..95bc470ae441 100644
--- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
+++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
@@ -987,7 +987,7 @@ bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
987 if (!list_empty(&rxf->ucast_pending_add_q)) { 987 if (!list_empty(&rxf->ucast_pending_add_q)) {
988 mac = list_first_entry(&rxf->ucast_pending_add_q, 988 mac = list_first_entry(&rxf->ucast_pending_add_q,
989 struct bna_mac, qe); 989 struct bna_mac, qe);
990 list_add_tail(&mac->qe, &rxf->ucast_active_q); 990 list_move_tail(&mac->qe, &rxf->ucast_active_q);
991 bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_ADD_REQ); 991 bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_ADD_REQ);
992 return 1; 992 return 1;
993 } 993 }
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 9d9984a87d42..50c94104f19c 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -2823,7 +2823,7 @@ static int macb_probe(struct platform_device *pdev)
2823 struct device_node *np = pdev->dev.of_node; 2823 struct device_node *np = pdev->dev.of_node;
2824 struct device_node *phy_node; 2824 struct device_node *phy_node;
2825 const struct macb_config *macb_config = NULL; 2825 const struct macb_config *macb_config = NULL;
2826 struct clk *pclk, *hclk, *tx_clk; 2826 struct clk *pclk, *hclk = NULL, *tx_clk = NULL;
2827 unsigned int queue_mask, num_queues; 2827 unsigned int queue_mask, num_queues;
2828 struct macb_platform_data *pdata; 2828 struct macb_platform_data *pdata;
2829 bool native_io; 2829 bool native_io;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index b89504405b72..34d269cd5579 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -1526,7 +1526,6 @@ static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
1526 struct timespec64 *ts) 1526 struct timespec64 *ts)
1527{ 1527{
1528 u64 ns; 1528 u64 ns;
1529 u32 remainder;
1530 unsigned long flags; 1529 unsigned long flags;
1531 struct lio *lio = container_of(ptp, struct lio, ptp_info); 1530 struct lio *lio = container_of(ptp, struct lio, ptp_info);
1532 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; 1531 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
@@ -1536,8 +1535,7 @@ static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
1536 ns += lio->ptp_adjust; 1535 ns += lio->ptp_adjust;
1537 spin_unlock_irqrestore(&lio->ptp_lock, flags); 1536 spin_unlock_irqrestore(&lio->ptp_lock, flags);
1538 1537
1539 ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder); 1538 *ts = ns_to_timespec64(ns);
1540 ts->tv_nsec = remainder;
1541 1539
1542 return 0; 1540 return 0;
1543} 1541}
@@ -1685,7 +1683,7 @@ static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
1685 dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no); 1683 dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no);
1686 /* droq creation and local register settings. */ 1684 /* droq creation and local register settings. */
1687 ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx); 1685 ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
1688 if (ret_val == -1) 1686 if (ret_val < 0)
1689 return ret_val; 1687 return ret_val;
1690 1688
1691 if (ret_val == 1) { 1689 if (ret_val == 1) {
@@ -2526,7 +2524,7 @@ static void handle_timestamp(struct octeon_device *oct,
2526 2524
2527 octeon_swap_8B_data(&resp->timestamp, 1); 2525 octeon_swap_8B_data(&resp->timestamp, 1);
2528 2526
2529 if (unlikely((skb_shinfo(skb)->tx_flags | SKBTX_IN_PROGRESS) != 0)) { 2527 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
2530 struct skb_shared_hwtstamps ts; 2528 struct skb_shared_hwtstamps ts;
2531 u64 ns = resp->timestamp; 2529 u64 ns = resp->timestamp;
2532 2530
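
Two small fixes in this file are worth spelling out: the PTP path now uses ns_to_timespec64() instead of open-coding the division, and the timestamp path tests SKBTX_IN_PROGRESS with a bitwise AND, whereas the previous OR made the condition unconditionally true. A standalone illustration of the flag test; the flag value below is invented for the example, not the real SKBTX_IN_PROGRESS bit:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_TX_IN_PROGRESS (1U << 2)   /* illustrative bit, not the kernel's */

/* (flags | bit) is non-zero whenever bit is non-zero, so it cannot tell
 * whether the flag is actually set; (flags & bit) can.
 */
static bool tstamp_requested(uint32_t tx_flags)
{
        return (tx_flags & DEMO_TX_IN_PROGRESS) != 0;
}

int main(void)
{
        printf("%d\n", tstamp_requested(0));                   /* 0 */
        printf("%d\n", tstamp_requested(DEMO_TX_IN_PROGRESS)); /* 1 */
        return 0;
}
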
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
index 4dba86eaa045..174072b3740b 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
@@ -983,5 +983,5 @@ int octeon_create_droq(struct octeon_device *oct,
983 983
984create_droq_fail: 984create_droq_fail:
985 octeon_delete_droq(oct, q_no); 985 octeon_delete_droq(oct, q_no);
986 return -1; 986 return -ENOMEM;
987} 987}
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index 688828865c48..34e9acea8747 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -116,6 +116,15 @@
116#define NIC_PF_INTR_ID_MBOX0 8 116#define NIC_PF_INTR_ID_MBOX0 8
117#define NIC_PF_INTR_ID_MBOX1 9 117#define NIC_PF_INTR_ID_MBOX1 9
118 118
119/* Minimum FIFO level before all packets for the CQ are dropped
120 *
121 * This value ensures that once a packet has been "accepted"
122 * for reception it will not get dropped due to non-availability
123 * of CQ descriptor. An errata in HW mandates this value to be
 124 * at least 0x100.
125 */
126#define NICPF_CQM_MIN_DROP_LEVEL 0x100
127
119/* Global timer for CQ timer thresh interrupts 128/* Global timer for CQ timer thresh interrupts
120 * Calculated for SCLK of 700Mhz 129 * Calculated for SCLK of 700Mhz
121 * value written should be a 1/16th of what is expected 130 * value written should be a 1/16th of what is expected
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 4dded90076c8..95f17f8cadac 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -304,6 +304,7 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic)
304static void nic_init_hw(struct nicpf *nic) 304static void nic_init_hw(struct nicpf *nic)
305{ 305{
306 int i; 306 int i;
307 u64 cqm_cfg;
307 308
308 /* Enable NIC HW block */ 309 /* Enable NIC HW block */
309 nic_reg_write(nic, NIC_PF_CFG, 0x3); 310 nic_reg_write(nic, NIC_PF_CFG, 0x3);
@@ -340,6 +341,11 @@ static void nic_init_hw(struct nicpf *nic)
340 /* Enable VLAN ethertype matching and stripping */ 341 /* Enable VLAN ethertype matching and stripping */
341 nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7, 342 nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7,
342 (2 << 19) | (ETYPE_ALG_VLAN_STRIP << 16) | ETH_P_8021Q); 343 (2 << 19) | (ETYPE_ALG_VLAN_STRIP << 16) | ETH_P_8021Q);
344
345 /* Check if HW expected value is higher (could be in future chips) */
346 cqm_cfg = nic_reg_read(nic, NIC_PF_CQM_CFG);
347 if (cqm_cfg < NICPF_CQM_MIN_DROP_LEVEL)
348 nic_reg_write(nic, NIC_PF_CQM_CFG, NICPF_CQM_MIN_DROP_LEVEL);
343} 349}
344 350
345/* Channel parse index configuration */ 351/* Channel parse index configuration */
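
The nic_main.c hunk reads NIC_PF_CQM_CFG and only raises it to NICPF_CQM_MIN_DROP_LEVEL when the current value is lower, so a larger reset default on future silicon is left untouched. A standalone sketch of that clamp-to-a-floor pattern with a simulated register; the accessor names and the 0x80 default are made up:

#include <stdint.h>
#include <stdio.h>

#define DEMO_CQM_MIN_DROP_LEVEL 0x100ULL   /* floor required by the erratum */

static uint64_t demo_cqm_cfg = 0x80;       /* pretend hardware reset value */

static uint64_t demo_reg_read(void)    { return demo_cqm_cfg; }
static void demo_reg_write(uint64_t v) { demo_cqm_cfg = v; }

int main(void)
{
        /* Raise to the minimum only if the chip's value is lower. */
        if (demo_reg_read() < DEMO_CQM_MIN_DROP_LEVEL)
                demo_reg_write(DEMO_CQM_MIN_DROP_LEVEL);

        printf("CQM_CFG = 0x%llx\n", (unsigned long long)demo_reg_read());
        return 0;
}
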
diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h
index dd536be20193..afb10e326b4f 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_reg.h
+++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h
@@ -21,7 +21,7 @@
21#define NIC_PF_TCP_TIMER (0x0060) 21#define NIC_PF_TCP_TIMER (0x0060)
22#define NIC_PF_BP_CFG (0x0080) 22#define NIC_PF_BP_CFG (0x0080)
23#define NIC_PF_RRM_CFG (0x0088) 23#define NIC_PF_RRM_CFG (0x0088)
24#define NIC_PF_CQM_CF (0x00A0) 24#define NIC_PF_CQM_CFG (0x00A0)
25#define NIC_PF_CNM_CF (0x00A8) 25#define NIC_PF_CNM_CF (0x00A8)
26#define NIC_PF_CNM_STATUS (0x00B0) 26#define NIC_PF_CNM_STATUS (0x00B0)
27#define NIC_PF_CQ_AVG_CFG (0x00C0) 27#define NIC_PF_CQ_AVG_CFG (0x00C0)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index c24cb2a86a42..a009bc30dc4d 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -574,8 +574,7 @@ static inline void nicvf_set_rxhash(struct net_device *netdev,
574 574
575static void nicvf_rcv_pkt_handler(struct net_device *netdev, 575static void nicvf_rcv_pkt_handler(struct net_device *netdev,
576 struct napi_struct *napi, 576 struct napi_struct *napi,
577 struct cmp_queue *cq, 577 struct cqe_rx_t *cqe_rx)
578 struct cqe_rx_t *cqe_rx, int cqe_type)
579{ 578{
580 struct sk_buff *skb; 579 struct sk_buff *skb;
581 struct nicvf *nic = netdev_priv(netdev); 580 struct nicvf *nic = netdev_priv(netdev);
@@ -591,7 +590,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
591 } 590 }
592 591
593 /* Check for errors */ 592 /* Check for errors */
594 err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx); 593 err = nicvf_check_cqe_rx_errs(nic, cqe_rx);
595 if (err && !cqe_rx->rb_cnt) 594 if (err && !cqe_rx->rb_cnt)
596 return; 595 return;
597 596
@@ -682,8 +681,7 @@ loop:
682 cq_idx, cq_desc->cqe_type); 681 cq_idx, cq_desc->cqe_type);
683 switch (cq_desc->cqe_type) { 682 switch (cq_desc->cqe_type) {
684 case CQE_TYPE_RX: 683 case CQE_TYPE_RX:
685 nicvf_rcv_pkt_handler(netdev, napi, cq, 684 nicvf_rcv_pkt_handler(netdev, napi, cq_desc);
686 cq_desc, CQE_TYPE_RX);
687 work_done++; 685 work_done++;
688 break; 686 break;
689 case CQE_TYPE_SEND: 687 case CQE_TYPE_SEND:
@@ -1125,7 +1123,6 @@ int nicvf_stop(struct net_device *netdev)
1125 1123
1126 /* Clear multiqset info */ 1124 /* Clear multiqset info */
1127 nic->pnicvf = nic; 1125 nic->pnicvf = nic;
1128 nic->sqs_count = 0;
1129 1126
1130 return 0; 1127 return 0;
1131} 1128}
@@ -1354,6 +1351,9 @@ void nicvf_update_stats(struct nicvf *nic)
1354 drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok + 1351 drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok +
1355 stats->tx_bcast_frames_ok + 1352 stats->tx_bcast_frames_ok +
1356 stats->tx_mcast_frames_ok; 1353 stats->tx_mcast_frames_ok;
1354 drv_stats->rx_frames_ok = stats->rx_ucast_frames +
1355 stats->rx_bcast_frames +
1356 stats->rx_mcast_frames;
1357 drv_stats->rx_drops = stats->rx_drop_red + 1357 drv_stats->rx_drops = stats->rx_drop_red +
1358 stats->rx_drop_overrun; 1358 stats->rx_drop_overrun;
1359 drv_stats->tx_drops = stats->tx_drops; 1359 drv_stats->tx_drops = stats->tx_drops;
@@ -1538,6 +1538,9 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1538 1538
1539 nicvf_send_vf_struct(nic); 1539 nicvf_send_vf_struct(nic);
1540 1540
1541 if (!pass1_silicon(nic->pdev))
1542 nic->hw_tso = true;
1543
1541 /* Check if this VF is in QS only mode */ 1544 /* Check if this VF is in QS only mode */
1542 if (nic->sqs_mode) 1545 if (nic->sqs_mode)
1543 return 0; 1546 return 0;
@@ -1557,9 +1560,6 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1557 1560
1558 netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; 1561 netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
1559 1562
1560 if (!pass1_silicon(nic->pdev))
1561 nic->hw_tso = true;
1562
1563 netdev->netdev_ops = &nicvf_netdev_ops; 1563 netdev->netdev_ops = &nicvf_netdev_ops;
1564 netdev->watchdog_timeo = NICVF_TX_TIMEOUT; 1564 netdev->watchdog_timeo = NICVF_TX_TIMEOUT;
1565 1565
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index d0d1b5490061..767347b1f631 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -1329,16 +1329,12 @@ void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
1329} 1329}
1330 1330
1331/* Check for errors in the receive cmp.queue entry */ 1331/* Check for errors in the receive cmp.queue entry */
1332int nicvf_check_cqe_rx_errs(struct nicvf *nic, 1332int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
1333 struct cmp_queue *cq, struct cqe_rx_t *cqe_rx)
1334{ 1333{
1335 struct nicvf_hw_stats *stats = &nic->hw_stats; 1334 struct nicvf_hw_stats *stats = &nic->hw_stats;
1336 struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
1337 1335
1338 if (!cqe_rx->err_level && !cqe_rx->err_opcode) { 1336 if (!cqe_rx->err_level && !cqe_rx->err_opcode)
1339 drv_stats->rx_frames_ok++;
1340 return 0; 1337 return 0;
1341 }
1342 1338
1343 if (netif_msg_rx_err(nic)) 1339 if (netif_msg_rx_err(nic))
1344 netdev_err(nic->netdev, 1340 netdev_err(nic->netdev,
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index c5030a7f213a..6673e1133523 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -338,8 +338,7 @@ u64 nicvf_queue_reg_read(struct nicvf *nic,
338/* Stats */ 338/* Stats */
339void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx); 339void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx);
340void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx); 340void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx);
341int nicvf_check_cqe_rx_errs(struct nicvf *nic, 341int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
342 struct cmp_queue *cq, struct cqe_rx_t *cqe_rx);
343int nicvf_check_cqe_tx_errs(struct nicvf *nic, 342int nicvf_check_cqe_tx_errs(struct nicvf *nic,
344 struct cmp_queue *cq, struct cqe_send_t *cqe_tx); 343 struct cmp_queue *cq, struct cqe_send_t *cqe_tx);
345#endif /* NICVF_QUEUES_H */ 344#endif /* NICVF_QUEUES_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
index ee04caa6c4d8..a89721fad633 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
@@ -681,6 +681,24 @@ int t3_seeprom_wp(struct adapter *adapter, int enable)
681 return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0); 681 return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
682} 682}
683 683
684static int vpdstrtouint(char *s, int len, unsigned int base, unsigned int *val)
685{
686 char tok[len + 1];
687
688 memcpy(tok, s, len);
689 tok[len] = 0;
690 return kstrtouint(strim(tok), base, val);
691}
692
693static int vpdstrtou16(char *s, int len, unsigned int base, u16 *val)
694{
695 char tok[len + 1];
696
697 memcpy(tok, s, len);
698 tok[len] = 0;
699 return kstrtou16(strim(tok), base, val);
700}
701
684/** 702/**
685 * get_vpd_params - read VPD parameters from VPD EEPROM 703 * get_vpd_params - read VPD parameters from VPD EEPROM
686 * @adapter: adapter to read 704 * @adapter: adapter to read
@@ -709,19 +727,19 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
709 return ret; 727 return ret;
710 } 728 }
711 729
712 ret = kstrtouint(vpd.cclk_data, 10, &p->cclk); 730 ret = vpdstrtouint(vpd.cclk_data, vpd.cclk_len, 10, &p->cclk);
713 if (ret) 731 if (ret)
714 return ret; 732 return ret;
715 ret = kstrtouint(vpd.mclk_data, 10, &p->mclk); 733 ret = vpdstrtouint(vpd.mclk_data, vpd.mclk_len, 10, &p->mclk);
716 if (ret) 734 if (ret)
717 return ret; 735 return ret;
718 ret = kstrtouint(vpd.uclk_data, 10, &p->uclk); 736 ret = vpdstrtouint(vpd.uclk_data, vpd.uclk_len, 10, &p->uclk);
719 if (ret) 737 if (ret)
720 return ret; 738 return ret;
721 ret = kstrtouint(vpd.mdc_data, 10, &p->mdc); 739 ret = vpdstrtouint(vpd.mdc_data, vpd.mdc_len, 10, &p->mdc);
722 if (ret) 740 if (ret)
723 return ret; 741 return ret;
724 ret = kstrtouint(vpd.mt_data, 10, &p->mem_timing); 742 ret = vpdstrtouint(vpd.mt_data, vpd.mt_len, 10, &p->mem_timing);
725 if (ret) 743 if (ret)
726 return ret; 744 return ret;
727 memcpy(p->sn, vpd.sn_data, SERNUM_LEN); 745 memcpy(p->sn, vpd.sn_data, SERNUM_LEN);
@@ -733,10 +751,12 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
733 } else { 751 } else {
734 p->port_type[0] = hex_to_bin(vpd.port0_data[0]); 752 p->port_type[0] = hex_to_bin(vpd.port0_data[0]);
735 p->port_type[1] = hex_to_bin(vpd.port1_data[0]); 753 p->port_type[1] = hex_to_bin(vpd.port1_data[0]);
736 ret = kstrtou16(vpd.xaui0cfg_data, 16, &p->xauicfg[0]); 754 ret = vpdstrtou16(vpd.xaui0cfg_data, vpd.xaui0cfg_len, 16,
755 &p->xauicfg[0]);
737 if (ret) 756 if (ret)
738 return ret; 757 return ret;
739 ret = kstrtou16(vpd.xaui1cfg_data, 16, &p->xauicfg[1]); 758 ret = vpdstrtou16(vpd.xaui1cfg_data, vpd.xaui1cfg_len, 16,
759 &p->xauicfg[1]);
740 if (ret) 760 if (ret)
741 return ret; 761 return ret;
742 } 762 }
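
The cxgb3 change works around the fact that VPD fields are fixed-width and not NUL-terminated, so passing them directly to kstrtouint() is not safe. The new helpers copy each field into a bounded temporary, terminate and trim it, then parse. A userspace stand-in, with strtoul() in place of kstrtouint(), a fixed-size buffer instead of the VLA, and names invented for the sketch:

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Parse a fixed-width, unterminated VPD field: bound-copy, terminate,
 * strip the space padding, then convert in the requested base.
 */
static int demo_vpd_to_uint(const char *field, size_t len, int base,
                            unsigned int *val)
{
        char tmp[32];
        char *end;

        if (len >= sizeof(tmp))
                return -1;
        memcpy(tmp, field, len);
        tmp[len] = '\0';

        while (len && isspace((unsigned char)tmp[len - 1]))
                tmp[--len] = '\0';
        if (!len)
                return -1;

        *val = (unsigned int)strtoul(tmp, &end, base);
        return *end == '\0' ? 0 : -1;
}

int main(void)
{
        const char raw[8] = { '1', '2', '5', ' ', ' ', ' ', ' ', ' ' };
        unsigned int cclk;

        if (!demo_vpd_to_uint(raw, sizeof(raw), 10, &cclk))
                printf("cclk = %u\n", cclk);   /* prints 125 */
        return 0;
}
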
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index a8dda635456d..06bc2d2e7a73 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -165,6 +165,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
165 CH_PCI_ID_TABLE_FENTRY(0x5098), /* Custom 2x40G QSFP */ 165 CH_PCI_ID_TABLE_FENTRY(0x5098), /* Custom 2x40G QSFP */
166 CH_PCI_ID_TABLE_FENTRY(0x5099), /* Custom 2x40G QSFP */ 166 CH_PCI_ID_TABLE_FENTRY(0x5099), /* Custom 2x40G QSFP */
167 CH_PCI_ID_TABLE_FENTRY(0x509a), /* Custom T520-CR */ 167 CH_PCI_ID_TABLE_FENTRY(0x509a), /* Custom T520-CR */
168 CH_PCI_ID_TABLE_FENTRY(0x509b), /* Custom T540-CR LOM */
168 169
169 /* T6 adapters: 170 /* T6 adapters:
170 */ 171 */
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 1671fa3332c2..7ba6d530b0c0 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -33,7 +33,7 @@
33 33
34#define DRV_NAME "enic" 34#define DRV_NAME "enic"
35#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver" 35#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
36#define DRV_VERSION "2.3.0.12" 36#define DRV_VERSION "2.3.0.20"
37#define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc" 37#define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc"
38 38
39#define ENIC_BARS_MAX 6 39#define ENIC_BARS_MAX 6
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
index 1ffd1050860b..1fdf5fe12a95 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -298,7 +298,8 @@ static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
298 int wait) 298 int wait)
299{ 299{
300 struct devcmd2_controller *dc2c = vdev->devcmd2; 300 struct devcmd2_controller *dc2c = vdev->devcmd2;
301 struct devcmd2_result *result = dc2c->result + dc2c->next_result; 301 struct devcmd2_result *result;
302 u8 color;
302 unsigned int i; 303 unsigned int i;
303 int delay, err; 304 int delay, err;
304 u32 fetch_index, new_posted; 305 u32 fetch_index, new_posted;
@@ -336,13 +337,17 @@ static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
336 if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT) 337 if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
337 return 0; 338 return 0;
338 339
340 result = dc2c->result + dc2c->next_result;
341 color = dc2c->color;
342
343 dc2c->next_result++;
344 if (dc2c->next_result == dc2c->result_size) {
345 dc2c->next_result = 0;
346 dc2c->color = dc2c->color ? 0 : 1;
347 }
348
339 for (delay = 0; delay < wait; delay++) { 349 for (delay = 0; delay < wait; delay++) {
340 if (result->color == dc2c->color) { 350 if (result->color == color) {
341 dc2c->next_result++;
342 if (dc2c->next_result == dc2c->result_size) {
343 dc2c->next_result = 0;
344 dc2c->color = dc2c->color ? 0 : 1;
345 }
346 if (result->error) { 351 if (result->error) {
347 err = result->error; 352 err = result->error;
348 if (err != ERR_ECMDUNKNOWN || 353 if (err != ERR_ECMDUNKNOWN ||
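
The vnic_dev.c change snapshots both the result slot and the color (phase) bit it will wait for before advancing next_result, so the later poll compares against values that cannot be perturbed when more commands are posted or the ring wraps. A simplified, standalone model of that completion-ring pattern; the structure and names are invented for the sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_RING_SIZE 4

struct demo_result {
        uint8_t color;   /* flipped by the producer when the slot completes */
        int error;
};

struct demo_result_ring {
        struct demo_result slot[DEMO_RING_SIZE];
        unsigned int next;
        uint8_t color;   /* color the consumer expects for the next slot */
};

/* Claim the slot and the color to wait for, then advance the consumer
 * state; wrapping flips the expected color exactly once per lap.
 */
static struct demo_result *demo_claim(struct demo_result_ring *ring,
                                      uint8_t *expect)
{
        struct demo_result *res = &ring->slot[ring->next];

        *expect = ring->color;
        if (++ring->next == DEMO_RING_SIZE) {
                ring->next = 0;
                ring->color ^= 1;
        }
        return res;
}

static bool demo_ready(const struct demo_result *res, uint8_t expect)
{
        return res->color == expect;
}

int main(void)
{
        struct demo_result_ring ring = { .color = 1 };
        uint8_t expect;
        struct demo_result *res = demo_claim(&ring, &expect);

        res->color = 1;   /* pretend the device completed the command */
        printf("ready = %d\n", demo_ready(res, expect));
        return 0;
}
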
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index cf94b72dbacd..48d91941408d 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -128,7 +128,6 @@ struct board_info {
128 struct resource *data_res; 128 struct resource *data_res;
129 struct resource *addr_req; /* resources requested */ 129 struct resource *addr_req; /* resources requested */
130 struct resource *data_req; 130 struct resource *data_req;
131 struct resource *irq_res;
132 131
133 int irq_wake; 132 int irq_wake;
134 133
@@ -1300,22 +1299,16 @@ static int
1300dm9000_open(struct net_device *dev) 1299dm9000_open(struct net_device *dev)
1301{ 1300{
1302 struct board_info *db = netdev_priv(dev); 1301 struct board_info *db = netdev_priv(dev);
1303 unsigned long irqflags = db->irq_res->flags & IRQF_TRIGGER_MASK;
1304 1302
1305 if (netif_msg_ifup(db)) 1303 if (netif_msg_ifup(db))
1306 dev_dbg(db->dev, "enabling %s\n", dev->name); 1304 dev_dbg(db->dev, "enabling %s\n", dev->name);
1307 1305
1308 /* If there is no IRQ type specified, default to something that 1306 /* If there is no IRQ type specified, tell the user that this is a
1309 * may work, and tell the user that this is a problem */ 1307 * problem
1310 1308 */
1311 if (irqflags == IRQF_TRIGGER_NONE) 1309 if (irq_get_trigger_type(dev->irq) == IRQF_TRIGGER_NONE)
1312 irqflags = irq_get_trigger_type(dev->irq);
1313
1314 if (irqflags == IRQF_TRIGGER_NONE)
1315 dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n"); 1310 dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n");
1316 1311
1317 irqflags |= IRQF_SHARED;
1318
1319 /* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */ 1312 /* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */
1320 iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */ 1313 iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
1321 mdelay(1); /* delay needs by DM9000B */ 1314 mdelay(1); /* delay needs by DM9000B */
@@ -1323,7 +1316,8 @@ dm9000_open(struct net_device *dev)
1323 /* Initialize DM9000 board */ 1316 /* Initialize DM9000 board */
1324 dm9000_init_dm9000(dev); 1317 dm9000_init_dm9000(dev);
1325 1318
1326 if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev)) 1319 if (request_irq(dev->irq, dm9000_interrupt, IRQF_SHARED,
1320 dev->name, dev))
1327 return -EAGAIN; 1321 return -EAGAIN;
1328 /* Now that we have an interrupt handler hooked up we can unmask 1322 /* Now that we have an interrupt handler hooked up we can unmask
1329 * our interrupts 1323 * our interrupts
@@ -1500,15 +1494,22 @@ dm9000_probe(struct platform_device *pdev)
1500 1494
1501 db->addr_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1495 db->addr_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1502 db->data_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 1496 db->data_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1503 db->irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1504 1497
1505 if (db->addr_res == NULL || db->data_res == NULL || 1498 if (!db->addr_res || !db->data_res) {
1506 db->irq_res == NULL) { 1499 dev_err(db->dev, "insufficient resources addr=%p data=%p\n",
1507 dev_err(db->dev, "insufficient resources\n"); 1500 db->addr_res, db->data_res);
1508 ret = -ENOENT; 1501 ret = -ENOENT;
1509 goto out; 1502 goto out;
1510 } 1503 }
1511 1504
1505 ndev->irq = platform_get_irq(pdev, 0);
1506 if (ndev->irq < 0) {
1507 dev_err(db->dev, "interrupt resource unavailable: %d\n",
1508 ndev->irq);
1509 ret = ndev->irq;
1510 goto out;
1511 }
1512
1512 db->irq_wake = platform_get_irq(pdev, 1); 1513 db->irq_wake = platform_get_irq(pdev, 1);
1513 if (db->irq_wake >= 0) { 1514 if (db->irq_wake >= 0) {
1514 dev_dbg(db->dev, "wakeup irq %d\n", db->irq_wake); 1515 dev_dbg(db->dev, "wakeup irq %d\n", db->irq_wake);
@@ -1570,7 +1571,6 @@ dm9000_probe(struct platform_device *pdev)
1570 1571
1571 /* fill in parameters for net-dev structure */ 1572 /* fill in parameters for net-dev structure */
1572 ndev->base_addr = (unsigned long)db->io_addr; 1573 ndev->base_addr = (unsigned long)db->io_addr;
1573 ndev->irq = db->irq_res->start;
1574 1574
1575 /* ensure at least we have a default set of IO routines */ 1575 /* ensure at least we have a default set of IO routines */
1576 dm9000_set_io(db, iosize); 1576 dm9000_set_io(db, iosize);
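
The dm9000 conversion drops the IORESOURCE_IRQ lookup in favour of platform_get_irq(), which performs any interrupt mapping and returns a negative errno on failure, and it queries the configured trigger with irq_get_trigger_type() instead of peeking at resource flags. A hedged kernel-style sketch of that probe pattern; the handler and names below are placeholders, not dm9000 code:

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/platform_device.h>

static irqreturn_t demo_isr(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

/* Fetch the first interrupt of a platform device and install a shared
 * handler; the warning mirrors the driver's "no trigger type" message.
 */
static int demo_request_platform_irq(struct platform_device *pdev, void *ctx)
{
        int irq = platform_get_irq(pdev, 0);

        if (irq < 0)
                return irq;

        if (irq_get_trigger_type(irq) == IRQF_TRIGGER_NONE)
                dev_warn(&pdev->dev, "no IRQ trigger type configured\n");

        return request_irq(irq, demo_isr, IRQF_SHARED,
                           dev_name(&pdev->dev), ctx);
}
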
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index cf837831304b..f9751294ece7 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -531,6 +531,7 @@ struct be_adapter {
531 531
532 struct delayed_work be_err_detection_work; 532 struct delayed_work be_err_detection_work;
533 u8 err_flags; 533 u8 err_flags;
534 bool pcicfg_mapped; /* pcicfg obtained via pci_iomap() */
534 u32 flags; 535 u32 flags;
535 u32 cmd_privileges; 536 u32 cmd_privileges;
536 /* Ethtool knobs and info */ 537 /* Ethtool knobs and info */
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 241819b36ca7..6d9a8d78e8ad 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -622,10 +622,13 @@ enum be_if_flags {
622 BE_IF_FLAGS_VLAN_PROMISCUOUS |\ 622 BE_IF_FLAGS_VLAN_PROMISCUOUS |\
623 BE_IF_FLAGS_MCAST_PROMISCUOUS) 623 BE_IF_FLAGS_MCAST_PROMISCUOUS)
624 624
625#define BE_IF_EN_FLAGS (BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |\ 625#define BE_IF_FILT_FLAGS_BASIC (BE_IF_FLAGS_BROADCAST | \
626 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_UNTAGGED) 626 BE_IF_FLAGS_PASS_L3L4_ERRORS | \
627 BE_IF_FLAGS_UNTAGGED)
627 628
628#define BE_IF_ALL_FILT_FLAGS (BE_IF_EN_FLAGS | BE_IF_FLAGS_ALL_PROMISCUOUS) 629#define BE_IF_ALL_FILT_FLAGS (BE_IF_FILT_FLAGS_BASIC | \
630 BE_IF_FLAGS_MULTICAST | \
631 BE_IF_FLAGS_ALL_PROMISCUOUS)
629 632
630/* An RX interface is an object with one or more MAC addresses and 633/* An RX interface is an object with one or more MAC addresses and
631 * filtering capabilities. */ 634 * filtering capabilities. */
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index f99de3657ce3..d1cf1274fc2f 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -125,6 +125,11 @@ static const char * const ue_status_hi_desc[] = {
125 "Unknown" 125 "Unknown"
126}; 126};
127 127
128#define BE_VF_IF_EN_FLAGS (BE_IF_FLAGS_UNTAGGED | \
129 BE_IF_FLAGS_BROADCAST | \
130 BE_IF_FLAGS_MULTICAST | \
131 BE_IF_FLAGS_PASS_L3L4_ERRORS)
132
128static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q) 133static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
129{ 134{
130 struct be_dma_mem *mem = &q->dma_mem; 135 struct be_dma_mem *mem = &q->dma_mem;
@@ -3537,7 +3542,7 @@ static int be_enable_if_filters(struct be_adapter *adapter)
3537{ 3542{
3538 int status; 3543 int status;
3539 3544
3540 status = be_cmd_rx_filter(adapter, BE_IF_EN_FLAGS, ON); 3545 status = be_cmd_rx_filter(adapter, BE_IF_FILT_FLAGS_BASIC, ON);
3541 if (status) 3546 if (status)
3542 return status; 3547 return status;
3543 3548
@@ -3857,8 +3862,7 @@ static int be_vfs_if_create(struct be_adapter *adapter)
3857 int status; 3862 int status;
3858 3863
3859 /* If a FW profile exists, then cap_flags are updated */ 3864 /* If a FW profile exists, then cap_flags are updated */
3860 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | 3865 cap_flags = BE_VF_IF_EN_FLAGS;
3861 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3862 3866
3863 for_all_vfs(adapter, vf_cfg, vf) { 3867 for_all_vfs(adapter, vf_cfg, vf) {
3864 if (!BE3_chip(adapter)) { 3868 if (!BE3_chip(adapter)) {
@@ -3874,10 +3878,8 @@ static int be_vfs_if_create(struct be_adapter *adapter)
3874 } 3878 }
3875 } 3879 }
3876 3880
3877 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED | 3881 /* PF should enable IF flags during proxy if_create call */
3878 BE_IF_FLAGS_BROADCAST | 3882 en_flags = cap_flags & BE_VF_IF_EN_FLAGS;
3879 BE_IF_FLAGS_MULTICAST |
3880 BE_IF_FLAGS_PASS_L3L4_ERRORS);
3881 status = be_cmd_if_create(adapter, cap_flags, en_flags, 3883 status = be_cmd_if_create(adapter, cap_flags, en_flags,
3882 &vf_cfg->if_handle, vf + 1); 3884 &vf_cfg->if_handle, vf + 1);
3883 if (status) 3885 if (status)
@@ -4968,6 +4970,8 @@ static void be_unmap_pci_bars(struct be_adapter *adapter)
4968 pci_iounmap(adapter->pdev, adapter->csr); 4970 pci_iounmap(adapter->pdev, adapter->csr);
4969 if (adapter->db) 4971 if (adapter->db)
4970 pci_iounmap(adapter->pdev, adapter->db); 4972 pci_iounmap(adapter->pdev, adapter->db);
4973 if (adapter->pcicfg && adapter->pcicfg_mapped)
4974 pci_iounmap(adapter->pdev, adapter->pcicfg);
4971} 4975}
4972 4976
4973static int db_bar(struct be_adapter *adapter) 4977static int db_bar(struct be_adapter *adapter)
@@ -5019,8 +5023,10 @@ static int be_map_pci_bars(struct be_adapter *adapter)
5019 if (!addr) 5023 if (!addr)
5020 goto pci_map_err; 5024 goto pci_map_err;
5021 adapter->pcicfg = addr; 5025 adapter->pcicfg = addr;
5026 adapter->pcicfg_mapped = true;
5022 } else { 5027 } else {
5023 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET; 5028 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
5029 adapter->pcicfg_mapped = false;
5024 } 5030 }
5025 } 5031 }
5026 5032
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 62fa136554ac..41b010645100 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -1265,7 +1265,6 @@ static int ethoc_remove(struct platform_device *pdev)
1265 1265
1266 if (priv->mdio) { 1266 if (priv->mdio) {
1267 mdiobus_unregister(priv->mdio); 1267 mdiobus_unregister(priv->mdio);
1268 kfree(priv->mdio->irq);
1269 mdiobus_free(priv->mdio); 1268 mdiobus_free(priv->mdio);
1270 } 1269 }
1271 if (priv->clk) 1270 if (priv->clk)
diff --git a/drivers/net/ethernet/ezchip/Kconfig b/drivers/net/ethernet/ezchip/Kconfig
index 48ecbc8aaaea..b423ad380b6a 100644
--- a/drivers/net/ethernet/ezchip/Kconfig
+++ b/drivers/net/ethernet/ezchip/Kconfig
@@ -18,6 +18,7 @@ if NET_VENDOR_EZCHIP
18config EZCHIP_NPS_MANAGEMENT_ENET 18config EZCHIP_NPS_MANAGEMENT_ENET
19 tristate "EZchip NPS management enet support" 19 tristate "EZchip NPS management enet support"
20 depends on OF_IRQ && OF_NET 20 depends on OF_IRQ && OF_NET
21 depends on HAS_IOMEM
21 ---help--- 22 ---help---
22 Simple LAN device for debug or management purposes. 23 Simple LAN device for debug or management purposes.
23 Device supports interrupts for RX and TX(completion). 24 Device supports interrupts for RX and TX(completion).
diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
index 4097c58d17a7..cbe21dc7e37e 100644
--- a/drivers/net/ethernet/freescale/Makefile
+++ b/drivers/net/ethernet/freescale/Makefile
@@ -4,6 +4,9 @@
4 4
5obj-$(CONFIG_FEC) += fec.o 5obj-$(CONFIG_FEC) += fec.o
6fec-objs :=fec_main.o fec_ptp.o 6fec-objs :=fec_main.o fec_ptp.o
7CFLAGS_fec_main.o := -D__CHECK_ENDIAN__
8CFLAGS_fec_ptp.o := -D__CHECK_ENDIAN__
9
7obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o 10obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o
8ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y) 11ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y)
9 obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o 12 obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 99d33e2d35e6..2106d72c91dc 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -19,8 +19,7 @@
19#include <linux/timecounter.h> 19#include <linux/timecounter.h>
20 20
21#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ 21#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
22 defined(CONFIG_M520x) || defined(CONFIG_M532x) || \ 22 defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
23 defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
24/* 23/*
25 * Just figures, Motorola would have to change the offsets for 24 * Just figures, Motorola would have to change the offsets for
26 * registers in the same peripheral device on different models 25 * registers in the same peripheral device on different models
@@ -190,28 +189,45 @@
190 189
191/* 190/*
192 * Define the buffer descriptor structure. 191 * Define the buffer descriptor structure.
192 *
193 * Evidently, ARM SoCs have the FEC block generated in a
194 * little endian mode so adjust endianness accordingly.
193 */ 195 */
194#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28) 196#if defined(CONFIG_ARM)
197#define fec32_to_cpu le32_to_cpu
198#define fec16_to_cpu le16_to_cpu
199#define cpu_to_fec32 cpu_to_le32
200#define cpu_to_fec16 cpu_to_le16
201#define __fec32 __le32
202#define __fec16 __le16
203
195struct bufdesc { 204struct bufdesc {
196 unsigned short cbd_datlen; /* Data length */ 205 __fec16 cbd_datlen; /* Data length */
197 unsigned short cbd_sc; /* Control and status info */ 206 __fec16 cbd_sc; /* Control and status info */
198 unsigned long cbd_bufaddr; /* Buffer address */ 207 __fec32 cbd_bufaddr; /* Buffer address */
199}; 208};
200#else 209#else
210#define fec32_to_cpu be32_to_cpu
211#define fec16_to_cpu be16_to_cpu
212#define cpu_to_fec32 cpu_to_be32
213#define cpu_to_fec16 cpu_to_be16
214#define __fec32 __be32
215#define __fec16 __be16
216
201struct bufdesc { 217struct bufdesc {
202 unsigned short cbd_sc; /* Control and status info */ 218 __fec16 cbd_sc; /* Control and status info */
203 unsigned short cbd_datlen; /* Data length */ 219 __fec16 cbd_datlen; /* Data length */
204 unsigned long cbd_bufaddr; /* Buffer address */ 220 __fec32 cbd_bufaddr; /* Buffer address */
205}; 221};
206#endif 222#endif
207 223
208struct bufdesc_ex { 224struct bufdesc_ex {
209 struct bufdesc desc; 225 struct bufdesc desc;
210 unsigned long cbd_esc; 226 __fec32 cbd_esc;
211 unsigned long cbd_prot; 227 __fec32 cbd_prot;
212 unsigned long cbd_bdu; 228 __fec32 cbd_bdu;
213 unsigned long ts; 229 __fec32 ts;
214 unsigned short res0[4]; 230 __fec16 res0[4];
215}; 231};
216 232
217/* 233/*
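
The fec.h rework gives every buffer-descriptor field an explicit device byte order: ARM SoCs instantiate the FEC block little-endian while the ColdFire parts are big-endian, so one pair of conversion helpers is selected per platform and the fields become sparse-checkable __fec16/__fec32 types (hence the __CHECK_ENDIAN__ flags added to the Makefile above). A userspace illustration of the same accessor pattern using glibc's endian.h; the FEC_DESC_LITTLE_ENDIAN switch and names are invented for the sketch:

#include <endian.h>   /* htobe16/be16toh, htole16/le16toh (glibc) */
#include <stdint.h>
#include <stdio.h>

#ifdef FEC_DESC_LITTLE_ENDIAN
#define cpu_to_fec16 htole16
#define fec16_to_cpu le16toh
#else
#define cpu_to_fec16 htobe16
#define fec16_to_cpu be16toh
#endif

struct demo_bufdesc {
        uint16_t cbd_sc;       /* kept in device byte order */
        uint16_t cbd_datlen;
};

int main(void)
{
        struct demo_bufdesc bd;

        /* Always write and read through the helpers, never raw. */
        bd.cbd_datlen = cpu_to_fec16(1514);
        printf("datlen = %u\n", fec16_to_cpu(bd.cbd_datlen));
        return 0;
}
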
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 502da6f48f95..41c81f6ec630 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -332,11 +332,13 @@ static void fec_dump(struct net_device *ndev)
332 bdp = txq->tx_bd_base; 332 bdp = txq->tx_bd_base;
333 333
334 do { 334 do {
335 pr_info("%3u %c%c 0x%04x 0x%08lx %4u %p\n", 335 pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n",
336 index, 336 index,
337 bdp == txq->cur_tx ? 'S' : ' ', 337 bdp == txq->cur_tx ? 'S' : ' ',
338 bdp == txq->dirty_tx ? 'H' : ' ', 338 bdp == txq->dirty_tx ? 'H' : ' ',
339 bdp->cbd_sc, bdp->cbd_bufaddr, bdp->cbd_datlen, 339 fec16_to_cpu(bdp->cbd_sc),
340 fec32_to_cpu(bdp->cbd_bufaddr),
341 fec16_to_cpu(bdp->cbd_datlen),
340 txq->tx_skbuff[index]); 342 txq->tx_skbuff[index]);
341 bdp = fec_enet_get_nextdesc(bdp, fep, 0); 343 bdp = fec_enet_get_nextdesc(bdp, fep, 0);
342 index++; 344 index++;
@@ -389,7 +391,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
389 bdp = fec_enet_get_nextdesc(bdp, fep, queue); 391 bdp = fec_enet_get_nextdesc(bdp, fep, queue);
390 ebdp = (struct bufdesc_ex *)bdp; 392 ebdp = (struct bufdesc_ex *)bdp;
391 393
392 status = bdp->cbd_sc; 394 status = fec16_to_cpu(bdp->cbd_sc);
393 status &= ~BD_ENET_TX_STATS; 395 status &= ~BD_ENET_TX_STATS;
394 status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); 396 status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
395 frag_len = skb_shinfo(skb)->frags[frag].size; 397 frag_len = skb_shinfo(skb)->frags[frag].size;
@@ -411,7 +413,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
411 if (skb->ip_summed == CHECKSUM_PARTIAL) 413 if (skb->ip_summed == CHECKSUM_PARTIAL)
412 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; 414 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
413 ebdp->cbd_bdu = 0; 415 ebdp->cbd_bdu = 0;
414 ebdp->cbd_esc = estatus; 416 ebdp->cbd_esc = cpu_to_fec32(estatus);
415 } 417 }
416 418
417 bufaddr = page_address(this_frag->page.p) + this_frag->page_offset; 419 bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;
@@ -435,9 +437,9 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
435 goto dma_mapping_error; 437 goto dma_mapping_error;
436 } 438 }
437 439
438 bdp->cbd_bufaddr = addr; 440 bdp->cbd_bufaddr = cpu_to_fec32(addr);
439 bdp->cbd_datlen = frag_len; 441 bdp->cbd_datlen = cpu_to_fec16(frag_len);
440 bdp->cbd_sc = status; 442 bdp->cbd_sc = cpu_to_fec16(status);
441 } 443 }
442 444
443 return bdp; 445 return bdp;
@@ -445,8 +447,8 @@ dma_mapping_error:
445 bdp = txq->cur_tx; 447 bdp = txq->cur_tx;
446 for (i = 0; i < frag; i++) { 448 for (i = 0; i < frag; i++) {
447 bdp = fec_enet_get_nextdesc(bdp, fep, queue); 449 bdp = fec_enet_get_nextdesc(bdp, fep, queue);
448 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, 450 dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr),
449 bdp->cbd_datlen, DMA_TO_DEVICE); 451 fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE);
450 } 452 }
451 return ERR_PTR(-ENOMEM); 453 return ERR_PTR(-ENOMEM);
452} 454}
@@ -483,7 +485,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
483 /* Fill in a Tx ring entry */ 485 /* Fill in a Tx ring entry */
484 bdp = txq->cur_tx; 486 bdp = txq->cur_tx;
485 last_bdp = bdp; 487 last_bdp = bdp;
486 status = bdp->cbd_sc; 488 status = fec16_to_cpu(bdp->cbd_sc);
487 status &= ~BD_ENET_TX_STATS; 489 status &= ~BD_ENET_TX_STATS;
488 490
489 /* Set buffer length and buffer pointer */ 491 /* Set buffer length and buffer pointer */
@@ -539,21 +541,21 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
539 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; 541 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
540 542
541 ebdp->cbd_bdu = 0; 543 ebdp->cbd_bdu = 0;
542 ebdp->cbd_esc = estatus; 544 ebdp->cbd_esc = cpu_to_fec32(estatus);
543 } 545 }
544 546
545 index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep); 547 index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep);
546 /* Save skb pointer */ 548 /* Save skb pointer */
547 txq->tx_skbuff[index] = skb; 549 txq->tx_skbuff[index] = skb;
548 550
549 bdp->cbd_datlen = buflen; 551 bdp->cbd_datlen = cpu_to_fec16(buflen);
550 bdp->cbd_bufaddr = addr; 552 bdp->cbd_bufaddr = cpu_to_fec32(addr);
551 553
552 /* Send it on its way. Tell FEC it's ready, interrupt when done, 554 /* Send it on its way. Tell FEC it's ready, interrupt when done,
553 * it's the last BD of the frame, and to put the CRC on the end. 555 * it's the last BD of the frame, and to put the CRC on the end.
554 */ 556 */
555 status |= (BD_ENET_TX_READY | BD_ENET_TX_TC); 557 status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
556 bdp->cbd_sc = status; 558 bdp->cbd_sc = cpu_to_fec16(status);
557 559
558 /* If this was the last BD in the ring, start at the beginning again. */ 560 /* If this was the last BD in the ring, start at the beginning again. */
559 bdp = fec_enet_get_nextdesc(last_bdp, fep, queue); 561 bdp = fec_enet_get_nextdesc(last_bdp, fep, queue);
@@ -585,7 +587,7 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
585 unsigned int estatus = 0; 587 unsigned int estatus = 0;
586 dma_addr_t addr; 588 dma_addr_t addr;
587 589
588 status = bdp->cbd_sc; 590 status = fec16_to_cpu(bdp->cbd_sc);
589 status &= ~BD_ENET_TX_STATS; 591 status &= ~BD_ENET_TX_STATS;
590 592
591 status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); 593 status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
@@ -607,8 +609,8 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
607 return NETDEV_TX_BUSY; 609 return NETDEV_TX_BUSY;
608 } 610 }
609 611
610 bdp->cbd_datlen = size; 612 bdp->cbd_datlen = cpu_to_fec16(size);
611 bdp->cbd_bufaddr = addr; 613 bdp->cbd_bufaddr = cpu_to_fec32(addr);
612 614
613 if (fep->bufdesc_ex) { 615 if (fep->bufdesc_ex) {
614 if (fep->quirks & FEC_QUIRK_HAS_AVB) 616 if (fep->quirks & FEC_QUIRK_HAS_AVB)
@@ -616,7 +618,7 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
616 if (skb->ip_summed == CHECKSUM_PARTIAL) 618 if (skb->ip_summed == CHECKSUM_PARTIAL)
617 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; 619 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
618 ebdp->cbd_bdu = 0; 620 ebdp->cbd_bdu = 0;
619 ebdp->cbd_esc = estatus; 621 ebdp->cbd_esc = cpu_to_fec32(estatus);
620 } 622 }
621 623
622 /* Handle the last BD specially */ 624 /* Handle the last BD specially */
@@ -625,10 +627,10 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
625 if (is_last) { 627 if (is_last) {
626 status |= BD_ENET_TX_INTR; 628 status |= BD_ENET_TX_INTR;
627 if (fep->bufdesc_ex) 629 if (fep->bufdesc_ex)
628 ebdp->cbd_esc |= BD_ENET_TX_INT; 630 ebdp->cbd_esc |= cpu_to_fec32(BD_ENET_TX_INT);
629 } 631 }
630 632
631 bdp->cbd_sc = status; 633 bdp->cbd_sc = cpu_to_fec16(status);
632 634
633 return 0; 635 return 0;
634} 636}
@@ -647,7 +649,7 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
647 unsigned short status; 649 unsigned short status;
648 unsigned int estatus = 0; 650 unsigned int estatus = 0;
649 651
650 status = bdp->cbd_sc; 652 status = fec16_to_cpu(bdp->cbd_sc);
651 status &= ~BD_ENET_TX_STATS; 653 status &= ~BD_ENET_TX_STATS;
652 status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); 654 status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
653 655
@@ -671,8 +673,8 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
671 } 673 }
672 } 674 }
673 675
674 bdp->cbd_bufaddr = dmabuf; 676 bdp->cbd_bufaddr = cpu_to_fec32(dmabuf);
675 bdp->cbd_datlen = hdr_len; 677 bdp->cbd_datlen = cpu_to_fec16(hdr_len);
676 678
677 if (fep->bufdesc_ex) { 679 if (fep->bufdesc_ex) {
678 if (fep->quirks & FEC_QUIRK_HAS_AVB) 680 if (fep->quirks & FEC_QUIRK_HAS_AVB)
@@ -680,10 +682,10 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
680 if (skb->ip_summed == CHECKSUM_PARTIAL) 682 if (skb->ip_summed == CHECKSUM_PARTIAL)
681 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; 683 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
682 ebdp->cbd_bdu = 0; 684 ebdp->cbd_bdu = 0;
683 ebdp->cbd_esc = estatus; 685 ebdp->cbd_esc = cpu_to_fec32(estatus);
684 } 686 }
685 687
686 bdp->cbd_sc = status; 688 bdp->cbd_sc = cpu_to_fec16(status);
687 689
688 return 0; 690 return 0;
689} 691}
@@ -823,15 +825,15 @@ static void fec_enet_bd_init(struct net_device *dev)
823 825
824 /* Initialize the BD for every fragment in the page. */ 826 /* Initialize the BD for every fragment in the page. */
825 if (bdp->cbd_bufaddr) 827 if (bdp->cbd_bufaddr)
826 bdp->cbd_sc = BD_ENET_RX_EMPTY; 828 bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
827 else 829 else
828 bdp->cbd_sc = 0; 830 bdp->cbd_sc = cpu_to_fec16(0);
829 bdp = fec_enet_get_nextdesc(bdp, fep, q); 831 bdp = fec_enet_get_nextdesc(bdp, fep, q);
830 } 832 }
831 833
832 /* Set the last buffer to wrap */ 834 /* Set the last buffer to wrap */
833 bdp = fec_enet_get_prevdesc(bdp, fep, q); 835 bdp = fec_enet_get_prevdesc(bdp, fep, q);
834 bdp->cbd_sc |= BD_SC_WRAP; 836 bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
835 837
836 rxq->cur_rx = rxq->rx_bd_base; 838 rxq->cur_rx = rxq->rx_bd_base;
837 } 839 }
@@ -844,18 +846,18 @@ static void fec_enet_bd_init(struct net_device *dev)
844 846
845 for (i = 0; i < txq->tx_ring_size; i++) { 847 for (i = 0; i < txq->tx_ring_size; i++) {
846 /* Initialize the BD for every fragment in the page. */ 848 /* Initialize the BD for every fragment in the page. */
847 bdp->cbd_sc = 0; 849 bdp->cbd_sc = cpu_to_fec16(0);
848 if (txq->tx_skbuff[i]) { 850 if (txq->tx_skbuff[i]) {
849 dev_kfree_skb_any(txq->tx_skbuff[i]); 851 dev_kfree_skb_any(txq->tx_skbuff[i]);
850 txq->tx_skbuff[i] = NULL; 852 txq->tx_skbuff[i] = NULL;
851 } 853 }
852 bdp->cbd_bufaddr = 0; 854 bdp->cbd_bufaddr = cpu_to_fec32(0);
853 bdp = fec_enet_get_nextdesc(bdp, fep, q); 855 bdp = fec_enet_get_nextdesc(bdp, fep, q);
854 } 856 }
855 857
856 /* Set the last buffer to wrap */ 858 /* Set the last buffer to wrap */
857 bdp = fec_enet_get_prevdesc(bdp, fep, q); 859 bdp = fec_enet_get_prevdesc(bdp, fep, q);
858 bdp->cbd_sc |= BD_SC_WRAP; 860 bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
859 txq->dirty_tx = bdp; 861 txq->dirty_tx = bdp;
860 } 862 }
861} 863}
@@ -947,8 +949,10 @@ fec_restart(struct net_device *ndev)
947 */ 949 */
948 if (fep->quirks & FEC_QUIRK_ENET_MAC) { 950 if (fep->quirks & FEC_QUIRK_ENET_MAC) {
949 memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN); 951 memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
950 writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW); 952 writel((__force u32)cpu_to_be32(temp_mac[0]),
951 writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH); 953 fep->hwp + FEC_ADDR_LOW);
954 writel((__force u32)cpu_to_be32(temp_mac[1]),
955 fep->hwp + FEC_ADDR_HIGH);
952 } 956 }
953 957
954 /* Clear any outstanding interrupt. */ 958 /* Clear any outstanding interrupt. */
@@ -1222,7 +1226,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
1222 while (bdp != READ_ONCE(txq->cur_tx)) { 1226 while (bdp != READ_ONCE(txq->cur_tx)) {
1223 /* Order the load of cur_tx and cbd_sc */ 1227 /* Order the load of cur_tx and cbd_sc */
1224 rmb(); 1228 rmb();
1225 status = READ_ONCE(bdp->cbd_sc); 1229 status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc));
1226 if (status & BD_ENET_TX_READY) 1230 if (status & BD_ENET_TX_READY)
1227 break; 1231 break;
1228 1232
@@ -1230,10 +1234,12 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
1230 1234
1231 skb = txq->tx_skbuff[index]; 1235 skb = txq->tx_skbuff[index];
1232 txq->tx_skbuff[index] = NULL; 1236 txq->tx_skbuff[index] = NULL;
1233 if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr)) 1237 if (!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
1234 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, 1238 dma_unmap_single(&fep->pdev->dev,
1235 bdp->cbd_datlen, DMA_TO_DEVICE); 1239 fec32_to_cpu(bdp->cbd_bufaddr),
1236 bdp->cbd_bufaddr = 0; 1240 fec16_to_cpu(bdp->cbd_datlen),
1241 DMA_TO_DEVICE);
1242 bdp->cbd_bufaddr = cpu_to_fec32(0);
1237 if (!skb) { 1243 if (!skb) {
1238 bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); 1244 bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
1239 continue; 1245 continue;
@@ -1264,7 +1270,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
1264 struct skb_shared_hwtstamps shhwtstamps; 1270 struct skb_shared_hwtstamps shhwtstamps;
1265 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 1271 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
1266 1272
1267 fec_enet_hwtstamp(fep, ebdp->ts, &shhwtstamps); 1273 fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
1268 skb_tstamp_tx(skb, &shhwtstamps); 1274 skb_tstamp_tx(skb, &shhwtstamps);
1269 } 1275 }
1270 1276
@@ -1324,10 +1330,8 @@ fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff
1324 if (off) 1330 if (off)
1325 skb_reserve(skb, fep->rx_align + 1 - off); 1331 skb_reserve(skb, fep->rx_align + 1 - off);
1326 1332
1327 bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data, 1333 bdp->cbd_bufaddr = cpu_to_fec32(dma_map_single(&fep->pdev->dev, skb->data, FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE));
1328 FEC_ENET_RX_FRSIZE - fep->rx_align, 1334 if (dma_mapping_error(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr))) {
1329 DMA_FROM_DEVICE);
1330 if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
1331 if (net_ratelimit()) 1335 if (net_ratelimit())
1332 netdev_err(ndev, "Rx DMA memory map failed\n"); 1336 netdev_err(ndev, "Rx DMA memory map failed\n");
1333 return -ENOMEM; 1337 return -ENOMEM;
@@ -1349,7 +1353,8 @@ static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
1349 if (!new_skb) 1353 if (!new_skb)
1350 return false; 1354 return false;
1351 1355
1352 dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr, 1356 dma_sync_single_for_cpu(&fep->pdev->dev,
1357 fec32_to_cpu(bdp->cbd_bufaddr),
1353 FEC_ENET_RX_FRSIZE - fep->rx_align, 1358 FEC_ENET_RX_FRSIZE - fep->rx_align,
1354 DMA_FROM_DEVICE); 1359 DMA_FROM_DEVICE);
1355 if (!swap) 1360 if (!swap)
@@ -1396,7 +1401,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
1396 */ 1401 */
1397 bdp = rxq->cur_rx; 1402 bdp = rxq->cur_rx;
1398 1403
1399 while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) { 1404 while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
1400 1405
1401 if (pkt_received >= budget) 1406 if (pkt_received >= budget)
1402 break; 1407 break;
@@ -1438,7 +1443,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
1438 1443
1439 /* Process the incoming frame. */ 1444 /* Process the incoming frame. */
1440 ndev->stats.rx_packets++; 1445 ndev->stats.rx_packets++;
1441 pkt_len = bdp->cbd_datlen; 1446 pkt_len = fec16_to_cpu(bdp->cbd_datlen);
1442 ndev->stats.rx_bytes += pkt_len; 1447 ndev->stats.rx_bytes += pkt_len;
1443 1448
1444 index = fec_enet_get_bd_index(rxq->rx_bd_base, bdp, fep); 1449 index = fec_enet_get_bd_index(rxq->rx_bd_base, bdp, fep);
@@ -1456,7 +1461,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
1456 ndev->stats.rx_dropped++; 1461 ndev->stats.rx_dropped++;
1457 goto rx_processing_done; 1462 goto rx_processing_done;
1458 } 1463 }
1459 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, 1464 dma_unmap_single(&fep->pdev->dev,
1465 fec32_to_cpu(bdp->cbd_bufaddr),
1460 FEC_ENET_RX_FRSIZE - fep->rx_align, 1466 FEC_ENET_RX_FRSIZE - fep->rx_align,
1461 DMA_FROM_DEVICE); 1467 DMA_FROM_DEVICE);
1462 } 1468 }
@@ -1475,7 +1481,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
1475 /* If this is a VLAN packet remove the VLAN Tag */ 1481 /* If this is a VLAN packet remove the VLAN Tag */
1476 vlan_packet_rcvd = false; 1482 vlan_packet_rcvd = false;
1477 if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) && 1483 if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1478 fep->bufdesc_ex && (ebdp->cbd_esc & BD_ENET_RX_VLAN)) { 1484 fep->bufdesc_ex &&
1485 (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) {
1479 /* Push and remove the vlan tag */ 1486 /* Push and remove the vlan tag */
1480 struct vlan_hdr *vlan_header = 1487 struct vlan_hdr *vlan_header =
1481 (struct vlan_hdr *) (data + ETH_HLEN); 1488 (struct vlan_hdr *) (data + ETH_HLEN);
@@ -1491,12 +1498,12 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
1491 1498
1492 /* Get receive timestamp from the skb */ 1499 /* Get receive timestamp from the skb */
1493 if (fep->hwts_rx_en && fep->bufdesc_ex) 1500 if (fep->hwts_rx_en && fep->bufdesc_ex)
1494 fec_enet_hwtstamp(fep, ebdp->ts, 1501 fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts),
1495 skb_hwtstamps(skb)); 1502 skb_hwtstamps(skb));
1496 1503
1497 if (fep->bufdesc_ex && 1504 if (fep->bufdesc_ex &&
1498 (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) { 1505 (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
1499 if (!(ebdp->cbd_esc & FLAG_RX_CSUM_ERROR)) { 1506 if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR))) {
1500 /* don't check it */ 1507 /* don't check it */
1501 skb->ip_summed = CHECKSUM_UNNECESSARY; 1508 skb->ip_summed = CHECKSUM_UNNECESSARY;
1502 } else { 1509 } else {
@@ -1513,7 +1520,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
1513 napi_gro_receive(&fep->napi, skb); 1520 napi_gro_receive(&fep->napi, skb);
1514 1521
1515 if (is_copybreak) { 1522 if (is_copybreak) {
1516 dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr, 1523 dma_sync_single_for_device(&fep->pdev->dev,
1524 fec32_to_cpu(bdp->cbd_bufaddr),
1517 FEC_ENET_RX_FRSIZE - fep->rx_align, 1525 FEC_ENET_RX_FRSIZE - fep->rx_align,
1518 DMA_FROM_DEVICE); 1526 DMA_FROM_DEVICE);
1519 } else { 1527 } else {
@@ -1527,12 +1535,12 @@ rx_processing_done:
1527 1535
1528 /* Mark the buffer empty */ 1536 /* Mark the buffer empty */
1529 status |= BD_ENET_RX_EMPTY; 1537 status |= BD_ENET_RX_EMPTY;
1530 bdp->cbd_sc = status; 1538 bdp->cbd_sc = cpu_to_fec16(status);
1531 1539
1532 if (fep->bufdesc_ex) { 1540 if (fep->bufdesc_ex) {
1533 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 1541 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
1534 1542
1535 ebdp->cbd_esc = BD_ENET_RX_INT; 1543 ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
1536 ebdp->cbd_prot = 0; 1544 ebdp->cbd_prot = 0;
1537 ebdp->cbd_bdu = 0; 1545 ebdp->cbd_bdu = 0;
1538 } 1546 }
@@ -2145,8 +2153,7 @@ static int fec_enet_get_regs_len(struct net_device *ndev)
2145 2153
2146/* List of registers that can be safety be read to dump them with ethtool */ 2154/* List of registers that can be safety be read to dump them with ethtool */
2147#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ 2155#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
2148 defined(CONFIG_M520x) || defined(CONFIG_M532x) || \ 2156 defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
2149 defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
2150static u32 fec_enet_register_offset[] = { 2157static u32 fec_enet_register_offset[] = {
2151 FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0, 2158 FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
2152 FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL, 2159 FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
@@ -2662,7 +2669,7 @@ static void fec_enet_free_buffers(struct net_device *ndev)
2662 rxq->rx_skbuff[i] = NULL; 2669 rxq->rx_skbuff[i] = NULL;
2663 if (skb) { 2670 if (skb) {
2664 dma_unmap_single(&fep->pdev->dev, 2671 dma_unmap_single(&fep->pdev->dev,
2665 bdp->cbd_bufaddr, 2672 fec32_to_cpu(bdp->cbd_bufaddr),
2666 FEC_ENET_RX_FRSIZE - fep->rx_align, 2673 FEC_ENET_RX_FRSIZE - fep->rx_align,
2667 DMA_FROM_DEVICE); 2674 DMA_FROM_DEVICE);
2668 dev_kfree_skb(skb); 2675 dev_kfree_skb(skb);
@@ -2777,11 +2784,11 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
2777 } 2784 }
2778 2785
2779 rxq->rx_skbuff[i] = skb; 2786 rxq->rx_skbuff[i] = skb;
2780 bdp->cbd_sc = BD_ENET_RX_EMPTY; 2787 bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
2781 2788
2782 if (fep->bufdesc_ex) { 2789 if (fep->bufdesc_ex) {
2783 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 2790 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
2784 ebdp->cbd_esc = BD_ENET_RX_INT; 2791 ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
2785 } 2792 }
2786 2793
2787 bdp = fec_enet_get_nextdesc(bdp, fep, queue); 2794 bdp = fec_enet_get_nextdesc(bdp, fep, queue);
@@ -2789,7 +2796,7 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
2789 2796
2790 /* Set the last buffer to wrap. */ 2797 /* Set the last buffer to wrap. */
2791 bdp = fec_enet_get_prevdesc(bdp, fep, queue); 2798 bdp = fec_enet_get_prevdesc(bdp, fep, queue);
2792 bdp->cbd_sc |= BD_SC_WRAP; 2799 bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
2793 return 0; 2800 return 0;
2794 2801
2795 err_alloc: 2802 err_alloc:
@@ -2812,12 +2819,12 @@ fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
2812 if (!txq->tx_bounce[i]) 2819 if (!txq->tx_bounce[i])
2813 goto err_alloc; 2820 goto err_alloc;
2814 2821
2815 bdp->cbd_sc = 0; 2822 bdp->cbd_sc = cpu_to_fec16(0);
2816 bdp->cbd_bufaddr = 0; 2823 bdp->cbd_bufaddr = cpu_to_fec32(0);
2817 2824
2818 if (fep->bufdesc_ex) { 2825 if (fep->bufdesc_ex) {
2819 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 2826 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
2820 ebdp->cbd_esc = BD_ENET_TX_INT; 2827 ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT);
2821 } 2828 }
2822 2829
2823 bdp = fec_enet_get_nextdesc(bdp, fep, queue); 2830 bdp = fec_enet_get_nextdesc(bdp, fep, queue);
@@ -2825,7 +2832,7 @@ fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
2825 2832
2826 /* Set the last buffer to wrap. */ 2833 /* Set the last buffer to wrap. */
2827 bdp = fec_enet_get_prevdesc(bdp, fep, queue); 2834 bdp = fec_enet_get_prevdesc(bdp, fep, queue);
2828 bdp->cbd_sc |= BD_SC_WRAP; 2835 bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
2829 2836
2830 return 0; 2837 return 0;
2831 2838
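
Beyond the mechanical cpu_to_fec16()/fec32_to_cpu() conversions, note the TX-clean hunk above: the descriptor status is fetched with READ_ONCE(), an rmb() orders it after the cur_tx snapshot, and every flag test happens on the converted value. A hedged kernel-style sketch of that descriptor-status read; the struct below is a simplified placeholder, not the real bufdesc:

#include <asm/byteorder.h>
#include <linux/compiler.h>
#include <linux/types.h>

struct demo_txbd {
        __be16 cbd_sc;          /* device-endian status word */
        __be16 cbd_datlen;
        __be32 cbd_bufaddr;
};

/* True while the hardware still owns the descriptor. READ_ONCE() keeps
 * the compiler from caching the status across poll iterations, and the
 * raw field is converted before any bit test.
 */
static bool demo_bd_hw_owned(const struct demo_txbd *bdp, u16 ready_bit)
{
        u16 status = be16_to_cpu(READ_ONCE(bdp->cbd_sc));

        return (status & ready_bit) != 0;
}
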
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index 623aa1c8ebc6..79a210aaf0bb 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -2791,6 +2791,8 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
2791 goto fman_free; 2791 goto fman_free;
2792 } 2792 }
2793 2793
2794 fman->dev = &of_dev->dev;
2795
2794 return fman; 2796 return fman;
2795 2797
2796fman_node_put: 2798fman_node_put:
@@ -2845,8 +2847,6 @@ static int fman_probe(struct platform_device *of_dev)
2845 2847
2846 dev_set_drvdata(dev, fman); 2848 dev_set_drvdata(dev, fman);
2847 2849
2848 fman->dev = dev;
2849
2850 dev_dbg(dev, "FMan%d probed\n", fman->dts_params.id); 2850 dev_dbg(dev, "FMan%d probed\n", fman->dts_params.id);
2851 2851
2852 return 0; 2852 return 0;
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
index 52e0091b4fb2..1ba359f17ec6 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
@@ -552,7 +552,7 @@ static void tx_restart(struct net_device *dev)
552 cbd_t __iomem *prev_bd; 552 cbd_t __iomem *prev_bd;
553 cbd_t __iomem *last_tx_bd; 553 cbd_t __iomem *last_tx_bd;
554 554
555 last_tx_bd = fep->tx_bd_base + ((fpi->tx_ring - 1) * sizeof(cbd_t)); 555 last_tx_bd = fep->tx_bd_base + (fpi->tx_ring - 1);
556 556
557 /* get the current bd held in TBPTR and scan back from this point */ 557 /* get the current bd held in TBPTR and scan back from this point */
558 recheck_bd = curr_tbptr = (cbd_t __iomem *) 558 recheck_bd = curr_tbptr = (cbd_t __iomem *)
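
The mac-fcc fix relies on C pointer arithmetic already scaling by the element size: tx_bd_base + (tx_ring - 1) addresses the last descriptor, while the old expression multiplied by sizeof(cbd_t) a second time and pointed far past the ring. A runnable illustration:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_bd {              /* stand-in for cbd_t */
        uint16_t sc;
        uint16_t datlen;
        uint32_t bufaddr;
};

int main(void)
{
        struct demo_bd ring[8];
        struct demo_bd *base = ring;

        /* Pointer arithmetic counts elements, so this is the last entry. */
        struct demo_bd *last = base + (8 - 1);

        /* Scaling by sizeof() again would address element 56 instead. */
        ptrdiff_t old_formula = (8 - 1) * sizeof(struct demo_bd);

        printf("last entry index:  %td\n", last - base);    /* 7 */
        printf("old formula index: %td\n", old_formula);    /* 56 */
        return 0;
}
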
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 2aa7b401cc3b..b9ecf197ad11 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1111,8 +1111,10 @@ static void __gfar_detect_errata_85xx(struct gfar_private *priv)
1111 1111
1112 if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20)) 1112 if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
1113 priv->errata |= GFAR_ERRATA_12; 1113 priv->errata |= GFAR_ERRATA_12;
1114 /* P2020/P1010 Rev 1; MPC8548 Rev 2 */
1114 if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) || 1115 if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
1115 ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20))) 1116 ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)) ||
1117 ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) < 0x31)))
1116 priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */ 1118 priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
1117} 1119}
1118#endif 1120#endif
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index a7139f588ad2..678f5018d0be 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -469,8 +469,8 @@ static int fmvj18x_config(struct pcmcia_device *link)
469 goto failed; 469 goto failed;
470 } 470 }
471 /* Read MACID from CIS */ 471 /* Read MACID from CIS */
472 for (i = 5; i < 11; i++) 472 for (i = 0; i < 6; i++)
473 dev->dev_addr[i] = buf[i]; 473 dev->dev_addr[i] = buf[i + 5];
474 kfree(buf); 474 kfree(buf);
475 } else { 475 } else {
476 if (pcmcia_get_mac_from_cis(link, dev)) 476 if (pcmcia_get_mac_from_cis(link, dev))
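The fmvj18x_cs loop fix above untangles source offset from destination index: the station address occupies bytes 5..10 of the CIS buffer and has to land in dev_addr[0..5], whereas the old loop used 5..10 for both, leaving dev_addr[0..4] untouched and writing past the six-byte address. The same copy without the loop (a sketch, assuming buf holds at least 11 bytes):

        /* Six address bytes start at offset 5 of the CIS tuple buffer. */
        memcpy(dev->dev_addr, buf + 5, ETH_ALEN);       /* ETH_ALEN == 6 */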
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index 74beb1867230..4ccc032633c4 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -25,6 +25,7 @@ config HIX5HD2_GMAC
25 25
26config HIP04_ETH 26config HIP04_ETH
27 tristate "HISILICON P04 Ethernet support" 27 tristate "HISILICON P04 Ethernet support"
28 depends on HAS_IOMEM # For MFD_SYSCON
28 select MARVELL_PHY 29 select MARVELL_PHY
29 select MFD_SYSCON 30 select MFD_SYSCON
30 select HNS_MDIO 31 select HNS_MDIO
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index b3645297477e..3bfe36f9405b 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -95,21 +95,17 @@ static struct hnae_buf_ops hnae_bops = {
95static int __ae_match(struct device *dev, const void *data) 95static int __ae_match(struct device *dev, const void *data)
96{ 96{
97 struct hnae_ae_dev *hdev = cls_to_ae_dev(dev); 97 struct hnae_ae_dev *hdev = cls_to_ae_dev(dev);
98 const char *ae_id = data;
99 98
100 if (!strncmp(ae_id, hdev->name, AE_NAME_SIZE)) 99 return hdev->dev->of_node == data;
101 return 1;
102
103 return 0;
104} 100}
105 101
106static struct hnae_ae_dev *find_ae(const char *ae_id) 102static struct hnae_ae_dev *find_ae(const struct device_node *ae_node)
107{ 103{
108 struct device *dev; 104 struct device *dev;
109 105
110 WARN_ON(!ae_id); 106 WARN_ON(!ae_node);
111 107
112 dev = class_find_device(hnae_class, NULL, ae_id, __ae_match); 108 dev = class_find_device(hnae_class, NULL, ae_node, __ae_match);
113 109
114 return dev ? cls_to_ae_dev(dev) : NULL; 110 return dev ? cls_to_ae_dev(dev) : NULL;
115} 111}
@@ -316,7 +312,8 @@ EXPORT_SYMBOL(hnae_reinit_handle);
316 * return handle ptr or ERR_PTR 312 * return handle ptr or ERR_PTR
317 */ 313 */
318struct hnae_handle *hnae_get_handle(struct device *owner_dev, 314struct hnae_handle *hnae_get_handle(struct device *owner_dev,
319 const char *ae_id, u32 port_id, 315 const struct device_node *ae_node,
316 u32 port_id,
320 struct hnae_buf_ops *bops) 317 struct hnae_buf_ops *bops)
321{ 318{
322 struct hnae_ae_dev *dev; 319 struct hnae_ae_dev *dev;
@@ -324,7 +321,7 @@ struct hnae_handle *hnae_get_handle(struct device *owner_dev,
324 int i, j; 321 int i, j;
325 int ret; 322 int ret;
326 323
327 dev = find_ae(ae_id); 324 dev = find_ae(ae_node);
328 if (!dev) 325 if (!dev)
329 return ERR_PTR(-ENODEV); 326 return ERR_PTR(-ENODEV);
330 327
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index 6ca94dc3dda3..1cbcb9fa3fb5 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -524,8 +524,11 @@ struct hnae_handle {
524 524
525#define ring_to_dev(ring) ((ring)->q->dev->dev) 525#define ring_to_dev(ring) ((ring)->q->dev->dev)
526 526
527struct hnae_handle *hnae_get_handle(struct device *owner_dev, const char *ae_id, 527struct hnae_handle *hnae_get_handle(struct device *owner_dev,
528 u32 port_id, struct hnae_buf_ops *bops); 528 const struct device_node *ae_node,
529 u32 port_id,
530 struct hnae_buf_ops *bops);
531
529void hnae_put_handle(struct hnae_handle *handle); 532void hnae_put_handle(struct hnae_handle *handle);
530int hnae_ae_register(struct hnae_ae_dev *dev, struct module *owner); 533int hnae_ae_register(struct hnae_ae_dev *dev, struct module *owner);
531void hnae_ae_unregister(struct hnae_ae_dev *dev); 534void hnae_ae_unregister(struct hnae_ae_dev *dev);
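The hnae.c/hnae.h changes above replace the string-name lookup with the device_node obtained from an "ae-handle" phandle; class_find_device() passes its opaque data pointer straight to the match callback, so comparing it with dev->of_node is the whole match. The pattern in isolation (generic names; only the class_find_device() call shape comes from the hunk):

static int match_by_of_node(struct device *dev, const void *data)
{
        /* 'data' is the device_node the consumer resolved from its phandle. */
        return dev->of_node == data;
}

static struct device *find_by_node(struct class *cls,
                                   const struct device_node *np)
{
        /* The returned device, if any, carries a reference for the caller. */
        return class_find_device(cls, NULL, np, match_by_of_node);
}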
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index 522b264866b4..d4f92ed322d6 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -675,8 +675,12 @@ static int hns_ae_config_loopback(struct hnae_handle *handle,
675{ 675{
676 int ret; 676 int ret;
677 struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle); 677 struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
678 struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
678 679
679 switch (loop) { 680 switch (loop) {
681 case MAC_INTERNALLOOP_PHY:
682 ret = 0;
683 break;
680 case MAC_INTERNALLOOP_SERDES: 684 case MAC_INTERNALLOOP_SERDES:
681 ret = hns_mac_config_sds_loopback(vf_cb->mac_cb, en); 685 ret = hns_mac_config_sds_loopback(vf_cb->mac_cb, en);
682 break; 686 break;
@@ -686,6 +690,10 @@ static int hns_ae_config_loopback(struct hnae_handle *handle,
686 default: 690 default:
687 ret = -EINVAL; 691 ret = -EINVAL;
688 } 692 }
693
694 if (!ret)
695 hns_dsaf_set_inner_lb(mac_cb->dsaf_dev, mac_cb->mac_id, en);
696
689 return ret; 697 return ret;
690} 698}
691 699
@@ -847,6 +855,7 @@ static struct hnae_ae_ops hns_dsaf_ops = {
847int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev) 855int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev)
848{ 856{
849 struct hnae_ae_dev *ae_dev = &dsaf_dev->ae_dev; 857 struct hnae_ae_dev *ae_dev = &dsaf_dev->ae_dev;
858 static atomic_t id = ATOMIC_INIT(-1);
850 859
851 switch (dsaf_dev->dsaf_ver) { 860 switch (dsaf_dev->dsaf_ver) {
852 case AE_VERSION_1: 861 case AE_VERSION_1:
@@ -858,6 +867,9 @@ int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev)
858 default: 867 default:
859 break; 868 break;
860 } 869 }
870
871 snprintf(ae_dev->name, AE_NAME_SIZE, "%s%d", DSAF_DEVICE_NAME,
872 (int)atomic_inc_return(&id));
861 ae_dev->ops = &hns_dsaf_ops; 873 ae_dev->ops = &hns_dsaf_ops;
862 ae_dev->dev = dsaf_dev->dev; 874 ae_dev->dev = dsaf_dev->dev;
863 875
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 1c33bd06bd5c..38fc5be3870c 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -35,7 +35,7 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
35 int ret, i; 35 int ret, i;
36 u32 desc_num; 36 u32 desc_num;
37 u32 buf_size; 37 u32 buf_size;
38 const char *name, *mode_str; 38 const char *mode_str;
39 struct device_node *np = dsaf_dev->dev->of_node; 39 struct device_node *np = dsaf_dev->dev->of_node;
40 40
41 if (of_device_is_compatible(np, "hisilicon,hns-dsaf-v1")) 41 if (of_device_is_compatible(np, "hisilicon,hns-dsaf-v1"))
@@ -43,14 +43,6 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
43 else 43 else
44 dsaf_dev->dsaf_ver = AE_VERSION_2; 44 dsaf_dev->dsaf_ver = AE_VERSION_2;
45 45
46 ret = of_property_read_string(np, "dsa_name", &name);
47 if (ret) {
48 dev_err(dsaf_dev->dev, "get dsaf name fail, ret=%d!\n", ret);
49 return ret;
50 }
51 strncpy(dsaf_dev->ae_dev.name, name, AE_NAME_SIZE);
52 dsaf_dev->ae_dev.name[AE_NAME_SIZE - 1] = '\0';
53
54 ret = of_property_read_string(np, "mode", &mode_str); 46 ret = of_property_read_string(np, "mode", &mode_str);
55 if (ret) { 47 if (ret) {
56 dev_err(dsaf_dev->dev, "get dsaf mode fail, ret=%d!\n", ret); 48 dev_err(dsaf_dev->dev, "get dsaf mode fail, ret=%d!\n", ret);
@@ -238,6 +230,30 @@ static void hns_dsaf_mix_def_qid_cfg(struct dsaf_device *dsaf_dev)
238 } 230 }
239} 231}
240 232
233static void hns_dsaf_inner_qid_cfg(struct dsaf_device *dsaf_dev)
234{
235 u16 max_q_per_vf, max_vfn;
236 u32 q_id, q_num_per_port;
237 u32 mac_id;
238
239 if (AE_IS_VER1(dsaf_dev->dsaf_ver))
240 return;
241
242 hns_rcb_get_queue_mode(dsaf_dev->dsaf_mode,
243 HNS_DSAF_COMM_SERVICE_NW_IDX,
244 &max_vfn, &max_q_per_vf);
245 q_num_per_port = max_vfn * max_q_per_vf;
246
247 for (mac_id = 0, q_id = 0; mac_id < DSAF_SERVICE_NW_NUM; mac_id++) {
248 dsaf_set_dev_field(dsaf_dev,
249 DSAFV2_SERDES_LBK_0_REG + 4 * mac_id,
250 DSAFV2_SERDES_LBK_QID_M,
251 DSAFV2_SERDES_LBK_QID_S,
252 q_id);
253 q_id += q_num_per_port;
254 }
255}
256
241/** 257/**
242 * hns_dsaf_sw_port_type_cfg - cfg sw type 258 * hns_dsaf_sw_port_type_cfg - cfg sw type
243 * @dsaf_id: dsa fabric id 259 * @dsaf_id: dsa fabric id
@@ -699,6 +715,16 @@ void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en)
699 dsaf_set_dev_bit(dsaf_dev, DSAF_CFG_0_REG, DSAF_CFG_MIX_MODE_S, !!en); 715 dsaf_set_dev_bit(dsaf_dev, DSAF_CFG_0_REG, DSAF_CFG_MIX_MODE_S, !!en);
700} 716}
701 717
718void hns_dsaf_set_inner_lb(struct dsaf_device *dsaf_dev, u32 mac_id, u32 en)
719{
720 if (AE_IS_VER1(dsaf_dev->dsaf_ver) ||
721 dsaf_dev->mac_cb[mac_id].mac_type == HNAE_PORT_DEBUG)
722 return;
723
724 dsaf_set_dev_bit(dsaf_dev, DSAFV2_SERDES_LBK_0_REG + 4 * mac_id,
725 DSAFV2_SERDES_LBK_EN_B, !!en);
726}
727
702/** 728/**
703 * hns_dsaf_tbl_stat_en - tbl 729 * hns_dsaf_tbl_stat_en - tbl
704 * @dsaf_id: dsa fabric id 730 * @dsaf_id: dsa fabric id
@@ -1030,6 +1056,9 @@ static void hns_dsaf_comm_init(struct dsaf_device *dsaf_dev)
1030 /* set promisc def queue id */ 1056 /* set promisc def queue id */
1031 hns_dsaf_mix_def_qid_cfg(dsaf_dev); 1057 hns_dsaf_mix_def_qid_cfg(dsaf_dev);
1032 1058
1059 /* set inner loopback queue id */
1060 hns_dsaf_inner_qid_cfg(dsaf_dev);
1061
1033 /* in non switch mode, set all port to access mode */ 1062 /* in non switch mode, set all port to access mode */
1034 hns_dsaf_sw_port_type_cfg(dsaf_dev, DSAF_SW_PORT_TYPE_NON_VLAN); 1063 hns_dsaf_sw_port_type_cfg(dsaf_dev, DSAF_SW_PORT_TYPE_NON_VLAN);
1035 1064
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
index 31c312f9826e..5fea226efaf3 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -18,6 +18,7 @@ struct hns_mac_cb;
18 18
19#define DSAF_DRV_NAME "hns_dsaf" 19#define DSAF_DRV_NAME "hns_dsaf"
20#define DSAF_MOD_VERSION "v1.0" 20#define DSAF_MOD_VERSION "v1.0"
21#define DSAF_DEVICE_NAME "dsaf"
21 22
22#define HNS_DSAF_DEBUG_NW_REG_OFFSET 0x100000 23#define HNS_DSAF_DEBUG_NW_REG_OFFSET 0x100000
23 24
@@ -416,5 +417,6 @@ void hns_dsaf_get_strings(int stringset, u8 *data, int port);
416void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data); 417void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data);
417int hns_dsaf_get_regs_count(void); 418int hns_dsaf_get_regs_count(void);
418void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en); 419void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en);
420void hns_dsaf_set_inner_lb(struct dsaf_device *dsaf_dev, u32 mac_id, u32 en);
419 421
420#endif /* __HNS_DSAF_MAIN_H__ */ 422#endif /* __HNS_DSAF_MAIN_H__ */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index f0c4f9b09d5b..60d695daa471 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -134,6 +134,7 @@
134#define DSAF_XGE_INT_STS_0_REG 0x1C0 134#define DSAF_XGE_INT_STS_0_REG 0x1C0
135#define DSAF_PPE_INT_STS_0_REG 0x1E0 135#define DSAF_PPE_INT_STS_0_REG 0x1E0
136#define DSAF_ROCEE_INT_STS_0_REG 0x200 136#define DSAF_ROCEE_INT_STS_0_REG 0x200
137#define DSAFV2_SERDES_LBK_0_REG 0x220
137#define DSAF_PPE_QID_CFG_0_REG 0x300 138#define DSAF_PPE_QID_CFG_0_REG 0x300
138#define DSAF_SW_PORT_TYPE_0_REG 0x320 139#define DSAF_SW_PORT_TYPE_0_REG 0x320
139#define DSAF_STP_PORT_TYPE_0_REG 0x340 140#define DSAF_STP_PORT_TYPE_0_REG 0x340
@@ -857,6 +858,10 @@
857#define PPEV2_CFG_RSS_TBL_4N3_S 24 858#define PPEV2_CFG_RSS_TBL_4N3_S 24
858#define PPEV2_CFG_RSS_TBL_4N3_M (((1UL << 5) - 1) << PPEV2_CFG_RSS_TBL_4N3_S) 859#define PPEV2_CFG_RSS_TBL_4N3_M (((1UL << 5) - 1) << PPEV2_CFG_RSS_TBL_4N3_S)
859 860
861#define DSAFV2_SERDES_LBK_EN_B 8
862#define DSAFV2_SERDES_LBK_QID_S 0
863#define DSAFV2_SERDES_LBK_QID_M (((1UL << 8) - 1) << DSAFV2_SERDES_LBK_QID_S)
864
860#define PPE_CNT_CLR_CE_B 0 865#define PPE_CNT_CLR_CE_B 0
861#define PPE_CNT_CLR_SNAP_EN_B 1 866#define PPE_CNT_CLR_SNAP_EN_B 1
862 867
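The new DSAFV2_SERDES_LBK_* macros above follow the register-field convention used throughout these headers: _S is the bit offset, _M is the mask already shifted into position, and _B names a single bit. A field update is then a read-modify-write; a generic sketch of the helper that dsaf_set_dev_field() presumably wraps (the body below is an assumption):

static inline void set_reg_field(void __iomem *reg, u32 mask, u32 shift,
                                 u32 val)
{
        u32 v = readl(reg);

        v &= ~mask;                     /* clear the old field */
        v |= (val << shift) & mask;     /* insert the new value */
        writel(v, reg);
}

/* e.g. set_reg_field(base + DSAFV2_SERDES_LBK_0_REG + 4 * mac_id,
 *                    DSAFV2_SERDES_LBK_QID_M, DSAFV2_SERDES_LBK_QID_S, q_id);
 */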
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 0e30846a24f8..3f77ff77abbc 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -1802,7 +1802,7 @@ static int hns_nic_try_get_ae(struct net_device *ndev)
1802 int ret; 1802 int ret;
1803 1803
1804 h = hnae_get_handle(&priv->netdev->dev, 1804 h = hnae_get_handle(&priv->netdev->dev,
1805 priv->ae_name, priv->port_id, NULL); 1805 priv->ae_node, priv->port_id, NULL);
1806 if (IS_ERR_OR_NULL(h)) { 1806 if (IS_ERR_OR_NULL(h)) {
1807 ret = PTR_ERR(h); 1807 ret = PTR_ERR(h);
1808 dev_dbg(priv->dev, "has not handle, register notifier!\n"); 1808 dev_dbg(priv->dev, "has not handle, register notifier!\n");
@@ -1880,13 +1880,16 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
1880 else 1880 else
1881 priv->enet_ver = AE_VERSION_2; 1881 priv->enet_ver = AE_VERSION_2;
1882 1882
1883 ret = of_property_read_string(node, "ae-name", &priv->ae_name); 1883 priv->ae_node = (void *)of_parse_phandle(node, "ae-handle", 0);
1884 if (ret) 1884 if (IS_ERR_OR_NULL(priv->ae_node)) {
1885 goto out_read_string_fail; 1885 ret = PTR_ERR(priv->ae_node);
1886 dev_err(dev, "not find ae-handle\n");
1887 goto out_read_prop_fail;
1888 }
1886 1889
1887 ret = of_property_read_u32(node, "port-id", &priv->port_id); 1890 ret = of_property_read_u32(node, "port-id", &priv->port_id);
1888 if (ret) 1891 if (ret)
1889 goto out_read_string_fail; 1892 goto out_read_prop_fail;
1890 1893
1891 hns_init_mac_addr(ndev); 1894 hns_init_mac_addr(ndev);
1892 1895
@@ -1945,7 +1948,7 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
1945 1948
1946out_notify_fail: 1949out_notify_fail:
1947 (void)cancel_work_sync(&priv->service_task); 1950 (void)cancel_work_sync(&priv->service_task);
1948out_read_string_fail: 1951out_read_prop_fail:
1949 free_netdev(ndev); 1952 free_netdev(ndev);
1950 return ret; 1953 return ret;
1951} 1954}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.h b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
index 4b75270f014e..c68ab3d34fc2 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
@@ -51,7 +51,7 @@ struct hns_nic_ops {
51}; 51};
52 52
53struct hns_nic_priv { 53struct hns_nic_priv {
54 const char *ae_name; 54 const struct device_node *ae_node;
55 u32 enet_ver; 55 u32 enet_ver;
56 u32 port_id; 56 u32 port_id;
57 int phy_mode; 57 int phy_mode;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 3df22840fcd1..3c4a3bc31a89 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -295,8 +295,10 @@ static int __lb_setup(struct net_device *ndev,
295 295
296 switch (loop) { 296 switch (loop) {
297 case MAC_INTERNALLOOP_PHY: 297 case MAC_INTERNALLOOP_PHY:
298 if ((phy_dev) && (!phy_dev->is_c45)) 298 if ((phy_dev) && (!phy_dev->is_c45)) {
299 ret = hns_nic_config_phy_loopback(phy_dev, 0x1); 299 ret = hns_nic_config_phy_loopback(phy_dev, 0x1);
300 ret |= h->dev->ops->set_loopback(h, loop, 0x1);
301 }
300 break; 302 break;
301 case MAC_INTERNALLOOP_MAC: 303 case MAC_INTERNALLOOP_MAC:
302 if ((h->dev->ops->set_loopback) && 304 if ((h->dev->ops->set_loopback) &&
@@ -376,6 +378,7 @@ static void __lb_other_process(struct hns_nic_ring_data *ring_data,
376 struct sk_buff *skb) 378 struct sk_buff *skb)
377{ 379{
378 struct net_device *ndev; 380 struct net_device *ndev;
381 struct hns_nic_priv *priv;
379 struct hnae_ring *ring; 382 struct hnae_ring *ring;
380 struct netdev_queue *dev_queue; 383 struct netdev_queue *dev_queue;
381 struct sk_buff *new_skb; 384 struct sk_buff *new_skb;
@@ -385,8 +388,17 @@ static void __lb_other_process(struct hns_nic_ring_data *ring_data,
385 char buff[33]; /* 32B data and the last character '\0' */ 388 char buff[33]; /* 32B data and the last character '\0' */
386 389
387 if (!ring_data) { /* Just for doing create frame*/ 390 if (!ring_data) { /* Just for doing create frame*/
391 ndev = skb->dev;
392 priv = netdev_priv(ndev);
393
388 frame_size = skb->len; 394 frame_size = skb->len;
389 memset(skb->data, 0xFF, frame_size); 395 memset(skb->data, 0xFF, frame_size);
396 if ((!AE_IS_VER1(priv->enet_ver)) &&
397 (priv->ae_handle->port_type == HNAE_PORT_SERVICE)) {
398 memcpy(skb->data, ndev->dev_addr, 6);
399 skb->data[5] += 0x1f;
400 }
401
390 frame_size &= ~1ul; 402 frame_size &= ~1ul;
391 memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); 403 memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
392 memset(&skb->data[frame_size / 2 + 10], 0xBE, 404 memset(&skb->data[frame_size / 2 + 10], 0xBE,
@@ -486,6 +498,7 @@ static int __lb_run_test(struct net_device *ndev,
486 498
487 /* place data into test skb */ 499 /* place data into test skb */
488 (void)skb_put(skb, size); 500 (void)skb_put(skb, size);
501 skb->dev = ndev;
489 __lb_other_process(NULL, skb); 502 __lb_other_process(NULL, skb);
490 skb->queue_mapping = NIC_LB_TEST_RING_ID; 503 skb->queue_mapping = NIC_LB_TEST_RING_ID;
491 504
diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c
index 1d5c3e16d8f4..3daf2d4a7ca0 100644
--- a/drivers/net/ethernet/hp/hp100.c
+++ b/drivers/net/ethernet/hp/hp100.c
@@ -194,7 +194,6 @@ static const char *hp100_isa_tbl[] = {
194}; 194};
195#endif 195#endif
196 196
197#ifdef CONFIG_EISA
198static struct eisa_device_id hp100_eisa_tbl[] = { 197static struct eisa_device_id hp100_eisa_tbl[] = {
199 { "HWPF180" }, /* HP J2577 rev A */ 198 { "HWPF180" }, /* HP J2577 rev A */
200 { "HWP1920" }, /* HP 27248B */ 199 { "HWP1920" }, /* HP 27248B */
@@ -205,9 +204,7 @@ static struct eisa_device_id hp100_eisa_tbl[] = {
205 { "" } /* Mandatory final entry ! */ 204 { "" } /* Mandatory final entry ! */
206}; 205};
207MODULE_DEVICE_TABLE(eisa, hp100_eisa_tbl); 206MODULE_DEVICE_TABLE(eisa, hp100_eisa_tbl);
208#endif
209 207
210#ifdef CONFIG_PCI
211static const struct pci_device_id hp100_pci_tbl[] = { 208static const struct pci_device_id hp100_pci_tbl[] = {
212 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585A, PCI_ANY_ID, PCI_ANY_ID,}, 209 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585A, PCI_ANY_ID, PCI_ANY_ID,},
213 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585B, PCI_ANY_ID, PCI_ANY_ID,}, 210 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585B, PCI_ANY_ID, PCI_ANY_ID,},
@@ -219,7 +216,6 @@ static const struct pci_device_id hp100_pci_tbl[] = {
219 {} /* Terminating entry */ 216 {} /* Terminating entry */
220}; 217};
221MODULE_DEVICE_TABLE(pci, hp100_pci_tbl); 218MODULE_DEVICE_TABLE(pci, hp100_pci_tbl);
222#endif
223 219
224static int hp100_rx_ratio = HP100_DEFAULT_RX_RATIO; 220static int hp100_rx_ratio = HP100_DEFAULT_RX_RATIO;
225static int hp100_priority_tx = HP100_DEFAULT_PRIORITY_TX; 221static int hp100_priority_tx = HP100_DEFAULT_PRIORITY_TX;
@@ -2842,7 +2838,6 @@ static void cleanup_dev(struct net_device *d)
2842 free_netdev(d); 2838 free_netdev(d);
2843} 2839}
2844 2840
2845#ifdef CONFIG_EISA
2846static int hp100_eisa_probe(struct device *gendev) 2841static int hp100_eisa_probe(struct device *gendev)
2847{ 2842{
2848 struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private)); 2843 struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private));
@@ -2884,9 +2879,7 @@ static struct eisa_driver hp100_eisa_driver = {
2884 .remove = hp100_eisa_remove, 2879 .remove = hp100_eisa_remove,
2885 } 2880 }
2886}; 2881};
2887#endif
2888 2882
2889#ifdef CONFIG_PCI
2890static int hp100_pci_probe(struct pci_dev *pdev, 2883static int hp100_pci_probe(struct pci_dev *pdev,
2891 const struct pci_device_id *ent) 2884 const struct pci_device_id *ent)
2892{ 2885{
@@ -2955,7 +2948,6 @@ static struct pci_driver hp100_pci_driver = {
2955 .probe = hp100_pci_probe, 2948 .probe = hp100_pci_probe,
2956 .remove = hp100_pci_remove, 2949 .remove = hp100_pci_remove,
2957}; 2950};
2958#endif
2959 2951
2960/* 2952/*
2961 * module section 2953 * module section
@@ -3032,23 +3024,17 @@ static int __init hp100_module_init(void)
3032 err = hp100_isa_init(); 3024 err = hp100_isa_init();
3033 if (err && err != -ENODEV) 3025 if (err && err != -ENODEV)
3034 goto out; 3026 goto out;
3035#ifdef CONFIG_EISA
3036 err = eisa_driver_register(&hp100_eisa_driver); 3027 err = eisa_driver_register(&hp100_eisa_driver);
3037 if (err && err != -ENODEV) 3028 if (err && err != -ENODEV)
3038 goto out2; 3029 goto out2;
3039#endif
3040#ifdef CONFIG_PCI
3041 err = pci_register_driver(&hp100_pci_driver); 3030 err = pci_register_driver(&hp100_pci_driver);
3042 if (err && err != -ENODEV) 3031 if (err && err != -ENODEV)
3043 goto out3; 3032 goto out3;
3044#endif
3045 out: 3033 out:
3046 return err; 3034 return err;
3047 out3: 3035 out3:
3048#ifdef CONFIG_EISA
3049 eisa_driver_unregister (&hp100_eisa_driver); 3036 eisa_driver_unregister (&hp100_eisa_driver);
3050 out2: 3037 out2:
3051#endif
3052 hp100_isa_cleanup(); 3038 hp100_isa_cleanup();
3053 goto out; 3039 goto out;
3054} 3040}
@@ -3057,12 +3043,8 @@ static int __init hp100_module_init(void)
3057static void __exit hp100_module_exit(void) 3043static void __exit hp100_module_exit(void)
3058{ 3044{
3059 hp100_isa_cleanup(); 3045 hp100_isa_cleanup();
3060#ifdef CONFIG_EISA
3061 eisa_driver_unregister (&hp100_eisa_driver); 3046 eisa_driver_unregister (&hp100_eisa_driver);
3062#endif
3063#ifdef CONFIG_PCI
3064 pci_unregister_driver (&hp100_pci_driver); 3047 pci_unregister_driver (&hp100_pci_driver);
3065#endif
3066} 3048}
3067 3049
3068module_init(hp100_module_init) 3050module_init(hp100_module_init)
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 335417b4756b..ebe60719e489 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1166,7 +1166,10 @@ map_failed:
1166 if (!firmware_has_feature(FW_FEATURE_CMO)) 1166 if (!firmware_has_feature(FW_FEATURE_CMO))
1167 netdev_err(netdev, "tx: unable to map xmit buffer\n"); 1167 netdev_err(netdev, "tx: unable to map xmit buffer\n");
1168 adapter->tx_map_failed++; 1168 adapter->tx_map_failed++;
1169 skb_linearize(skb); 1169 if (skb_linearize(skb)) {
1170 netdev->stats.tx_dropped++;
1171 goto out;
1172 }
1170 force_bounce = 1; 1173 force_bounce = 1;
1171 goto retry_bounce; 1174 goto retry_bounce;
1172} 1175}
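The ibmveth hunk above stops ignoring skb_linearize(): it returns -ENOMEM when a contiguous copy cannot be allocated, and retrying the bounce path with a still-fragmented skb would not help, so the packet is counted and dropped instead. The general shape of the check in a transmit handler (a generic sketch, not ibmveth's exact cleanup path):

        if (skb_linearize(skb)) {
                /* No memory for a linear copy: account and drop. */
                ndev->stats.tx_dropped++;
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }
        /* skb->data now covers the whole packet; the bounce copy is safe. */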
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 7d6570843723..6e9e16eee5d0 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1348,44 +1348,44 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
1348 crq.request_capability.cmd = REQUEST_CAPABILITY; 1348 crq.request_capability.cmd = REQUEST_CAPABILITY;
1349 1349
1350 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES); 1350 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
1351 crq.request_capability.number = cpu_to_be32(adapter->req_tx_queues); 1351 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
1352 ibmvnic_send_crq(adapter, &crq); 1352 ibmvnic_send_crq(adapter, &crq);
1353 1353
1354 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES); 1354 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
1355 crq.request_capability.number = cpu_to_be32(adapter->req_rx_queues); 1355 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
1356 ibmvnic_send_crq(adapter, &crq); 1356 ibmvnic_send_crq(adapter, &crq);
1357 1357
1358 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES); 1358 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
1359 crq.request_capability.number = cpu_to_be32(adapter->req_rx_add_queues); 1359 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
1360 ibmvnic_send_crq(adapter, &crq); 1360 ibmvnic_send_crq(adapter, &crq);
1361 1361
1362 crq.request_capability.capability = 1362 crq.request_capability.capability =
1363 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ); 1363 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
1364 crq.request_capability.number = 1364 crq.request_capability.number =
1365 cpu_to_be32(adapter->req_tx_entries_per_subcrq); 1365 cpu_to_be64(adapter->req_tx_entries_per_subcrq);
1366 ibmvnic_send_crq(adapter, &crq); 1366 ibmvnic_send_crq(adapter, &crq);
1367 1367
1368 crq.request_capability.capability = 1368 crq.request_capability.capability =
1369 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ); 1369 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
1370 crq.request_capability.number = 1370 crq.request_capability.number =
1371 cpu_to_be32(adapter->req_rx_add_entries_per_subcrq); 1371 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
1372 ibmvnic_send_crq(adapter, &crq); 1372 ibmvnic_send_crq(adapter, &crq);
1373 1373
1374 crq.request_capability.capability = cpu_to_be16(REQ_MTU); 1374 crq.request_capability.capability = cpu_to_be16(REQ_MTU);
1375 crq.request_capability.number = cpu_to_be32(adapter->req_mtu); 1375 crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
1376 ibmvnic_send_crq(adapter, &crq); 1376 ibmvnic_send_crq(adapter, &crq);
1377 1377
1378 if (adapter->netdev->flags & IFF_PROMISC) { 1378 if (adapter->netdev->flags & IFF_PROMISC) {
1379 if (adapter->promisc_supported) { 1379 if (adapter->promisc_supported) {
1380 crq.request_capability.capability = 1380 crq.request_capability.capability =
1381 cpu_to_be16(PROMISC_REQUESTED); 1381 cpu_to_be16(PROMISC_REQUESTED);
1382 crq.request_capability.number = cpu_to_be32(1); 1382 crq.request_capability.number = cpu_to_be64(1);
1383 ibmvnic_send_crq(adapter, &crq); 1383 ibmvnic_send_crq(adapter, &crq);
1384 } 1384 }
1385 } else { 1385 } else {
1386 crq.request_capability.capability = 1386 crq.request_capability.capability =
1387 cpu_to_be16(PROMISC_REQUESTED); 1387 cpu_to_be16(PROMISC_REQUESTED);
1388 crq.request_capability.number = cpu_to_be32(0); 1388 crq.request_capability.number = cpu_to_be64(0);
1389 ibmvnic_send_crq(adapter, &crq); 1389 ibmvnic_send_crq(adapter, &crq);
1390 } 1390 }
1391 1391
@@ -2312,93 +2312,93 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq,
2312 switch (be16_to_cpu(crq->query_capability.capability)) { 2312 switch (be16_to_cpu(crq->query_capability.capability)) {
2313 case MIN_TX_QUEUES: 2313 case MIN_TX_QUEUES:
2314 adapter->min_tx_queues = 2314 adapter->min_tx_queues =
2315 be32_to_cpu(crq->query_capability.number); 2315 be64_to_cpu(crq->query_capability.number);
2316 netdev_dbg(netdev, "min_tx_queues = %lld\n", 2316 netdev_dbg(netdev, "min_tx_queues = %lld\n",
2317 adapter->min_tx_queues); 2317 adapter->min_tx_queues);
2318 break; 2318 break;
2319 case MIN_RX_QUEUES: 2319 case MIN_RX_QUEUES:
2320 adapter->min_rx_queues = 2320 adapter->min_rx_queues =
2321 be32_to_cpu(crq->query_capability.number); 2321 be64_to_cpu(crq->query_capability.number);
2322 netdev_dbg(netdev, "min_rx_queues = %lld\n", 2322 netdev_dbg(netdev, "min_rx_queues = %lld\n",
2323 adapter->min_rx_queues); 2323 adapter->min_rx_queues);
2324 break; 2324 break;
2325 case MIN_RX_ADD_QUEUES: 2325 case MIN_RX_ADD_QUEUES:
2326 adapter->min_rx_add_queues = 2326 adapter->min_rx_add_queues =
2327 be32_to_cpu(crq->query_capability.number); 2327 be64_to_cpu(crq->query_capability.number);
2328 netdev_dbg(netdev, "min_rx_add_queues = %lld\n", 2328 netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
2329 adapter->min_rx_add_queues); 2329 adapter->min_rx_add_queues);
2330 break; 2330 break;
2331 case MAX_TX_QUEUES: 2331 case MAX_TX_QUEUES:
2332 adapter->max_tx_queues = 2332 adapter->max_tx_queues =
2333 be32_to_cpu(crq->query_capability.number); 2333 be64_to_cpu(crq->query_capability.number);
2334 netdev_dbg(netdev, "max_tx_queues = %lld\n", 2334 netdev_dbg(netdev, "max_tx_queues = %lld\n",
2335 adapter->max_tx_queues); 2335 adapter->max_tx_queues);
2336 break; 2336 break;
2337 case MAX_RX_QUEUES: 2337 case MAX_RX_QUEUES:
2338 adapter->max_rx_queues = 2338 adapter->max_rx_queues =
2339 be32_to_cpu(crq->query_capability.number); 2339 be64_to_cpu(crq->query_capability.number);
2340 netdev_dbg(netdev, "max_rx_queues = %lld\n", 2340 netdev_dbg(netdev, "max_rx_queues = %lld\n",
2341 adapter->max_rx_queues); 2341 adapter->max_rx_queues);
2342 break; 2342 break;
2343 case MAX_RX_ADD_QUEUES: 2343 case MAX_RX_ADD_QUEUES:
2344 adapter->max_rx_add_queues = 2344 adapter->max_rx_add_queues =
2345 be32_to_cpu(crq->query_capability.number); 2345 be64_to_cpu(crq->query_capability.number);
2346 netdev_dbg(netdev, "max_rx_add_queues = %lld\n", 2346 netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
2347 adapter->max_rx_add_queues); 2347 adapter->max_rx_add_queues);
2348 break; 2348 break;
2349 case MIN_TX_ENTRIES_PER_SUBCRQ: 2349 case MIN_TX_ENTRIES_PER_SUBCRQ:
2350 adapter->min_tx_entries_per_subcrq = 2350 adapter->min_tx_entries_per_subcrq =
2351 be32_to_cpu(crq->query_capability.number); 2351 be64_to_cpu(crq->query_capability.number);
2352 netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n", 2352 netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
2353 adapter->min_tx_entries_per_subcrq); 2353 adapter->min_tx_entries_per_subcrq);
2354 break; 2354 break;
2355 case MIN_RX_ADD_ENTRIES_PER_SUBCRQ: 2355 case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
2356 adapter->min_rx_add_entries_per_subcrq = 2356 adapter->min_rx_add_entries_per_subcrq =
2357 be32_to_cpu(crq->query_capability.number); 2357 be64_to_cpu(crq->query_capability.number);
2358 netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n", 2358 netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
2359 adapter->min_rx_add_entries_per_subcrq); 2359 adapter->min_rx_add_entries_per_subcrq);
2360 break; 2360 break;
2361 case MAX_TX_ENTRIES_PER_SUBCRQ: 2361 case MAX_TX_ENTRIES_PER_SUBCRQ:
2362 adapter->max_tx_entries_per_subcrq = 2362 adapter->max_tx_entries_per_subcrq =
2363 be32_to_cpu(crq->query_capability.number); 2363 be64_to_cpu(crq->query_capability.number);
2364 netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n", 2364 netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
2365 adapter->max_tx_entries_per_subcrq); 2365 adapter->max_tx_entries_per_subcrq);
2366 break; 2366 break;
2367 case MAX_RX_ADD_ENTRIES_PER_SUBCRQ: 2367 case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
2368 adapter->max_rx_add_entries_per_subcrq = 2368 adapter->max_rx_add_entries_per_subcrq =
2369 be32_to_cpu(crq->query_capability.number); 2369 be64_to_cpu(crq->query_capability.number);
2370 netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n", 2370 netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
2371 adapter->max_rx_add_entries_per_subcrq); 2371 adapter->max_rx_add_entries_per_subcrq);
2372 break; 2372 break;
2373 case TCP_IP_OFFLOAD: 2373 case TCP_IP_OFFLOAD:
2374 adapter->tcp_ip_offload = 2374 adapter->tcp_ip_offload =
2375 be32_to_cpu(crq->query_capability.number); 2375 be64_to_cpu(crq->query_capability.number);
2376 netdev_dbg(netdev, "tcp_ip_offload = %lld\n", 2376 netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
2377 adapter->tcp_ip_offload); 2377 adapter->tcp_ip_offload);
2378 break; 2378 break;
2379 case PROMISC_SUPPORTED: 2379 case PROMISC_SUPPORTED:
2380 adapter->promisc_supported = 2380 adapter->promisc_supported =
2381 be32_to_cpu(crq->query_capability.number); 2381 be64_to_cpu(crq->query_capability.number);
2382 netdev_dbg(netdev, "promisc_supported = %lld\n", 2382 netdev_dbg(netdev, "promisc_supported = %lld\n",
2383 adapter->promisc_supported); 2383 adapter->promisc_supported);
2384 break; 2384 break;
2385 case MIN_MTU: 2385 case MIN_MTU:
2386 adapter->min_mtu = be32_to_cpu(crq->query_capability.number); 2386 adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
2387 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu); 2387 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
2388 break; 2388 break;
2389 case MAX_MTU: 2389 case MAX_MTU:
2390 adapter->max_mtu = be32_to_cpu(crq->query_capability.number); 2390 adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
2391 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu); 2391 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
2392 break; 2392 break;
2393 case MAX_MULTICAST_FILTERS: 2393 case MAX_MULTICAST_FILTERS:
2394 adapter->max_multicast_filters = 2394 adapter->max_multicast_filters =
2395 be32_to_cpu(crq->query_capability.number); 2395 be64_to_cpu(crq->query_capability.number);
2396 netdev_dbg(netdev, "max_multicast_filters = %lld\n", 2396 netdev_dbg(netdev, "max_multicast_filters = %lld\n",
2397 adapter->max_multicast_filters); 2397 adapter->max_multicast_filters);
2398 break; 2398 break;
2399 case VLAN_HEADER_INSERTION: 2399 case VLAN_HEADER_INSERTION:
2400 adapter->vlan_header_insertion = 2400 adapter->vlan_header_insertion =
2401 be32_to_cpu(crq->query_capability.number); 2401 be64_to_cpu(crq->query_capability.number);
2402 if (adapter->vlan_header_insertion) 2402 if (adapter->vlan_header_insertion)
2403 netdev->features |= NETIF_F_HW_VLAN_STAG_TX; 2403 netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
2404 netdev_dbg(netdev, "vlan_header_insertion = %lld\n", 2404 netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
@@ -2406,43 +2406,43 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq,
2406 break; 2406 break;
2407 case MAX_TX_SG_ENTRIES: 2407 case MAX_TX_SG_ENTRIES:
2408 adapter->max_tx_sg_entries = 2408 adapter->max_tx_sg_entries =
2409 be32_to_cpu(crq->query_capability.number); 2409 be64_to_cpu(crq->query_capability.number);
2410 netdev_dbg(netdev, "max_tx_sg_entries = %lld\n", 2410 netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
2411 adapter->max_tx_sg_entries); 2411 adapter->max_tx_sg_entries);
2412 break; 2412 break;
2413 case RX_SG_SUPPORTED: 2413 case RX_SG_SUPPORTED:
2414 adapter->rx_sg_supported = 2414 adapter->rx_sg_supported =
2415 be32_to_cpu(crq->query_capability.number); 2415 be64_to_cpu(crq->query_capability.number);
2416 netdev_dbg(netdev, "rx_sg_supported = %lld\n", 2416 netdev_dbg(netdev, "rx_sg_supported = %lld\n",
2417 adapter->rx_sg_supported); 2417 adapter->rx_sg_supported);
2418 break; 2418 break;
2419 case OPT_TX_COMP_SUB_QUEUES: 2419 case OPT_TX_COMP_SUB_QUEUES:
2420 adapter->opt_tx_comp_sub_queues = 2420 adapter->opt_tx_comp_sub_queues =
2421 be32_to_cpu(crq->query_capability.number); 2421 be64_to_cpu(crq->query_capability.number);
2422 netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n", 2422 netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
2423 adapter->opt_tx_comp_sub_queues); 2423 adapter->opt_tx_comp_sub_queues);
2424 break; 2424 break;
2425 case OPT_RX_COMP_QUEUES: 2425 case OPT_RX_COMP_QUEUES:
2426 adapter->opt_rx_comp_queues = 2426 adapter->opt_rx_comp_queues =
2427 be32_to_cpu(crq->query_capability.number); 2427 be64_to_cpu(crq->query_capability.number);
2428 netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n", 2428 netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
2429 adapter->opt_rx_comp_queues); 2429 adapter->opt_rx_comp_queues);
2430 break; 2430 break;
2431 case OPT_RX_BUFADD_Q_PER_RX_COMP_Q: 2431 case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
2432 adapter->opt_rx_bufadd_q_per_rx_comp_q = 2432 adapter->opt_rx_bufadd_q_per_rx_comp_q =
2433 be32_to_cpu(crq->query_capability.number); 2433 be64_to_cpu(crq->query_capability.number);
2434 netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n", 2434 netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
2435 adapter->opt_rx_bufadd_q_per_rx_comp_q); 2435 adapter->opt_rx_bufadd_q_per_rx_comp_q);
2436 break; 2436 break;
2437 case OPT_TX_ENTRIES_PER_SUBCRQ: 2437 case OPT_TX_ENTRIES_PER_SUBCRQ:
2438 adapter->opt_tx_entries_per_subcrq = 2438 adapter->opt_tx_entries_per_subcrq =
2439 be32_to_cpu(crq->query_capability.number); 2439 be64_to_cpu(crq->query_capability.number);
2440 netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n", 2440 netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
2441 adapter->opt_tx_entries_per_subcrq); 2441 adapter->opt_tx_entries_per_subcrq);
2442 break; 2442 break;
2443 case OPT_RXBA_ENTRIES_PER_SUBCRQ: 2443 case OPT_RXBA_ENTRIES_PER_SUBCRQ:
2444 adapter->opt_rxba_entries_per_subcrq = 2444 adapter->opt_rxba_entries_per_subcrq =
2445 be32_to_cpu(crq->query_capability.number); 2445 be64_to_cpu(crq->query_capability.number);
2446 netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n", 2446 netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
2447 adapter->opt_rxba_entries_per_subcrq); 2447 adapter->opt_rxba_entries_per_subcrq);
2448 break; 2448 break;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 1242925ad34c..1a9993cc79b5 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -319,10 +319,8 @@ struct ibmvnic_capability {
319 u8 first; 319 u8 first;
320 u8 cmd; 320 u8 cmd;
321 __be16 capability; /* one of ibmvnic_capabilities */ 321 __be16 capability; /* one of ibmvnic_capabilities */
322 __be64 number;
322 struct ibmvnic_rc rc; 323 struct ibmvnic_rc rc;
323 __be32 number; /*FIX: should be __be64, but I'm getting the least
324 * significant word first
325 */
326} __packed __aligned(8); 324} __packed __aligned(8);
327 325
328struct ibmvnic_login { 326struct ibmvnic_login {
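The ibmvnic be32-to-be64 churn above all follows from the header fix at the end: the capability value carried in these messages is a 64-bit big-endian quantity, so the field becomes __be64 (and moves ahead of the rc member), and every store and load must pair with cpu_to_be64()/be64_to_cpu(). In miniature, with hypothetical names:

struct cap_msg {
        u8      first;
        u8      cmd;
        __be16  capability;
        __be64  number;                 /* 64-bit, big-endian on the wire */
        u8      rc;                     /* stand-in for the rc structure */
} __packed __aligned(8);

static void fill_cap(struct cap_msg *msg, u16 cap, u64 value)
{
        msg->capability = cpu_to_be16(cap);
        msg->number = cpu_to_be64(value);       /* cpu_to_be32() would
                                                 * truncate and land in
                                                 * the wrong half on LE */
}

static u64 read_cap(const struct cap_msg *msg)
{
        return be64_to_cpu(msg->number);
}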
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index bb4612c159fd..8f3b53e0dc46 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -7117,9 +7117,7 @@ static void i40e_service_task(struct work_struct *work)
7117 i40e_watchdog_subtask(pf); 7117 i40e_watchdog_subtask(pf);
7118 i40e_fdir_reinit_subtask(pf); 7118 i40e_fdir_reinit_subtask(pf);
7119 i40e_sync_filters_subtask(pf); 7119 i40e_sync_filters_subtask(pf);
7120#if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE)
7121 i40e_sync_udp_filters_subtask(pf); 7120 i40e_sync_udp_filters_subtask(pf);
7122#endif
7123 i40e_clean_adminq_subtask(pf); 7121 i40e_clean_adminq_subtask(pf);
7124 7122
7125 i40e_service_event_complete(pf); 7123 i40e_service_event_complete(pf);
@@ -8515,6 +8513,8 @@ static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port)
8515} 8513}
8516 8514
8517#endif 8515#endif
8516
8517#if IS_ENABLED(CONFIG_VXLAN)
8518/** 8518/**
8519 * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up 8519 * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up
8520 * @netdev: This physical port's netdev 8520 * @netdev: This physical port's netdev
@@ -8524,7 +8524,6 @@ static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port)
8524static void i40e_add_vxlan_port(struct net_device *netdev, 8524static void i40e_add_vxlan_port(struct net_device *netdev,
8525 sa_family_t sa_family, __be16 port) 8525 sa_family_t sa_family, __be16 port)
8526{ 8526{
8527#if IS_ENABLED(CONFIG_VXLAN)
8528 struct i40e_netdev_priv *np = netdev_priv(netdev); 8527 struct i40e_netdev_priv *np = netdev_priv(netdev);
8529 struct i40e_vsi *vsi = np->vsi; 8528 struct i40e_vsi *vsi = np->vsi;
8530 struct i40e_pf *pf = vsi->back; 8529 struct i40e_pf *pf = vsi->back;
@@ -8557,7 +8556,6 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
8557 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN; 8556 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
8558 pf->pending_udp_bitmap |= BIT_ULL(next_idx); 8557 pf->pending_udp_bitmap |= BIT_ULL(next_idx);
8559 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; 8558 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
8560#endif
8561} 8559}
8562 8560
8563/** 8561/**
@@ -8569,7 +8567,6 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
8569static void i40e_del_vxlan_port(struct net_device *netdev, 8567static void i40e_del_vxlan_port(struct net_device *netdev,
8570 sa_family_t sa_family, __be16 port) 8568 sa_family_t sa_family, __be16 port)
8571{ 8569{
8572#if IS_ENABLED(CONFIG_VXLAN)
8573 struct i40e_netdev_priv *np = netdev_priv(netdev); 8570 struct i40e_netdev_priv *np = netdev_priv(netdev);
8574 struct i40e_vsi *vsi = np->vsi; 8571 struct i40e_vsi *vsi = np->vsi;
8575 struct i40e_pf *pf = vsi->back; 8572 struct i40e_pf *pf = vsi->back;
@@ -8592,9 +8589,10 @@ static void i40e_del_vxlan_port(struct net_device *netdev,
8592 netdev_warn(netdev, "vxlan port %d was not found, not deleting\n", 8589 netdev_warn(netdev, "vxlan port %d was not found, not deleting\n",
8593 ntohs(port)); 8590 ntohs(port));
8594 } 8591 }
8595#endif
8596} 8592}
8593#endif
8597 8594
8595#if IS_ENABLED(CONFIG_GENEVE)
8598/** 8596/**
8599 * i40e_add_geneve_port - Get notifications about GENEVE ports that come up 8597 * i40e_add_geneve_port - Get notifications about GENEVE ports that come up
8600 * @netdev: This physical port's netdev 8598 * @netdev: This physical port's netdev
@@ -8604,7 +8602,6 @@ static void i40e_del_vxlan_port(struct net_device *netdev,
8604static void i40e_add_geneve_port(struct net_device *netdev, 8602static void i40e_add_geneve_port(struct net_device *netdev,
8605 sa_family_t sa_family, __be16 port) 8603 sa_family_t sa_family, __be16 port)
8606{ 8604{
8607#if IS_ENABLED(CONFIG_GENEVE)
8608 struct i40e_netdev_priv *np = netdev_priv(netdev); 8605 struct i40e_netdev_priv *np = netdev_priv(netdev);
8609 struct i40e_vsi *vsi = np->vsi; 8606 struct i40e_vsi *vsi = np->vsi;
8610 struct i40e_pf *pf = vsi->back; 8607 struct i40e_pf *pf = vsi->back;
@@ -8639,7 +8636,6 @@ static void i40e_add_geneve_port(struct net_device *netdev,
8639 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; 8636 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
8640 8637
8641 dev_info(&pf->pdev->dev, "adding geneve port %d\n", ntohs(port)); 8638 dev_info(&pf->pdev->dev, "adding geneve port %d\n", ntohs(port));
8642#endif
8643} 8639}
8644 8640
8645/** 8641/**
@@ -8651,7 +8647,6 @@ static void i40e_add_geneve_port(struct net_device *netdev,
8651static void i40e_del_geneve_port(struct net_device *netdev, 8647static void i40e_del_geneve_port(struct net_device *netdev,
8652 sa_family_t sa_family, __be16 port) 8648 sa_family_t sa_family, __be16 port)
8653{ 8649{
8654#if IS_ENABLED(CONFIG_GENEVE)
8655 struct i40e_netdev_priv *np = netdev_priv(netdev); 8650 struct i40e_netdev_priv *np = netdev_priv(netdev);
8656 struct i40e_vsi *vsi = np->vsi; 8651 struct i40e_vsi *vsi = np->vsi;
8657 struct i40e_pf *pf = vsi->back; 8652 struct i40e_pf *pf = vsi->back;
@@ -8677,8 +8672,8 @@ static void i40e_del_geneve_port(struct net_device *netdev,
8677 netdev_warn(netdev, "geneve port %d was not found, not deleting\n", 8672 netdev_warn(netdev, "geneve port %d was not found, not deleting\n",
8678 ntohs(port)); 8673 ntohs(port));
8679 } 8674 }
8680#endif
8681} 8675}
8676#endif
8682 8677
8683static int i40e_get_phys_port_id(struct net_device *netdev, 8678static int i40e_get_phys_port_id(struct net_device *netdev,
8684 struct netdev_phys_item_id *ppid) 8679 struct netdev_phys_item_id *ppid)
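The i40e rework above relocates the IS_ENABLED(CONFIG_VXLAN) / IS_ENABLED(CONFIG_GENEVE) guards from inside the callback bodies to around the whole functions, and drops the guard around the i40e_sync_udp_filters_subtask() call. Wrapping the entire function is the usual shape when a helper is only meaningful with the feature built in: it disappears from the object file instead of compiling to an empty stub. Sketched generically:

#if IS_ENABLED(CONFIG_VXLAN)
/* Defined (and referenced) only when VXLAN support is compiled in. */
static void foo_add_vxlan_port(struct net_device *netdev,
                               sa_family_t sa_family, __be16 port)
{
        /* program the hardware UDP port filter here */
}
#endif /* CONFIG_VXLAN */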
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 720516b0e8ee..47bd8b3145a7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2313,8 +2313,8 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
2313 struct iphdr *this_ip_hdr; 2313 struct iphdr *this_ip_hdr;
2314 u32 network_hdr_len; 2314 u32 network_hdr_len;
2315 u8 l4_hdr = 0; 2315 u8 l4_hdr = 0;
2316 struct udphdr *oudph; 2316 struct udphdr *oudph = NULL;
2317 struct iphdr *oiph; 2317 struct iphdr *oiph = NULL;
2318 u32 l4_tunnel = 0; 2318 u32 l4_tunnel = 0;
2319 2319
2320 if (skb->encapsulation) { 2320 if (skb->encapsulation) {
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index b1de7afd4116..3ddf657bc10b 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -270,11 +270,17 @@ jme_reset_mac_processor(struct jme_adapter *jme)
270} 270}
271 271
272static inline void 272static inline void
273jme_clear_pm(struct jme_adapter *jme) 273jme_clear_pm_enable_wol(struct jme_adapter *jme)
274{ 274{
275 jwrite32(jme, JME_PMCS, PMCS_STMASK | jme->reg_pmcs); 275 jwrite32(jme, JME_PMCS, PMCS_STMASK | jme->reg_pmcs);
276} 276}
277 277
278static inline void
279jme_clear_pm_disable_wol(struct jme_adapter *jme)
280{
281 jwrite32(jme, JME_PMCS, PMCS_STMASK);
282}
283
278static int 284static int
279jme_reload_eeprom(struct jme_adapter *jme) 285jme_reload_eeprom(struct jme_adapter *jme)
280{ 286{
@@ -1853,7 +1859,7 @@ jme_open(struct net_device *netdev)
1853 struct jme_adapter *jme = netdev_priv(netdev); 1859 struct jme_adapter *jme = netdev_priv(netdev);
1854 int rc; 1860 int rc;
1855 1861
1856 jme_clear_pm(jme); 1862 jme_clear_pm_disable_wol(jme);
1857 JME_NAPI_ENABLE(jme); 1863 JME_NAPI_ENABLE(jme);
1858 1864
1859 tasklet_init(&jme->linkch_task, jme_link_change_tasklet, 1865 tasklet_init(&jme->linkch_task, jme_link_change_tasklet,
@@ -1925,11 +1931,11 @@ jme_wait_link(struct jme_adapter *jme)
1925static void 1931static void
1926jme_powersave_phy(struct jme_adapter *jme) 1932jme_powersave_phy(struct jme_adapter *jme)
1927{ 1933{
1928 if (jme->reg_pmcs) { 1934 if (jme->reg_pmcs && device_may_wakeup(&jme->pdev->dev)) {
1929 jme_set_100m_half(jme); 1935 jme_set_100m_half(jme);
1930 if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN)) 1936 if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
1931 jme_wait_link(jme); 1937 jme_wait_link(jme);
1932 jme_clear_pm(jme); 1938 jme_clear_pm_enable_wol(jme);
1933 } else { 1939 } else {
1934 jme_phy_off(jme); 1940 jme_phy_off(jme);
1935 } 1941 }
@@ -2646,9 +2652,6 @@ jme_set_wol(struct net_device *netdev,
2646 if (wol->wolopts & WAKE_MAGIC) 2652 if (wol->wolopts & WAKE_MAGIC)
2647 jme->reg_pmcs |= PMCS_MFEN; 2653 jme->reg_pmcs |= PMCS_MFEN;
2648 2654
2649 jwrite32(jme, JME_PMCS, jme->reg_pmcs);
2650 device_set_wakeup_enable(&jme->pdev->dev, !!(jme->reg_pmcs));
2651
2652 return 0; 2655 return 0;
2653} 2656}
2654 2657
@@ -3172,8 +3175,8 @@ jme_init_one(struct pci_dev *pdev,
3172 jme->mii_if.mdio_read = jme_mdio_read; 3175 jme->mii_if.mdio_read = jme_mdio_read;
3173 jme->mii_if.mdio_write = jme_mdio_write; 3176 jme->mii_if.mdio_write = jme_mdio_write;
3174 3177
3175 jme_clear_pm(jme); 3178 jme_clear_pm_disable_wol(jme);
3176 device_set_wakeup_enable(&pdev->dev, true); 3179 device_init_wakeup(&pdev->dev, true);
3177 3180
3178 jme_set_phyfifo_5level(jme); 3181 jme_set_phyfifo_5level(jme);
3179 jme->pcirev = pdev->revision; 3182 jme->pcirev = pdev->revision;
@@ -3304,7 +3307,7 @@ jme_resume(struct device *dev)
3304 if (!netif_running(netdev)) 3307 if (!netif_running(netdev))
3305 return 0; 3308 return 0;
3306 3309
3307 jme_clear_pm(jme); 3310 jme_clear_pm_disable_wol(jme);
3308 jme_phy_on(jme); 3311 jme_phy_on(jme);
3309 if (test_bit(JME_FLAG_SSET, &jme->flags)) 3312 if (test_bit(JME_FLAG_SSET, &jme->flags))
3310 jme_set_settings(netdev, &jme->old_ecmd); 3313 jme_set_settings(netdev, &jme->old_ecmd);
@@ -3312,13 +3315,14 @@ jme_resume(struct device *dev)
3312 jme_reset_phy_processor(jme); 3315 jme_reset_phy_processor(jme);
3313 jme_phy_calibration(jme); 3316 jme_phy_calibration(jme);
3314 jme_phy_setEA(jme); 3317 jme_phy_setEA(jme);
3315 jme_start_irq(jme);
3316 netif_device_attach(netdev); 3318 netif_device_attach(netdev);
3317 3319
3318 atomic_inc(&jme->link_changing); 3320 atomic_inc(&jme->link_changing);
3319 3321
3320 jme_reset_link(jme); 3322 jme_reset_link(jme);
3321 3323
3324 jme_start_irq(jme);
3325
3322 return 0; 3326 return 0;
3323} 3327}
3324 3328
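The jme changes above split jme_clear_pm() into WoL-arming and WoL-disabling variants, register wakeup capability once at probe via device_init_wakeup(), and write the PMCS wake bits only on the suspend path, gated by device_may_wakeup(). A condensed sketch of that division of labour (all names other than the device_* and ethtool calls are placeholders):

struct foo_priv {
        struct pci_dev *pdev;
        u32 wol_bits;                   /* requested wake events */
};

static int foo_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
        struct foo_priv *p = netdev_priv(ndev);

        /* Remember the request; no register write until suspend. */
        p->wol_bits = (wol->wolopts & WAKE_MAGIC) ? FOO_PMCS_MAGIC : 0;
        return 0;
}

static void foo_powersave_phy(struct foo_priv *p)
{
        /* The PM core has the final say on whether the device may wake. */
        if (p->wol_bits && device_may_wakeup(&p->pdev->dev))
                foo_hw_arm_wol(p, p->wol_bits);
        else
                foo_hw_phy_off(p);
}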
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index a0c03834a2f7..55831188bc32 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -762,10 +762,10 @@ txq_put_data_tso(struct net_device *dev, struct tx_queue *txq,
762 762
763 if (length <= 8 && (uintptr_t)data & 0x7) { 763 if (length <= 8 && (uintptr_t)data & 0x7) {
764 /* Copy unaligned small data fragment to TSO header data area */ 764 /* Copy unaligned small data fragment to TSO header data area */
765 memcpy(txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE, 765 memcpy(txq->tso_hdrs + tx_index * TSO_HEADER_SIZE,
766 data, length); 766 data, length);
767 desc->buf_ptr = txq->tso_hdrs_dma 767 desc->buf_ptr = txq->tso_hdrs_dma
768 + txq->tx_curr_desc * TSO_HEADER_SIZE; 768 + tx_index * TSO_HEADER_SIZE;
769 } else { 769 } else {
770 /* Alignment is okay, map buffer and hand off to hardware */ 770 /* Alignment is okay, map buffer and hand off to hardware */
771 txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE; 771 txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
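The mv643xx_eth fix above is about which slot owns the bounce area: tx_index is the descriptor this fragment was assigned, while tx_curr_desc is the ring's running cursor, which can already have advanced past it by the time the small unaligned fragment is copied, so indexing the TSO header area with the cursor corrupts another slot. Stated as the invariant (a sketch using the hunk's own field names):

        /* One bounce buffer per descriptor: both the CPU copy and the DMA
         * address given to the hardware must use this fragment's slot.
         */
        memcpy(txq->tso_hdrs + tx_index * TSO_HEADER_SIZE, data, length);
        desc->buf_ptr = txq->tso_hdrs_dma + tx_index * TSO_HEADER_SIZE;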
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index fabc8df40392..b0ae69f84493 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -11,28 +11,28 @@
11 * warranty of any kind, whether express or implied. 11 * warranty of any kind, whether express or implied.
12 */ 12 */
13 13
14#include <linux/kernel.h> 14#include <linux/clk.h>
15#include <linux/netdevice.h> 15#include <linux/cpu.h>
16#include <linux/etherdevice.h> 16#include <linux/etherdevice.h>
17#include <linux/platform_device.h> 17#include <linux/if_vlan.h>
18#include <linux/skbuff.h>
19#include <linux/inetdevice.h> 18#include <linux/inetdevice.h>
20#include <linux/mbus.h>
21#include <linux/module.h>
22#include <linux/interrupt.h> 19#include <linux/interrupt.h>
23#include <linux/if_vlan.h>
24#include <net/ip.h>
25#include <net/ipv6.h>
26#include <linux/io.h> 20#include <linux/io.h>
27#include <net/tso.h> 21#include <linux/kernel.h>
22#include <linux/mbus.h>
23#include <linux/module.h>
24#include <linux/netdevice.h>
28#include <linux/of.h> 25#include <linux/of.h>
26#include <linux/of_address.h>
29#include <linux/of_irq.h> 27#include <linux/of_irq.h>
30#include <linux/of_mdio.h> 28#include <linux/of_mdio.h>
31#include <linux/of_net.h> 29#include <linux/of_net.h>
32#include <linux/of_address.h>
33#include <linux/phy.h> 30#include <linux/phy.h>
34#include <linux/clk.h> 31#include <linux/platform_device.h>
35#include <linux/cpu.h> 32#include <linux/skbuff.h>
33#include <net/ip.h>
34#include <net/ipv6.h>
35#include <net/tso.h>
36 36
37/* Registers */ 37/* Registers */
38#define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2)) 38#define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2))
@@ -370,9 +370,16 @@ struct mvneta_port {
370 struct net_device *dev; 370 struct net_device *dev;
371 struct notifier_block cpu_notifier; 371 struct notifier_block cpu_notifier;
372 int rxq_def; 372 int rxq_def;
373 /* Protect the access to the percpu interrupt registers,
374 * ensuring that the configuration remains coherent.
375 */
376 spinlock_t lock;
377 bool is_stopped;
373 378
374 /* Core clock */ 379 /* Core clock */
375 struct clk *clk; 380 struct clk *clk;
381 /* AXI clock */
382 struct clk *clk_bus;
376 u8 mcast_count[256]; 383 u8 mcast_count[256];
377 u16 tx_ring_size; 384 u16 tx_ring_size;
378 u16 rx_ring_size; 385 u16 rx_ring_size;
@@ -1036,6 +1043,43 @@ static void mvneta_set_autoneg(struct mvneta_port *pp, int enable)
1036 } 1043 }
1037} 1044}
1038 1045
1046static void mvneta_percpu_unmask_interrupt(void *arg)
1047{
1048 struct mvneta_port *pp = arg;
1049
1050 /* All the queue are unmasked, but actually only the ones
1051 * mapped to this CPU will be unmasked
1052 */
1053 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
1054 MVNETA_RX_INTR_MASK_ALL |
1055 MVNETA_TX_INTR_MASK_ALL |
1056 MVNETA_MISCINTR_INTR_MASK);
1057}
1058
1059static void mvneta_percpu_mask_interrupt(void *arg)
1060{
1061 struct mvneta_port *pp = arg;
1062
1063 /* All the queue are masked, but actually only the ones
1064 * mapped to this CPU will be masked
1065 */
1066 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
1067 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
1068 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
1069}
1070
1071static void mvneta_percpu_clear_intr_cause(void *arg)
1072{
1073 struct mvneta_port *pp = arg;
1074
1075 /* All the queue are cleared, but actually only the ones
1076 * mapped to this CPU will be cleared
1077 */
1078 mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
1079 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
1080 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
1081}
1082
1039/* This method sets defaults to the NETA port: 1083/* This method sets defaults to the NETA port:
1040 * Clears interrupt Cause and Mask registers. 1084 * Clears interrupt Cause and Mask registers.
1041 * Clears all MAC tables. 1085 * Clears all MAC tables.
@@ -1053,14 +1097,10 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
1053 int max_cpu = num_present_cpus(); 1097 int max_cpu = num_present_cpus();
1054 1098
1055 /* Clear all Cause registers */ 1099 /* Clear all Cause registers */
1056 mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0); 1100 on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
1057 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
1058 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
1059 1101
1060 /* Mask all interrupts */ 1102 /* Mask all interrupts */
1061 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); 1103 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
1062 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
1063 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
1064 mvreg_write(pp, MVNETA_INTR_ENABLE, 0); 1104 mvreg_write(pp, MVNETA_INTR_ENABLE, 0);
1065 1105
1066 /* Enable MBUS Retry bit16 */ 1106 /* Enable MBUS Retry bit16 */
@@ -2526,34 +2566,9 @@ static int mvneta_setup_txqs(struct mvneta_port *pp)
2526 return 0; 2566 return 0;
2527} 2567}
2528 2568
2529static void mvneta_percpu_unmask_interrupt(void *arg)
2530{
2531 struct mvneta_port *pp = arg;
2532
2533 /* All the queue are unmasked, but actually only the ones
2534 * maped to this CPU will be unmasked
2535 */
2536 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
2537 MVNETA_RX_INTR_MASK_ALL |
2538 MVNETA_TX_INTR_MASK_ALL |
2539 MVNETA_MISCINTR_INTR_MASK);
2540}
2541
2542static void mvneta_percpu_mask_interrupt(void *arg)
2543{
2544 struct mvneta_port *pp = arg;
2545
2546 /* All the queue are masked, but actually only the ones
2547 * maped to this CPU will be masked
2548 */
2549 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
2550 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
2551 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
2552}
2553
2554static void mvneta_start_dev(struct mvneta_port *pp) 2569static void mvneta_start_dev(struct mvneta_port *pp)
2555{ 2570{
2556 unsigned int cpu; 2571 int cpu;
2557 2572
2558 mvneta_max_rx_size_set(pp, pp->pkt_size); 2573 mvneta_max_rx_size_set(pp, pp->pkt_size);
2559 mvneta_txq_max_tx_size_set(pp, pp->pkt_size); 2574 mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
@@ -2562,16 +2577,15 @@ static void mvneta_start_dev(struct mvneta_port *pp)
2562 mvneta_port_enable(pp); 2577 mvneta_port_enable(pp);
2563 2578
2564 /* Enable polling on the port */ 2579 /* Enable polling on the port */
2565 for_each_present_cpu(cpu) { 2580 for_each_online_cpu(cpu) {
2566 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); 2581 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
2567 2582
2568 napi_enable(&port->napi); 2583 napi_enable(&port->napi);
2569 } 2584 }
2570 2585
2571 /* Unmask interrupts. It has to be done from each CPU */ 2586 /* Unmask interrupts. It has to be done from each CPU */
2572 for_each_online_cpu(cpu) 2587 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
2573 smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt, 2588
2574 pp, true);
2575 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 2589 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
2576 MVNETA_CAUSE_PHY_STATUS_CHANGE | 2590 MVNETA_CAUSE_PHY_STATUS_CHANGE |
2577 MVNETA_CAUSE_LINK_CHANGE | 2591 MVNETA_CAUSE_LINK_CHANGE |
@@ -2587,7 +2601,7 @@ static void mvneta_stop_dev(struct mvneta_port *pp)
2587 2601
2588 phy_stop(pp->phy_dev); 2602 phy_stop(pp->phy_dev);
2589 2603
2590 for_each_present_cpu(cpu) { 2604 for_each_online_cpu(cpu) {
2591 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); 2605 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
2592 2606
2593 napi_disable(&port->napi); 2607 napi_disable(&port->napi);
@@ -2602,13 +2616,10 @@ static void mvneta_stop_dev(struct mvneta_port *pp)
2602 mvneta_port_disable(pp); 2616 mvneta_port_disable(pp);
2603 2617
2604 /* Clear all ethernet port interrupts */ 2618 /* Clear all ethernet port interrupts */
2605 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); 2619 on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
2606 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
2607 2620
2608 /* Mask all ethernet port interrupts */ 2621 /* Mask all ethernet port interrupts */
2609 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); 2622 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
2610 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
2611 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
2612 2623
2613 mvneta_tx_reset(pp); 2624 mvneta_tx_reset(pp);
2614 mvneta_rx_reset(pp); 2625 mvneta_rx_reset(pp);
@@ -2845,11 +2856,20 @@ static void mvneta_percpu_disable(void *arg)
2845 disable_percpu_irq(pp->dev->irq); 2856 disable_percpu_irq(pp->dev->irq);
2846} 2857}
2847 2858
2859/* Electing a CPU must be done atomically: it must happen either before
2860 * or after the removal/insertion of a CPU, and this function is not
2861 * reentrant.
2862 */
2848static void mvneta_percpu_elect(struct mvneta_port *pp) 2863static void mvneta_percpu_elect(struct mvneta_port *pp)
2849{ 2864{
2850 int online_cpu_idx, max_cpu, cpu, i = 0; 2865 int elected_cpu = 0, max_cpu, cpu, i = 0;
2866
2867 /* Use the CPU associated with the rxq when it is online; in all
2868 * other cases, use CPU 0, which can't be offline.
2869 */
2870 if (cpu_online(pp->rxq_def))
2871 elected_cpu = pp->rxq_def;
2851 2872
2852 online_cpu_idx = pp->rxq_def % num_online_cpus();
2853 max_cpu = num_present_cpus(); 2873 max_cpu = num_present_cpus();
2854 2874
2855 for_each_online_cpu(cpu) { 2875 for_each_online_cpu(cpu) {
@@ -2860,7 +2880,7 @@ static void mvneta_percpu_elect(struct mvneta_port *pp)
2860 if ((rxq % max_cpu) == cpu) 2880 if ((rxq % max_cpu) == cpu)
2861 rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq); 2881 rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
2862 2882
2863 if (i == online_cpu_idx) 2883 if (cpu == elected_cpu)
2864 /* Map the default receive queue to the 2884
2865 * elected CPU 2885 * elected CPU
2866 */ 2886 */
@@ -2871,7 +2891,7 @@ static void mvneta_percpu_elect(struct mvneta_port *pp)
2871 * the CPU bound to the default RX queue 2891 * the CPU bound to the default RX queue
2872 */ 2892 */
2873 if (txq_number == 1) 2893 if (txq_number == 1)
2874 txq_map = (i == online_cpu_idx) ? 2894 txq_map = (cpu == elected_cpu) ?
2875 MVNETA_CPU_TXQ_ACCESS(1) : 0; 2895 MVNETA_CPU_TXQ_ACCESS(1) : 0;
2876 else 2896 else
2877 txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) & 2897 txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
@@ -2900,6 +2920,14 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
2900 switch (action) { 2920 switch (action) {
2901 case CPU_ONLINE: 2921 case CPU_ONLINE:
2902 case CPU_ONLINE_FROZEN: 2922 case CPU_ONLINE_FROZEN:
2923 spin_lock(&pp->lock);
2924 /* Configuring the driver for a new CPU while the
2925 * driver is stopping is racy, so just avoid it.
2926 */
2927 if (pp->is_stopped) {
2928 spin_unlock(&pp->lock);
2929 break;
2930 }
2903 netif_tx_stop_all_queues(pp->dev); 2931 netif_tx_stop_all_queues(pp->dev);
2904 2932
2905 /* We have to synchronise on the napi of each CPU 2933
@@ -2915,9 +2943,7 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
2915 } 2943 }
2916 2944
2917 /* Mask all ethernet port interrupts */ 2945 /* Mask all ethernet port interrupts */
2918 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); 2946 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
2919 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
2920 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
2921 napi_enable(&port->napi); 2947 napi_enable(&port->napi);
2922 2948
2923 2949
@@ -2932,27 +2958,25 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
2932 */ 2958 */
2933 mvneta_percpu_elect(pp); 2959 mvneta_percpu_elect(pp);
2934 2960
2935 /* Unmask all ethernet port interrupts, as this 2961 /* Unmask all ethernet port interrupts */
2936 * notifier is called for each CPU then the CPU to 2962 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
2937 * Queue mapping is applied
2938 */
2939 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
2940 MVNETA_RX_INTR_MASK(rxq_number) |
2941 MVNETA_TX_INTR_MASK(txq_number) |
2942 MVNETA_MISCINTR_INTR_MASK);
2943 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 2963 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
2944 MVNETA_CAUSE_PHY_STATUS_CHANGE | 2964 MVNETA_CAUSE_PHY_STATUS_CHANGE |
2945 MVNETA_CAUSE_LINK_CHANGE | 2965 MVNETA_CAUSE_LINK_CHANGE |
2946 MVNETA_CAUSE_PSC_SYNC_CHANGE); 2966 MVNETA_CAUSE_PSC_SYNC_CHANGE);
2947 netif_tx_start_all_queues(pp->dev); 2967 netif_tx_start_all_queues(pp->dev);
2968 spin_unlock(&pp->lock);
2948 break; 2969 break;
2949 case CPU_DOWN_PREPARE: 2970 case CPU_DOWN_PREPARE:
2950 case CPU_DOWN_PREPARE_FROZEN: 2971 case CPU_DOWN_PREPARE_FROZEN:
2951 netif_tx_stop_all_queues(pp->dev); 2972 netif_tx_stop_all_queues(pp->dev);
2973 /* This lock guarantees that any pending CPU election
2974 * has completed
2975 */
2976 spin_lock(&pp->lock);
2952 /* Mask all ethernet port interrupts */ 2977 /* Mask all ethernet port interrupts */
2953 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); 2978 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
2954 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0); 2979 spin_unlock(&pp->lock);
2955 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
2956 2980
2957 napi_synchronize(&port->napi); 2981 napi_synchronize(&port->napi);
2958 napi_disable(&port->napi); 2982 napi_disable(&port->napi);
@@ -2966,12 +2990,11 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
2966 case CPU_DEAD: 2990 case CPU_DEAD:
2967 case CPU_DEAD_FROZEN: 2991 case CPU_DEAD_FROZEN:
2968 /* Check if a new CPU must be elected now this one is down */ 2992
2993 spin_lock(&pp->lock);
2969 mvneta_percpu_elect(pp); 2994 mvneta_percpu_elect(pp);
2995 spin_unlock(&pp->lock);
2970 /* Unmask all ethernet port interrupts */ 2996 /* Unmask all ethernet port interrupts */
2971 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 2997 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
2972 MVNETA_RX_INTR_MASK(rxq_number) |
2973 MVNETA_TX_INTR_MASK(txq_number) |
2974 MVNETA_MISCINTR_INTR_MASK);
2975 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 2998 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
2976 MVNETA_CAUSE_PHY_STATUS_CHANGE | 2999 MVNETA_CAUSE_PHY_STATUS_CHANGE |
2977 MVNETA_CAUSE_LINK_CHANGE | 3000 MVNETA_CAUSE_LINK_CHANGE |
@@ -2986,7 +3009,7 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
2986static int mvneta_open(struct net_device *dev) 3009static int mvneta_open(struct net_device *dev)
2987{ 3010{
2988 struct mvneta_port *pp = netdev_priv(dev); 3011 struct mvneta_port *pp = netdev_priv(dev);
2989 int ret, cpu; 3012 int ret;
2990 3013
2991 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu); 3014 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
2992 pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) + 3015 pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
@@ -3008,22 +3031,12 @@ static int mvneta_open(struct net_device *dev)
3008 goto err_cleanup_txqs; 3031 goto err_cleanup_txqs;
3009 } 3032 }
3010 3033
3011 /* Even though the documentation says that request_percpu_irq
3012 * doesn't enable the interrupts automatically, it actually
3013 * does so on the local CPU.
3014 *
3015 * Make sure it's disabled.
3016 */
3017 mvneta_percpu_disable(pp);
3018
3019 /* Enable per-CPU interrupt on all the CPU to handle our RX 3034 /* Enable per-CPU interrupt on all the CPU to handle our RX
3020 * queue interrupts 3035 * queue interrupts
3021 */ 3036 */
3022 for_each_online_cpu(cpu) 3037 on_each_cpu(mvneta_percpu_enable, pp, true);
3023 smp_call_function_single(cpu, mvneta_percpu_enable,
3024 pp, true);
3025
3026 3038
3039 pp->is_stopped = false;
3027 /* Register a CPU notifier to handle the case where our CPU 3040 /* Register a CPU notifier to handle the case where our CPU
3028 * might be taken offline. 3041 * might be taken offline.
3029 */ 3042 */
@@ -3055,13 +3068,20 @@ err_cleanup_rxqs:
3055static int mvneta_stop(struct net_device *dev) 3068static int mvneta_stop(struct net_device *dev)
3056{ 3069{
3057 struct mvneta_port *pp = netdev_priv(dev); 3070 struct mvneta_port *pp = netdev_priv(dev);
3058 int cpu;
3059 3071
3072 /* Signal that we are stopping so that the notifier does not set
3073 * up the driver for any new CPUs
3074 */
3075 spin_lock(&pp->lock);
3076 pp->is_stopped = true;
3060 mvneta_stop_dev(pp); 3077 mvneta_stop_dev(pp);
3061 mvneta_mdio_remove(pp); 3078 mvneta_mdio_remove(pp);
3062 unregister_cpu_notifier(&pp->cpu_notifier); 3079 unregister_cpu_notifier(&pp->cpu_notifier);
3063 for_each_present_cpu(cpu) 3080 /* Now that the notifier is unregistered, we can release the
3064 smp_call_function_single(cpu, mvneta_percpu_disable, pp, true); 3081 * lock
3082 */
3083 spin_unlock(&pp->lock);
3084 on_each_cpu(mvneta_percpu_disable, pp, true);
3065 free_percpu_irq(dev->irq, pp->ports); 3085 free_percpu_irq(dev->irq, pp->ports);
3066 mvneta_cleanup_rxqs(pp); 3086 mvneta_cleanup_rxqs(pp);
3067 mvneta_cleanup_txqs(pp); 3087 mvneta_cleanup_txqs(pp);
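The notifier and mvneta_stop() hunks above close a race between CPU hotplug and ndo_stop with a driver-private spinlock plus an is_stopped flag. A condensed sketch of just that synchronization, under the assumption that the notifier and the stop path are the only writers (the demo_* names are illustrative):

#include <linux/spinlock.h>

struct demo_port {
	spinlock_t lock;	/* serializes CPU election vs. stop */
	bool is_stopped;
};

/* CPU_ONLINE notifier path: bail out once the device is going down */
static void demo_cpu_online(struct demo_port *pp)
{
	spin_lock(&pp->lock);
	if (pp->is_stopped) {
		spin_unlock(&pp->lock);
		return;
	}
	/* ... elect a CPU, remap queues, unmask interrupts ... */
	spin_unlock(&pp->lock);
}

/* ndo_stop path: raise the flag under the same lock before tearing down */
static void demo_stop(struct demo_port *pp)
{
	spin_lock(&pp->lock);
	pp->is_stopped = true;
	/* ... stop the device ... */
	spin_unlock(&pp->lock);
	/* a notifier that fires after this point sees is_stopped == true
	 * and returns without reconfiguring the port */
}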
@@ -3242,26 +3262,25 @@ static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
3242 const struct mvneta_statistic *s; 3262 const struct mvneta_statistic *s;
3243 void __iomem *base = pp->base; 3263 void __iomem *base = pp->base;
3244 u32 high, low, val; 3264 u32 high, low, val;
3265 u64 val64;
3245 int i; 3266 int i;
3246 3267
3247 for (i = 0, s = mvneta_statistics; 3268 for (i = 0, s = mvneta_statistics;
3248 s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics); 3269 s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
3249 s++, i++) { 3270 s++, i++) {
3250 val = 0;
3251
3252 switch (s->type) { 3271 switch (s->type) {
3253 case T_REG_32: 3272 case T_REG_32:
3254 val = readl_relaxed(base + s->offset); 3273 val = readl_relaxed(base + s->offset);
3274 pp->ethtool_stats[i] += val;
3255 break; 3275 break;
3256 case T_REG_64: 3276 case T_REG_64:
3257 /* Docs say to read low 32-bit then high */ 3277 /* Docs say to read low 32-bit then high */
3258 low = readl_relaxed(base + s->offset); 3278 low = readl_relaxed(base + s->offset);
3259 high = readl_relaxed(base + s->offset + 4); 3279 high = readl_relaxed(base + s->offset + 4);
3260 val = (u64)high << 32 | low; 3280 val64 = (u64)high << 32 | low;
3281 pp->ethtool_stats[i] += val64;
3261 break; 3282 break;
3262 } 3283 }
3263
3264 pp->ethtool_stats[i] += val;
3265 } 3284 }
3266} 3285}
3267 3286
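The ethtool statistics hunk above stops routing T_REG_64 counters through a 32-bit val, which truncated the high word before it was accumulated. A short sketch of the corrected pattern, reading the two 32-bit halves (low word first, as the driver comment requires) and accumulating into a u64; the demo_* names and offsets are illustrative:

#include <linux/io.h>
#include <linux/types.h>

static u64 demo_read_counter64(void __iomem *base, unsigned int off)
{
	u32 low, high;

	low  = readl_relaxed(base + off);	/* low word first, per the docs */
	high = readl_relaxed(base + off + 4);

	return (u64)high << 32 | low;		/* no 32-bit truncation */
}

static void demo_accumulate(u64 *stat, void __iomem *base, unsigned int off)
{
	*stat += demo_read_counter64(base, off);
}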
@@ -3311,9 +3330,7 @@ static int mvneta_config_rss(struct mvneta_port *pp)
3311 3330
3312 netif_tx_stop_all_queues(pp->dev); 3331 netif_tx_stop_all_queues(pp->dev);
3313 3332
3314 for_each_online_cpu(cpu) 3333 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
3315 smp_call_function_single(cpu, mvneta_percpu_mask_interrupt,
3316 pp, true);
3317 3334
3318 /* We have to synchronise on the napi of each CPU */ 3335 /* We have to synchronise on the napi of each CPU */
3319 for_each_online_cpu(cpu) { 3336 for_each_online_cpu(cpu) {
@@ -3334,7 +3351,9 @@ static int mvneta_config_rss(struct mvneta_port *pp)
3334 mvreg_write(pp, MVNETA_PORT_CONFIG, val); 3351 mvreg_write(pp, MVNETA_PORT_CONFIG, val);
3335 3352
3336 /* Update the elected CPU matching the new rxq_def */ 3353 /* Update the elected CPU matching the new rxq_def */
3354 spin_lock(&pp->lock);
3337 mvneta_percpu_elect(pp); 3355 mvneta_percpu_elect(pp);
3356 spin_unlock(&pp->lock);
3338 3357
3339 /* We have to synchronise on the napi of each CPU */ 3358 /* We have to synchronise on the napi of each CPU */
3340 for_each_online_cpu(cpu) { 3359 for_each_online_cpu(cpu) {
@@ -3605,7 +3624,9 @@ static int mvneta_probe(struct platform_device *pdev)
3605 3624
3606 pp->indir[0] = rxq_def; 3625 pp->indir[0] = rxq_def;
3607 3626
3608 pp->clk = devm_clk_get(&pdev->dev, NULL); 3627 pp->clk = devm_clk_get(&pdev->dev, "core");
3628 if (IS_ERR(pp->clk))
3629 pp->clk = devm_clk_get(&pdev->dev, NULL);
3609 if (IS_ERR(pp->clk)) { 3630 if (IS_ERR(pp->clk)) {
3610 err = PTR_ERR(pp->clk); 3631 err = PTR_ERR(pp->clk);
3611 goto err_put_phy_node; 3632 goto err_put_phy_node;
@@ -3613,6 +3634,10 @@ static int mvneta_probe(struct platform_device *pdev)
3613 3634
3614 clk_prepare_enable(pp->clk); 3635 clk_prepare_enable(pp->clk);
3615 3636
3637 pp->clk_bus = devm_clk_get(&pdev->dev, "bus");
3638 if (!IS_ERR(pp->clk_bus))
3639 clk_prepare_enable(pp->clk_bus);
3640
3616 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 3641 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3617 pp->base = devm_ioremap_resource(&pdev->dev, res); 3642 pp->base = devm_ioremap_resource(&pdev->dev, res);
3618 if (IS_ERR(pp->base)) { 3643 if (IS_ERR(pp->base)) {
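The probe hunk above asks for a clock named "core" first, falls back to the unnamed clock for older device trees, and then enables an optional "bus" clock only when it is present. A sketch of that optional-clock handling in isolation; the demo_* wrapper is illustrative, the clk calls are the standard API:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int demo_get_clocks(struct platform_device *pdev,
			   struct clk **core, struct clk **bus)
{
	/* prefer the named "core" clock, fall back to the first clock for
	 * device trees that predate the clock-names property */
	*core = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(*core))
		*core = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(*core))
		return PTR_ERR(*core);
	clk_prepare_enable(*core);

	/* the "bus" clock is optional: enable it only if the DT provides it */
	*bus = devm_clk_get(&pdev->dev, "bus");
	if (!IS_ERR(*bus))
		clk_prepare_enable(*bus);

	return 0;
}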
@@ -3724,6 +3749,7 @@ err_free_stats:
3724err_free_ports: 3749err_free_ports:
3725 free_percpu(pp->ports); 3750 free_percpu(pp->ports);
3726err_clk: 3751err_clk:
3752 clk_disable_unprepare(pp->clk_bus);
3727 clk_disable_unprepare(pp->clk); 3753 clk_disable_unprepare(pp->clk);
3728err_put_phy_node: 3754err_put_phy_node:
3729 of_node_put(phy_node); 3755 of_node_put(phy_node);
@@ -3741,6 +3767,7 @@ static int mvneta_remove(struct platform_device *pdev)
3741 struct mvneta_port *pp = netdev_priv(dev); 3767 struct mvneta_port *pp = netdev_priv(dev);
3742 3768
3743 unregister_netdev(dev); 3769 unregister_netdev(dev);
3770 clk_disable_unprepare(pp->clk_bus);
3744 clk_disable_unprepare(pp->clk); 3771 clk_disable_unprepare(pp->clk);
3745 free_percpu(pp->ports); 3772 free_percpu(pp->ports);
3746 free_percpu(pp->stats); 3773 free_percpu(pp->stats);
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index a4beccf1fd46..c797971aefab 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -3061,7 +3061,7 @@ static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
3061 3061
3062 pe = kzalloc(sizeof(*pe), GFP_KERNEL); 3062 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3063 if (!pe) 3063 if (!pe)
3064 return -1; 3064 return -ENOMEM;
3065 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC); 3065 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
3066 pe->index = tid; 3066 pe->index = tid;
3067 3067
@@ -3077,7 +3077,7 @@ static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
3077 if (pmap == 0) { 3077 if (pmap == 0) {
3078 if (add) { 3078 if (add) {
3079 kfree(pe); 3079 kfree(pe);
3080 return -1; 3080 return -EINVAL;
3081 } 3081 }
3082 mvpp2_prs_hw_inv(priv, pe->index); 3082 mvpp2_prs_hw_inv(priv, pe->index);
3083 priv->prs_shadow[pe->index].valid = false; 3083 priv->prs_shadow[pe->index].valid = false;
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index 715de8affcc9..c7e939945259 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -182,10 +182,17 @@ void mlx4_enter_error_state(struct mlx4_dev_persistent *persist)
182 err = mlx4_reset_slave(dev); 182 err = mlx4_reset_slave(dev);
183 else 183 else
184 err = mlx4_reset_master(dev); 184 err = mlx4_reset_master(dev);
185 BUG_ON(err != 0);
186 185
186 if (!err) {
187 mlx4_err(dev, "device was reset successfully\n");
188 } else {
189 /* EEH could have disabled the PCI channel during reset. That's
190 * recoverable and the PCI error flow will handle it.
191 */
192 if (!pci_channel_offline(dev->persist->pdev))
193 BUG_ON(1);
194 }
187 dev->persist->state |= MLX4_DEVICE_STATE_INTERNAL_ERROR; 195 dev->persist->state |= MLX4_DEVICE_STATE_INTERNAL_ERROR;
188 mlx4_err(dev, "device was reset successfully\n");
189 mutex_unlock(&persist->device_state_mutex); 196 mutex_unlock(&persist->device_state_mutex);
190 197
191 /* At that step HW was already reset, now notify clients */ 198 /* At that step HW was already reset, now notify clients */
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index d48d5793407d..e94ca1c3fc7c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2429,7 +2429,7 @@ err_thread:
2429 flush_workqueue(priv->mfunc.master.comm_wq); 2429 flush_workqueue(priv->mfunc.master.comm_wq);
2430 destroy_workqueue(priv->mfunc.master.comm_wq); 2430 destroy_workqueue(priv->mfunc.master.comm_wq);
2431err_slaves: 2431err_slaves:
2432 while (--i) { 2432 while (i--) {
2433 for (port = 1; port <= MLX4_MAX_PORTS; port++) 2433 for (port = 1; port <= MLX4_MAX_PORTS; port++)
2434 kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]); 2434 kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
2435 } 2435 }
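The cmd.c error-unwind fix above turns while (--i) into while (i--). The distinction is easy to miss: the pre-decrement form never processes index 0 and misbehaves when i is already 0, whereas the post-decrement form walks i-1 down to 0 and stops cleanly. A tiny standalone illustration:

#include <stdio.h>

int main(void)
{
	int i;

	/* pre-decrement form: visits 2 and 1 only; slot 0 is skipped,
	 * and if i started at 0 the loop would run with i == -1 */
	i = 3;
	while (--i)
		printf("--i form: slot %d\n", i);

	/* post-decrement form: visits 2, 1 and 0, and does nothing
	 * at all when i starts at 0 */
	i = 3;
	while (i--)
		printf("i-- form: slot %d\n", i);

	return 0;
}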
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 3348e646db70..a849da92f857 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -318,7 +318,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
318 if (timestamp_en) 318 if (timestamp_en)
319 cq_context->flags |= cpu_to_be32(1 << 19); 319 cq_context->flags |= cpu_to_be32(1 << 19);
320 320
321 cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index); 321 cq_context->logsize_usrpage =
322 cpu_to_be32((ilog2(nent) << 24) |
323 mlx4_to_hw_uar_index(dev, uar->index));
322 cq_context->comp_eqn = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn; 324 cq_context->comp_eqn = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn;
323 cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT; 325 cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
324 326
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index 038f9ce391e6..1494997c4f7e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -236,6 +236,24 @@ static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
236 .enable = mlx4_en_phc_enable, 236 .enable = mlx4_en_phc_enable,
237}; 237};
238 238
239#define MLX4_EN_WRAP_AROUND_SEC 10ULL
240
241/* This function calculates the max shift that still allows a range of
242 * MLX4_EN_WRAP_AROUND_SEC seconds of cycle values in the cycles register.
243 */
244static u32 freq_to_shift(u16 freq)
245{
246 u32 freq_khz = freq * 1000;
247 u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
248 u64 max_val_cycles_rounded = is_power_of_2(max_val_cycles + 1) ?
249 max_val_cycles : roundup_pow_of_two(max_val_cycles) - 1;
250 /* calculate max possible multiplier in order to fit in 64bit */
251 u64 max_mul = div_u64(0xffffffffffffffffULL, max_val_cycles_rounded);
252
253 /* This comes from the reverse of clocksource_khz2mult */
254 return ilog2(div_u64(max_mul * freq_khz, 1000000));
255}
256
239void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev) 257void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
240{ 258{
241 struct mlx4_dev *dev = mdev->dev; 259 struct mlx4_dev *dev = mdev->dev;
@@ -254,12 +272,7 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
254 memset(&mdev->cycles, 0, sizeof(mdev->cycles)); 272 memset(&mdev->cycles, 0, sizeof(mdev->cycles));
255 mdev->cycles.read = mlx4_en_read_clock; 273 mdev->cycles.read = mlx4_en_read_clock;
256 mdev->cycles.mask = CLOCKSOURCE_MASK(48); 274 mdev->cycles.mask = CLOCKSOURCE_MASK(48);
257 /* Using shift to make calculation more accurate. Since current HW 275 mdev->cycles.shift = freq_to_shift(dev->caps.hca_core_clock);
258 * clock frequency is 427 MHz, and cycles are given using a 48 bits
259 * register, the biggest shift when calculating using u64, is 14
260 * (max_cycles * multiplier < 2^64)
261 */
262 mdev->cycles.shift = 14;
263 mdev->cycles.mult = 276 mdev->cycles.mult =
264 clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift); 277 clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift);
265 mdev->nominal_c_mult = mdev->cycles.mult; 278 mdev->nominal_c_mult = mdev->cycles.mult;
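The freq_to_shift() helper added above derives the clocksource shift from the core clock frequency instead of hard-coding 14, so that MLX4_EN_WRAP_AROUND_SEC seconds of cycles can be multiplied without overflowing 64 bits. Below is a standalone re-implementation of the same arithmetic for illustration, with the kernel helpers (is_power_of_2, roundup_pow_of_two, div_u64, ilog2) replaced by plain C; for the 427 MHz frequency mentioned in the removed comment it yields a shift of about 30:

#include <stdint.h>
#include <stdio.h>

#define WRAP_AROUND_SEC 10ULL

static uint64_t roundup_pow2(uint64_t v)	/* roundup_pow_of_two() */
{
	uint64_t p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

static unsigned int ilog2_u64(uint64_t v)	/* ilog2() */
{
	unsigned int l = 0;

	while (v >>= 1)
		l++;
	return l;
}

static unsigned int freq_to_shift(uint16_t freq_mhz)
{
	uint64_t freq_khz = freq_mhz * 1000ULL;
	uint64_t max_val_cycles = freq_khz * 1000 * WRAP_AROUND_SEC;
	uint64_t rounded = (((max_val_cycles + 1) & max_val_cycles) == 0) ?
			   max_val_cycles : roundup_pow2(max_val_cycles) - 1;
	/* largest multiplier that keeps max_val_cycles * mult inside 64 bits */
	uint64_t max_mul = UINT64_MAX / rounded;

	/* reverse of clocksource_khz2mult(): mult = (1e6 << shift) / khz */
	return ilog2_u64(max_mul * freq_khz / 1000000);
}

int main(void)
{
	printf("shift = %u\n", freq_to_shift(427));	/* ~30 vs. the old 14 */
	return 0;
}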
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 0c7e3f69a73b..21e2c0960271 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2245,7 +2245,7 @@ static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
2245 struct mlx4_en_dev *mdev = en_priv->mdev; 2245 struct mlx4_en_dev *mdev = en_priv->mdev;
2246 u64 mac_u64 = mlx4_mac_to_u64(mac); 2246 u64 mac_u64 = mlx4_mac_to_u64(mac);
2247 2247
2248 if (!is_valid_ether_addr(mac)) 2248 if (is_multicast_ether_addr(mac))
2249 return -EINVAL; 2249 return -EINVAL;
2250 2250
2251 return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64); 2251 return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64);
@@ -2344,8 +2344,6 @@ out:
2344 /* set offloads */ 2344 /* set offloads */
2345 priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM | 2345 priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
2346 NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL; 2346 NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
2347 priv->dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
2348 priv->dev->features |= NETIF_F_GSO_UDP_TUNNEL;
2349} 2347}
2350 2348
2351static void mlx4_en_del_vxlan_offloads(struct work_struct *work) 2349static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
@@ -2356,8 +2354,6 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
2356 /* unset offloads */ 2354 /* unset offloads */
2357 priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM | 2355 priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
2358 NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL); 2356 NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL);
2359 priv->dev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL;
2360 priv->dev->features &= ~NETIF_F_GSO_UDP_TUNNEL;
2361 2357
2362 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, 2358 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
2363 VXLAN_STEER_BY_OUTER_MAC, 0); 2359 VXLAN_STEER_BY_OUTER_MAC, 0);
@@ -2980,6 +2976,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2980 priv->rss_hash_fn = ETH_RSS_HASH_TOP; 2976 priv->rss_hash_fn = ETH_RSS_HASH_TOP;
2981 } 2977 }
2982 2978
2979 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
2980 dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
2981 dev->features |= NETIF_F_GSO_UDP_TUNNEL;
2982 }
2983
2983 mdev->pndev[port] = dev; 2984 mdev->pndev[port] = dev;
2984 mdev->upper[port] = NULL; 2985 mdev->upper[port] = NULL;
2985 2986
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index ee99e67187f5..3904b5fc0b7c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -238,11 +238,11 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
238 stats->collisions = 0; 238 stats->collisions = 0;
239 stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP); 239 stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP);
240 stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength); 240 stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
241 stats->rx_over_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw); 241 stats->rx_over_errors = 0;
242 stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC); 242 stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
243 stats->rx_frame_errors = 0; 243 stats->rx_frame_errors = 0;
244 stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw); 244 stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
245 stats->rx_missed_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw); 245 stats->rx_missed_errors = 0;
246 stats->tx_aborted_errors = 0; 246 stats->tx_aborted_errors = 0;
247 stats->tx_carrier_errors = 0; 247 stats->tx_carrier_errors = 0;
248 stats->tx_fifo_errors = 0; 248 stats->tx_fifo_errors = 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
index 12aab5a659d3..02e925d6f734 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
@@ -58,7 +58,8 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
58 } else { 58 } else {
59 context->sq_size_stride = ilog2(TXBB_SIZE) - 4; 59 context->sq_size_stride = ilog2(TXBB_SIZE) - 4;
60 } 60 }
61 context->usr_page = cpu_to_be32(mdev->priv_uar.index); 61 context->usr_page = cpu_to_be32(mlx4_to_hw_uar_index(mdev->dev,
62 mdev->priv_uar.index));
62 context->local_qpn = cpu_to_be32(qpn); 63 context->local_qpn = cpu_to_be32(qpn);
63 context->pri_path.ackto = 1 & 0x07; 64 context->pri_path.ackto = 1 & 0x07;
64 context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6; 65 context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 4421bf5463f6..e0946ab22010 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -213,7 +213,9 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
213 mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, 213 mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
214 ring->cqn, user_prio, &ring->context); 214 ring->cqn, user_prio, &ring->context);
215 if (ring->bf_alloced) 215 if (ring->bf_alloced)
216 ring->context.usr_page = cpu_to_be32(ring->bf.uar->index); 216 ring->context.usr_page =
217 cpu_to_be32(mlx4_to_hw_uar_index(mdev->dev,
218 ring->bf.uar->index));
217 219
218 err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context, 220 err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
219 &ring->qp, &ring->qp_state); 221 &ring->qp, &ring->qp_state);
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 4696053165f8..f613977455e0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -940,9 +940,10 @@ static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
940 940
941 if (!priv->eq_table.uar_map[index]) { 941 if (!priv->eq_table.uar_map[index]) {
942 priv->eq_table.uar_map[index] = 942 priv->eq_table.uar_map[index] =
943 ioremap(pci_resource_start(dev->persist->pdev, 2) + 943 ioremap(
944 ((eq->eqn / 4) << PAGE_SHIFT), 944 pci_resource_start(dev->persist->pdev, 2) +
945 PAGE_SIZE); 945 ((eq->eqn / 4) << (dev->uar_page_shift)),
946 (1 << (dev->uar_page_shift)));
946 if (!priv->eq_table.uar_map[index]) { 947 if (!priv->eq_table.uar_map[index]) {
947 mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n", 948 mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n",
948 eq->eqn); 949 eq->eqn);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index f1b6d219e445..f8674ae62752 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -168,6 +168,20 @@ struct mlx4_port_config {
168 168
169static atomic_t pf_loading = ATOMIC_INIT(0); 169static atomic_t pf_loading = ATOMIC_INIT(0);
170 170
171static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev,
172 struct mlx4_dev_cap *dev_cap)
173{
174 /* reserved_uars is accounted in units of the system page size, so
175 * an adjustment is needed when the UAR page size is smaller than
176 * the system page size
177 */
178 dev->caps.reserved_uars =
179 max_t(int,
180 mlx4_get_num_reserved_uar(dev),
181 dev_cap->reserved_uars /
182 (1 << (PAGE_SHIFT - dev->uar_page_shift)));
183}
184
171int mlx4_check_port_params(struct mlx4_dev *dev, 185int mlx4_check_port_params(struct mlx4_dev *dev,
172 enum mlx4_port_type *port_type) 186 enum mlx4_port_type *port_type)
173{ 187{
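mlx4_set_num_reserved_uars() above keeps dev->caps.reserved_uars in system-page units, so the firmware's count has to be scaled down whenever the default 4 KB UAR page is smaller than the kernel's page size. A small worked example of that scaling; the concrete numbers (64 KB kernel pages, 256 reserved UARs from firmware, a floor of 8) are assumptions for illustration, not values from the driver:

#include <stdio.h>

#define SYSTEM_PAGE_SHIFT	16	/* e.g. a 64 KB-page kernel */
#define UAR_PAGE_SHIFT		12	/* default 4 KB UAR pages */
#define MIN_RESERVED_UAR	8	/* stand-in for mlx4_get_num_reserved_uar() */

int main(void)
{
	int fw_reserved_uars = 256;	/* hypothetical firmware value */

	/* one 64 KB system page holds 2^(16-12) = 16 UAR pages, so the
	 * firmware count shrinks by that factor in system-page units */
	int scaled = fw_reserved_uars / (1 << (SYSTEM_PAGE_SHIFT - UAR_PAGE_SHIFT));
	int reserved = scaled > MIN_RESERVED_UAR ? scaled : MIN_RESERVED_UAR;

	printf("reserved_uars = %d\n", reserved);	/* 256 / 16 = 16 */
	return 0;
}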
@@ -386,8 +400,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
386 dev->caps.reserved_mtts = dev_cap->reserved_mtts; 400 dev->caps.reserved_mtts = dev_cap->reserved_mtts;
387 dev->caps.reserved_mrws = dev_cap->reserved_mrws; 401 dev->caps.reserved_mrws = dev_cap->reserved_mrws;
388 402
389 /* The first 128 UARs are used for EQ doorbells */
390 dev->caps.reserved_uars = max_t(int, 128, dev_cap->reserved_uars);
391 dev->caps.reserved_pds = dev_cap->reserved_pds; 403 dev->caps.reserved_pds = dev_cap->reserved_pds;
392 dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ? 404 dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
393 dev_cap->reserved_xrcds : 0; 405 dev_cap->reserved_xrcds : 0;
@@ -405,6 +417,15 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
405 dev->caps.max_gso_sz = dev_cap->max_gso_sz; 417 dev->caps.max_gso_sz = dev_cap->max_gso_sz;
406 dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz; 418 dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;
407 419
420 /* Save uar page shift */
421 if (!mlx4_is_slave(dev)) {
422 /* A virtual PCI function needs to determine the UAR page size from
423 * firmware. Only the master PCI function can set the UAR page size
424 */
425 dev->uar_page_shift = DEFAULT_UAR_PAGE_SHIFT;
426 mlx4_set_num_reserved_uars(dev, dev_cap);
427 }
428
408 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN) { 429 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN) {
409 struct mlx4_init_hca_param hca_param; 430 struct mlx4_init_hca_param hca_param;
410 431
@@ -815,16 +836,25 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
815 return -ENODEV; 836 return -ENODEV;
816 } 837 }
817 838
818 /* slave gets uar page size from QUERY_HCA fw command */ 839 /* Set uar_page_shift for VF */
819 dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12); 840 dev->uar_page_shift = hca_param.uar_page_sz + 12;
820 841
821 /* TODO: relax this assumption */ 842 /* Make sure the master uar page size is valid */
822 if (dev->caps.uar_page_size != PAGE_SIZE) { 843 if (dev->uar_page_shift > PAGE_SHIFT) {
823 mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %ld\n", 844 mlx4_err(dev,
824 dev->caps.uar_page_size, PAGE_SIZE); 845 "Invalid configuration: uar page size is larger than system page size\n");
825 return -ENODEV; 846 return -ENODEV;
826 } 847 }
827 848
849 /* Set reserved_uars based on the uar_page_shift */
850 mlx4_set_num_reserved_uars(dev, &dev_cap);
851
852 /* Although uar page size in FW differs from system page size,
853 * upper software layers (mlx4_ib, mlx4_en and part of mlx4_core)
854 * still work with the assumption that uar page size == system page size
855 */
856 dev->caps.uar_page_size = PAGE_SIZE;
857
828 memset(&func_cap, 0, sizeof(func_cap)); 858 memset(&func_cap, 0, sizeof(func_cap));
829 err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap); 859 err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap);
830 if (err) { 860 if (err) {
@@ -1226,6 +1256,7 @@ err_set_port:
1226static int mlx4_mf_bond(struct mlx4_dev *dev) 1256static int mlx4_mf_bond(struct mlx4_dev *dev)
1227{ 1257{
1228 int err = 0; 1258 int err = 0;
1259 int nvfs;
1229 struct mlx4_slaves_pport slaves_port1; 1260 struct mlx4_slaves_pport slaves_port1;
1230 struct mlx4_slaves_pport slaves_port2; 1261 struct mlx4_slaves_pport slaves_port2;
1231 DECLARE_BITMAP(slaves_port_1_2, MLX4_MFUNC_MAX); 1262 DECLARE_BITMAP(slaves_port_1_2, MLX4_MFUNC_MAX);
@@ -1242,11 +1273,18 @@ static int mlx4_mf_bond(struct mlx4_dev *dev)
1242 return -EINVAL; 1273 return -EINVAL;
1243 } 1274 }
1244 1275
1276 /* the number of virtual functions is the total number of functions
1277 * minus one physical function per port.
1278 */
1279 nvfs = bitmap_weight(slaves_port1.slaves, dev->persist->num_vfs + 1) +
1280 bitmap_weight(slaves_port2.slaves, dev->persist->num_vfs + 1) - 2;
1281
1245 /* limit on maximum allowed VFs */ 1282 /* limit on maximum allowed VFs */
1246 if ((bitmap_weight(slaves_port1.slaves, dev->persist->num_vfs + 1) + 1283 if (nvfs > MAX_MF_BOND_ALLOWED_SLAVES) {
1247 bitmap_weight(slaves_port2.slaves, dev->persist->num_vfs + 1)) > 1284 mlx4_warn(dev, "HA mode is not supported for %d VFs (max %d are allowed)\n",
1248 MAX_MF_BOND_ALLOWED_SLAVES) 1285 nvfs, MAX_MF_BOND_ALLOWED_SLAVES);
1249 return -EINVAL; 1286 return -EINVAL;
1287 }
1250 1288
1251 if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) { 1289 if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
1252 mlx4_warn(dev, "HA mode unsupported for NON DMFS steering\n"); 1290 mlx4_warn(dev, "HA mode unsupported for NON DMFS steering\n");
@@ -2179,8 +2217,12 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
2179 2217
2180 dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1; 2218 dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;
2181 2219
2182 init_hca.log_uar_sz = ilog2(dev->caps.num_uars); 2220 /* Always set UAR page size 4KB, set log_uar_sz accordingly */
2183 init_hca.uar_page_sz = PAGE_SHIFT - 12; 2221 init_hca.log_uar_sz = ilog2(dev->caps.num_uars) +
2222 PAGE_SHIFT -
2223 DEFAULT_UAR_PAGE_SHIFT;
2224 init_hca.uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12;
2225
2184 init_hca.mw_enabled = 0; 2226 init_hca.mw_enabled = 0;
2185 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW || 2227 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
2186 dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) 2228 dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)
diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c
index 609c59dc854e..b3cc3ab63799 100644
--- a/drivers/net/ethernet/mellanox/mlx4/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/pd.c
@@ -269,9 +269,15 @@ EXPORT_SYMBOL_GPL(mlx4_bf_free);
269 269
270int mlx4_init_uar_table(struct mlx4_dev *dev) 270int mlx4_init_uar_table(struct mlx4_dev *dev)
271{ 271{
272 if (dev->caps.num_uars <= 128) { 272 int num_reserved_uar = mlx4_get_num_reserved_uar(dev);
273 mlx4_err(dev, "Only %d UAR pages (need more than 128)\n", 273
274 dev->caps.num_uars); 274 mlx4_dbg(dev, "uar_page_shift = %d", dev->uar_page_shift);
275 mlx4_dbg(dev, "Effective reserved_uars=%d", dev->caps.reserved_uars);
276
277 if (dev->caps.num_uars <= num_reserved_uar) {
278 mlx4_err(
279 dev, "Only %d UAR pages (need more than %d)\n",
280 dev->caps.num_uars, num_reserved_uar);
275 mlx4_err(dev, "Increase firmware log2_uar_bar_megabytes?\n"); 281 mlx4_err(dev, "Increase firmware log2_uar_bar_megabytes?\n");
276 return -ENODEV; 282 return -ENODEV;
277 } 283 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 787b7bb54d52..211c65087997 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -193,10 +193,10 @@ int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
193 if (need_mf_bond) { 193 if (need_mf_bond) {
194 if (port == 1) { 194 if (port == 1) {
195 mutex_lock(&table->mutex); 195 mutex_lock(&table->mutex);
196 mutex_lock(&dup_table->mutex); 196 mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
197 } else { 197 } else {
198 mutex_lock(&dup_table->mutex); 198 mutex_lock(&dup_table->mutex);
199 mutex_lock(&table->mutex); 199 mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
200 } 200 }
201 } else { 201 } else {
202 mutex_lock(&table->mutex); 202 mutex_lock(&table->mutex);
@@ -389,10 +389,10 @@ void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
389 if (dup) { 389 if (dup) {
390 if (port == 1) { 390 if (port == 1) {
391 mutex_lock(&table->mutex); 391 mutex_lock(&table->mutex);
392 mutex_lock(&dup_table->mutex); 392 mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
393 } else { 393 } else {
394 mutex_lock(&dup_table->mutex); 394 mutex_lock(&dup_table->mutex);
395 mutex_lock(&table->mutex); 395 mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
396 } 396 }
397 } else { 397 } else {
398 mutex_lock(&table->mutex); 398 mutex_lock(&table->mutex);
@@ -479,10 +479,10 @@ int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
479 if (dup) { 479 if (dup) {
480 if (port == 1) { 480 if (port == 1) {
481 mutex_lock(&table->mutex); 481 mutex_lock(&table->mutex);
482 mutex_lock(&dup_table->mutex); 482 mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
483 } else { 483 } else {
484 mutex_lock(&dup_table->mutex); 484 mutex_lock(&dup_table->mutex);
485 mutex_lock(&table->mutex); 485 mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
486 } 486 }
487 } else { 487 } else {
488 mutex_lock(&table->mutex); 488 mutex_lock(&table->mutex);
@@ -588,10 +588,10 @@ int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan,
588 if (need_mf_bond) { 588 if (need_mf_bond) {
589 if (port == 1) { 589 if (port == 1) {
590 mutex_lock(&table->mutex); 590 mutex_lock(&table->mutex);
591 mutex_lock(&dup_table->mutex); 591 mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
592 } else { 592 } else {
593 mutex_lock(&dup_table->mutex); 593 mutex_lock(&dup_table->mutex);
594 mutex_lock(&table->mutex); 594 mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
595 } 595 }
596 } else { 596 } else {
597 mutex_lock(&table->mutex); 597 mutex_lock(&table->mutex);
@@ -764,10 +764,10 @@ void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
764 if (dup) { 764 if (dup) {
765 if (port == 1) { 765 if (port == 1) {
766 mutex_lock(&table->mutex); 766 mutex_lock(&table->mutex);
767 mutex_lock(&dup_table->mutex); 767 mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
768 } else { 768 } else {
769 mutex_lock(&dup_table->mutex); 769 mutex_lock(&dup_table->mutex);
770 mutex_lock(&table->mutex); 770 mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
771 } 771 }
772 } else { 772 } else {
773 mutex_lock(&table->mutex); 773 mutex_lock(&table->mutex);
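The port.c hunks above acquire two mutexes that lockdep sees as the same lock class (a port's table and the bonded port's duplicate table); the ordering is fixed by the port number, so the second acquisition is annotated with mutex_lock_nested() and SINGLE_DEPTH_NESTING, presumably to keep lockdep from reporting a false deadlock. A minimal sketch of that annotation (the demo_* types are illustrative):

#include <linux/mutex.h>

struct demo_table {
	struct mutex mutex;
	/* ... MAC/VLAN entries ... */
};

static void demo_lock_pair(struct demo_table *table,
			   struct demo_table *dup_table, int port)
{
	/* lock order is derived from the port number so all callers agree
	 * on it; annotate the inner lock as an intentional nesting */
	if (port == 1) {
		mutex_lock(&table->mutex);
		mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
	} else {
		mutex_lock(&dup_table->mutex);
		mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
	}
}

static void demo_unlock_pair(struct demo_table *table,
			     struct demo_table *dup_table)
{
	mutex_unlock(&dup_table->mutex);
	mutex_unlock(&table->mutex);
}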
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index b46dbe29ef6c..25ce1b030a00 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -915,11 +915,13 @@ static int handle_existing_counter(struct mlx4_dev *dev, u8 slave, int port,
915 915
916 spin_lock_irq(mlx4_tlock(dev)); 916 spin_lock_irq(mlx4_tlock(dev));
917 r = find_res(dev, counter_index, RES_COUNTER); 917 r = find_res(dev, counter_index, RES_COUNTER);
918 if (!r || r->owner != slave) 918 if (!r || r->owner != slave) {
919 ret = -EINVAL; 919 ret = -EINVAL;
920 counter = container_of(r, struct res_counter, com); 920 } else {
921 if (!counter->port) 921 counter = container_of(r, struct res_counter, com);
922 counter->port = port; 922 if (!counter->port)
923 counter->port = port;
924 }
923 925
924 spin_unlock_irq(mlx4_tlock(dev)); 926 spin_unlock_irq(mlx4_tlock(dev));
925 return ret; 927 return ret;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index aac071a7e830..5b1753233c5d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -223,6 +223,7 @@ struct mlx5e_pport_stats {
223 223
224static const char rq_stats_strings[][ETH_GSTRING_LEN] = { 224static const char rq_stats_strings[][ETH_GSTRING_LEN] = {
225 "packets", 225 "packets",
226 "bytes",
226 "csum_none", 227 "csum_none",
227 "csum_sw", 228 "csum_sw",
228 "lro_packets", 229 "lro_packets",
@@ -232,16 +233,18 @@ static const char rq_stats_strings[][ETH_GSTRING_LEN] = {
232 233
233struct mlx5e_rq_stats { 234struct mlx5e_rq_stats {
234 u64 packets; 235 u64 packets;
236 u64 bytes;
235 u64 csum_none; 237 u64 csum_none;
236 u64 csum_sw; 238 u64 csum_sw;
237 u64 lro_packets; 239 u64 lro_packets;
238 u64 lro_bytes; 240 u64 lro_bytes;
239 u64 wqe_err; 241 u64 wqe_err;
240#define NUM_RQ_STATS 6 242#define NUM_RQ_STATS 7
241}; 243};
242 244
243static const char sq_stats_strings[][ETH_GSTRING_LEN] = { 245static const char sq_stats_strings[][ETH_GSTRING_LEN] = {
244 "packets", 246 "packets",
247 "bytes",
245 "tso_packets", 248 "tso_packets",
246 "tso_bytes", 249 "tso_bytes",
247 "csum_offload_none", 250 "csum_offload_none",
@@ -253,6 +256,7 @@ static const char sq_stats_strings[][ETH_GSTRING_LEN] = {
253 256
254struct mlx5e_sq_stats { 257struct mlx5e_sq_stats {
255 u64 packets; 258 u64 packets;
259 u64 bytes;
256 u64 tso_packets; 260 u64 tso_packets;
257 u64 tso_bytes; 261 u64 tso_bytes;
258 u64 csum_offload_none; 262 u64 csum_offload_none;
@@ -260,7 +264,7 @@ struct mlx5e_sq_stats {
260 u64 wake; 264 u64 wake;
261 u64 dropped; 265 u64 dropped;
262 u64 nop; 266 u64 nop;
263#define NUM_SQ_STATS 8 267#define NUM_SQ_STATS 9
264}; 268};
265 269
266struct mlx5e_stats { 270struct mlx5e_stats {
@@ -304,14 +308,9 @@ enum {
304 MLX5E_RQ_STATE_POST_WQES_ENABLE, 308 MLX5E_RQ_STATE_POST_WQES_ENABLE,
305}; 309};
306 310
307enum cq_flags {
308 MLX5E_CQ_HAS_CQES = 1,
309};
310
311struct mlx5e_cq { 311struct mlx5e_cq {
312 /* data path - accessed per cqe */ 312 /* data path - accessed per cqe */
313 struct mlx5_cqwq wq; 313 struct mlx5_cqwq wq;
314 unsigned long flags;
315 314
316 /* data path - accessed per napi poll */ 315 /* data path - accessed per napi poll */
317 struct napi_struct *napi; 316 struct napi_struct *napi;
@@ -452,6 +451,8 @@ enum mlx5e_traffic_types {
452 MLX5E_NUM_TT, 451 MLX5E_NUM_TT,
453}; 452};
454 453
454#define IS_HASHING_TT(tt) (tt != MLX5E_TT_ANY)
455
455enum mlx5e_rqt_ix { 456enum mlx5e_rqt_ix {
456 MLX5E_INDIRECTION_RQT, 457 MLX5E_INDIRECTION_RQT,
457 MLX5E_SINGLE_RQ_RQT, 458 MLX5E_SINGLE_RQ_RQT,
@@ -618,9 +619,12 @@ void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv);
618void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv); 619void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
619 620
620int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix); 621int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix);
622void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv);
621 623
622int mlx5e_open_locked(struct net_device *netdev); 624int mlx5e_open_locked(struct net_device *netdev);
623int mlx5e_close_locked(struct net_device *netdev); 625int mlx5e_close_locked(struct net_device *netdev);
626void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
627 int num_channels);
624 628
625static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq, 629static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
626 struct mlx5e_tx_wqe *wqe, int bf_sz) 630 struct mlx5e_tx_wqe *wqe, int bf_sz)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
index be6543570b2b..2018eebe1531 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
@@ -62,10 +62,11 @@ static void mlx5e_timestamp_overflow(struct work_struct *work)
62 struct delayed_work *dwork = to_delayed_work(work); 62 struct delayed_work *dwork = to_delayed_work(work);
63 struct mlx5e_tstamp *tstamp = container_of(dwork, struct mlx5e_tstamp, 63 struct mlx5e_tstamp *tstamp = container_of(dwork, struct mlx5e_tstamp,
64 overflow_work); 64 overflow_work);
65 unsigned long flags;
65 66
66 write_lock(&tstamp->lock); 67 write_lock_irqsave(&tstamp->lock, flags);
67 timecounter_read(&tstamp->clock); 68 timecounter_read(&tstamp->clock);
68 write_unlock(&tstamp->lock); 69 write_unlock_irqrestore(&tstamp->lock, flags);
69 schedule_delayed_work(&tstamp->overflow_work, tstamp->overflow_period); 70 schedule_delayed_work(&tstamp->overflow_work, tstamp->overflow_period);
70} 71}
71 72
@@ -136,10 +137,11 @@ static int mlx5e_ptp_settime(struct ptp_clock_info *ptp,
136 struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp, 137 struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
137 ptp_info); 138 ptp_info);
138 u64 ns = timespec64_to_ns(ts); 139 u64 ns = timespec64_to_ns(ts);
140 unsigned long flags;
139 141
140 write_lock(&tstamp->lock); 142 write_lock_irqsave(&tstamp->lock, flags);
141 timecounter_init(&tstamp->clock, &tstamp->cycles, ns); 143 timecounter_init(&tstamp->clock, &tstamp->cycles, ns);
142 write_unlock(&tstamp->lock); 144 write_unlock_irqrestore(&tstamp->lock, flags);
143 145
144 return 0; 146 return 0;
145} 147}
@@ -150,10 +152,11 @@ static int mlx5e_ptp_gettime(struct ptp_clock_info *ptp,
150 struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp, 152 struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
151 ptp_info); 153 ptp_info);
152 u64 ns; 154 u64 ns;
155 unsigned long flags;
153 156
154 write_lock(&tstamp->lock); 157 write_lock_irqsave(&tstamp->lock, flags);
155 ns = timecounter_read(&tstamp->clock); 158 ns = timecounter_read(&tstamp->clock);
156 write_unlock(&tstamp->lock); 159 write_unlock_irqrestore(&tstamp->lock, flags);
157 160
158 *ts = ns_to_timespec64(ns); 161 *ts = ns_to_timespec64(ns);
159 162
@@ -164,10 +167,11 @@ static int mlx5e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
164{ 167{
165 struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp, 168 struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
166 ptp_info); 169 ptp_info);
170 unsigned long flags;
167 171
168 write_lock(&tstamp->lock); 172 write_lock_irqsave(&tstamp->lock, flags);
169 timecounter_adjtime(&tstamp->clock, delta); 173 timecounter_adjtime(&tstamp->clock, delta);
170 write_unlock(&tstamp->lock); 174 write_unlock_irqrestore(&tstamp->lock, flags);
171 175
172 return 0; 176 return 0;
173} 177}
@@ -176,6 +180,7 @@ static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
176{ 180{
177 u64 adj; 181 u64 adj;
178 u32 diff; 182 u32 diff;
183 unsigned long flags;
179 int neg_adj = 0; 184 int neg_adj = 0;
180 struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp, 185 struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
181 ptp_info); 186 ptp_info);
@@ -189,11 +194,11 @@ static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
189 adj *= delta; 194 adj *= delta;
190 diff = div_u64(adj, 1000000000ULL); 195 diff = div_u64(adj, 1000000000ULL);
191 196
192 write_lock(&tstamp->lock); 197 write_lock_irqsave(&tstamp->lock, flags);
193 timecounter_read(&tstamp->clock); 198 timecounter_read(&tstamp->clock);
194 tstamp->cycles.mult = neg_adj ? tstamp->nominal_c_mult - diff : 199 tstamp->cycles.mult = neg_adj ? tstamp->nominal_c_mult - diff :
195 tstamp->nominal_c_mult + diff; 200 tstamp->nominal_c_mult + diff;
196 write_unlock(&tstamp->lock); 201 write_unlock_irqrestore(&tstamp->lock, flags);
197 202
198 return 0; 203 return 0;
199} 204}
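The en_clock.c hunks switch the timestamp rwlock to the _irqsave variants, presumably because the timecounter is now also consulted from interrupt context, so a plain write_lock() taken in process context could deadlock against a reader on the same CPU. The pattern in isolation (demo_* names are illustrative):

#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_tstamp {
	rwlock_t lock;		/* rwlock_init() at setup time */
	u64 last_ns;
};

/* process-context writer: saving and disabling IRQs prevents a reader in
 * interrupt context on this CPU from spinning on a lock we already hold */
static void demo_update(struct demo_tstamp *ts, u64 ns)
{
	unsigned long flags;

	write_lock_irqsave(&ts->lock, flags);
	ts->last_ns = ns;
	write_unlock_irqrestore(&ts->lock, flags);
}

/* reader that may run from the completion (interrupt) path */
static u64 demo_read(struct demo_tstamp *ts)
{
	unsigned long flags;
	u64 ns;

	read_lock_irqsave(&ts->lock, flags);
	ns = ts->last_ns;
	read_unlock_irqrestore(&ts->lock, flags);

	return ns;
}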
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 65624ac65b4c..5abeb00fceb8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -385,6 +385,8 @@ static int mlx5e_set_channels(struct net_device *dev,
385 mlx5e_close_locked(dev); 385 mlx5e_close_locked(dev);
386 386
387 priv->params.num_channels = count; 387 priv->params.num_channels = count;
388 mlx5e_build_default_indir_rqt(priv->params.indirection_rqt,
389 MLX5E_INDIR_RQT_SIZE, count);
388 390
389 if (was_opened) 391 if (was_opened)
390 err = mlx5e_open_locked(dev); 392 err = mlx5e_open_locked(dev);
@@ -703,18 +705,36 @@ static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
703 return 0; 705 return 0;
704} 706}
705 707
708static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
709{
710 struct mlx5_core_dev *mdev = priv->mdev;
711 void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
712 int i;
713
714 MLX5_SET(modify_tir_in, in, bitmask.hash, 1);
715 mlx5e_build_tir_ctx_hash(tirc, priv);
716
717 for (i = 0; i < MLX5E_NUM_TT; i++)
718 if (IS_HASHING_TT(i))
719 mlx5_core_modify_tir(mdev, priv->tirn[i], in, inlen);
720}
721
706static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, 722static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
707 const u8 *key, const u8 hfunc) 723 const u8 *key, const u8 hfunc)
708{ 724{
709 struct mlx5e_priv *priv = netdev_priv(dev); 725 struct mlx5e_priv *priv = netdev_priv(dev);
710 bool close_open; 726 int inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
711 int err = 0; 727 void *in;
712 728
713 if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && 729 if ((hfunc != ETH_RSS_HASH_NO_CHANGE) &&
714 (hfunc != ETH_RSS_HASH_XOR) && 730 (hfunc != ETH_RSS_HASH_XOR) &&
715 (hfunc != ETH_RSS_HASH_TOP)) 731 (hfunc != ETH_RSS_HASH_TOP))
716 return -EINVAL; 732 return -EINVAL;
717 733
734 in = mlx5_vzalloc(inlen);
735 if (!in)
736 return -ENOMEM;
737
718 mutex_lock(&priv->state_lock); 738 mutex_lock(&priv->state_lock);
719 739
720 if (indir) { 740 if (indir) {
@@ -723,11 +743,6 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
723 mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT); 743 mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT);
724 } 744 }
725 745
726 close_open = (key || (hfunc != ETH_RSS_HASH_NO_CHANGE)) &&
727 test_bit(MLX5E_STATE_OPENED, &priv->state);
728 if (close_open)
729 mlx5e_close_locked(dev);
730
731 if (key) 746 if (key)
732 memcpy(priv->params.toeplitz_hash_key, key, 747 memcpy(priv->params.toeplitz_hash_key, key,
733 sizeof(priv->params.toeplitz_hash_key)); 748 sizeof(priv->params.toeplitz_hash_key));
@@ -735,12 +750,13 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
735 if (hfunc != ETH_RSS_HASH_NO_CHANGE) 750 if (hfunc != ETH_RSS_HASH_NO_CHANGE)
736 priv->params.rss_hfunc = hfunc; 751 priv->params.rss_hfunc = hfunc;
737 752
738 if (close_open) 753 mlx5e_modify_tirs_hash(priv, in, inlen);
739 err = mlx5e_open_locked(priv->netdev);
740 754
741 mutex_unlock(&priv->state_lock); 755 mutex_unlock(&priv->state_lock);
742 756
743 return err; 757 kvfree(in);
758
759 return 0;
744} 760}
745 761
746static int mlx5e_get_rxnfc(struct net_device *netdev, 762static int mlx5e_get_rxnfc(struct net_device *netdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 6a3e430f1062..402994bf7e16 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -141,6 +141,10 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
141 return; 141 return;
142 142
143 /* Collect first the SW counters and then HW for consistency */ 143 /* Collect first the SW counters and then HW for consistency */
144 s->rx_packets = 0;
145 s->rx_bytes = 0;
146 s->tx_packets = 0;
147 s->tx_bytes = 0;
144 s->tso_packets = 0; 148 s->tso_packets = 0;
145 s->tso_bytes = 0; 149 s->tso_bytes = 0;
146 s->tx_queue_stopped = 0; 150 s->tx_queue_stopped = 0;
@@ -155,6 +159,8 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
155 for (i = 0; i < priv->params.num_channels; i++) { 159 for (i = 0; i < priv->params.num_channels; i++) {
156 rq_stats = &priv->channel[i]->rq.stats; 160 rq_stats = &priv->channel[i]->rq.stats;
157 161
162 s->rx_packets += rq_stats->packets;
163 s->rx_bytes += rq_stats->bytes;
158 s->lro_packets += rq_stats->lro_packets; 164 s->lro_packets += rq_stats->lro_packets;
159 s->lro_bytes += rq_stats->lro_bytes; 165 s->lro_bytes += rq_stats->lro_bytes;
160 s->rx_csum_none += rq_stats->csum_none; 166 s->rx_csum_none += rq_stats->csum_none;
@@ -164,6 +170,8 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
164 for (j = 0; j < priv->params.num_tc; j++) { 170 for (j = 0; j < priv->params.num_tc; j++) {
165 sq_stats = &priv->channel[i]->sq[j].stats; 171 sq_stats = &priv->channel[i]->sq[j].stats;
166 172
173 s->tx_packets += sq_stats->packets;
174 s->tx_bytes += sq_stats->bytes;
167 s->tso_packets += sq_stats->tso_packets; 175 s->tso_packets += sq_stats->tso_packets;
168 s->tso_bytes += sq_stats->tso_bytes; 176 s->tso_bytes += sq_stats->tso_bytes;
169 s->tx_queue_stopped += sq_stats->stopped; 177 s->tx_queue_stopped += sq_stats->stopped;
@@ -225,23 +233,6 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
225 s->tx_broadcast_bytes = 233 s->tx_broadcast_bytes =
226 MLX5_GET_CTR(out, transmitted_eth_broadcast.octets); 234 MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
227 235
228 s->rx_packets =
229 s->rx_unicast_packets +
230 s->rx_multicast_packets +
231 s->rx_broadcast_packets;
232 s->rx_bytes =
233 s->rx_unicast_bytes +
234 s->rx_multicast_bytes +
235 s->rx_broadcast_bytes;
236 s->tx_packets =
237 s->tx_unicast_packets +
238 s->tx_multicast_packets +
239 s->tx_broadcast_packets;
240 s->tx_bytes =
241 s->tx_unicast_bytes +
242 s->tx_multicast_bytes +
243 s->tx_broadcast_bytes;
244
245 /* Update calculated offload counters */ 236 /* Update calculated offload counters */
246 s->tx_csum_offload = s->tx_packets - tx_offload_none; 237 s->tx_csum_offload = s->tx_packets - tx_offload_none;
247 s->rx_csum_good = s->rx_packets - s->rx_csum_none - 238 s->rx_csum_good = s->rx_packets - s->rx_csum_none -
@@ -1199,7 +1190,6 @@ static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv *priv, void *rqtc)
1199 ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE); 1190 ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE);
1200 1191
1201 ix = priv->params.indirection_rqt[ix]; 1192 ix = priv->params.indirection_rqt[ix];
1202 ix = ix % priv->params.num_channels;
1203 MLX5_SET(rqtc, rqtc, rq_num[i], 1193 MLX5_SET(rqtc, rqtc, rq_num[i],
1204 test_bit(MLX5E_STATE_OPENED, &priv->state) ? 1194 test_bit(MLX5E_STATE_OPENED, &priv->state) ?
1205 priv->channel[ix]->rq.rqn : 1195 priv->channel[ix]->rq.rqn :
@@ -1317,7 +1307,22 @@ static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
1317 lro_timer_supported_periods[2])); 1307 lro_timer_supported_periods[2]));
1318} 1308}
1319 1309
1320static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt) 1310void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
1311{
1312 MLX5_SET(tirc, tirc, rx_hash_fn,
1313 mlx5e_rx_hash_fn(priv->params.rss_hfunc));
1314 if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
1315 void *rss_key = MLX5_ADDR_OF(tirc, tirc,
1316 rx_hash_toeplitz_key);
1317 size_t len = MLX5_FLD_SZ_BYTES(tirc,
1318 rx_hash_toeplitz_key);
1319
1320 MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
1321 memcpy(rss_key, priv->params.toeplitz_hash_key, len);
1322 }
1323}
1324
1325static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
1321{ 1326{
1322 struct mlx5_core_dev *mdev = priv->mdev; 1327 struct mlx5_core_dev *mdev = priv->mdev;
1323 1328
@@ -1325,6 +1330,7 @@ static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt)
1325 void *tirc; 1330 void *tirc;
1326 int inlen; 1331 int inlen;
1327 int err; 1332 int err;
1333 int tt;
1328 1334
1329 inlen = MLX5_ST_SZ_BYTES(modify_tir_in); 1335 inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
1330 in = mlx5_vzalloc(inlen); 1336 in = mlx5_vzalloc(inlen);
@@ -1336,7 +1342,11 @@ static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt)
1336 1342
1337 mlx5e_build_tir_ctx_lro(tirc, priv); 1343 mlx5e_build_tir_ctx_lro(tirc, priv);
1338 1344
1339 err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen); 1345 for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
1346 err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen);
1347 if (err)
1348 break;
1349 }
1340 1350
1341 kvfree(in); 1351 kvfree(in);
1342 1352
@@ -1672,17 +1682,7 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
1672 default: 1682 default:
1673 MLX5_SET(tirc, tirc, indirect_table, 1683 MLX5_SET(tirc, tirc, indirect_table,
1674 priv->rqtn[MLX5E_INDIRECTION_RQT]); 1684 priv->rqtn[MLX5E_INDIRECTION_RQT]);
1675 MLX5_SET(tirc, tirc, rx_hash_fn, 1685 mlx5e_build_tir_ctx_hash(tirc, priv);
1676 mlx5e_rx_hash_fn(priv->params.rss_hfunc));
1677 if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
1678 void *rss_key = MLX5_ADDR_OF(tirc, tirc,
1679 rx_hash_toeplitz_key);
1680 size_t len = MLX5_FLD_SZ_BYTES(tirc,
1681 rx_hash_toeplitz_key);
1682
1683 MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
1684 memcpy(rss_key, priv->params.toeplitz_hash_key, len);
1685 }
1686 break; 1686 break;
1687 } 1687 }
1688 1688
@@ -1885,8 +1885,10 @@ static int mlx5e_set_features(struct net_device *netdev,
1885 mlx5e_close_locked(priv->netdev); 1885 mlx5e_close_locked(priv->netdev);
1886 1886
1887 priv->params.lro_en = !!(features & NETIF_F_LRO); 1887 priv->params.lro_en = !!(features & NETIF_F_LRO);
1888 mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV4_TCP); 1888 err = mlx5e_modify_tirs_lro(priv);
1889 mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV6_TCP); 1889 if (err)
1890 mlx5_core_warn(priv->mdev, "lro modify failed, %d\n",
1891 err);
1890 1892
1891 if (was_opened) 1893 if (was_opened)
1892 err = mlx5e_open_locked(priv->netdev); 1894 err = mlx5e_open_locked(priv->netdev);
@@ -2024,18 +2026,37 @@ static int mlx5e_get_vf_stats(struct net_device *dev,
2024 vf_stats); 2026 vf_stats);
2025} 2027}
2026 2028
2027static struct net_device_ops mlx5e_netdev_ops = { 2029static const struct net_device_ops mlx5e_netdev_ops_basic = {
2030 .ndo_open = mlx5e_open,
2031 .ndo_stop = mlx5e_close,
2032 .ndo_start_xmit = mlx5e_xmit,
2033 .ndo_get_stats64 = mlx5e_get_stats,
2034 .ndo_set_rx_mode = mlx5e_set_rx_mode,
2035 .ndo_set_mac_address = mlx5e_set_mac,
2036 .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid,
2037 .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid,
2038 .ndo_set_features = mlx5e_set_features,
2039 .ndo_change_mtu = mlx5e_change_mtu,
2040 .ndo_do_ioctl = mlx5e_ioctl,
2041};
2042
2043static const struct net_device_ops mlx5e_netdev_ops_sriov = {
2028 .ndo_open = mlx5e_open, 2044 .ndo_open = mlx5e_open,
2029 .ndo_stop = mlx5e_close, 2045 .ndo_stop = mlx5e_close,
2030 .ndo_start_xmit = mlx5e_xmit, 2046 .ndo_start_xmit = mlx5e_xmit,
2031 .ndo_get_stats64 = mlx5e_get_stats, 2047 .ndo_get_stats64 = mlx5e_get_stats,
2032 .ndo_set_rx_mode = mlx5e_set_rx_mode, 2048 .ndo_set_rx_mode = mlx5e_set_rx_mode,
2033 .ndo_set_mac_address = mlx5e_set_mac, 2049 .ndo_set_mac_address = mlx5e_set_mac,
2034 .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid, 2050 .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid,
2035 .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid, 2051 .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid,
2036 .ndo_set_features = mlx5e_set_features, 2052 .ndo_set_features = mlx5e_set_features,
2037 .ndo_change_mtu = mlx5e_change_mtu, 2053 .ndo_change_mtu = mlx5e_change_mtu,
2038 .ndo_do_ioctl = mlx5e_ioctl, 2054 .ndo_do_ioctl = mlx5e_ioctl,
2055 .ndo_set_vf_mac = mlx5e_set_vf_mac,
2056 .ndo_set_vf_vlan = mlx5e_set_vf_vlan,
2057 .ndo_get_vf_config = mlx5e_get_vf_config,
2058 .ndo_set_vf_link_state = mlx5e_set_vf_link_state,
2059 .ndo_get_vf_stats = mlx5e_get_vf_stats,
2039}; 2060};
2040 2061
2041static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev) 2062static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
@@ -2070,12 +2091,20 @@ u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
2070 2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/; 2091 2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
2071} 2092}
2072 2093
2094void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
2095 int num_channels)
2096{
2097 int i;
2098
2099 for (i = 0; i < len; i++)
2100 indirection_rqt[i] = i % num_channels;
2101}
2102
2073static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev, 2103static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
2074 struct net_device *netdev, 2104 struct net_device *netdev,
2075 int num_channels) 2105 int num_channels)
2076{ 2106{
2077 struct mlx5e_priv *priv = netdev_priv(netdev); 2107 struct mlx5e_priv *priv = netdev_priv(netdev);
2078 int i;
2079 2108
2080 priv->params.log_sq_size = 2109 priv->params.log_sq_size =
2081 MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; 2110 MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
@@ -2099,8 +2128,8 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
2099 netdev_rss_key_fill(priv->params.toeplitz_hash_key, 2128 netdev_rss_key_fill(priv->params.toeplitz_hash_key,
2100 sizeof(priv->params.toeplitz_hash_key)); 2129 sizeof(priv->params.toeplitz_hash_key));
2101 2130
2102 for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) 2131 mlx5e_build_default_indir_rqt(priv->params.indirection_rqt,
2103 priv->params.indirection_rqt[i] = i % num_channels; 2132 MLX5E_INDIR_RQT_SIZE, num_channels);
2104 2133
2105 priv->params.lro_wqe_sz = 2134 priv->params.lro_wqe_sz =
2106 MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; 2135 MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
@@ -2137,18 +2166,11 @@ static void mlx5e_build_netdev(struct net_device *netdev)
2137 2166
2138 SET_NETDEV_DEV(netdev, &mdev->pdev->dev); 2167 SET_NETDEV_DEV(netdev, &mdev->pdev->dev);
2139 2168
2140 if (priv->params.num_tc > 1) 2169 if (MLX5_CAP_GEN(mdev, vport_group_manager))
2141 mlx5e_netdev_ops.ndo_select_queue = mlx5e_select_queue; 2170 netdev->netdev_ops = &mlx5e_netdev_ops_sriov;
2142 2171 else
2143 if (MLX5_CAP_GEN(mdev, vport_group_manager)) { 2172 netdev->netdev_ops = &mlx5e_netdev_ops_basic;
2144 mlx5e_netdev_ops.ndo_set_vf_mac = mlx5e_set_vf_mac;
2145 mlx5e_netdev_ops.ndo_set_vf_vlan = mlx5e_set_vf_vlan;
2146 mlx5e_netdev_ops.ndo_get_vf_config = mlx5e_get_vf_config;
2147 mlx5e_netdev_ops.ndo_set_vf_link_state = mlx5e_set_vf_link_state;
2148 mlx5e_netdev_ops.ndo_get_vf_stats = mlx5e_get_vf_stats;
2149 }
2150 2173
2151 netdev->netdev_ops = &mlx5e_netdev_ops;
2152 netdev->watchdog_timeo = 15 * HZ; 2174 netdev->watchdog_timeo = 15 * HZ;
2153 2175
2154 netdev->ethtool_ops = &mlx5e_ethtool_ops; 2176 netdev->ethtool_ops = &mlx5e_ethtool_ops;
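
A minimal standalone sketch of the two ideas the en_main.c hunks adopt — selecting one of two const ops tables per device instead of patching a shared mutable one at probe time, and filling the indirection table round-robin over the channels. Names here (ops_basic, ops_sriov, dev_ctx) are illustrative, not the driver's; this is plain C, not kernel code:

/* Sketch only: per-device const ops selection plus a round-robin
 * indirection fill, mirroring the approach in the hunks above. */
#include <stdio.h>

struct ops {
	int (*open)(void);
};

static int open_basic(void) { return 0; }
static int open_sriov(void) { return 0; }

static const struct ops ops_basic = { .open = open_basic };
static const struct ops ops_sriov = { .open = open_sriov };

struct dev_ctx {
	const struct ops *ops;		/* per-device pointer, no global mutation */
	unsigned int indir[8];
};

static void dev_setup(struct dev_ctx *ctx, int has_sriov, int num_channels)
{
	int i;

	ctx->ops = has_sriov ? &ops_sriov : &ops_basic;
	for (i = 0; i < 8; i++)
		ctx->indir[i] = i % num_channels;	/* spread entries over channels */
}

int main(void)
{
	struct dev_ctx ctx;
	int i;

	dev_setup(&ctx, 0, 3);
	for (i = 0; i < 8; i++)
		printf("rqt[%d] -> channel %u\n", i, ctx.indir[i]);
	return ctx.ops->open();
}

Keeping the ops tables const also means one mlx5e device can no longer change behaviour for every other device on the host, which the old runtime assignment of .ndo_select_queue and the VF callbacks allowed.
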
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index dd959d929aad..59658b9d05d1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -230,10 +230,6 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
230 struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq); 230 struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
231 int work_done; 231 int work_done;
232 232
233 /* avoid accessing cq (dma coherent memory) if not needed */
234 if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags))
235 return 0;
236
237 for (work_done = 0; work_done < budget; work_done++) { 233 for (work_done = 0; work_done < budget; work_done++) {
238 struct mlx5e_rx_wqe *wqe; 234 struct mlx5e_rx_wqe *wqe;
239 struct mlx5_cqe64 *cqe; 235 struct mlx5_cqe64 *cqe;
@@ -267,6 +263,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
267 263
268 mlx5e_build_rx_skb(cqe, rq, skb); 264 mlx5e_build_rx_skb(cqe, rq, skb);
269 rq->stats.packets++; 265 rq->stats.packets++;
266 rq->stats.bytes += be32_to_cpu(cqe->byte_cnt);
270 napi_gro_receive(cq->napi, skb); 267 napi_gro_receive(cq->napi, skb);
271 268
272wq_ll_pop: 269wq_ll_pop:
@@ -279,8 +276,5 @@ wq_ll_pop:
279 /* ensure cq space is freed before enabling more cqes */ 276 /* ensure cq space is freed before enabling more cqes */
280 wmb(); 277 wmb();
281 278
282 if (work_done == budget)
283 set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
284
285 return work_done; 279 return work_done;
286} 280}
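
With the MLX5E_CQ_HAS_CQES gate removed, the RX poll path leans entirely on the standard NAPI contract: report fewer completions than the budget only when the ring is drained and the interrupt can be re-armed. A rough skeleton of that contract follows; process_ring() and rearm_irq() are stand-in names, napi_complete() is the real kernel API, and this is not the driver's code:

/* Rough NAPI poll skeleton (stand-in names): returning less than the
 * budget signals "drained", so that is the only point at which the
 * poller leaves polling mode and lets the hardware interrupt again. */
static int example_poll(struct napi_struct *napi, int budget)
{
	int done = process_ring(napi, budget);	/* walk CQEs, up to budget */

	if (done < budget) {
		napi_complete(napi);		/* leave NAPI polling mode */
		rearm_irq(napi);		/* completion IRQ may fire again */
	}
	return done;
}
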
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 2c3fba0fff54..bb4eeeb007de 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -179,6 +179,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
179 unsigned int skb_len = skb->len; 179 unsigned int skb_len = skb->len;
180 u8 opcode = MLX5_OPCODE_SEND; 180 u8 opcode = MLX5_OPCODE_SEND;
181 dma_addr_t dma_addr = 0; 181 dma_addr_t dma_addr = 0;
182 unsigned int num_bytes;
182 bool bf = false; 183 bool bf = false;
183 u16 headlen; 184 u16 headlen;
184 u16 ds_cnt; 185 u16 ds_cnt;
@@ -204,8 +205,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
204 opcode = MLX5_OPCODE_LSO; 205 opcode = MLX5_OPCODE_LSO;
205 ihs = skb_transport_offset(skb) + tcp_hdrlen(skb); 206 ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
206 payload_len = skb->len - ihs; 207 payload_len = skb->len - ihs;
207 wi->num_bytes = skb->len + 208 num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
208 (skb_shinfo(skb)->gso_segs - 1) * ihs;
209 sq->stats.tso_packets++; 209 sq->stats.tso_packets++;
210 sq->stats.tso_bytes += payload_len; 210 sq->stats.tso_bytes += payload_len;
211 } else { 211 } else {
@@ -213,9 +213,11 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
213 !skb->xmit_more && 213 !skb->xmit_more &&
214 !skb_shinfo(skb)->nr_frags; 214 !skb_shinfo(skb)->nr_frags;
215 ihs = mlx5e_get_inline_hdr_size(sq, skb, bf); 215 ihs = mlx5e_get_inline_hdr_size(sq, skb, bf);
216 wi->num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN); 216 num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
217 } 217 }
218 218
219 wi->num_bytes = num_bytes;
220
219 if (skb_vlan_tag_present(skb)) { 221 if (skb_vlan_tag_present(skb)) {
220 mlx5e_insert_vlan(eseg->inline_hdr_start, skb, ihs, &skb_data, 222 mlx5e_insert_vlan(eseg->inline_hdr_start, skb, ihs, &skb_data,
221 &skb_len); 223 &skb_len);
@@ -307,6 +309,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
307 sq->bf_budget = bf ? sq->bf_budget - 1 : 0; 309 sq->bf_budget = bf ? sq->bf_budget - 1 : 0;
308 310
309 sq->stats.packets++; 311 sq->stats.packets++;
312 sq->stats.bytes += num_bytes;
310 return NETDEV_TX_OK; 313 return NETDEV_TX_OK;
311 314
312dma_unmap_wqe_err: 315dma_unmap_wqe_err:
@@ -335,10 +338,6 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
335 u16 sqcc; 338 u16 sqcc;
336 int i; 339 int i;
337 340
338 /* avoid accessing cq (dma coherent memory) if not needed */
339 if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags))
340 return false;
341
342 sq = container_of(cq, struct mlx5e_sq, cq); 341 sq = container_of(cq, struct mlx5e_sq, cq);
343 342
344 npkts = 0; 343 npkts = 0;
@@ -422,10 +421,6 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
422 netif_tx_wake_queue(sq->txq); 421 netif_tx_wake_queue(sq->txq);
423 sq->stats.wake++; 422 sq->stats.wake++;
424 } 423 }
425 if (i == MLX5E_TX_CQ_POLL_BUDGET) {
426 set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
427 return true;
428 }
429 424
430 return false; 425 return (i == MLX5E_TX_CQ_POLL_BUDGET);
431} 426}
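
The num_bytes computation introduced above counts the bytes that will actually reach the wire for an LSO frame: the headers (ihs) are replicated once per generated segment, so skb->len is topped up by (gso_segs - 1) * ihs. A small standalone check of that arithmetic, using made-up header and MSS sizes:

/* Standalone check of the LSO byte accounting; the header size and MSS
 * below are example values, not taken from the driver. */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int ihs = 66;		/* e.g. 14 ETH + 20 IP + 32 TCP */
	unsigned int mss = 1460;
	unsigned int gso_segs = 2;
	unsigned int skb_len = ihs + gso_segs * mss;	/* one header copy + all payload */

	unsigned int num_bytes = skb_len + (gso_segs - 1) * ihs;

	/* Every transmitted segment carries its own copy of the headers. */
	assert(num_bytes == gso_segs * (ihs + mss));
	printf("wire bytes = %u\n", num_bytes);
	return 0;
}
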
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index 4ac8d716dbdd..66d51a77609e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -88,7 +88,6 @@ void mlx5e_completion_event(struct mlx5_core_cq *mcq)
88{ 88{
89 struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq); 89 struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
90 90
91 set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
92 set_bit(MLX5E_CHANNEL_NAPI_SCHED, &cq->channel->flags); 91 set_bit(MLX5E_CHANNEL_NAPI_SCHED, &cq->channel->flags);
93 barrier(); 92 barrier();
94 napi_schedule(cq->napi); 93 napi_schedule(cq->napi);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index c071077aafbd..7992c553c1f5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -215,7 +215,7 @@ mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q)
215{ 215{
216 int index = q->producer_counter & (q->count - 1); 216 int index = q->producer_counter & (q->count - 1);
217 217
218 if ((q->producer_counter - q->consumer_counter) == q->count) 218 if ((u16) (q->producer_counter - q->consumer_counter) == q->count)
219 return NULL; 219 return NULL;
220 return mlxsw_pci_queue_elem_info_get(q, index); 220 return mlxsw_pci_queue_elem_info_get(q, index);
221} 221}
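
The added (u16) cast matters because the producer and consumer counters are free-running 16-bit values (as the cast implies): once the producer wraps past zero, the un-cast subtraction is performed in int, goes negative, and can never equal q->count, so a full queue is treated as having space. Truncating the difference back to u16 restores modulo-2^16 occupancy. A standalone illustration with made-up counter values:

/* Why the (u16) cast is needed after the producer counter wraps. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t producer = 58;		/* wrapped past 65535 */
	uint16_t consumer = 65530;
	uint16_t count = 64;		/* ring size */

	int raw = producer - consumer;				/* int arithmetic: -65472 */
	uint16_t wrapped = (uint16_t)(producer - consumer);	/* 64: queue is full */

	printf("raw=%d wrapped=%u full=%s\n",
	       raw, (unsigned int)wrapped, wrapped == count ? "yes" : "no");
	return 0;
}
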
diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h
index 726f5435b32f..ae65b9940aed 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/port.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/port.h
@@ -49,7 +49,7 @@
49#define MLXSW_PORT_MID 0xd000 49#define MLXSW_PORT_MID 0xd000
50 50
51#define MLXSW_PORT_MAX_PHY_PORTS 0x40 51#define MLXSW_PORT_MAX_PHY_PORTS 0x40
52#define MLXSW_PORT_MAX_PORTS MLXSW_PORT_MAX_PHY_PORTS 52#define MLXSW_PORT_MAX_PORTS (MLXSW_PORT_MAX_PHY_PORTS + 1)
53 53
54#define MLXSW_PORT_DEVID_BITS_OFFSET 10 54#define MLXSW_PORT_DEVID_BITS_OFFSET 10
55#define MLXSW_PORT_PHY_BITS_OFFSET 4 55#define MLXSW_PORT_PHY_BITS_OFFSET 4
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 0c5237264e3e..ffe4c0305733 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -873,6 +873,62 @@ static inline void mlxsw_reg_spvm_pack(char *payload, u8 local_port,
873 } 873 }
874} 874}
875 875
876/* SPAFT - Switch Port Acceptable Frame Types
877 * ------------------------------------------
878 * The Switch Port Acceptable Frame Types register configures the frame
879 * admittance of the port.
880 */
881#define MLXSW_REG_SPAFT_ID 0x2010
882#define MLXSW_REG_SPAFT_LEN 0x08
883
884static const struct mlxsw_reg_info mlxsw_reg_spaft = {
885 .id = MLXSW_REG_SPAFT_ID,
886 .len = MLXSW_REG_SPAFT_LEN,
887};
888
889/* reg_spaft_local_port
890 * Local port number.
891 * Access: Index
892 *
893 * Note: CPU port is not supported (all tag types are allowed).
894 */
895MLXSW_ITEM32(reg, spaft, local_port, 0x00, 16, 8);
896
897/* reg_spaft_sub_port
898 * Virtual port within the physical port.
899 * Should be set to 0 when virtual ports are not enabled on the port.
900 * Access: RW
901 */
902MLXSW_ITEM32(reg, spaft, sub_port, 0x00, 8, 8);
903
904/* reg_spaft_allow_untagged
905 * When set, untagged frames on the ingress are allowed (default).
906 * Access: RW
907 */
908MLXSW_ITEM32(reg, spaft, allow_untagged, 0x04, 31, 1);
909
910/* reg_spaft_allow_prio_tagged
911 * When set, priority tagged frames on the ingress are allowed (default).
912 * Access: RW
913 */
914MLXSW_ITEM32(reg, spaft, allow_prio_tagged, 0x04, 30, 1);
915
916/* reg_spaft_allow_tagged
917 * When set, tagged frames on the ingress are allowed (default).
918 * Access: RW
919 */
920MLXSW_ITEM32(reg, spaft, allow_tagged, 0x04, 29, 1);
921
922static inline void mlxsw_reg_spaft_pack(char *payload, u8 local_port,
923 bool allow_untagged)
924{
925 MLXSW_REG_ZERO(spaft, payload);
926 mlxsw_reg_spaft_local_port_set(payload, local_port);
927 mlxsw_reg_spaft_allow_untagged_set(payload, allow_untagged);
928 mlxsw_reg_spaft_allow_prio_tagged_set(payload, true);
929 mlxsw_reg_spaft_allow_tagged_set(payload, true);
930}
931
876/* SFGC - Switch Flooding Group Configuration 932/* SFGC - Switch Flooding Group Configuration
877 * ------------------------------------------ 933 * ------------------------------------------
878 * The following register controls the association of flooding tables and MIDs 934 * The following register controls the association of flooding tables and MIDs
@@ -1044,6 +1100,92 @@ static inline void mlxsw_reg_sftr_pack(char *payload,
1044 mlxsw_reg_sftr_port_mask_set(payload, port, 1); 1100 mlxsw_reg_sftr_port_mask_set(payload, port, 1);
1045} 1101}
1046 1102
1103/* SFDF - Switch Filtering DB Flush
1104 * --------------------------------
1105 * The switch filtering DB flush register is used to flush the FDB.
1106 * Note that FDB notifications are flushed as well.
1107 */
1108#define MLXSW_REG_SFDF_ID 0x2013
1109#define MLXSW_REG_SFDF_LEN 0x14
1110
1111static const struct mlxsw_reg_info mlxsw_reg_sfdf = {
1112 .id = MLXSW_REG_SFDF_ID,
1113 .len = MLXSW_REG_SFDF_LEN,
1114};
1115
1116/* reg_sfdf_swid
1117 * Switch partition ID.
1118 * Access: Index
1119 */
1120MLXSW_ITEM32(reg, sfdf, swid, 0x00, 24, 8);
1121
1122enum mlxsw_reg_sfdf_flush_type {
1123 MLXSW_REG_SFDF_FLUSH_PER_SWID,
1124 MLXSW_REG_SFDF_FLUSH_PER_FID,
1125 MLXSW_REG_SFDF_FLUSH_PER_PORT,
1126 MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID,
1127 MLXSW_REG_SFDF_FLUSH_PER_LAG,
1128 MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID,
1129};
1130
1131/* reg_sfdf_flush_type
1132 * Flush type.
1133 * 0 - All SWID dynamic entries are flushed.
1134 * 1 - All FID dynamic entries are flushed.
1135 * 2 - All dynamic entries pointing to port are flushed.
1136 * 3 - All FID dynamic entries pointing to port are flushed.
1137 * 4 - All dynamic entries pointing to LAG are flushed.
1138 * 5 - All FID dynamic entries pointing to LAG are flushed.
1139 * Access: RW
1140 */
1141MLXSW_ITEM32(reg, sfdf, flush_type, 0x04, 28, 4);
1142
1143/* reg_sfdf_flush_static
1144 * Static.
1145 * 0 - Flush only dynamic entries.
1146 * 1 - Flush both dynamic and static entries.
1147 * Access: RW
1148 */
1149MLXSW_ITEM32(reg, sfdf, flush_static, 0x04, 24, 1);
1150
1151static inline void mlxsw_reg_sfdf_pack(char *payload,
1152 enum mlxsw_reg_sfdf_flush_type type)
1153{
1154 MLXSW_REG_ZERO(sfdf, payload);
1155 mlxsw_reg_sfdf_flush_type_set(payload, type);
1156 mlxsw_reg_sfdf_flush_static_set(payload, true);
1157}
1158
1159/* reg_sfdf_fid
1160 * FID to flush.
1161 * Access: RW
1162 */
1163MLXSW_ITEM32(reg, sfdf, fid, 0x0C, 0, 16);
1164
1165/* reg_sfdf_system_port
1166 * Port to flush.
1167 * Access: RW
1168 */
1169MLXSW_ITEM32(reg, sfdf, system_port, 0x0C, 0, 16);
1170
1171/* reg_sfdf_port_fid_system_port
1172 * Port to flush, pointed to by FID.
1173 * Access: RW
1174 */
1175MLXSW_ITEM32(reg, sfdf, port_fid_system_port, 0x08, 0, 16);
1176
1177/* reg_sfdf_lag_id
1178 * LAG ID to flush.
1179 * Access: RW
1180 */
1181MLXSW_ITEM32(reg, sfdf, lag_id, 0x0C, 0, 10);
1182
1183/* reg_sfdf_lag_fid_lag_id
1184 * LAG ID to flush, pointed to by FID.
1185 * Access: RW
1186 */
1187MLXSW_ITEM32(reg, sfdf, lag_fid_lag_id, 0x08, 0, 10);
1188
1047/* SLDR - Switch LAG Descriptor Register 1189/* SLDR - Switch LAG Descriptor Register
1048 * ----------------------------------------- 1190 * -----------------------------------------
1049 * The switch LAG descriptor register is populated by LAG descriptors. 1191 * The switch LAG descriptor register is populated by LAG descriptors.
@@ -1701,20 +1843,20 @@ MLXSW_ITEM32(reg, pmlp, width, 0x00, 0, 8);
1701 * Module number. 1843 * Module number.
1702 * Access: RW 1844 * Access: RW
1703 */ 1845 */
1704MLXSW_ITEM32_INDEXED(reg, pmlp, module, 0x04, 0, 8, 0x04, 0, false); 1846MLXSW_ITEM32_INDEXED(reg, pmlp, module, 0x04, 0, 8, 0x04, 0x00, false);
1705 1847
1706/* reg_pmlp_tx_lane 1848/* reg_pmlp_tx_lane
1707 * Tx Lane. When rxtx field is cleared, this field is used for Rx as well. 1849 * Tx Lane. When rxtx field is cleared, this field is used for Rx as well.
1708 * Access: RW 1850 * Access: RW
1709 */ 1851 */
1710MLXSW_ITEM32_INDEXED(reg, pmlp, tx_lane, 0x04, 16, 2, 0x04, 16, false); 1852MLXSW_ITEM32_INDEXED(reg, pmlp, tx_lane, 0x04, 16, 2, 0x04, 0x00, false);
1711 1853
1712/* reg_pmlp_rx_lane 1854/* reg_pmlp_rx_lane
1713 * Rx Lane. When rxtx field is cleared, this field is ignored and Rx lane is 1855 * Rx Lane. When rxtx field is cleared, this field is ignored and Rx lane is
1714 * equal to Tx lane. 1856 * equal to Tx lane.
1715 * Access: RW 1857 * Access: RW
1716 */ 1858 */
1717MLXSW_ITEM32_INDEXED(reg, pmlp, rx_lane, 0x04, 24, 2, 0x04, 24, false); 1859MLXSW_ITEM32_INDEXED(reg, pmlp, rx_lane, 0x04, 24, 2, 0x04, 0x00, false);
1718 1860
1719static inline void mlxsw_reg_pmlp_pack(char *payload, u8 local_port) 1861static inline void mlxsw_reg_pmlp_pack(char *payload, u8 local_port)
1720{ 1862{
@@ -3117,10 +3259,14 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id)
3117 return "SPVID"; 3259 return "SPVID";
3118 case MLXSW_REG_SPVM_ID: 3260 case MLXSW_REG_SPVM_ID:
3119 return "SPVM"; 3261 return "SPVM";
3262 case MLXSW_REG_SPAFT_ID:
3263 return "SPAFT";
3120 case MLXSW_REG_SFGC_ID: 3264 case MLXSW_REG_SFGC_ID:
3121 return "SFGC"; 3265 return "SFGC";
3122 case MLXSW_REG_SFTR_ID: 3266 case MLXSW_REG_SFTR_ID:
3123 return "SFTR"; 3267 return "SFTR";
3268 case MLXSW_REG_SFDF_ID:
3269 return "SFDF";
3124 case MLXSW_REG_SLDR_ID: 3270 case MLXSW_REG_SLDR_ID:
3125 return "SLDR"; 3271 return "SLDR";
3126 case MLXSW_REG_SLCR_ID: 3272 case MLXSW_REG_SLCR_ID:
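
For readers unfamiliar with the MLXSW_ITEM32() macros used for the new SPAFT and SFDF fields: each invocation generates set/get helpers for a field of the given bit width at the given bit position inside a 32-bit word of the register payload, which mlxsw keeps in big-endian form. Roughly, and ignoring the indexed and multi-word variants, the setter amounts to the sketch below; item32_set is a stand-in, not the mlxsw implementation:

/* Illustrative stand-in for what an
 * MLXSW_ITEM32(reg, spaft, allow_untagged, 0x04, 31, 1)
 * setter does: read-modify-write a <size>-bit field at bit <shift> of
 * the big-endian 32-bit word at byte <offset> of the payload. */
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>	/* htonl/ntohl stand in for cpu_to_be32/be32_to_cpu */

static void item32_set(char *payload, unsigned int offset,
		       unsigned int shift, unsigned int size, uint32_t val)
{
	uint32_t mask = ((size < 32 ? (1u << size) : 0u) - 1u) << shift;
	uint32_t word;

	memcpy(&word, payload + offset, sizeof(word));	/* avoid unaligned access */
	word = ntohl(word);
	word = (word & ~mask) | ((val << shift) & mask);
	word = htonl(word);
	memcpy(payload + offset, &word, sizeof(word));
}
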
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index ce6845d534a8..a94daa8c346c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1979,6 +1979,115 @@ static struct mlxsw_driver mlxsw_sp_driver = {
1979 .profile = &mlxsw_sp_config_profile, 1979 .profile = &mlxsw_sp_config_profile,
1980}; 1980};
1981 1981
1982static int
1983mlxsw_sp_port_fdb_flush_by_port(const struct mlxsw_sp_port *mlxsw_sp_port)
1984{
1985 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1986 char sfdf_pl[MLXSW_REG_SFDF_LEN];
1987
1988 mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT);
1989 mlxsw_reg_sfdf_system_port_set(sfdf_pl, mlxsw_sp_port->local_port);
1990
1991 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
1992}
1993
1994static int
1995mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
1996 u16 fid)
1997{
1998 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1999 char sfdf_pl[MLXSW_REG_SFDF_LEN];
2000
2001 mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
2002 mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
2003 mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
2004 mlxsw_sp_port->local_port);
2005
2006 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
2007}
2008
2009static int
2010mlxsw_sp_port_fdb_flush_by_lag_id(const struct mlxsw_sp_port *mlxsw_sp_port)
2011{
2012 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2013 char sfdf_pl[MLXSW_REG_SFDF_LEN];
2014
2015 mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG);
2016 mlxsw_reg_sfdf_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
2017
2018 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
2019}
2020
2021static int
2022mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
2023 u16 fid)
2024{
2025 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2026 char sfdf_pl[MLXSW_REG_SFDF_LEN];
2027
2028 mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
2029 mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
2030 mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
2031
2032 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
2033}
2034
2035static int
2036__mlxsw_sp_port_fdb_flush(const struct mlxsw_sp_port *mlxsw_sp_port)
2037{
2038 int err, last_err = 0;
2039 u16 vid;
2040
2041 for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
2042 err = mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, vid);
2043 if (err)
2044 last_err = err;
2045 }
2046
2047 return last_err;
2048}
2049
2050static int
2051__mlxsw_sp_port_fdb_flush_lagged(const struct mlxsw_sp_port *mlxsw_sp_port)
2052{
2053 int err, last_err = 0;
2054 u16 vid;
2055
2056 for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
2057 err = mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port, vid);
2058 if (err)
2059 last_err = err;
2060 }
2061
2062 return last_err;
2063}
2064
2065static int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port)
2066{
2067 if (!list_empty(&mlxsw_sp_port->vports_list))
2068 if (mlxsw_sp_port->lagged)
2069 return __mlxsw_sp_port_fdb_flush_lagged(mlxsw_sp_port);
2070 else
2071 return __mlxsw_sp_port_fdb_flush(mlxsw_sp_port);
2072 else
2073 if (mlxsw_sp_port->lagged)
2074 return mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port);
2075 else
2076 return mlxsw_sp_port_fdb_flush_by_port(mlxsw_sp_port);
2077}
2078
2079static int mlxsw_sp_vport_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_vport)
2080{
2081 u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_vport);
2082 u16 fid = mlxsw_sp_vfid_to_fid(vfid);
2083
2084 if (mlxsw_sp_vport->lagged)
2085 return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_vport,
2086 fid);
2087 else
2088 return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_vport, fid);
2089}
2090
1982static bool mlxsw_sp_port_dev_check(const struct net_device *dev) 2091static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
1983{ 2092{
1984 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; 2093 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
@@ -2006,10 +2115,16 @@ static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port)
2006 return 0; 2115 return 0;
2007} 2116}
2008 2117
2009static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port) 2118static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
2119 bool flush_fdb)
2010{ 2120{
2011 struct net_device *dev = mlxsw_sp_port->dev; 2121 struct net_device *dev = mlxsw_sp_port->dev;
2012 2122
2123 if (flush_fdb && mlxsw_sp_port_fdb_flush(mlxsw_sp_port))
2124 netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
2125
2126 mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
2127
2013 mlxsw_sp_port->learning = 0; 2128 mlxsw_sp_port->learning = 0;
2014 mlxsw_sp_port->learning_sync = 0; 2129 mlxsw_sp_port->learning_sync = 0;
2015 mlxsw_sp_port->uc_flood = 0; 2130 mlxsw_sp_port->uc_flood = 0;
@@ -2200,10 +2315,15 @@ err_col_port_enable:
2200 return err; 2315 return err;
2201} 2316}
2202 2317
2318static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
2319 struct net_device *br_dev,
2320 bool flush_fdb);
2321
2203static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, 2322static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
2204 struct net_device *lag_dev) 2323 struct net_device *lag_dev)
2205{ 2324{
2206 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2325 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2326 struct mlxsw_sp_port *mlxsw_sp_vport;
2207 struct mlxsw_sp_upper *lag; 2327 struct mlxsw_sp_upper *lag;
2208 u16 lag_id = mlxsw_sp_port->lag_id; 2328 u16 lag_id = mlxsw_sp_port->lag_id;
2209 int err; 2329 int err;
@@ -2220,7 +2340,30 @@ static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
2220 if (err) 2340 if (err)
2221 return err; 2341 return err;
2222 2342
2343 /* In case we leave a LAG device that has bridges built on top,
2344 * then their teardown sequence is never issued and we need to
2345 * invoke the necessary cleanup routines ourselves.
2346 */
2347 list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
2348 vport.list) {
2349 struct net_device *br_dev;
2350
2351 if (!mlxsw_sp_vport->bridged)
2352 continue;
2353
2354 br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
2355 mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, false);
2356 }
2357
2358 if (mlxsw_sp_port->bridged) {
2359 mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
2360 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, false);
2361 mlxsw_sp_master_bridge_dec(mlxsw_sp, NULL);
2362 }
2363
2223 if (lag->ref_count == 1) { 2364 if (lag->ref_count == 1) {
2365 if (mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port))
2366 netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
2224 err = mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); 2367 err = mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
2225 if (err) 2368 if (err)
2226 return err; 2369 return err;
@@ -2272,9 +2415,6 @@ static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
2272 return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled); 2415 return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
2273} 2416}
2274 2417
2275static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
2276 struct net_device *br_dev);
2277
2278static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port, 2418static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
2279 struct net_device *vlan_dev) 2419 struct net_device *vlan_dev)
2280{ 2420{
@@ -2312,7 +2452,7 @@ static int mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
2312 struct net_device *br_dev; 2452 struct net_device *br_dev;
2313 2453
2314 br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport); 2454 br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
2315 mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev); 2455 mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, true);
2316 } 2456 }
2317 2457
2318 mlxsw_sp_vport->dev = mlxsw_sp_port->dev; 2458 mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
@@ -2374,7 +2514,8 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
2374 } 2514 }
2375 mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev); 2515 mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev);
2376 } else { 2516 } else {
2377 err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port); 2517 err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
2518 true);
2378 mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev); 2519 mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev);
2379 if (err) { 2520 if (err) {
2380 netdev_err(dev, "Failed to leave bridge\n"); 2521 netdev_err(dev, "Failed to leave bridge\n");
@@ -2541,7 +2682,8 @@ static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
2541} 2682}
2542 2683
2543static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport, 2684static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
2544 struct net_device *br_dev) 2685 struct net_device *br_dev,
2686 bool flush_fdb)
2545{ 2687{
2546 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; 2688 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
2547 u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); 2689 u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
@@ -2604,6 +2746,16 @@ static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
2604 goto err_vport_flood_set; 2746 goto err_vport_flood_set;
2605 } 2747 }
2606 2748
2749 err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
2750 MLXSW_REG_SPMS_STATE_FORWARDING);
2751 if (err) {
2752 netdev_err(dev, "Failed to set STP state\n");
2753 goto err_port_stp_state_set;
2754 }
2755
2756 if (flush_fdb && mlxsw_sp_vport_fdb_flush(mlxsw_sp_vport))
2757 netdev_err(dev, "Failed to flush FDB\n");
2758
2607 /* Switch between the vFIDs and destroy the old one if needed. */ 2759 /* Switch between the vFIDs and destroy the old one if needed. */
2608 new_vfid->nr_vports++; 2760 new_vfid->nr_vports++;
2609 mlxsw_sp_vport->vport.vfid = new_vfid; 2761 mlxsw_sp_vport->vport.vfid = new_vfid;
@@ -2618,6 +2770,7 @@ static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
2618 2770
2619 return 0; 2771 return 0;
2620 2772
2773err_port_stp_state_set:
2621err_vport_flood_set: 2774err_vport_flood_set:
2622err_port_vid_learning_set: 2775err_port_vid_learning_set:
2623err_port_vid_to_fid_validate: 2776err_port_vid_to_fid_validate:
@@ -2777,7 +2930,7 @@ static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
2777 if (!mlxsw_sp_vport) 2930 if (!mlxsw_sp_vport)
2778 return NOTIFY_DONE; 2931 return NOTIFY_DONE;
2779 err = mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, 2932 err = mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport,
2780 upper_dev); 2933 upper_dev, true);
2781 if (err) { 2934 if (err) {
2782 netdev_err(dev, "Failed to leave bridge\n"); 2935 netdev_err(dev, "Failed to leave bridge\n");
2783 return NOTIFY_BAD; 2936 return NOTIFY_BAD;
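
The new mlxsw_sp_port_fdb_flush() picks one of the four SFDF flush flavours from two properties of the leaving port: whether it is a LAG member, and whether it has VLAN upper devices (vPorts), in which case the flush has to be issued per {port, FID} for every possible VID because there is no single flush type covering all of a port's vPorts. The same decision restated with explicit braces, purely as a reading aid; the functions named are the ones added in the hunk above:

/* Restatement of mlxsw_sp_port_fdb_flush() with the nesting spelled
 * out; not a replacement for the driver's version. */
static int fdb_flush(struct mlxsw_sp_port *port)
{
	if (!list_empty(&port->vports_list)) {
		if (port->lagged)
			return __mlxsw_sp_port_fdb_flush_lagged(port);
		return __mlxsw_sp_port_fdb_flush(port);
	}
	if (port->lagged)
		return mlxsw_sp_port_fdb_flush_by_lag_id(port);
	return mlxsw_sp_port_fdb_flush_by_port(port);
}
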
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index a23dc610d259..3b89ed2f3c76 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -120,7 +120,6 @@ struct mlxsw_sp {
120 } fdb_notify; 120 } fdb_notify;
121#define MLXSW_SP_DEFAULT_AGEING_TIME 300 121#define MLXSW_SP_DEFAULT_AGEING_TIME 300
122 u32 ageing_time; 122 u32 ageing_time;
123 struct mutex fdb_lock; /* Make sure FDB sessions are atomic. */
124 struct mlxsw_sp_upper master_bridge; 123 struct mlxsw_sp_upper master_bridge;
125 struct mlxsw_sp_upper lags[MLXSW_SP_LAG_MAX]; 124 struct mlxsw_sp_upper lags[MLXSW_SP_LAG_MAX];
126}; 125};
@@ -254,5 +253,7 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev,
254 __be16 __always_unused proto, u16 vid); 253 __be16 __always_unused proto, u16 vid);
255int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid, 254int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid,
256 bool set, bool only_uc); 255 bool set, bool only_uc);
256void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port);
257int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
257 258
258#endif 259#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 45479ef5bcf4..7b56098acc58 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -45,6 +45,7 @@
45#include <linux/if_bridge.h> 45#include <linux/if_bridge.h>
46#include <linux/workqueue.h> 46#include <linux/workqueue.h>
47#include <linux/jiffies.h> 47#include <linux/jiffies.h>
48#include <linux/rtnetlink.h>
48#include <net/switchdev.h> 49#include <net/switchdev.h>
49 50
50#include "spectrum.h" 51#include "spectrum.h"
@@ -124,14 +125,14 @@ static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
124 int err; 125 int err;
125 126
126 switch (state) { 127 switch (state) {
127 case BR_STATE_DISABLED: /* fall-through */
128 case BR_STATE_FORWARDING: 128 case BR_STATE_FORWARDING:
129 spms_state = MLXSW_REG_SPMS_STATE_FORWARDING; 129 spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
130 break; 130 break;
131 case BR_STATE_LISTENING: /* fall-through */
132 case BR_STATE_LEARNING: 131 case BR_STATE_LEARNING:
133 spms_state = MLXSW_REG_SPMS_STATE_LEARNING; 132 spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
134 break; 133 break;
134 case BR_STATE_LISTENING: /* fall-through */
135 case BR_STATE_DISABLED: /* fall-through */
135 case BR_STATE_BLOCKING: 136 case BR_STATE_BLOCKING:
136 spms_state = MLXSW_REG_SPMS_STATE_DISCARDING; 137 spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
137 break; 138 break;
@@ -369,7 +370,8 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev,
369 return err; 370 return err;
370} 371}
371 372
372static int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) 373static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
374 u16 vid)
373{ 375{
374 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 376 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
375 char spvid_pl[MLXSW_REG_SPVID_LEN]; 377 char spvid_pl[MLXSW_REG_SPVID_LEN];
@@ -378,6 +380,53 @@ static int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
378 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl); 380 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
379} 381}
380 382
383static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
384 bool allow)
385{
386 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
387 char spaft_pl[MLXSW_REG_SPAFT_LEN];
388
389 mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
390 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
391}
392
393int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
394{
395 struct net_device *dev = mlxsw_sp_port->dev;
396 int err;
397
398 if (!vid) {
399 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
400 if (err) {
401 netdev_err(dev, "Failed to disallow untagged traffic\n");
402 return err;
403 }
404 } else {
405 err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
406 if (err) {
407 netdev_err(dev, "Failed to set PVID\n");
408 return err;
409 }
410
411 /* Only allow if not already allowed. */
412 if (!mlxsw_sp_port->pvid) {
413 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port,
414 true);
415 if (err) {
416 netdev_err(dev, "Failed to allow untagged traffic\n");
417 goto err_port_allow_untagged_set;
418 }
419 }
420 }
421
422 mlxsw_sp_port->pvid = vid;
423 return 0;
424
425err_port_allow_untagged_set:
426 __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
427 return err;
428}
429
381static int mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid) 430static int mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
382{ 431{
383 char sfmr_pl[MLXSW_REG_SFMR_LEN]; 432 char sfmr_pl[MLXSW_REG_SFMR_LEN];
@@ -539,7 +588,12 @@ static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
539 netdev_err(dev, "Unable to add PVID %d\n", vid_begin); 588 netdev_err(dev, "Unable to add PVID %d\n", vid_begin);
540 goto err_port_pvid_set; 589 goto err_port_pvid_set;
541 } 590 }
542 mlxsw_sp_port->pvid = vid_begin; 591 } else if (!flag_pvid && old_pvid >= vid_begin && old_pvid <= vid_end) {
592 err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
593 if (err) {
594 netdev_err(dev, "Unable to del PVID\n");
595 goto err_port_pvid_set;
596 }
543 } 597 }
544 598
 545 /* Changing activity bits only if HW operation succeded */ 599 /* Changing activity bits only if HW operation succeded */
@@ -891,20 +945,18 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
891 return err; 945 return err;
892 } 946 }
893 947
948 if (init)
949 goto out;
950
894 pvid = mlxsw_sp_port->pvid; 951 pvid = mlxsw_sp_port->pvid;
895 if (pvid >= vid_begin && pvid <= vid_end && pvid != 1) { 952 if (pvid >= vid_begin && pvid <= vid_end) {
896 /* Default VLAN is always 1 */ 953 err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
897 err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
898 if (err) { 954 if (err) {
899 netdev_err(dev, "Unable to del PVID %d\n", pvid); 955 netdev_err(dev, "Unable to del PVID %d\n", pvid);
900 return err; 956 return err;
901 } 957 }
902 mlxsw_sp_port->pvid = 1;
903 } 958 }
904 959
905 if (init)
906 goto out;
907
908 err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end, 960 err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
909 false, false); 961 false, false);
910 if (err) { 962 if (err) {
@@ -936,6 +988,14 @@ static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
936 vlan->vid_begin, vlan->vid_end, false); 988 vlan->vid_begin, vlan->vid_end, false);
937} 989}
938 990
991void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port)
992{
993 u16 vid;
994
995 for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
996 __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid, false);
997}
998
939static int 999static int
940mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port, 1000mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port,
941 const struct switchdev_obj_port_fdb *fdb) 1001 const struct switchdev_obj_port_fdb *fdb)
@@ -1040,10 +1100,12 @@ static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
1040 1100
1041static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port, 1101static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
1042 struct switchdev_obj_port_fdb *fdb, 1102 struct switchdev_obj_port_fdb *fdb,
1043 switchdev_obj_dump_cb_t *cb) 1103 switchdev_obj_dump_cb_t *cb,
1104 struct net_device *orig_dev)
1044{ 1105{
1045 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1106 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1046 u16 vport_vid = 0, vport_fid = 0; 1107 struct mlxsw_sp_port *tmp;
1108 u16 vport_fid = 0;
1047 char *sfd_pl; 1109 char *sfd_pl;
1048 char mac[ETH_ALEN]; 1110 char mac[ETH_ALEN];
1049 u16 fid; 1111 u16 fid;
@@ -1058,13 +1120,11 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
1058 if (!sfd_pl) 1120 if (!sfd_pl)
1059 return -ENOMEM; 1121 return -ENOMEM;
1060 1122
1061 mutex_lock(&mlxsw_sp_port->mlxsw_sp->fdb_lock);
1062 if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { 1123 if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
1063 u16 tmp; 1124 u16 tmp;
1064 1125
1065 tmp = mlxsw_sp_vport_vfid_get(mlxsw_sp_port); 1126 tmp = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);
1066 vport_fid = mlxsw_sp_vfid_to_fid(tmp); 1127 vport_fid = mlxsw_sp_vfid_to_fid(tmp);
1067 vport_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
1068 } 1128 }
1069 1129
1070 mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0); 1130 mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
@@ -1088,12 +1148,13 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
1088 mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &fid, 1148 mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &fid,
1089 &local_port); 1149 &local_port);
1090 if (local_port == mlxsw_sp_port->local_port) { 1150 if (local_port == mlxsw_sp_port->local_port) {
1091 if (vport_fid && vport_fid != fid) 1151 if (vport_fid && vport_fid == fid)
1092 continue; 1152 fdb->vid = 0;
1093 else if (vport_fid) 1153 else if (!vport_fid &&
1094 fdb->vid = vport_vid; 1154 !mlxsw_sp_fid_is_vfid(fid))
1095 else
1096 fdb->vid = fid; 1155 fdb->vid = fid;
1156 else
1157 continue;
1097 ether_addr_copy(fdb->addr, mac); 1158 ether_addr_copy(fdb->addr, mac);
1098 fdb->ndm_state = NUD_REACHABLE; 1159 fdb->ndm_state = NUD_REACHABLE;
1099 err = cb(&fdb->obj); 1160 err = cb(&fdb->obj);
@@ -1104,14 +1165,22 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
1104 case MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG: 1165 case MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG:
1105 mlxsw_reg_sfd_uc_lag_unpack(sfd_pl, i, 1166 mlxsw_reg_sfd_uc_lag_unpack(sfd_pl, i,
1106 mac, &fid, &lag_id); 1167 mac, &fid, &lag_id);
1107 if (mlxsw_sp_port == 1168 tmp = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
1108 mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id)) { 1169 if (tmp && tmp->local_port ==
1109 if (vport_fid && vport_fid != fid) 1170 mlxsw_sp_port->local_port) {
1171 /* LAG records can only point to LAG
1172 * devices or VLAN devices on top.
1173 */
1174 if (!netif_is_lag_master(orig_dev) &&
1175 !is_vlan_dev(orig_dev))
1110 continue; 1176 continue;
1111 else if (vport_fid) 1177 if (vport_fid && vport_fid == fid)
1112 fdb->vid = vport_vid; 1178 fdb->vid = 0;
1113 else 1179 else if (!vport_fid &&
1180 !mlxsw_sp_fid_is_vfid(fid))
1114 fdb->vid = fid; 1181 fdb->vid = fid;
1182 else
1183 continue;
1115 ether_addr_copy(fdb->addr, mac); 1184 ether_addr_copy(fdb->addr, mac);
1116 fdb->ndm_state = NUD_REACHABLE; 1185 fdb->ndm_state = NUD_REACHABLE;
1117 err = cb(&fdb->obj); 1186 err = cb(&fdb->obj);
@@ -1124,7 +1193,6 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
1124 } while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT); 1193 } while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT);
1125 1194
1126out: 1195out:
1127 mutex_unlock(&mlxsw_sp_port->mlxsw_sp->fdb_lock);
1128 kfree(sfd_pl); 1196 kfree(sfd_pl);
1129 return stored_err ? stored_err : err; 1197 return stored_err ? stored_err : err;
1130} 1198}
@@ -1176,7 +1244,8 @@ static int mlxsw_sp_port_obj_dump(struct net_device *dev,
1176 break; 1244 break;
1177 case SWITCHDEV_OBJ_ID_PORT_FDB: 1245 case SWITCHDEV_OBJ_ID_PORT_FDB:
1178 err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port, 1246 err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port,
1179 SWITCHDEV_OBJ_PORT_FDB(obj), cb); 1247 SWITCHDEV_OBJ_PORT_FDB(obj), cb,
1248 obj->orig_dev);
1180 break; 1249 break;
1181 default: 1250 default:
1182 err = -EOPNOTSUPP; 1251 err = -EOPNOTSUPP;
@@ -1194,14 +1263,14 @@ static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
1194 .switchdev_port_obj_dump = mlxsw_sp_port_obj_dump, 1263 .switchdev_port_obj_dump = mlxsw_sp_port_obj_dump,
1195}; 1264};
1196 1265
1197static void mlxsw_sp_fdb_call_notifiers(bool learning, bool learning_sync, 1266static void mlxsw_sp_fdb_call_notifiers(bool learning_sync, bool adding,
1198 bool adding, char *mac, u16 vid, 1267 char *mac, u16 vid,
1199 struct net_device *dev) 1268 struct net_device *dev)
1200{ 1269{
1201 struct switchdev_notifier_fdb_info info; 1270 struct switchdev_notifier_fdb_info info;
1202 unsigned long notifier_type; 1271 unsigned long notifier_type;
1203 1272
1204 if (learning && learning_sync) { 1273 if (learning_sync) {
1205 info.addr = mac; 1274 info.addr = mac;
1206 info.vid = vid; 1275 info.vid = vid;
1207 notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL; 1276 notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
@@ -1237,7 +1306,7 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
1237 netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n"); 1306 netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
1238 goto just_remove; 1307 goto just_remove;
1239 } 1308 }
1240 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); 1309 vid = 0;
1241 /* Override the physical port with the vPort. */ 1310 /* Override the physical port with the vPort. */
1242 mlxsw_sp_port = mlxsw_sp_vport; 1311 mlxsw_sp_port = mlxsw_sp_vport;
1243 } else { 1312 } else {
@@ -1257,8 +1326,7 @@ do_fdb_op:
1257 1326
1258 if (!do_notification) 1327 if (!do_notification)
1259 return; 1328 return;
1260 mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning, 1329 mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync,
1261 mlxsw_sp_port->learning_sync,
1262 adding, mac, vid, mlxsw_sp_port->dev); 1330 adding, mac, vid, mlxsw_sp_port->dev);
1263 return; 1331 return;
1264 1332
@@ -1273,6 +1341,7 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
1273 bool adding) 1341 bool adding)
1274{ 1342{
1275 struct mlxsw_sp_port *mlxsw_sp_port; 1343 struct mlxsw_sp_port *mlxsw_sp_port;
1344 struct net_device *dev;
1276 char mac[ETH_ALEN]; 1345 char mac[ETH_ALEN];
1277 u16 lag_vid = 0; 1346 u16 lag_vid = 0;
1278 u16 lag_id; 1347 u16 lag_id;
@@ -1298,11 +1367,13 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
1298 goto just_remove; 1367 goto just_remove;
1299 } 1368 }
1300 1369
1301 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); 1370 lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
1302 lag_vid = vid; 1371 dev = mlxsw_sp_vport->dev;
1372 vid = 0;
1303 /* Override the physical port with the vPort. */ 1373 /* Override the physical port with the vPort. */
1304 mlxsw_sp_port = mlxsw_sp_vport; 1374 mlxsw_sp_port = mlxsw_sp_vport;
1305 } else { 1375 } else {
1376 dev = mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev;
1306 vid = fid; 1377 vid = fid;
1307 } 1378 }
1308 1379
@@ -1319,10 +1390,8 @@ do_fdb_op:
1319 1390
1320 if (!do_notification) 1391 if (!do_notification)
1321 return; 1392 return;
1322 mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning, 1393 mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync, adding, mac,
1323 mlxsw_sp_port->learning_sync, 1394 vid, dev);
1324 adding, mac, vid,
1325 mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev);
1326 return; 1395 return;
1327 1396
1328just_remove: 1397just_remove:
@@ -1374,7 +1443,7 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
1374 1443
1375 mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work); 1444 mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);
1376 1445
1377 mutex_lock(&mlxsw_sp->fdb_lock); 1446 rtnl_lock();
1378 do { 1447 do {
1379 mlxsw_reg_sfn_pack(sfn_pl); 1448 mlxsw_reg_sfn_pack(sfn_pl);
1380 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl); 1449 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
@@ -1387,7 +1456,7 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
1387 mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i); 1456 mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
1388 1457
1389 } while (num_rec); 1458 } while (num_rec);
1390 mutex_unlock(&mlxsw_sp->fdb_lock); 1459 rtnl_unlock();
1391 1460
1392 kfree(sfn_pl); 1461 kfree(sfn_pl);
1393 mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp); 1462 mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
@@ -1402,7 +1471,6 @@ static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
1402 dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n"); 1471 dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
1403 return err; 1472 return err;
1404 } 1473 }
1405 mutex_init(&mlxsw_sp->fdb_lock);
1406 INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work); 1474 INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
1407 mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL; 1475 mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
1408 mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp); 1476 mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index a10c928bbd6b..3e67f451f2ab 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -28,6 +28,16 @@
28 28
29#include "moxart_ether.h" 29#include "moxart_ether.h"
30 30
31static inline void moxart_desc_write(u32 data, u32 *desc)
32{
33 *desc = cpu_to_le32(data);
34}
35
36static inline u32 moxart_desc_read(u32 *desc)
37{
38 return le32_to_cpu(*desc);
39}
40
31static inline void moxart_emac_write(struct net_device *ndev, 41static inline void moxart_emac_write(struct net_device *ndev,
32 unsigned int reg, unsigned long value) 42 unsigned int reg, unsigned long value)
33{ 43{
@@ -112,7 +122,7 @@ static void moxart_mac_enable(struct net_device *ndev)
112static void moxart_mac_setup_desc_ring(struct net_device *ndev) 122static void moxart_mac_setup_desc_ring(struct net_device *ndev)
113{ 123{
114 struct moxart_mac_priv_t *priv = netdev_priv(ndev); 124 struct moxart_mac_priv_t *priv = netdev_priv(ndev);
115 void __iomem *desc; 125 void *desc;
116 int i; 126 int i;
117 127
118 for (i = 0; i < TX_DESC_NUM; i++) { 128 for (i = 0; i < TX_DESC_NUM; i++) {
@@ -121,7 +131,7 @@ static void moxart_mac_setup_desc_ring(struct net_device *ndev)
121 131
122 priv->tx_buf[i] = priv->tx_buf_base + priv->tx_buf_size * i; 132 priv->tx_buf[i] = priv->tx_buf_base + priv->tx_buf_size * i;
123 } 133 }
124 writel(TX_DESC1_END, desc + TX_REG_OFFSET_DESC1); 134 moxart_desc_write(TX_DESC1_END, desc + TX_REG_OFFSET_DESC1);
125 135
126 priv->tx_head = 0; 136 priv->tx_head = 0;
127 priv->tx_tail = 0; 137 priv->tx_tail = 0;
@@ -129,8 +139,8 @@ static void moxart_mac_setup_desc_ring(struct net_device *ndev)
129 for (i = 0; i < RX_DESC_NUM; i++) { 139 for (i = 0; i < RX_DESC_NUM; i++) {
130 desc = priv->rx_desc_base + i * RX_REG_DESC_SIZE; 140 desc = priv->rx_desc_base + i * RX_REG_DESC_SIZE;
131 memset(desc, 0, RX_REG_DESC_SIZE); 141 memset(desc, 0, RX_REG_DESC_SIZE);
132 writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0); 142 moxart_desc_write(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0);
133 writel(RX_BUF_SIZE & RX_DESC1_BUF_SIZE_MASK, 143 moxart_desc_write(RX_BUF_SIZE & RX_DESC1_BUF_SIZE_MASK,
134 desc + RX_REG_OFFSET_DESC1); 144 desc + RX_REG_OFFSET_DESC1);
135 145
136 priv->rx_buf[i] = priv->rx_buf_base + priv->rx_buf_size * i; 146 priv->rx_buf[i] = priv->rx_buf_base + priv->rx_buf_size * i;
@@ -141,12 +151,12 @@ static void moxart_mac_setup_desc_ring(struct net_device *ndev)
141 if (dma_mapping_error(&ndev->dev, priv->rx_mapping[i])) 151 if (dma_mapping_error(&ndev->dev, priv->rx_mapping[i]))
142 netdev_err(ndev, "DMA mapping error\n"); 152 netdev_err(ndev, "DMA mapping error\n");
143 153
144 writel(priv->rx_mapping[i], 154 moxart_desc_write(priv->rx_mapping[i],
145 desc + RX_REG_OFFSET_DESC2 + RX_DESC2_ADDRESS_PHYS); 155 desc + RX_REG_OFFSET_DESC2 + RX_DESC2_ADDRESS_PHYS);
146 writel(priv->rx_buf[i], 156 moxart_desc_write((uintptr_t)priv->rx_buf[i],
147 desc + RX_REG_OFFSET_DESC2 + RX_DESC2_ADDRESS_VIRT); 157 desc + RX_REG_OFFSET_DESC2 + RX_DESC2_ADDRESS_VIRT);
148 } 158 }
149 writel(RX_DESC1_END, desc + RX_REG_OFFSET_DESC1); 159 moxart_desc_write(RX_DESC1_END, desc + RX_REG_OFFSET_DESC1);
150 160
151 priv->rx_head = 0; 161 priv->rx_head = 0;
152 162
@@ -201,14 +211,15 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
201 napi); 211 napi);
202 struct net_device *ndev = priv->ndev; 212 struct net_device *ndev = priv->ndev;
203 struct sk_buff *skb; 213 struct sk_buff *skb;
204 void __iomem *desc; 214 void *desc;
205 unsigned int desc0, len; 215 unsigned int desc0, len;
206 int rx_head = priv->rx_head; 216 int rx_head = priv->rx_head;
207 int rx = 0; 217 int rx = 0;
208 218
209 while (rx < budget) { 219 while (rx < budget) {
210 desc = priv->rx_desc_base + (RX_REG_DESC_SIZE * rx_head); 220 desc = priv->rx_desc_base + (RX_REG_DESC_SIZE * rx_head);
211 desc0 = readl(desc + RX_REG_OFFSET_DESC0); 221 desc0 = moxart_desc_read(desc + RX_REG_OFFSET_DESC0);
222 rmb(); /* ensure desc0 is up to date */
212 223
213 if (desc0 & RX_DESC0_DMA_OWN) 224 if (desc0 & RX_DESC0_DMA_OWN)
214 break; 225 break;
@@ -250,7 +261,8 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
250 priv->stats.multicast++; 261 priv->stats.multicast++;
251 262
252rx_next: 263rx_next:
253 writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0); 264 wmb(); /* prevent setting ownership back too early */
265 moxart_desc_write(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0);
254 266
255 rx_head = RX_NEXT(rx_head); 267 rx_head = RX_NEXT(rx_head);
256 priv->rx_head = rx_head; 268 priv->rx_head = rx_head;
@@ -310,7 +322,7 @@ static irqreturn_t moxart_mac_interrupt(int irq, void *dev_id)
310static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) 322static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
311{ 323{
312 struct moxart_mac_priv_t *priv = netdev_priv(ndev); 324 struct moxart_mac_priv_t *priv = netdev_priv(ndev);
313 void __iomem *desc; 325 void *desc;
314 unsigned int len; 326 unsigned int len;
315 unsigned int tx_head = priv->tx_head; 327 unsigned int tx_head = priv->tx_head;
316 u32 txdes1; 328 u32 txdes1;
@@ -319,11 +331,12 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
319 desc = priv->tx_desc_base + (TX_REG_DESC_SIZE * tx_head); 331 desc = priv->tx_desc_base + (TX_REG_DESC_SIZE * tx_head);
320 332
321 spin_lock_irq(&priv->txlock); 333 spin_lock_irq(&priv->txlock);
322 if (readl(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) { 334 if (moxart_desc_read(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) {
323 net_dbg_ratelimited("no TX space for packet\n"); 335 net_dbg_ratelimited("no TX space for packet\n");
324 priv->stats.tx_dropped++; 336 priv->stats.tx_dropped++;
325 goto out_unlock; 337 goto out_unlock;
326 } 338 }
339 rmb(); /* ensure data is only read that had TX_DESC0_DMA_OWN cleared */
327 340
328 len = skb->len > TX_BUF_SIZE ? TX_BUF_SIZE : skb->len; 341 len = skb->len > TX_BUF_SIZE ? TX_BUF_SIZE : skb->len;
329 342
@@ -337,9 +350,9 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
337 priv->tx_len[tx_head] = len; 350 priv->tx_len[tx_head] = len;
338 priv->tx_skb[tx_head] = skb; 351 priv->tx_skb[tx_head] = skb;
339 352
340 writel(priv->tx_mapping[tx_head], 353 moxart_desc_write(priv->tx_mapping[tx_head],
341 desc + TX_REG_OFFSET_DESC2 + TX_DESC2_ADDRESS_PHYS); 354 desc + TX_REG_OFFSET_DESC2 + TX_DESC2_ADDRESS_PHYS);
342 writel(skb->data, 355 moxart_desc_write((uintptr_t)skb->data,
343 desc + TX_REG_OFFSET_DESC2 + TX_DESC2_ADDRESS_VIRT); 356 desc + TX_REG_OFFSET_DESC2 + TX_DESC2_ADDRESS_VIRT);
344 357
345 if (skb->len < ETH_ZLEN) { 358 if (skb->len < ETH_ZLEN) {
@@ -354,8 +367,9 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
354 txdes1 = TX_DESC1_LTS | TX_DESC1_FTS | (len & TX_DESC1_BUF_SIZE_MASK); 367 txdes1 = TX_DESC1_LTS | TX_DESC1_FTS | (len & TX_DESC1_BUF_SIZE_MASK);
355 if (tx_head == TX_DESC_NUM_MASK) 368 if (tx_head == TX_DESC_NUM_MASK)
356 txdes1 |= TX_DESC1_END; 369 txdes1 |= TX_DESC1_END;
357 writel(txdes1, desc + TX_REG_OFFSET_DESC1); 370 moxart_desc_write(txdes1, desc + TX_REG_OFFSET_DESC1);
358 writel(TX_DESC0_DMA_OWN, desc + TX_REG_OFFSET_DESC0); 371 wmb(); /* flush descriptor before transferring ownership */
372 moxart_desc_write(TX_DESC0_DMA_OWN, desc + TX_REG_OFFSET_DESC0);
359 373
360 /* start to send packet */ 374 /* start to send packet */
361 writel(0xffffffff, priv->base + REG_TX_POLL_DEMAND); 375 writel(0xffffffff, priv->base + REG_TX_POLL_DEMAND);
@@ -460,9 +474,9 @@ static int moxart_mac_probe(struct platform_device *pdev)
460 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 474 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
461 ndev->base_addr = res->start; 475 ndev->base_addr = res->start;
462 priv->base = devm_ioremap_resource(p_dev, res); 476 priv->base = devm_ioremap_resource(p_dev, res);
463 ret = IS_ERR(priv->base); 477 if (IS_ERR(priv->base)) {
464 if (ret) {
465 dev_err(p_dev, "devm_ioremap_resource failed\n"); 478 dev_err(p_dev, "devm_ioremap_resource failed\n");
479 ret = PTR_ERR(priv->base);
466 goto init_fail; 480 goto init_fail;
467 } 481 }
468 482
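
The rmb()/wmb() pairs added above implement the usual ownership protocol for a descriptor ring shared with DMA hardware: publish every descriptor field before flipping the ownership bit, and do not trust fields that were read before ownership was observed to be back on the CPU side. The same idea in portable C11 atomics, with generic names and layout; this is a sketch, not the driver's code, and the fences only approximate the kernel's DMA barriers:

/* Generic descriptor hand-off sketch using C11 fences in place of the
 * kernel's wmb()/rmb(). */
#include <stdatomic.h>
#include <stdint.h>

struct desc {
	uint32_t ctl;		/* bit 31: owned by hardware */
	uint32_t len;
	uint32_t addr;
};
#define DESC_HW_OWN	0x80000000u

static void give_to_hw(volatile struct desc *d, uint32_t len, uint32_t addr)
{
	d->len = len;
	d->addr = addr;
	atomic_thread_fence(memory_order_release);	/* ~wmb(): publish fields first */
	d->ctl = DESC_HW_OWN;
}

static int take_from_hw(volatile struct desc *d, uint32_t *len)
{
	if (d->ctl & DESC_HW_OWN)
		return 0;				/* still owned by hardware */
	atomic_thread_fence(memory_order_acquire);	/* ~rmb(): then read the rest */
	*len = d->len;
	return 1;
}
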
diff --git a/drivers/net/ethernet/moxa/moxart_ether.h b/drivers/net/ethernet/moxa/moxart_ether.h
index 2be9280d608c..93a9563ac7c6 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.h
+++ b/drivers/net/ethernet/moxa/moxart_ether.h
@@ -300,7 +300,7 @@ struct moxart_mac_priv_t {
300 300
301 dma_addr_t rx_base; 301 dma_addr_t rx_base;
302 dma_addr_t rx_mapping[RX_DESC_NUM]; 302 dma_addr_t rx_mapping[RX_DESC_NUM];
303 void __iomem *rx_desc_base; 303 void *rx_desc_base;
304 unsigned char *rx_buf_base; 304 unsigned char *rx_buf_base;
305 unsigned char *rx_buf[RX_DESC_NUM]; 305 unsigned char *rx_buf[RX_DESC_NUM];
306 unsigned int rx_head; 306 unsigned int rx_head;
@@ -308,7 +308,7 @@ struct moxart_mac_priv_t {
308 308
309 dma_addr_t tx_base; 309 dma_addr_t tx_base;
310 dma_addr_t tx_mapping[TX_DESC_NUM]; 310 dma_addr_t tx_mapping[TX_DESC_NUM];
311 void __iomem *tx_desc_base; 311 void *tx_desc_base;
312 unsigned char *tx_buf_base; 312 unsigned char *tx_buf_base;
313 unsigned char *tx_buf[RX_DESC_NUM]; 313 unsigned char *tx_buf[RX_DESC_NUM];
314 unsigned int tx_head; 314 unsigned int tx_head;
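
Dropping __iomem from the descriptor-base pointers matches the accessor change in moxart_ether.c: the rings live in coherent DMA memory (the dma_addr_t ring-base fields alongside them suggest dma_alloc_coherent()), which hands back a normal CPU pointer, so plain loads and stores with explicit cpu_to_le32()/le32_to_cpu() are appropriate, while writel()/readl() and the __iomem annotation stay reserved for ioremap()ed MMIO registers such as priv->base. A condensed sketch of the two kinds of mapping; the kernel APIs named are real, ring_ctx and the error handling are illustrative:

/* Sketch of the distinction the type change reflects. */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/io.h>

struct ring_ctx {
	void *desc;		/* coherent DMA memory: plain CPU pointer */
	dma_addr_t desc_dma;	/* bus address handed to the hardware */
	void __iomem *regs;	/* MMIO registers: accessed with readl/writel */
};

static int ring_map(struct ring_ctx *ctx, struct device *dev,
		    struct resource *res, size_t ring_bytes)
{
	ctx->desc = dma_alloc_coherent(dev, ring_bytes, &ctx->desc_dma,
				       GFP_KERNEL);
	if (!ctx->desc)
		return -ENOMEM;

	ctx->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(ctx->regs))
		return PTR_ERR(ctx->regs);

	return 0;
}
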
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 50d5604833ed..e0993eba5df3 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -2223,8 +2223,6 @@ static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
2223 return IRQ_NONE; 2223 return IRQ_NONE;
2224} 2224}
2225 2225
2226#ifdef CONFIG_PCI_MSI
2227
2228static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id) 2226static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id)
2229{ 2227{
2230 struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id; 2228 struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id;
@@ -2442,16 +2440,13 @@ static void vxge_rem_msix_isr(struct vxgedev *vdev)
2442 if (vdev->config.intr_type == MSI_X) 2440 if (vdev->config.intr_type == MSI_X)
2443 pci_disable_msix(vdev->pdev); 2441 pci_disable_msix(vdev->pdev);
2444} 2442}
2445#endif
2446 2443
2447static void vxge_rem_isr(struct vxgedev *vdev) 2444static void vxge_rem_isr(struct vxgedev *vdev)
2448{ 2445{
2449#ifdef CONFIG_PCI_MSI 2446 if (IS_ENABLED(CONFIG_PCI_MSI) &&
2450 if (vdev->config.intr_type == MSI_X) { 2447 vdev->config.intr_type == MSI_X) {
2451 vxge_rem_msix_isr(vdev); 2448 vxge_rem_msix_isr(vdev);
2452 } else 2449 } else if (vdev->config.intr_type == INTA) {
2453#endif
2454 if (vdev->config.intr_type == INTA) {
2455 synchronize_irq(vdev->pdev->irq); 2450 synchronize_irq(vdev->pdev->irq);
2456 free_irq(vdev->pdev->irq, vdev); 2451 free_irq(vdev->pdev->irq, vdev);
2457 } 2452 }
@@ -2460,11 +2455,10 @@ static void vxge_rem_isr(struct vxgedev *vdev)
2460static int vxge_add_isr(struct vxgedev *vdev) 2455static int vxge_add_isr(struct vxgedev *vdev)
2461{ 2456{
2462 int ret = 0; 2457 int ret = 0;
2463#ifdef CONFIG_PCI_MSI
2464 int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0; 2458 int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0;
2465 int pci_fun = PCI_FUNC(vdev->pdev->devfn); 2459 int pci_fun = PCI_FUNC(vdev->pdev->devfn);
2466 2460
2467 if (vdev->config.intr_type == MSI_X) 2461 if (IS_ENABLED(CONFIG_PCI_MSI) && vdev->config.intr_type == MSI_X)
2468 ret = vxge_enable_msix(vdev); 2462 ret = vxge_enable_msix(vdev);
2469 2463
2470 if (ret) { 2464 if (ret) {
@@ -2475,7 +2469,7 @@ static int vxge_add_isr(struct vxgedev *vdev)
2475 vdev->config.intr_type = INTA; 2469 vdev->config.intr_type = INTA;
2476 } 2470 }
2477 2471
2478 if (vdev->config.intr_type == MSI_X) { 2472 if (IS_ENABLED(CONFIG_PCI_MSI) && vdev->config.intr_type == MSI_X) {
2479 for (intr_idx = 0; 2473 for (intr_idx = 0;
2480 intr_idx < (vdev->no_of_vpath * 2474 intr_idx < (vdev->no_of_vpath *
2481 VXGE_HW_VPATH_MSIX_ACTIVE); intr_idx++) { 2475 VXGE_HW_VPATH_MSIX_ACTIVE); intr_idx++) {
@@ -2576,9 +2570,8 @@ static int vxge_add_isr(struct vxgedev *vdev)
2576 vdev->vxge_entries[intr_cnt].in_use = 1; 2570 vdev->vxge_entries[intr_cnt].in_use = 1;
2577 vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[0]; 2571 vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[0];
2578 } 2572 }
2579INTA_MODE:
2580#endif
2581 2573
2574INTA_MODE:
2582 if (vdev->config.intr_type == INTA) { 2575 if (vdev->config.intr_type == INTA) {
2583 snprintf(vdev->desc[0], VXGE_INTR_STRLEN, 2576 snprintf(vdev->desc[0], VXGE_INTR_STRLEN,
2584 "%s:vxge:INTA", vdev->ndev->name); 2577 "%s:vxge:INTA", vdev->ndev->name);
@@ -3889,12 +3882,12 @@ static void vxge_device_config_init(struct vxge_hw_device_config *device_config,
3889 if (max_mac_vpath > VXGE_MAX_MAC_ADDR_COUNT) 3882 if (max_mac_vpath > VXGE_MAX_MAC_ADDR_COUNT)
3890 max_mac_vpath = VXGE_MAX_MAC_ADDR_COUNT; 3883 max_mac_vpath = VXGE_MAX_MAC_ADDR_COUNT;
3891 3884
3892#ifndef CONFIG_PCI_MSI 3885 if (!IS_ENABLED(CONFIG_PCI_MSI)) {
3893 vxge_debug_init(VXGE_ERR, 3886 vxge_debug_init(VXGE_ERR,
3894 "%s: This Kernel does not support " 3887 "%s: This Kernel does not support "
3895 "MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME); 3888 "MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME);
3896 *intr_type = INTA; 3889 *intr_type = INTA;
3897#endif 3890 }
3898 3891
3899 /* Configure whether MSI-X or IRQL. */ 3892 /* Configure whether MSI-X or IRQL. */
3900 switch (*intr_type) { 3893 switch (*intr_type) {
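
The vxge hunks replace #ifdef CONFIG_PCI_MSI blocks with IS_ENABLED(CONFIG_PCI_MSI) tests, so the MSI-X paths are compiled and type-checked on every configuration while the optimizer still discards them when PCI_MSI is off. A self-contained sketch of the pattern; the my_* enum and helpers are placeholders:

#include <linux/kconfig.h>

enum my_intr_type { MY_INTA, MY_MSI_X };	/* stand-ins for the driver's values */

static void my_setup_msix(void) { }
static void my_setup_inta(void) { }

static void my_setup_irq(enum my_intr_type intr_type)
{
	/* IS_ENABLED() evaluates to a constant 0/1, so the dead branch is
	 * eliminated exactly as with #ifdef, but it can no longer bit-rot
	 * unnoticed on !CONFIG_PCI_MSI builds. */
	if (IS_ENABLED(CONFIG_PCI_MSI) && intr_type == MY_MSI_X)
		my_setup_msix();
	else if (intr_type == MY_INTA)
		my_setup_inta();
}
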
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 689a4a5c8dcf..1ef03939d25f 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -811,7 +811,7 @@ qcaspi_netdev_setup(struct net_device *dev)
811 dev->netdev_ops = &qcaspi_netdev_ops; 811 dev->netdev_ops = &qcaspi_netdev_ops;
812 qcaspi_set_ethtool_ops(dev); 812 qcaspi_set_ethtool_ops(dev);
813 dev->watchdog_timeo = QCASPI_TX_TIMEOUT; 813 dev->watchdog_timeo = QCASPI_TX_TIMEOUT;
814 dev->flags = IFF_MULTICAST; 814 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
815 dev->tx_queue_len = 100; 815 dev->tx_queue_len = 100;
816 816
817 qca = netdev_priv(dev); 817 qca = netdev_priv(dev);
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 17d5571d0432..dd2cf3738b73 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -4933,8 +4933,6 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
4933 RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); 4933 RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
4934 break; 4934 break;
4935 case RTL_GIGA_MAC_VER_40: 4935 case RTL_GIGA_MAC_VER_40:
4936 RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
4937 break;
4938 case RTL_GIGA_MAC_VER_41: 4936 case RTL_GIGA_MAC_VER_41:
4939 case RTL_GIGA_MAC_VER_42: 4937 case RTL_GIGA_MAC_VER_42:
4940 case RTL_GIGA_MAC_VER_43: 4938 case RTL_GIGA_MAC_VER_43:
@@ -4943,8 +4941,6 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
4943 case RTL_GIGA_MAC_VER_46: 4941 case RTL_GIGA_MAC_VER_46:
4944 case RTL_GIGA_MAC_VER_47: 4942 case RTL_GIGA_MAC_VER_47:
4945 case RTL_GIGA_MAC_VER_48: 4943 case RTL_GIGA_MAC_VER_48:
4946 RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF);
4947 break;
4948 case RTL_GIGA_MAC_VER_49: 4944 case RTL_GIGA_MAC_VER_49:
4949 case RTL_GIGA_MAC_VER_50: 4945 case RTL_GIGA_MAC_VER_50:
4950 case RTL_GIGA_MAC_VER_51: 4946 case RTL_GIGA_MAC_VER_51:
@@ -6137,28 +6133,28 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
6137 sw_cnt_1ms_ini = 16000000/rg_saw_cnt; 6133 sw_cnt_1ms_ini = 16000000/rg_saw_cnt;
6138 sw_cnt_1ms_ini &= 0x0fff; 6134 sw_cnt_1ms_ini &= 0x0fff;
6139 data = r8168_mac_ocp_read(tp, 0xd412); 6135 data = r8168_mac_ocp_read(tp, 0xd412);
6140 data &= 0x0fff; 6136 data &= ~0x0fff;
6141 data |= sw_cnt_1ms_ini; 6137 data |= sw_cnt_1ms_ini;
6142 r8168_mac_ocp_write(tp, 0xd412, data); 6138 r8168_mac_ocp_write(tp, 0xd412, data);
6143 } 6139 }
6144 6140
6145 data = r8168_mac_ocp_read(tp, 0xe056); 6141 data = r8168_mac_ocp_read(tp, 0xe056);
6146 data &= 0xf0; 6142 data &= ~0xf0;
6147 data |= 0x07; 6143 data |= 0x70;
6148 r8168_mac_ocp_write(tp, 0xe056, data); 6144 r8168_mac_ocp_write(tp, 0xe056, data);
6149 6145
6150 data = r8168_mac_ocp_read(tp, 0xe052); 6146 data = r8168_mac_ocp_read(tp, 0xe052);
6151 data &= 0x8008; 6147 data &= ~0x6000;
6152 data |= 0x6000; 6148 data |= 0x8008;
6153 r8168_mac_ocp_write(tp, 0xe052, data); 6149 r8168_mac_ocp_write(tp, 0xe052, data);
6154 6150
6155 data = r8168_mac_ocp_read(tp, 0xe0d6); 6151 data = r8168_mac_ocp_read(tp, 0xe0d6);
6156 data &= 0x01ff; 6152 data &= ~0x01ff;
6157 data |= 0x017f; 6153 data |= 0x017f;
6158 r8168_mac_ocp_write(tp, 0xe0d6, data); 6154 r8168_mac_ocp_write(tp, 0xe0d6, data);
6159 6155
6160 data = r8168_mac_ocp_read(tp, 0xd420); 6156 data = r8168_mac_ocp_read(tp, 0xd420);
6161 data &= 0x0fff; 6157 data &= ~0x0fff;
6162 data |= 0x047f; 6158 data |= 0x047f;
6163 r8168_mac_ocp_write(tp, 0xd420, data); 6159 r8168_mac_ocp_write(tp, 0xd420, data);
6164 6160
@@ -7730,10 +7726,13 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7730{ 7726{
7731 struct rtl8169_private *tp = netdev_priv(dev); 7727 struct rtl8169_private *tp = netdev_priv(dev);
7732 void __iomem *ioaddr = tp->mmio_addr; 7728 void __iomem *ioaddr = tp->mmio_addr;
7729 struct pci_dev *pdev = tp->pci_dev;
7733 struct rtl8169_counters *counters = tp->counters; 7730 struct rtl8169_counters *counters = tp->counters;
7734 unsigned int start; 7731 unsigned int start;
7735 7732
7736 if (netif_running(dev)) 7733 pm_runtime_get_noresume(&pdev->dev);
7734
7735 if (netif_running(dev) && pm_runtime_active(&pdev->dev))
7737 rtl8169_rx_missed(dev, ioaddr); 7736 rtl8169_rx_missed(dev, ioaddr);
7738 7737
7739 do { 7738 do {
@@ -7761,7 +7760,8 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7761 * Fetch additonal counter values missing in stats collected by driver 7760 * Fetch additonal counter values missing in stats collected by driver
7762 * from tally counters. 7761 * from tally counters.
7763 */ 7762 */
7764 rtl8169_update_counters(dev); 7763 if (pm_runtime_active(&pdev->dev))
7764 rtl8169_update_counters(dev);
7765 7765
7766 /* 7766 /*
7767 * Subtract values fetched during initalization. 7767 * Subtract values fetched during initalization.
@@ -7774,6 +7774,8 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7774 stats->tx_aborted_errors = le16_to_cpu(counters->tx_aborted) - 7774 stats->tx_aborted_errors = le16_to_cpu(counters->tx_aborted) -
7775 le16_to_cpu(tp->tc_offset.tx_aborted); 7775 le16_to_cpu(tp->tc_offset.tx_aborted);
7776 7776
7777 pm_runtime_put_noidle(&pdev->dev);
7778
7777 return stats; 7779 return stats;
7778} 7780}
7779 7781
@@ -7853,6 +7855,10 @@ static int rtl8169_runtime_suspend(struct device *device)
7853 7855
7854 rtl8169_net_suspend(dev); 7856 rtl8169_net_suspend(dev);
7855 7857
7858 /* Update counters before going runtime suspend */
7859 rtl8169_rx_missed(dev, tp->mmio_addr);
7860 rtl8169_update_counters(dev);
7861
7856 return 0; 7862 return 0;
7857} 7863}
7858 7864
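
Two separate fixes sit in the r8169 hunks: the OCP register updates now clear the target bits with ~mask before OR-ing in the new value (the old "data &= 0x0fff" kept the stale field and wiped the other bits instead), and the stats path takes a runtime-PM reference and only touches MMIO while the device is active. Sketches of both, with my_* names as placeholders:

#include <linux/pm_runtime.h>
#include <linux/types.h>

/* Replace a bit-field inside a register value: clear it first, then set. */
static u16 my_update_field(u16 reg, u16 field_mask, u16 new_val)
{
	reg &= ~field_mask;
	reg |= new_val & field_mask;
	return reg;
}

static void my_read_hw_counters(void) { }	/* placeholder MMIO access */

static void my_get_stats(struct device *dev)
{
	/* Take a reference without waking the device up ... */
	pm_runtime_get_noresume(dev);

	/* ... and only read hardware counters while it is actually powered. */
	if (pm_runtime_active(dev))
		my_read_hw_counters();

	pm_runtime_put_noidle(dev);
}
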
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index ac43ed914fcf..86449c357168 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1139,7 +1139,8 @@ static int ravb_set_ringparam(struct net_device *ndev,
1139 if (netif_running(ndev)) { 1139 if (netif_running(ndev)) {
1140 netif_device_detach(ndev); 1140 netif_device_detach(ndev);
1141 /* Stop PTP Clock driver */ 1141 /* Stop PTP Clock driver */
1142 ravb_ptp_stop(ndev); 1142 if (priv->chip_id == RCAR_GEN2)
1143 ravb_ptp_stop(ndev);
1143 /* Wait for DMA stopping */ 1144 /* Wait for DMA stopping */
1144 error = ravb_stop_dma(ndev); 1145 error = ravb_stop_dma(ndev);
1145 if (error) { 1146 if (error) {
@@ -1170,7 +1171,8 @@ static int ravb_set_ringparam(struct net_device *ndev,
1170 ravb_emac_init(ndev); 1171 ravb_emac_init(ndev);
1171 1172
1172 /* Initialise PTP Clock driver */ 1173 /* Initialise PTP Clock driver */
1173 ravb_ptp_init(ndev, priv->pdev); 1174 if (priv->chip_id == RCAR_GEN2)
1175 ravb_ptp_init(ndev, priv->pdev);
1174 1176
1175 netif_device_attach(ndev); 1177 netif_device_attach(ndev);
1176 } 1178 }
@@ -1298,7 +1300,8 @@ static void ravb_tx_timeout_work(struct work_struct *work)
1298 netif_tx_stop_all_queues(ndev); 1300 netif_tx_stop_all_queues(ndev);
1299 1301
1300 /* Stop PTP Clock driver */ 1302 /* Stop PTP Clock driver */
1301 ravb_ptp_stop(ndev); 1303 if (priv->chip_id == RCAR_GEN2)
1304 ravb_ptp_stop(ndev);
1302 1305
1303 /* Wait for DMA stopping */ 1306 /* Wait for DMA stopping */
1304 ravb_stop_dma(ndev); 1307 ravb_stop_dma(ndev);
@@ -1311,7 +1314,8 @@ static void ravb_tx_timeout_work(struct work_struct *work)
1311 ravb_emac_init(ndev); 1314 ravb_emac_init(ndev);
1312 1315
1313 /* Initialise PTP Clock driver */ 1316 /* Initialise PTP Clock driver */
1314 ravb_ptp_init(ndev, priv->pdev); 1317 if (priv->chip_id == RCAR_GEN2)
1318 ravb_ptp_init(ndev, priv->pdev);
1315 1319
1316 netif_tx_start_all_queues(ndev); 1320 netif_tx_start_all_queues(ndev);
1317} 1321}
@@ -1718,7 +1722,6 @@ static int ravb_set_gti(struct net_device *ndev)
1718static int ravb_probe(struct platform_device *pdev) 1722static int ravb_probe(struct platform_device *pdev)
1719{ 1723{
1720 struct device_node *np = pdev->dev.of_node; 1724 struct device_node *np = pdev->dev.of_node;
1721 const struct of_device_id *match;
1722 struct ravb_private *priv; 1725 struct ravb_private *priv;
1723 enum ravb_chip_id chip_id; 1726 enum ravb_chip_id chip_id;
1724 struct net_device *ndev; 1727 struct net_device *ndev;
@@ -1750,8 +1753,7 @@ static int ravb_probe(struct platform_device *pdev)
1750 ndev->base_addr = res->start; 1753 ndev->base_addr = res->start;
1751 ndev->dma = -1; 1754 ndev->dma = -1;
1752 1755
1753 match = of_match_device(of_match_ptr(ravb_match_table), &pdev->dev); 1756 chip_id = (enum ravb_chip_id)of_device_get_match_data(&pdev->dev);
1754 chip_id = (enum ravb_chip_id)match->data;
1755 1757
1756 if (chip_id == RCAR_GEN3) 1758 if (chip_id == RCAR_GEN3)
1757 irq = platform_get_irq_byname(pdev, "ch22"); 1759 irq = platform_get_irq_byname(pdev, "ch22");
@@ -1814,10 +1816,6 @@ static int ravb_probe(struct platform_device *pdev)
1814 CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB, CCC); 1816 CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB, CCC);
1815 } 1817 }
1816 1818
1817 /* Set CSEL value */
1818 ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_CSEL) | CCC_CSEL_HPB,
1819 CCC);
1820
1821 /* Set GTI value */ 1819 /* Set GTI value */
1822 error = ravb_set_gti(ndev); 1820 error = ravb_set_gti(ndev);
1823 if (error) 1821 if (error)
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index dfa9e59c9442..738449992876 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -3061,15 +3061,11 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
3061 mdp->ether_link_active_low = pd->ether_link_active_low; 3061 mdp->ether_link_active_low = pd->ether_link_active_low;
3062 3062
3063 /* set cpu data */ 3063 /* set cpu data */
3064 if (id) { 3064 if (id)
3065 mdp->cd = (struct sh_eth_cpu_data *)id->driver_data; 3065 mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
3066 } else { 3066 else
3067 const struct of_device_id *match; 3067 mdp->cd = (struct sh_eth_cpu_data *)of_device_get_match_data(&pdev->dev);
3068 3068
3069 match = of_match_device(of_match_ptr(sh_eth_match_table),
3070 &pdev->dev);
3071 mdp->cd = (struct sh_eth_cpu_data *)match->data;
3072 }
3073 mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type); 3069 mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
3074 if (!mdp->reg_offset) { 3070 if (!mdp->reg_offset) {
3075 dev_err(&pdev->dev, "Unknown register type (%d)\n", 3071 dev_err(&pdev->dev, "Unknown register type (%d)\n",
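
Both the ravb and sh_eth probe paths above drop the of_match_device()/match->data pair in favour of of_device_get_match_data(), and ravb additionally keys its PTP start/stop on the chip generation. A sketch of the lookup; the my_* names and MY_GEN2/MY_GEN3 IDs are placeholders:

#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/types.h>

enum my_chip_id { MY_GEN2 = 1, MY_GEN3 };

static void my_ptp_init(struct platform_device *pdev) { }	/* placeholder */

static int my_probe(struct platform_device *pdev)
{
	/* One call replaces of_match_device() plus the ->data dereference,
	 * and never risks dereferencing a NULL match. */
	enum my_chip_id id =
		(enum my_chip_id)(uintptr_t)of_device_get_match_data(&pdev->dev);

	if (id == MY_GEN2)
		my_ptp_init(pdev);	/* gen3 handles PTP elsewhere */

	return 0;
}
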
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index a4ab71d43e4e..166a7fc87e2f 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -3531,12 +3531,14 @@ static void rocker_port_fdb_learn_work(struct work_struct *work)
3531 info.addr = lw->addr; 3531 info.addr = lw->addr;
3532 info.vid = lw->vid; 3532 info.vid = lw->vid;
3533 3533
3534 rtnl_lock();
3534 if (learned && removing) 3535 if (learned && removing)
3535 call_switchdev_notifiers(SWITCHDEV_FDB_DEL, 3536 call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
3536 lw->rocker_port->dev, &info.info); 3537 lw->rocker_port->dev, &info.info);
3537 else if (learned && !removing) 3538 else if (learned && !removing)
3538 call_switchdev_notifiers(SWITCHDEV_FDB_ADD, 3539 call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
3539 lw->rocker_port->dev, &info.info); 3540 lw->rocker_port->dev, &info.info);
3541 rtnl_unlock();
3540 3542
3541 rocker_port_kfree(lw->trans, work); 3543 rocker_port_kfree(lw->trans, work);
3542} 3544}
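
The rocker hunk wraps the deferred FDB notifier calls in rtnl_lock()/rtnl_unlock(): switchdev/netdev notifier chains expect RTNL to be held, and a workqueue item does not inherit it from whoever scheduled the work. Shape of the pattern, with the notifier call elided:

#include <linux/rtnetlink.h>
#include <linux/workqueue.h>

static void my_fdb_learn_work(struct work_struct *work)
{
	rtnl_lock();
	/* call_switchdev_notifiers(SWITCHDEV_FDB_ADD/DEL, dev, &info.info); */
	rtnl_unlock();
}
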
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 0e2fc1a844ab..db7db8ac4ca3 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2342,8 +2342,8 @@ static int smc_drv_probe(struct platform_device *pdev)
2342 } 2342 }
2343 2343
2344 ndev->irq = platform_get_irq(pdev, 0); 2344 ndev->irq = platform_get_irq(pdev, 0);
2345 if (ndev->irq <= 0) { 2345 if (ndev->irq < 0) {
2346 ret = -ENODEV; 2346 ret = ndev->irq;
2347 goto out_release_io; 2347 goto out_release_io;
2348 } 2348 }
2349 /* 2349 /*
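
The smc91x change propagates the negative value returned by platform_get_irq() instead of overwriting it with -ENODEV, so errors such as -EPROBE_DEFER reach the driver core intact. A minimal sketch; my_* names are placeholders:

#include <linux/platform_device.h>

static int my_setup_irq(int irq) { return 0; }	/* placeholder */

static int my_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* keep the original errno */

	return my_setup_irq(irq);
}
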
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 0faf16336035..efb54f356a67 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -199,21 +199,12 @@ int stmmac_mdio_register(struct net_device *ndev)
199 struct stmmac_priv *priv = netdev_priv(ndev); 199 struct stmmac_priv *priv = netdev_priv(ndev);
200 struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data; 200 struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
201 int addr, found; 201 int addr, found;
202 struct device_node *mdio_node = NULL; 202 struct device_node *mdio_node = priv->plat->mdio_node;
203 struct device_node *child_node = NULL;
204 203
205 if (!mdio_bus_data) 204 if (!mdio_bus_data)
206 return 0; 205 return 0;
207 206
208 if (IS_ENABLED(CONFIG_OF)) { 207 if (IS_ENABLED(CONFIG_OF)) {
209 for_each_child_of_node(priv->device->of_node, child_node) {
210 if (of_device_is_compatible(child_node,
211 "snps,dwmac-mdio")) {
212 mdio_node = child_node;
213 break;
214 }
215 }
216
217 if (mdio_node) { 208 if (mdio_node) {
218 netdev_dbg(ndev, "FOUND MDIO subnode\n"); 209 netdev_dbg(ndev, "FOUND MDIO subnode\n");
219 } else { 210 } else {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 6a52fa18cbf2..4514ba73d961 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -110,6 +110,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
110 struct device_node *np = pdev->dev.of_node; 110 struct device_node *np = pdev->dev.of_node;
111 struct plat_stmmacenet_data *plat; 111 struct plat_stmmacenet_data *plat;
112 struct stmmac_dma_cfg *dma_cfg; 112 struct stmmac_dma_cfg *dma_cfg;
113 struct device_node *child_node = NULL;
113 114
114 plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL); 115 plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
115 if (!plat) 116 if (!plat)
@@ -140,13 +141,19 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
140 plat->phy_node = of_node_get(np); 141 plat->phy_node = of_node_get(np);
141 } 142 }
142 143
144 for_each_child_of_node(np, child_node)
145 if (of_device_is_compatible(child_node, "snps,dwmac-mdio")) {
146 plat->mdio_node = child_node;
147 break;
148 }
149
143 /* "snps,phy-addr" is not a standard property. Mark it as deprecated 150 /* "snps,phy-addr" is not a standard property. Mark it as deprecated
144 * and warn of its use. Remove this when phy node support is added. 151 * and warn of its use. Remove this when phy node support is added.
145 */ 152 */
146 if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0) 153 if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0)
147 dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n"); 154 dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");
148 155
149 if ((plat->phy_node && !of_phy_is_fixed_link(np)) || plat->phy_bus_name) 156 if ((plat->phy_node && !of_phy_is_fixed_link(np)) || !plat->mdio_node)
150 plat->mdio_bus_data = NULL; 157 plat->mdio_bus_data = NULL;
151 else 158 else
152 plat->mdio_bus_data = 159 plat->mdio_bus_data =
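
The stmmac hunks move the search for an "snps,dwmac-mdio" child node out of stmmac_mdio_register() and into the DT-parsing code, which stores it in plat->mdio_node for later use. A sketch of the lookup, assuming the node reference is kept for the lifetime of the platform data:

#include <linux/of.h>

static struct device_node *my_find_mdio_node(struct device_node *np)
{
	struct device_node *child;

	for_each_child_of_node(np, child)
		if (of_device_is_compatible(child, "snps,dwmac-mdio"))
			return child;	/* the loop took a reference; keep it */

	return NULL;
}
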
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index cc106d892e29..23fa29877f5b 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -389,17 +389,27 @@ static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc)
389 if (vio_version_after_eq(&port->vio, 1, 8)) { 389 if (vio_version_after_eq(&port->vio, 1, 8)) {
390 struct vio_net_dext *dext = vio_net_ext(desc); 390 struct vio_net_dext *dext = vio_net_ext(desc);
391 391
392 skb_reset_network_header(skb);
393
392 if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM) { 394 if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM) {
393 if (skb->protocol == ETH_P_IP) { 395 if (skb->protocol == ETH_P_IP) {
394 struct iphdr *iph = (struct iphdr *)skb->data; 396 struct iphdr *iph = ip_hdr(skb);
395 397
396 iph->check = 0; 398 iph->check = 0;
397 ip_send_check(iph); 399 ip_send_check(iph);
398 } 400 }
399 } 401 }
400 if ((dext->flags & VNET_PKT_HCK_FULLCKSUM) && 402 if ((dext->flags & VNET_PKT_HCK_FULLCKSUM) &&
401 skb->ip_summed == CHECKSUM_NONE) 403 skb->ip_summed == CHECKSUM_NONE) {
402 vnet_fullcsum(skb); 404 if (skb->protocol == htons(ETH_P_IP)) {
405 struct iphdr *iph = ip_hdr(skb);
406 int ihl = iph->ihl * 4;
407
408 skb_reset_transport_header(skb);
409 skb_set_transport_header(skb, ihl);
410 vnet_fullcsum(skb);
411 }
412 }
403 if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM_OK) { 413 if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM_OK) {
404 skb->ip_summed = CHECKSUM_PARTIAL; 414 skb->ip_summed = CHECKSUM_PARTIAL;
405 skb->csum_level = 0; 415 skb->csum_level = 0;
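
The sunvnet receive path now sets the network and transport header offsets before recomputing checksums, because ip_hdr() and the full-checksum helper work off those offsets rather than off skb->data directly. A sketch, assuming skb->data points at the IP header on entry:

#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/skbuff.h>

static void my_prepare_csum_headers(struct sk_buff *skb)
{
	skb_reset_network_header(skb);	/* IP header starts at skb->data */

	if (skb->protocol == htons(ETH_P_IP))
		skb_set_transport_header(skb, ip_hdr(skb)->ihl * 4);
}
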
diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
index 70814b7386b3..af11ed1e0bcc 100644
--- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c
+++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
@@ -426,7 +426,7 @@
426#define DWC_MMC_RXOCTETCOUNT_GB 0x0784 426#define DWC_MMC_RXOCTETCOUNT_GB 0x0784
427#define DWC_MMC_RXPACKETCOUNT_GB 0x0780 427#define DWC_MMC_RXPACKETCOUNT_GB 0x0780
428 428
429static int debug = 3; 429static int debug = -1;
430module_param(debug, int, 0); 430module_param(debug, int, 0);
431MODULE_PARM_DESC(debug, "DWC_eth_qos debug level (0=none,...,16=all)"); 431MODULE_PARM_DESC(debug, "DWC_eth_qos debug level (0=none,...,16=all)");
432 432
@@ -650,6 +650,11 @@ struct net_local {
650 u32 mmc_tx_counters_mask; 650 u32 mmc_tx_counters_mask;
651 651
652 struct dwceqos_flowcontrol flowcontrol; 652 struct dwceqos_flowcontrol flowcontrol;
653
654 /* Tracks the intermediate state of phy started but hardware
655 * init not finished yet.
656 */
657 bool phy_defer;
653}; 658};
654 659
655static void dwceqos_read_mmc_counters(struct net_local *lp, u32 rx_mask, 660static void dwceqos_read_mmc_counters(struct net_local *lp, u32 rx_mask,
@@ -901,6 +906,9 @@ static void dwceqos_adjust_link(struct net_device *ndev)
901 struct phy_device *phydev = lp->phy_dev; 906 struct phy_device *phydev = lp->phy_dev;
902 int status_change = 0; 907 int status_change = 0;
903 908
909 if (lp->phy_defer)
910 return;
911
904 if (phydev->link) { 912 if (phydev->link) {
905 if ((lp->speed != phydev->speed) || 913 if ((lp->speed != phydev->speed) ||
906 (lp->duplex != phydev->duplex)) { 914 (lp->duplex != phydev->duplex)) {
@@ -1113,7 +1121,7 @@ static int dwceqos_descriptor_init(struct net_local *lp)
1113 /* Allocate DMA descriptors */ 1121 /* Allocate DMA descriptors */
1114 size = DWCEQOS_RX_DCNT * sizeof(struct dwceqos_dma_desc); 1122 size = DWCEQOS_RX_DCNT * sizeof(struct dwceqos_dma_desc);
1115 lp->rx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size, 1123 lp->rx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size,
1116 &lp->rx_descs_addr, 0); 1124 &lp->rx_descs_addr, GFP_KERNEL);
1117 if (!lp->rx_descs) 1125 if (!lp->rx_descs)
1118 goto err_out; 1126 goto err_out;
1119 lp->rx_descs_tail_addr = lp->rx_descs_addr + 1127 lp->rx_descs_tail_addr = lp->rx_descs_addr +
@@ -1121,7 +1129,7 @@ static int dwceqos_descriptor_init(struct net_local *lp)
1121 1129
1122 size = DWCEQOS_TX_DCNT * sizeof(struct dwceqos_dma_desc); 1130 size = DWCEQOS_TX_DCNT * sizeof(struct dwceqos_dma_desc);
1123 lp->tx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size, 1131 lp->tx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size,
1124 &lp->tx_descs_addr, 0); 1132 &lp->tx_descs_addr, GFP_KERNEL);
1125 if (!lp->tx_descs) 1133 if (!lp->tx_descs)
1126 goto err_out; 1134 goto err_out;
1127 lp->tx_descs_tail_addr = lp->tx_descs_addr + 1135 lp->tx_descs_tail_addr = lp->tx_descs_addr +
@@ -1635,6 +1643,12 @@ static void dwceqos_init_hw(struct net_local *lp)
1635 regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG); 1643 regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG);
1636 dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, 1644 dwceqos_write(lp, REG_DWCEQOS_MAC_CFG,
1637 regval | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE); 1645 regval | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE);
1646
1647 lp->phy_defer = false;
1648 mutex_lock(&lp->phy_dev->lock);
1649 phy_read_status(lp->phy_dev);
1650 dwceqos_adjust_link(lp->ndev);
1651 mutex_unlock(&lp->phy_dev->lock);
1638} 1652}
1639 1653
1640static void dwceqos_tx_reclaim(unsigned long data) 1654static void dwceqos_tx_reclaim(unsigned long data)
@@ -1880,9 +1894,13 @@ static int dwceqos_open(struct net_device *ndev)
1880 } 1894 }
1881 netdev_reset_queue(ndev); 1895 netdev_reset_queue(ndev);
1882 1896
1883 napi_enable(&lp->napi); 1897 /* The dwceqos reset state machine requires all phy clocks to complete,
1898 * hence the unusual init order with phy_start first.
1899 */
1900 lp->phy_defer = true;
1884 phy_start(lp->phy_dev); 1901 phy_start(lp->phy_dev);
1885 dwceqos_init_hw(lp); 1902 dwceqos_init_hw(lp);
1903 napi_enable(&lp->napi);
1886 1904
1887 netif_start_queue(ndev); 1905 netif_start_queue(ndev);
1888 tasklet_enable(&lp->tx_bdreclaim_tasklet); 1906 tasklet_enable(&lp->tx_bdreclaim_tasklet);
@@ -1915,18 +1933,19 @@ static int dwceqos_stop(struct net_device *ndev)
1915{ 1933{
1916 struct net_local *lp = netdev_priv(ndev); 1934 struct net_local *lp = netdev_priv(ndev);
1917 1935
1918 phy_stop(lp->phy_dev);
1919
1920 tasklet_disable(&lp->tx_bdreclaim_tasklet); 1936 tasklet_disable(&lp->tx_bdreclaim_tasklet);
1921 netif_stop_queue(ndev);
1922 napi_disable(&lp->napi); 1937 napi_disable(&lp->napi);
1923 1938
1924 dwceqos_drain_dma(lp); 1939 /* Stop all tx before we drain the tx dma. */
1940 netif_tx_lock_bh(lp->ndev);
1941 netif_stop_queue(ndev);
1942 netif_tx_unlock_bh(lp->ndev);
1925 1943
1926 netif_tx_lock(lp->ndev); 1944 dwceqos_drain_dma(lp);
1927 dwceqos_reset_hw(lp); 1945 dwceqos_reset_hw(lp);
1946 phy_stop(lp->phy_dev);
1947
1928 dwceqos_descriptor_free(lp); 1948 dwceqos_descriptor_free(lp);
1929 netif_tx_unlock(lp->ndev);
1930 1949
1931 return 0; 1950 return 0;
1932} 1951}
@@ -2178,12 +2197,10 @@ static int dwceqos_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2178 ((trans.initial_descriptor + trans.nr_descriptors) % 2197 ((trans.initial_descriptor + trans.nr_descriptors) %
2179 DWCEQOS_TX_DCNT)); 2198 DWCEQOS_TX_DCNT));
2180 2199
2181 dwceqos_tx_finalize(skb, lp, &trans);
2182
2183 netdev_sent_queue(ndev, skb->len);
2184
2185 spin_lock_bh(&lp->tx_lock); 2200 spin_lock_bh(&lp->tx_lock);
2186 lp->tx_free -= trans.nr_descriptors; 2201 lp->tx_free -= trans.nr_descriptors;
2202 dwceqos_tx_finalize(skb, lp, &trans);
2203 netdev_sent_queue(ndev, skb->len);
2187 spin_unlock_bh(&lp->tx_lock); 2204 spin_unlock_bh(&lp->tx_lock);
2188 2205
2189 ndev->trans_start = jiffies; 2206 ndev->trans_start = jiffies;
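
Among the dwc_eth_qos fixes, the descriptor allocation now passes GFP_KERNEL instead of 0 as the gfp_t argument of dma_alloc_coherent(); the driver also defers link handling until the MAC is initialised and reorders the stop path around the tx lock. Sketch of the allocation fix:

#include <linux/dma-mapping.h>

static void *my_alloc_descs(struct device *dev, size_t size, dma_addr_t *dma)
{
	/* The last argument is a gfp_t allocation context; 0 is not a valid
	 * one, so spell out GFP_KERNEL (or GFP_ATOMIC in atomic context). */
	return dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
}
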
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c
index e9cc61e1ec74..c3e85acfdc70 100644
--- a/drivers/net/ethernet/ti/cpsw-phy-sel.c
+++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c
@@ -63,8 +63,12 @@ static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv,
63 mode = AM33XX_GMII_SEL_MODE_RGMII; 63 mode = AM33XX_GMII_SEL_MODE_RGMII;
64 break; 64 break;
65 65
66 case PHY_INTERFACE_MODE_MII:
67 default: 66 default:
67 dev_warn(priv->dev,
68 "Unsupported PHY mode: \"%s\". Defaulting to MII.\n",
69 phy_modes(phy_mode));
70 /* fallthrough */
71 case PHY_INTERFACE_MODE_MII:
68 mode = AM33XX_GMII_SEL_MODE_MII; 72 mode = AM33XX_GMII_SEL_MODE_MII;
69 break; 73 break;
70 }; 74 };
@@ -106,8 +110,12 @@ static void cpsw_gmii_sel_dra7xx(struct cpsw_phy_sel_priv *priv,
106 mode = AM33XX_GMII_SEL_MODE_RGMII; 110 mode = AM33XX_GMII_SEL_MODE_RGMII;
107 break; 111 break;
108 112
109 case PHY_INTERFACE_MODE_MII:
110 default: 113 default:
114 dev_warn(priv->dev,
115 "Unsupported PHY mode: \"%s\". Defaulting to MII.\n",
116 phy_modes(phy_mode));
117 /* fallthrough */
118 case PHY_INTERFACE_MODE_MII:
111 mode = AM33XX_GMII_SEL_MODE_MII; 119 mode = AM33XX_GMII_SEL_MODE_MII;
112 break; 120 break;
113 }; 121 };
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 657b65bf5cac..18bf3a8fdc50 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -82,7 +82,7 @@ struct cpdma_desc {
82 82
83struct cpdma_desc_pool { 83struct cpdma_desc_pool {
84 phys_addr_t phys; 84 phys_addr_t phys;
85 u32 hw_addr; 85 dma_addr_t hw_addr;
86 void __iomem *iomap; /* ioremap map */ 86 void __iomem *iomap; /* ioremap map */
87 void *cpumap; /* dma_alloc map */ 87 void *cpumap; /* dma_alloc map */
88 int desc_size, mem_size; 88 int desc_size, mem_size;
@@ -152,7 +152,7 @@ struct cpdma_chan {
152 * abstract out these details 152 * abstract out these details
153 */ 153 */
154static struct cpdma_desc_pool * 154static struct cpdma_desc_pool *
155cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr, 155cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr,
156 int size, int align) 156 int size, int align)
157{ 157{
158 int bitmap_size; 158 int bitmap_size;
@@ -176,13 +176,13 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
176 176
177 if (phys) { 177 if (phys) {
178 pool->phys = phys; 178 pool->phys = phys;
179 pool->iomap = ioremap(phys, size); 179 pool->iomap = ioremap(phys, size); /* should be memremap? */
180 pool->hw_addr = hw_addr; 180 pool->hw_addr = hw_addr;
181 } else { 181 } else {
182 pool->cpumap = dma_alloc_coherent(dev, size, &pool->phys, 182 pool->cpumap = dma_alloc_coherent(dev, size, &pool->hw_addr,
183 GFP_KERNEL); 183 GFP_KERNEL);
184 pool->iomap = pool->cpumap; 184 pool->iomap = (void __iomem __force *)pool->cpumap;
185 pool->hw_addr = pool->phys; 185 pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */
186 } 186 }
187 187
188 if (pool->iomap) 188 if (pool->iomap)
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index c61d66d38634..029841f98c32 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -117,21 +117,17 @@ static void get_pkt_info(dma_addr_t *buff, u32 *buff_len, dma_addr_t *ndesc,
117 *ndesc = le32_to_cpu(desc->next_desc); 117 *ndesc = le32_to_cpu(desc->next_desc);
118} 118}
119 119
120static void get_pad_info(u32 *pad0, u32 *pad1, u32 *pad2, struct knav_dma_desc *desc) 120static u32 get_sw_data(int index, struct knav_dma_desc *desc)
121{ 121{
122 *pad0 = le32_to_cpu(desc->pad[0]); 122 /* No Endian conversion needed as this data is untouched by hw */
123 *pad1 = le32_to_cpu(desc->pad[1]); 123 return desc->sw_data[index];
124 *pad2 = le32_to_cpu(desc->pad[2]);
125} 124}
126 125
127static void get_pad_ptr(void **padptr, struct knav_dma_desc *desc) 126/* use these macros to get sw data */
128{ 127#define GET_SW_DATA0(desc) get_sw_data(0, desc)
129 u64 pad64; 128#define GET_SW_DATA1(desc) get_sw_data(1, desc)
130 129#define GET_SW_DATA2(desc) get_sw_data(2, desc)
131 pad64 = le32_to_cpu(desc->pad[0]) + 130#define GET_SW_DATA3(desc) get_sw_data(3, desc)
132 ((u64)le32_to_cpu(desc->pad[1]) << 32);
133 *padptr = (void *)(uintptr_t)pad64;
134}
135 131
136static void get_org_pkt_info(dma_addr_t *buff, u32 *buff_len, 132static void get_org_pkt_info(dma_addr_t *buff, u32 *buff_len,
137 struct knav_dma_desc *desc) 133 struct knav_dma_desc *desc)
@@ -163,13 +159,18 @@ static void set_desc_info(u32 desc_info, u32 pkt_info,
163 desc->packet_info = cpu_to_le32(pkt_info); 159 desc->packet_info = cpu_to_le32(pkt_info);
164} 160}
165 161
166static void set_pad_info(u32 pad0, u32 pad1, u32 pad2, struct knav_dma_desc *desc) 162static void set_sw_data(int index, u32 data, struct knav_dma_desc *desc)
167{ 163{
168 desc->pad[0] = cpu_to_le32(pad0); 164 /* No Endian conversion needed as this data is untouched by hw */
169 desc->pad[1] = cpu_to_le32(pad1); 165 desc->sw_data[index] = data;
170 desc->pad[2] = cpu_to_le32(pad1);
171} 166}
172 167
168/* use these macros to set sw data */
169#define SET_SW_DATA0(data, desc) set_sw_data(0, data, desc)
170#define SET_SW_DATA1(data, desc) set_sw_data(1, data, desc)
171#define SET_SW_DATA2(data, desc) set_sw_data(2, data, desc)
172#define SET_SW_DATA3(data, desc) set_sw_data(3, data, desc)
173
173static void set_org_pkt_info(dma_addr_t buff, u32 buff_len, 174static void set_org_pkt_info(dma_addr_t buff, u32 buff_len,
174 struct knav_dma_desc *desc) 175 struct knav_dma_desc *desc)
175{ 176{
@@ -581,7 +582,6 @@ static void netcp_free_rx_desc_chain(struct netcp_intf *netcp,
581 dma_addr_t dma_desc, dma_buf; 582 dma_addr_t dma_desc, dma_buf;
582 unsigned int buf_len, dma_sz = sizeof(*ndesc); 583 unsigned int buf_len, dma_sz = sizeof(*ndesc);
583 void *buf_ptr; 584 void *buf_ptr;
584 u32 pad[2];
585 u32 tmp; 585 u32 tmp;
586 586
587 get_words(&dma_desc, 1, &desc->next_desc); 587 get_words(&dma_desc, 1, &desc->next_desc);
@@ -593,14 +593,20 @@ static void netcp_free_rx_desc_chain(struct netcp_intf *netcp,
593 break; 593 break;
594 } 594 }
595 get_pkt_info(&dma_buf, &tmp, &dma_desc, ndesc); 595 get_pkt_info(&dma_buf, &tmp, &dma_desc, ndesc);
596 get_pad_ptr(&buf_ptr, ndesc); 596 /* warning!!!! We are retrieving the virtual ptr in the sw_data
597 * field as a 32bit value. Will not work on 64bit machines
598 */
599 buf_ptr = (void *)GET_SW_DATA0(ndesc);
600 buf_len = (int)GET_SW_DATA1(desc);
597 dma_unmap_page(netcp->dev, dma_buf, PAGE_SIZE, DMA_FROM_DEVICE); 601 dma_unmap_page(netcp->dev, dma_buf, PAGE_SIZE, DMA_FROM_DEVICE);
598 __free_page(buf_ptr); 602 __free_page(buf_ptr);
599 knav_pool_desc_put(netcp->rx_pool, desc); 603 knav_pool_desc_put(netcp->rx_pool, desc);
600 } 604 }
601 605 /* warning!!!! We are retrieving the virtual ptr in the sw_data
602 get_pad_info(&pad[0], &pad[1], &buf_len, desc); 606 * field as a 32bit value. Will not work on 64bit machines
603 buf_ptr = (void *)(uintptr_t)(pad[0] + ((u64)pad[1] << 32)); 607 */
608 buf_ptr = (void *)GET_SW_DATA0(desc);
609 buf_len = (int)GET_SW_DATA1(desc);
604 610
605 if (buf_ptr) 611 if (buf_ptr)
606 netcp_frag_free(buf_len <= PAGE_SIZE, buf_ptr); 612 netcp_frag_free(buf_len <= PAGE_SIZE, buf_ptr);
@@ -639,7 +645,6 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
639 dma_addr_t dma_desc, dma_buff; 645 dma_addr_t dma_desc, dma_buff;
640 struct netcp_packet p_info; 646 struct netcp_packet p_info;
641 struct sk_buff *skb; 647 struct sk_buff *skb;
642 u32 pad[2];
643 void *org_buf_ptr; 648 void *org_buf_ptr;
644 649
645 dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz); 650 dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz);
@@ -653,8 +658,11 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
653 } 658 }
654 659
655 get_pkt_info(&dma_buff, &buf_len, &dma_desc, desc); 660 get_pkt_info(&dma_buff, &buf_len, &dma_desc, desc);
656 get_pad_info(&pad[0], &pad[1], &org_buf_len, desc); 661 /* warning!!!! We are retrieving the virtual ptr in the sw_data
657 org_buf_ptr = (void *)(uintptr_t)(pad[0] + ((u64)pad[1] << 32)); 662 * field as a 32bit value. Will not work on 64bit machines
663 */
664 org_buf_ptr = (void *)GET_SW_DATA0(desc);
665 org_buf_len = (int)GET_SW_DATA1(desc);
658 666
659 if (unlikely(!org_buf_ptr)) { 667 if (unlikely(!org_buf_ptr)) {
660 dev_err(netcp->ndev_dev, "NULL bufptr in desc\n"); 668 dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
@@ -679,7 +687,6 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
679 /* Fill in the page fragment list */ 687 /* Fill in the page fragment list */
680 while (dma_desc) { 688 while (dma_desc) {
681 struct page *page; 689 struct page *page;
682 void *ptr;
683 690
684 ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz); 691 ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
685 if (unlikely(!ndesc)) { 692 if (unlikely(!ndesc)) {
@@ -688,8 +695,10 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
688 } 695 }
689 696
690 get_pkt_info(&dma_buff, &buf_len, &dma_desc, ndesc); 697 get_pkt_info(&dma_buff, &buf_len, &dma_desc, ndesc);
691 get_pad_ptr(&ptr, ndesc); 698 /* warning!!!! We are retrieving the virtual ptr in the sw_data
692 page = ptr; 699 * field as a 32bit value. Will not work on 64bit machines
700 */
701 page = (struct page *)GET_SW_DATA0(desc);
693 702
694 if (likely(dma_buff && buf_len && page)) { 703 if (likely(dma_buff && buf_len && page)) {
695 dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE, 704 dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE,
@@ -777,7 +786,10 @@ static void netcp_free_rx_buf(struct netcp_intf *netcp, int fdq)
777 } 786 }
778 787
779 get_org_pkt_info(&dma, &buf_len, desc); 788 get_org_pkt_info(&dma, &buf_len, desc);
780 get_pad_ptr(&buf_ptr, desc); 789 /* warning!!!! We are retrieving the virtual ptr in the sw_data
790 * field as a 32bit value. Will not work on 64bit machines
791 */
792 buf_ptr = (void *)GET_SW_DATA0(desc);
781 793
782 if (unlikely(!dma)) { 794 if (unlikely(!dma)) {
783 dev_err(netcp->ndev_dev, "NULL orig_buff in desc\n"); 795 dev_err(netcp->ndev_dev, "NULL orig_buff in desc\n");
@@ -829,7 +841,7 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
829 struct page *page; 841 struct page *page;
830 dma_addr_t dma; 842 dma_addr_t dma;
831 void *bufptr; 843 void *bufptr;
832 u32 pad[3]; 844 u32 sw_data[2];
833 845
834 /* Allocate descriptor */ 846 /* Allocate descriptor */
835 hwdesc = knav_pool_desc_get(netcp->rx_pool); 847 hwdesc = knav_pool_desc_get(netcp->rx_pool);
@@ -846,7 +858,7 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
846 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 858 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
847 859
848 bufptr = netdev_alloc_frag(primary_buf_len); 860 bufptr = netdev_alloc_frag(primary_buf_len);
849 pad[2] = primary_buf_len; 861 sw_data[1] = primary_buf_len;
850 862
851 if (unlikely(!bufptr)) { 863 if (unlikely(!bufptr)) {
852 dev_warn_ratelimited(netcp->ndev_dev, 864 dev_warn_ratelimited(netcp->ndev_dev,
@@ -858,9 +870,10 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
858 if (unlikely(dma_mapping_error(netcp->dev, dma))) 870 if (unlikely(dma_mapping_error(netcp->dev, dma)))
859 goto fail; 871 goto fail;
860 872
861 pad[0] = lower_32_bits((uintptr_t)bufptr); 873 /* warning!!!! We are saving the virtual ptr in the sw_data
862 pad[1] = upper_32_bits((uintptr_t)bufptr); 874 * field as a 32bit value. Will not work on 64bit machines
863 875 */
876 sw_data[0] = (u32)bufptr;
864 } else { 877 } else {
865 /* Allocate a secondary receive queue entry */ 878 /* Allocate a secondary receive queue entry */
866 page = alloc_page(GFP_ATOMIC | GFP_DMA | __GFP_COLD); 879 page = alloc_page(GFP_ATOMIC | GFP_DMA | __GFP_COLD);
@@ -870,9 +883,11 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
870 } 883 }
871 buf_len = PAGE_SIZE; 884 buf_len = PAGE_SIZE;
872 dma = dma_map_page(netcp->dev, page, 0, buf_len, DMA_TO_DEVICE); 885 dma = dma_map_page(netcp->dev, page, 0, buf_len, DMA_TO_DEVICE);
873 pad[0] = lower_32_bits(dma); 886 /* warning!!!! We are saving the virtual ptr in the sw_data
874 pad[1] = upper_32_bits(dma); 887 * field as a 32bit value. Will not work on 64bit machines
875 pad[2] = 0; 888 */
889 sw_data[0] = (u32)page;
890 sw_data[1] = 0;
876 } 891 }
877 892
878 desc_info = KNAV_DMA_DESC_PS_INFO_IN_DESC; 893 desc_info = KNAV_DMA_DESC_PS_INFO_IN_DESC;
@@ -882,7 +897,8 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
882 pkt_info |= (netcp->rx_queue_id & KNAV_DMA_DESC_RETQ_MASK) << 897 pkt_info |= (netcp->rx_queue_id & KNAV_DMA_DESC_RETQ_MASK) <<
883 KNAV_DMA_DESC_RETQ_SHIFT; 898 KNAV_DMA_DESC_RETQ_SHIFT;
884 set_org_pkt_info(dma, buf_len, hwdesc); 899 set_org_pkt_info(dma, buf_len, hwdesc);
885 set_pad_info(pad[0], pad[1], pad[2], hwdesc); 900 SET_SW_DATA0(sw_data[0], hwdesc);
901 SET_SW_DATA1(sw_data[1], hwdesc);
886 set_desc_info(desc_info, pkt_info, hwdesc); 902 set_desc_info(desc_info, pkt_info, hwdesc);
887 903
888 /* Push to FDQs */ 904 /* Push to FDQs */
@@ -971,7 +987,6 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
971 unsigned int budget) 987 unsigned int budget)
972{ 988{
973 struct knav_dma_desc *desc; 989 struct knav_dma_desc *desc;
974 void *ptr;
975 struct sk_buff *skb; 990 struct sk_buff *skb;
976 unsigned int dma_sz; 991 unsigned int dma_sz;
977 dma_addr_t dma; 992 dma_addr_t dma;
@@ -988,8 +1003,10 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
988 continue; 1003 continue;
989 } 1004 }
990 1005
991 get_pad_ptr(&ptr, desc); 1006 /* warning!!!! We are retrieving the virtual ptr in the sw_data
992 skb = ptr; 1007 * field as a 32bit value. Will not work on 64bit machines
1008 */
1009 skb = (struct sk_buff *)GET_SW_DATA0(desc);
993 netcp_free_tx_desc_chain(netcp, desc, dma_sz); 1010 netcp_free_tx_desc_chain(netcp, desc, dma_sz);
994 if (!skb) { 1011 if (!skb) {
995 dev_err(netcp->ndev_dev, "No skb in Tx desc\n"); 1012 dev_err(netcp->ndev_dev, "No skb in Tx desc\n");
@@ -1194,10 +1211,10 @@ static int netcp_tx_submit_skb(struct netcp_intf *netcp,
1194 } 1211 }
1195 1212
1196 set_words(&tmp, 1, &desc->packet_info); 1213 set_words(&tmp, 1, &desc->packet_info);
1197 tmp = lower_32_bits((uintptr_t)&skb); 1214 /* warning!!!! We are saving the virtual ptr in the sw_data
1198 set_words(&tmp, 1, &desc->pad[0]); 1215 * field as a 32bit value. Will not work on 64bit machines
1199 tmp = upper_32_bits((uintptr_t)&skb); 1216 */
1200 set_words(&tmp, 1, &desc->pad[1]); 1217 SET_SW_DATA0((u32)skb, desc);
1201 1218
1202 if (tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO) { 1219 if (tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO) {
1203 tmp = tx_pipe->switch_to_port; 1220 tmp = tx_pipe->switch_to_port;
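
The netcp hunks rename the descriptor pad[] words to sw_data[] and funnel access through small accessors, making it explicit that these words are software-only scratch space the hardware never byte-swaps; the in-code warnings note that stashing a virtual pointer in one 32-bit word still will not work on 64-bit machines. A sketch of the accessor idea; the my_* types are placeholders:

#include <linux/types.h>

struct my_dma_desc {
	__le32 packet_info;	/* hardware-owned: little-endian on the wire */
	u32    sw_data[4];	/* software-owned: CPU order, no conversion */
};

static u32 my_get_sw_data(const struct my_dma_desc *desc, int index)
{
	return desc->sw_data[index];
}

static void my_set_sw_data(struct my_dma_desc *desc, int index, u32 data)
{
	desc->sw_data[index] = data;
}
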
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
index 7f975a2c8990..b0de8ecd7fe8 100644
--- a/drivers/net/fddi/defxx.c
+++ b/drivers/net/fddi/defxx.c
@@ -533,8 +533,8 @@ static int dfx_register(struct device *bdev)
533 const char *print_name = dev_name(bdev); 533 const char *print_name = dev_name(bdev);
534 struct net_device *dev; 534 struct net_device *dev;
535 DFX_board_t *bp; /* board pointer */ 535 DFX_board_t *bp; /* board pointer */
536 resource_size_t bar_start[3]; /* pointers to ports */ 536 resource_size_t bar_start[3] = {0}; /* pointers to ports */
537 resource_size_t bar_len[3]; /* resource length */ 537 resource_size_t bar_len[3] = {0}; /* resource length */
538 int alloc_size; /* total buffer size used */ 538 int alloc_size; /* total buffer size used */
539 struct resource *region; 539 struct resource *region;
540 int err = 0; 540 int err = 0;
@@ -3697,8 +3697,8 @@ static void dfx_unregister(struct device *bdev)
3697 int dfx_bus_pci = dev_is_pci(bdev); 3697 int dfx_bus_pci = dev_is_pci(bdev);
3698 int dfx_bus_tc = DFX_BUS_TC(bdev); 3698 int dfx_bus_tc = DFX_BUS_TC(bdev);
3699 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc; 3699 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
3700 resource_size_t bar_start[3]; /* pointers to ports */ 3700 resource_size_t bar_start[3] = {0}; /* pointers to ports */
3701 resource_size_t bar_len[3]; /* resource lengths */ 3701 resource_size_t bar_len[3] = {0}; /* resource lengths */
3702 int alloc_size; /* total buffer size used */ 3702 int alloc_size; /* total buffer size used */
3703 3703
3704 unregister_netdev(dev); 3704 unregister_netdev(dev);
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 7456569f53c1..0bf7edd99573 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -980,9 +980,9 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
980 opts = ip_tunnel_info_opts(info); 980 opts = ip_tunnel_info_opts(info);
981 981
982 if (key->tun_flags & TUNNEL_CSUM) 982 if (key->tun_flags & TUNNEL_CSUM)
983 flags |= GENEVE_F_UDP_CSUM; 983 flags &= ~GENEVE_F_UDP_ZERO_CSUM6_TX;
984 else 984 else
985 flags &= ~GENEVE_F_UDP_CSUM; 985 flags |= GENEVE_F_UDP_ZERO_CSUM6_TX;
986 986
987 err = geneve6_build_skb(dst, skb, key->tun_flags, vni, 987 err = geneve6_build_skb(dst, skb, key->tun_flags, vni,
988 info->options_len, opts, 988 info->options_len, opts,
@@ -1039,6 +1039,34 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
1039 return geneve_xmit_skb(skb, dev, info); 1039 return geneve_xmit_skb(skb, dev, info);
1040} 1040}
1041 1041
1042static int __geneve_change_mtu(struct net_device *dev, int new_mtu, bool strict)
1043{
1044 /* The max_mtu calculation does not take account of GENEVE
1045 * options, to avoid excluding potentially valid
1046 * configurations.
1047 */
1048 int max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - sizeof(struct iphdr)
1049 - dev->hard_header_len;
1050
1051 if (new_mtu < 68)
1052 return -EINVAL;
1053
1054 if (new_mtu > max_mtu) {
1055 if (strict)
1056 return -EINVAL;
1057
1058 new_mtu = max_mtu;
1059 }
1060
1061 dev->mtu = new_mtu;
1062 return 0;
1063}
1064
1065static int geneve_change_mtu(struct net_device *dev, int new_mtu)
1066{
1067 return __geneve_change_mtu(dev, new_mtu, true);
1068}
1069
1042static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) 1070static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
1043{ 1071{
1044 struct ip_tunnel_info *info = skb_tunnel_info(skb); 1072 struct ip_tunnel_info *info = skb_tunnel_info(skb);
@@ -1083,7 +1111,7 @@ static const struct net_device_ops geneve_netdev_ops = {
1083 .ndo_stop = geneve_stop, 1111 .ndo_stop = geneve_stop,
1084 .ndo_start_xmit = geneve_xmit, 1112 .ndo_start_xmit = geneve_xmit,
1085 .ndo_get_stats64 = ip_tunnel_get_stats64, 1113 .ndo_get_stats64 = ip_tunnel_get_stats64,
1086 .ndo_change_mtu = eth_change_mtu, 1114 .ndo_change_mtu = geneve_change_mtu,
1087 .ndo_validate_addr = eth_validate_addr, 1115 .ndo_validate_addr = eth_validate_addr,
1088 .ndo_set_mac_address = eth_mac_addr, 1116 .ndo_set_mac_address = eth_mac_addr,
1089 .ndo_fill_metadata_dst = geneve_fill_metadata_dst, 1117 .ndo_fill_metadata_dst = geneve_fill_metadata_dst,
@@ -1150,6 +1178,7 @@ static void geneve_setup(struct net_device *dev)
1150 dev->hw_features |= NETIF_F_GSO_SOFTWARE; 1178 dev->hw_features |= NETIF_F_GSO_SOFTWARE;
1151 1179
1152 netif_keep_dst(dev); 1180 netif_keep_dst(dev);
1181 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1153 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE; 1182 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
1154 eth_hw_addr_random(dev); 1183 eth_hw_addr_random(dev);
1155} 1184}
@@ -1441,12 +1470,23 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
1441 return dev; 1470 return dev;
1442 1471
1443 err = geneve_configure(net, dev, &geneve_remote_unspec, 1472 err = geneve_configure(net, dev, &geneve_remote_unspec,
1444 0, 0, 0, htons(dst_port), true, 0); 1473 0, 0, 0, htons(dst_port), true,
1445 if (err) { 1474 GENEVE_F_UDP_ZERO_CSUM6_RX);
1446 free_netdev(dev); 1475 if (err)
1447 return ERR_PTR(err); 1476 goto err;
1448 } 1477
1478 /* openvswitch users expect packet sizes to be unrestricted,
1479 * so set the largest MTU we can.
1480 */
1481 err = __geneve_change_mtu(dev, IP_MAX_MTU, false);
1482 if (err)
1483 goto err;
1484
1449 return dev; 1485 return dev;
1486
1487 err:
1488 free_netdev(dev);
1489 return ERR_PTR(err);
1450} 1490}
1451EXPORT_SYMBOL_GPL(geneve_dev_create_fb); 1491EXPORT_SYMBOL_GPL(geneve_dev_create_fb);
1452 1492
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index f4130af09244..fcb92c0d0eb9 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -624,6 +624,7 @@ struct nvsp_message {
624#define RNDIS_PKT_ALIGN_DEFAULT 8 624#define RNDIS_PKT_ALIGN_DEFAULT 8
625 625
626struct multi_send_data { 626struct multi_send_data {
627 struct sk_buff *skb; /* skb containing the pkt */
627 struct hv_netvsc_packet *pkt; /* netvsc pkt pending */ 628 struct hv_netvsc_packet *pkt; /* netvsc pkt pending */
628 u32 count; /* counter of batched packets */ 629 u32 count; /* counter of batched packets */
629}; 630};
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 059fc5231601..ec313fc08d82 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -841,6 +841,18 @@ static inline int netvsc_send_pkt(
841 return ret; 841 return ret;
842} 842}
843 843
844/* Move packet out of multi send data (msd), and clear msd */
845static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send,
846 struct sk_buff **msd_skb,
847 struct multi_send_data *msdp)
848{
849 *msd_skb = msdp->skb;
850 *msd_send = msdp->pkt;
851 msdp->skb = NULL;
852 msdp->pkt = NULL;
853 msdp->count = 0;
854}
855
844int netvsc_send(struct hv_device *device, 856int netvsc_send(struct hv_device *device,
845 struct hv_netvsc_packet *packet, 857 struct hv_netvsc_packet *packet,
846 struct rndis_message *rndis_msg, 858 struct rndis_message *rndis_msg,
@@ -855,6 +867,7 @@ int netvsc_send(struct hv_device *device,
855 unsigned int section_index = NETVSC_INVALID_INDEX; 867 unsigned int section_index = NETVSC_INVALID_INDEX;
856 struct multi_send_data *msdp; 868 struct multi_send_data *msdp;
857 struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL; 869 struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
870 struct sk_buff *msd_skb = NULL;
858 bool try_batch; 871 bool try_batch;
859 bool xmit_more = (skb != NULL) ? skb->xmit_more : false; 872 bool xmit_more = (skb != NULL) ? skb->xmit_more : false;
860 873
@@ -897,10 +910,8 @@ int netvsc_send(struct hv_device *device,
897 net_device->send_section_size) { 910 net_device->send_section_size) {
898 section_index = netvsc_get_next_send_section(net_device); 911 section_index = netvsc_get_next_send_section(net_device);
899 if (section_index != NETVSC_INVALID_INDEX) { 912 if (section_index != NETVSC_INVALID_INDEX) {
900 msd_send = msdp->pkt; 913 move_pkt_msd(&msd_send, &msd_skb, msdp);
901 msdp->pkt = NULL; 914 msd_len = 0;
902 msdp->count = 0;
903 msd_len = 0;
904 } 915 }
905 } 916 }
906 917
@@ -919,31 +930,31 @@ int netvsc_send(struct hv_device *device,
919 packet->total_data_buflen += msd_len; 930 packet->total_data_buflen += msd_len;
920 } 931 }
921 932
922 if (msdp->pkt) 933 if (msdp->skb)
923 dev_kfree_skb_any(skb); 934 dev_kfree_skb_any(msdp->skb);
924 935
925 if (xmit_more && !packet->cp_partial) { 936 if (xmit_more && !packet->cp_partial) {
937 msdp->skb = skb;
926 msdp->pkt = packet; 938 msdp->pkt = packet;
927 msdp->count++; 939 msdp->count++;
928 } else { 940 } else {
929 cur_send = packet; 941 cur_send = packet;
942 msdp->skb = NULL;
930 msdp->pkt = NULL; 943 msdp->pkt = NULL;
931 msdp->count = 0; 944 msdp->count = 0;
932 } 945 }
933 } else { 946 } else {
934 msd_send = msdp->pkt; 947 move_pkt_msd(&msd_send, &msd_skb, msdp);
935 msdp->pkt = NULL;
936 msdp->count = 0;
937 cur_send = packet; 948 cur_send = packet;
938 } 949 }
939 950
940 if (msd_send) { 951 if (msd_send) {
941 m_ret = netvsc_send_pkt(msd_send, net_device, pb, skb); 952 m_ret = netvsc_send_pkt(msd_send, net_device, NULL, msd_skb);
942 953
943 if (m_ret != 0) { 954 if (m_ret != 0) {
944 netvsc_free_send_slot(net_device, 955 netvsc_free_send_slot(net_device,
945 msd_send->send_buf_index); 956 msd_send->send_buf_index);
946 dev_kfree_skb_any(skb); 957 dev_kfree_skb_any(msd_skb);
947 } 958 }
948 } 959 }
949 960
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 1c8db9afdcda..98e34fee45c7 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -196,65 +196,6 @@ static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
196 return ppi; 196 return ppi;
197} 197}
198 198
199union sub_key {
200 u64 k;
201 struct {
202 u8 pad[3];
203 u8 kb;
204 u32 ka;
205 };
206};
207
208/* Toeplitz hash function
209 * data: network byte order
210 * return: host byte order
211 */
212static u32 comp_hash(u8 *key, int klen, void *data, int dlen)
213{
214 union sub_key subk;
215 int k_next = 4;
216 u8 dt;
217 int i, j;
218 u32 ret = 0;
219
220 subk.k = 0;
221 subk.ka = ntohl(*(u32 *)key);
222
223 for (i = 0; i < dlen; i++) {
224 subk.kb = key[k_next];
225 k_next = (k_next + 1) % klen;
226 dt = ((u8 *)data)[i];
227 for (j = 0; j < 8; j++) {
228 if (dt & 0x80)
229 ret ^= subk.ka;
230 dt <<= 1;
231 subk.k <<= 1;
232 }
233 }
234
235 return ret;
236}
237
238static bool netvsc_set_hash(u32 *hash, struct sk_buff *skb)
239{
240 struct flow_keys flow;
241 int data_len;
242
243 if (!skb_flow_dissect_flow_keys(skb, &flow, 0) ||
244 !(flow.basic.n_proto == htons(ETH_P_IP) ||
245 flow.basic.n_proto == htons(ETH_P_IPV6)))
246 return false;
247
248 if (flow.basic.ip_proto == IPPROTO_TCP)
249 data_len = 12;
250 else
251 data_len = 8;
252
253 *hash = comp_hash(netvsc_hash_key, HASH_KEYLEN, &flow, data_len);
254
255 return true;
256}
257
258static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, 199static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
259 void *accel_priv, select_queue_fallback_t fallback) 200 void *accel_priv, select_queue_fallback_t fallback)
260{ 201{
@@ -267,11 +208,9 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
267 if (nvsc_dev == NULL || ndev->real_num_tx_queues <= 1) 208 if (nvsc_dev == NULL || ndev->real_num_tx_queues <= 1)
268 return 0; 209 return 0;
269 210
270 if (netvsc_set_hash(&hash, skb)) { 211 hash = skb_get_hash(skb);
271 q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] % 212 q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] %
272 ndev->real_num_tx_queues; 213 ndev->real_num_tx_queues;
273 skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
274 }
275 214
276 if (!nvsc_dev->chn_table[q_idx]) 215 if (!nvsc_dev->chn_table[q_idx])
277 q_idx = 0; 216 q_idx = 0;
@@ -1150,6 +1089,9 @@ static int netvsc_probe(struct hv_device *dev,
1150 net->ethtool_ops = &ethtool_ops; 1089 net->ethtool_ops = &ethtool_ops;
1151 SET_NETDEV_DEV(net, &dev->device); 1090 SET_NETDEV_DEV(net, &dev->device);
1152 1091
1092 /* We always need headroom for rndis header */
1093 net->needed_headroom = RNDIS_AND_PPI_SIZE;
1094
1153 /* Notify the netvsc driver of the new device */ 1095 /* Notify the netvsc driver of the new device */
1154 memset(&device_info, 0, sizeof(device_info)); 1096 memset(&device_info, 0, sizeof(device_info));
1155 device_info.ring_size = ring_size; 1097 device_info.ring_size = ring_size;
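
The netvsc_drv changes drop the driver's private Toeplitz hashing and let skb_get_hash() (flow-dissector based, and cached in the skb) feed the send-table lookup, while the netvsc.c changes track the pending skb alongside the batched packet so the correct buffer is freed. A sketch of the queue selection; the my_* names and table size are placeholders:

#include <linux/skbuff.h>

#define MY_SEND_TAB_SIZE 16	/* placeholder table size */

static u16 my_select_queue(struct sk_buff *skb, const u16 *send_table,
			   u16 real_num_tx_queues)
{
	u32 hash = skb_get_hash(skb);

	return send_table[hash % MY_SEND_TAB_SIZE] % real_num_tx_queues;
}
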
diff --git a/drivers/net/irda/bfin_sir.h b/drivers/net/irda/bfin_sir.h
index 29cbde8501ed..d47cf14bb4a5 100644
--- a/drivers/net/irda/bfin_sir.h
+++ b/drivers/net/irda/bfin_sir.h
@@ -82,9 +82,6 @@ struct bfin_sir_self {
82 82
83#define DRIVER_NAME "bfin_sir" 83#define DRIVER_NAME "bfin_sir"
84 84
85#define port_membase(port) (((struct bfin_sir_port *)(port))->membase)
86#define get_lsr_cache(port) (((struct bfin_sir_port *)(port))->lsr)
87#define put_lsr_cache(port, v) (((struct bfin_sir_port *)(port))->lsr = (v))
88#include <asm/bfin_serial.h> 85#include <asm/bfin_serial.h>
89 86
90static const unsigned short per[][4] = { 87static const unsigned short per[][4] = {
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 6a57a005e0ca..94e688805dd2 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1323,6 +1323,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
1323 1323
1324 list_add_tail_rcu(&vlan->list, &port->vlans); 1324 list_add_tail_rcu(&vlan->list, &port->vlans);
1325 netif_stacked_transfer_operstate(lowerdev, dev); 1325 netif_stacked_transfer_operstate(lowerdev, dev);
1326 linkwatch_fire_event(dev);
1326 1327
1327 return 0; 1328 return 0;
1328 1329
@@ -1522,6 +1523,7 @@ static int macvlan_device_event(struct notifier_block *unused,
1522 port = macvlan_port_get_rtnl(dev); 1523 port = macvlan_port_get_rtnl(dev);
1523 1524
1524 switch (event) { 1525 switch (event) {
1526 case NETDEV_UP:
1525 case NETDEV_CHANGE: 1527 case NETDEV_CHANGE:
1526 list_for_each_entry(vlan, &port->vlans, list) 1528 list_for_each_entry(vlan, &port->vlans, list)
1527 netif_stacked_transfer_operstate(vlan->lowerdev, 1529 netif_stacked_transfer_operstate(vlan->lowerdev,
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 60994a83a0d6..f0a77020037a 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -186,6 +186,7 @@ config MDIO_GPIO
186config MDIO_OCTEON 186config MDIO_OCTEON
187 tristate "Support for MDIO buses on Octeon and ThunderX SOCs" 187 tristate "Support for MDIO buses on Octeon and ThunderX SOCs"
188 depends on 64BIT 188 depends on 64BIT
189 depends on HAS_IOMEM
189 help 190 help
190 191
191 This module provides a driver for the Octeon and ThunderX MDIO 192 This module provides a driver for the Octeon and ThunderX MDIO
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index bf241a3ec5e5..db507e3bcab9 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -250,10 +250,6 @@ static int bcm7xxx_config_init(struct phy_device *phydev)
250 phy_write(phydev, MII_BCM7XXX_AUX_MODE, MII_BCM7XX_64CLK_MDIO); 250 phy_write(phydev, MII_BCM7XXX_AUX_MODE, MII_BCM7XX_64CLK_MDIO);
251 phy_read(phydev, MII_BCM7XXX_AUX_MODE); 251 phy_read(phydev, MII_BCM7XXX_AUX_MODE);
252 252
253 /* Workaround only required for 100Mbits/sec capable PHYs */
254 if (phydev->supported & PHY_GBIT_FEATURES)
255 return 0;
256
257 /* set shadow mode 2 */ 253 /* set shadow mode 2 */
258 ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 254 ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST,
259 MII_BCM7XXX_SHD_MODE_2, MII_BCM7XXX_SHD_MODE_2); 255 MII_BCM7XXX_SHD_MODE_2, MII_BCM7XXX_SHD_MODE_2);
@@ -270,7 +266,7 @@ static int bcm7xxx_config_init(struct phy_device *phydev)
270 phy_write(phydev, MII_BCM7XXX_100TX_FALSE_CAR, 0x7555); 266 phy_write(phydev, MII_BCM7XXX_100TX_FALSE_CAR, 0x7555);
271 267
272 /* reset shadow mode 2 */ 268 /* reset shadow mode 2 */
273 ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, MII_BCM7XXX_SHD_MODE_2, 0); 269 ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 0, MII_BCM7XXX_SHD_MODE_2);
274 if (ret < 0) 270 if (ret < 0)
275 return ret; 271 return ret;
276 272
@@ -307,11 +303,6 @@ static int bcm7xxx_suspend(struct phy_device *phydev)
307 return 0; 303 return 0;
308} 304}
309 305
310static int bcm7xxx_dummy_config_init(struct phy_device *phydev)
311{
312 return 0;
313}
314
315#define BCM7XXX_28NM_GPHY(_oui, _name) \ 306#define BCM7XXX_28NM_GPHY(_oui, _name) \
316{ \ 307{ \
317 .phy_id = (_oui), \ 308 .phy_id = (_oui), \
@@ -337,7 +328,7 @@ static struct phy_driver bcm7xxx_driver[] = {
337 .phy_id = PHY_ID_BCM7425, 328 .phy_id = PHY_ID_BCM7425,
338 .phy_id_mask = 0xfffffff0, 329 .phy_id_mask = 0xfffffff0,
339 .name = "Broadcom BCM7425", 330 .name = "Broadcom BCM7425",
340 .features = PHY_GBIT_FEATURES | 331 .features = PHY_BASIC_FEATURES |
341 SUPPORTED_Pause | SUPPORTED_Asym_Pause, 332 SUPPORTED_Pause | SUPPORTED_Asym_Pause,
342 .flags = PHY_IS_INTERNAL, 333 .flags = PHY_IS_INTERNAL,
343 .config_init = bcm7xxx_config_init, 334 .config_init = bcm7xxx_config_init,
@@ -349,7 +340,7 @@ static struct phy_driver bcm7xxx_driver[] = {
349 .phy_id = PHY_ID_BCM7429, 340 .phy_id = PHY_ID_BCM7429,
350 .phy_id_mask = 0xfffffff0, 341 .phy_id_mask = 0xfffffff0,
351 .name = "Broadcom BCM7429", 342 .name = "Broadcom BCM7429",
352 .features = PHY_GBIT_FEATURES | 343 .features = PHY_BASIC_FEATURES |
353 SUPPORTED_Pause | SUPPORTED_Asym_Pause, 344 SUPPORTED_Pause | SUPPORTED_Asym_Pause,
354 .flags = PHY_IS_INTERNAL, 345 .flags = PHY_IS_INTERNAL,
355 .config_init = bcm7xxx_config_init, 346 .config_init = bcm7xxx_config_init,
@@ -361,7 +352,7 @@ static struct phy_driver bcm7xxx_driver[] = {
361 .phy_id = PHY_ID_BCM7435, 352 .phy_id = PHY_ID_BCM7435,
362 .phy_id_mask = 0xfffffff0, 353 .phy_id_mask = 0xfffffff0,
363 .name = "Broadcom BCM7435", 354 .name = "Broadcom BCM7435",
364 .features = PHY_GBIT_FEATURES | 355 .features = PHY_BASIC_FEATURES |
365 SUPPORTED_Pause | SUPPORTED_Asym_Pause, 356 SUPPORTED_Pause | SUPPORTED_Asym_Pause,
366 .flags = PHY_IS_INTERNAL, 357 .flags = PHY_IS_INTERNAL,
367 .config_init = bcm7xxx_config_init, 358 .config_init = bcm7xxx_config_init,
@@ -369,30 +360,6 @@ static struct phy_driver bcm7xxx_driver[] = {
369 .read_status = genphy_read_status, 360 .read_status = genphy_read_status,
370 .suspend = bcm7xxx_suspend, 361 .suspend = bcm7xxx_suspend,
371 .resume = bcm7xxx_config_init, 362 .resume = bcm7xxx_config_init,
372}, {
373 .phy_id = PHY_BCM_OUI_4,
374 .phy_id_mask = 0xffff0000,
375 .name = "Broadcom BCM7XXX 40nm",
376 .features = PHY_GBIT_FEATURES |
377 SUPPORTED_Pause | SUPPORTED_Asym_Pause,
378 .flags = PHY_IS_INTERNAL,
379 .config_init = bcm7xxx_config_init,
380 .config_aneg = genphy_config_aneg,
381 .read_status = genphy_read_status,
382 .suspend = bcm7xxx_suspend,
383 .resume = bcm7xxx_config_init,
384}, {
385 .phy_id = PHY_BCM_OUI_5,
386 .phy_id_mask = 0xffffff00,
387 .name = "Broadcom BCM7XXX 65nm",
388 .features = PHY_BASIC_FEATURES |
389 SUPPORTED_Pause | SUPPORTED_Asym_Pause,
390 .flags = PHY_IS_INTERNAL,
391 .config_init = bcm7xxx_dummy_config_init,
392 .config_aneg = genphy_config_aneg,
393 .read_status = genphy_read_status,
394 .suspend = bcm7xxx_suspend,
395 .resume = bcm7xxx_config_init,
396} }; 363} };
397 364
398static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = { 365static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
@@ -404,8 +371,6 @@ static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
404 { PHY_ID_BCM7439, 0xfffffff0, }, 371 { PHY_ID_BCM7439, 0xfffffff0, },
405 { PHY_ID_BCM7435, 0xfffffff0, }, 372 { PHY_ID_BCM7435, 0xfffffff0, },
406 { PHY_ID_BCM7445, 0xfffffff0, }, 373 { PHY_ID_BCM7445, 0xfffffff0, },
407 { PHY_BCM_OUI_4, 0xffff0000 },
408 { PHY_BCM_OUI_5, 0xffffff00 },
409 { } 374 { }
410}; 375};
411 376
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 180f69952779..7a240fce3a7e 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -846,6 +846,11 @@ static void decode_rxts(struct dp83640_private *dp83640,
846 struct skb_shared_hwtstamps *shhwtstamps = NULL; 846 struct skb_shared_hwtstamps *shhwtstamps = NULL;
847 struct sk_buff *skb; 847 struct sk_buff *skb;
848 unsigned long flags; 848 unsigned long flags;
849 u8 overflow;
850
851 overflow = (phy_rxts->ns_hi >> 14) & 0x3;
852 if (overflow)
853 pr_debug("rx timestamp queue overflow, count %d\n", overflow);
849 854
850 spin_lock_irqsave(&dp83640->rx_lock, flags); 855 spin_lock_irqsave(&dp83640->rx_lock, flags);
851 856
@@ -888,6 +893,7 @@ static void decode_txts(struct dp83640_private *dp83640,
888 struct skb_shared_hwtstamps shhwtstamps; 893 struct skb_shared_hwtstamps shhwtstamps;
889 struct sk_buff *skb; 894 struct sk_buff *skb;
890 u64 ns; 895 u64 ns;
896 u8 overflow;
891 897
892 /* We must already have the skb that triggered this. */ 898 /* We must already have the skb that triggered this. */
893 899
@@ -897,6 +903,17 @@ static void decode_txts(struct dp83640_private *dp83640,
897 pr_debug("have timestamp but tx_queue empty\n"); 903 pr_debug("have timestamp but tx_queue empty\n");
898 return; 904 return;
899 } 905 }
906
907 overflow = (phy_txts->ns_hi >> 14) & 0x3;
908 if (overflow) {
909 pr_debug("tx timestamp queue overflow, count %d\n", overflow);
910 while (skb) {
911 skb_complete_tx_timestamp(skb, NULL);
912 skb = skb_dequeue(&dp83640->tx_queue);
913 }
914 return;
915 }
916
900 ns = phy2txts(phy_txts); 917 ns = phy2txts(phy_txts);
901 memset(&shhwtstamps, 0, sizeof(shhwtstamps)); 918 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
902 shhwtstamps.hwtstamp = ns_to_ktime(ns); 919 shhwtstamps.hwtstamp = ns_to_ktime(ns);
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index e3eb96443c97..ab1d0fcaf1d9 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -446,6 +446,12 @@ static int m88e1510_config_aneg(struct phy_device *phydev)
446 if (err < 0) 446 if (err < 0)
447 return err; 447 return err;
448 448
449 return 0;
450}
451
452static int marvell_config_init(struct phy_device *phydev)
453{
454 /* Set registers from marvell,reg-init DT property */
449 return marvell_of_reg_init(phydev); 455 return marvell_of_reg_init(phydev);
450} 456}
451 457
@@ -495,7 +501,7 @@ static int m88e1116r_config_init(struct phy_device *phydev)
495 501
496 mdelay(500); 502 mdelay(500);
497 503
498 return 0; 504 return marvell_config_init(phydev);
499} 505}
500 506
501static int m88e3016_config_init(struct phy_device *phydev) 507static int m88e3016_config_init(struct phy_device *phydev)
@@ -514,7 +520,7 @@ static int m88e3016_config_init(struct phy_device *phydev)
514 if (reg < 0) 520 if (reg < 0)
515 return reg; 521 return reg;
516 522
517 return 0; 523 return marvell_config_init(phydev);
518} 524}
519 525
520static int m88e1111_config_init(struct phy_device *phydev) 526static int m88e1111_config_init(struct phy_device *phydev)
@@ -1078,6 +1084,7 @@ static struct phy_driver marvell_drivers[] = {
1078 .features = PHY_GBIT_FEATURES, 1084 .features = PHY_GBIT_FEATURES,
1079 .probe = marvell_probe, 1085 .probe = marvell_probe,
1080 .flags = PHY_HAS_INTERRUPT, 1086 .flags = PHY_HAS_INTERRUPT,
1087 .config_init = &marvell_config_init,
1081 .config_aneg = &marvell_config_aneg, 1088 .config_aneg = &marvell_config_aneg,
1082 .read_status = &genphy_read_status, 1089 .read_status = &genphy_read_status,
1083 .ack_interrupt = &marvell_ack_interrupt, 1090 .ack_interrupt = &marvell_ack_interrupt,
@@ -1149,6 +1156,7 @@ static struct phy_driver marvell_drivers[] = {
1149 .features = PHY_GBIT_FEATURES, 1156 .features = PHY_GBIT_FEATURES,
1150 .flags = PHY_HAS_INTERRUPT, 1157 .flags = PHY_HAS_INTERRUPT,
1151 .probe = marvell_probe, 1158 .probe = marvell_probe,
1159 .config_init = &marvell_config_init,
1152 .config_aneg = &m88e1121_config_aneg, 1160 .config_aneg = &m88e1121_config_aneg,
1153 .read_status = &marvell_read_status, 1161 .read_status = &marvell_read_status,
1154 .ack_interrupt = &marvell_ack_interrupt, 1162 .ack_interrupt = &marvell_ack_interrupt,
@@ -1167,6 +1175,7 @@ static struct phy_driver marvell_drivers[] = {
1167 .features = PHY_GBIT_FEATURES, 1175 .features = PHY_GBIT_FEATURES,
1168 .flags = PHY_HAS_INTERRUPT, 1176 .flags = PHY_HAS_INTERRUPT,
1169 .probe = marvell_probe, 1177 .probe = marvell_probe,
1178 .config_init = &marvell_config_init,
1170 .config_aneg = &m88e1318_config_aneg, 1179 .config_aneg = &m88e1318_config_aneg,
1171 .read_status = &marvell_read_status, 1180 .read_status = &marvell_read_status,
1172 .ack_interrupt = &marvell_ack_interrupt, 1181 .ack_interrupt = &marvell_ack_interrupt,
@@ -1259,6 +1268,7 @@ static struct phy_driver marvell_drivers[] = {
1259 .features = PHY_GBIT_FEATURES, 1268 .features = PHY_GBIT_FEATURES,
1260 .flags = PHY_HAS_INTERRUPT, 1269 .flags = PHY_HAS_INTERRUPT,
1261 .probe = marvell_probe, 1270 .probe = marvell_probe,
1271 .config_init = &marvell_config_init,
1262 .config_aneg = &m88e1510_config_aneg, 1272 .config_aneg = &m88e1510_config_aneg,
1263 .read_status = &marvell_read_status, 1273 .read_status = &marvell_read_status,
1264 .ack_interrupt = &marvell_ack_interrupt, 1274 .ack_interrupt = &marvell_ack_interrupt,
@@ -1277,6 +1287,7 @@ static struct phy_driver marvell_drivers[] = {
1277 .features = PHY_GBIT_FEATURES, 1287 .features = PHY_GBIT_FEATURES,
1278 .flags = PHY_HAS_INTERRUPT, 1288 .flags = PHY_HAS_INTERRUPT,
1279 .probe = marvell_probe, 1289 .probe = marvell_probe,
1290 .config_init = &marvell_config_init,
1280 .config_aneg = &m88e1510_config_aneg, 1291 .config_aneg = &m88e1510_config_aneg,
1281 .read_status = &marvell_read_status, 1292 .read_status = &marvell_read_status,
1282 .ack_interrupt = &marvell_ack_interrupt, 1293 .ack_interrupt = &marvell_ack_interrupt,
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 03833dbfca67..dc85f7095e51 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -297,6 +297,17 @@ static int kszphy_config_init(struct phy_device *phydev)
297 if (priv->led_mode >= 0) 297 if (priv->led_mode >= 0)
298 kszphy_setup_led(phydev, type->led_mode_reg, priv->led_mode); 298 kszphy_setup_led(phydev, type->led_mode_reg, priv->led_mode);
299 299
300 if (phy_interrupt_is_valid(phydev)) {
301 int ctl = phy_read(phydev, MII_BMCR);
302
303 if (ctl < 0)
304 return ctl;
305
306 ret = phy_write(phydev, MII_BMCR, ctl & ~BMCR_ANENABLE);
307 if (ret < 0)
308 return ret;
309 }
310
300 return 0; 311 return 0;
301} 312}
302 313
@@ -635,6 +646,21 @@ static void kszphy_get_stats(struct phy_device *phydev,
635 data[i] = kszphy_get_stat(phydev, i); 646 data[i] = kszphy_get_stat(phydev, i);
636} 647}
637 648
649static int kszphy_resume(struct phy_device *phydev)
650{
651 int value;
652
653 mutex_lock(&phydev->lock);
654
655 value = phy_read(phydev, MII_BMCR);
656 phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN);
657
658 kszphy_config_intr(phydev);
659 mutex_unlock(&phydev->lock);
660
661 return 0;
662}
663
638static int kszphy_probe(struct phy_device *phydev) 664static int kszphy_probe(struct phy_device *phydev)
639{ 665{
640 const struct kszphy_type *type = phydev->drv->driver_data; 666 const struct kszphy_type *type = phydev->drv->driver_data;
@@ -844,7 +870,7 @@ static struct phy_driver ksphy_driver[] = {
844 .get_strings = kszphy_get_strings, 870 .get_strings = kszphy_get_strings,
845 .get_stats = kszphy_get_stats, 871 .get_stats = kszphy_get_stats,
846 .suspend = genphy_suspend, 872 .suspend = genphy_suspend,
847 .resume = genphy_resume, 873 .resume = kszphy_resume,
848}, { 874}, {
849 .phy_id = PHY_ID_KSZ8061, 875 .phy_id = PHY_ID_KSZ8061,
850 .name = "Micrel KSZ8061", 876 .name = "Micrel KSZ8061",
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 8763bb20988a..5590b9c182c9 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -692,25 +692,29 @@ void phy_change(struct work_struct *work)
692 struct phy_device *phydev = 692 struct phy_device *phydev =
693 container_of(work, struct phy_device, phy_queue); 693 container_of(work, struct phy_device, phy_queue);
694 694
695 if (phydev->drv->did_interrupt && 695 if (phy_interrupt_is_valid(phydev)) {
696 !phydev->drv->did_interrupt(phydev)) 696 if (phydev->drv->did_interrupt &&
697 goto ignore; 697 !phydev->drv->did_interrupt(phydev))
698 goto ignore;
698 699
699 if (phy_disable_interrupts(phydev)) 700 if (phy_disable_interrupts(phydev))
700 goto phy_err; 701 goto phy_err;
702 }
701 703
702 mutex_lock(&phydev->lock); 704 mutex_lock(&phydev->lock);
703 if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state)) 705 if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
704 phydev->state = PHY_CHANGELINK; 706 phydev->state = PHY_CHANGELINK;
705 mutex_unlock(&phydev->lock); 707 mutex_unlock(&phydev->lock);
706 708
707 atomic_dec(&phydev->irq_disable); 709 if (phy_interrupt_is_valid(phydev)) {
708 enable_irq(phydev->irq); 710 atomic_dec(&phydev->irq_disable);
711 enable_irq(phydev->irq);
709 712
710 /* Reenable interrupts */ 713 /* Reenable interrupts */
711 if (PHY_HALTED != phydev->state && 714 if (PHY_HALTED != phydev->state &&
712 phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED)) 715 phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED))
713 goto irq_enable_err; 716 goto irq_enable_err;
717 }
714 718
715 /* reschedule state queue work to run as soon as possible */ 719 /* reschedule state queue work to run as soon as possible */
716 cancel_delayed_work_sync(&phydev->state_queue); 720 cancel_delayed_work_sync(&phydev->state_queue);
@@ -905,10 +909,10 @@ void phy_state_machine(struct work_struct *work)
905 phydev->adjust_link(phydev->attached_dev); 909 phydev->adjust_link(phydev->attached_dev);
906 break; 910 break;
907 case PHY_RUNNING: 911 case PHY_RUNNING:
908 /* Only register a CHANGE if we are polling or ignoring 912 /* Only register a CHANGE if we are polling and link changed
909 * interrupts and link changed since latest checking. 913 * since latest checking.
910 */ 914 */
911 if (!phy_interrupt_is_valid(phydev)) { 915 if (phydev->irq == PHY_POLL) {
912 old_link = phydev->link; 916 old_link = phydev->link;
913 err = phy_read_status(phydev); 917 err = phy_read_status(phydev);
914 if (err) 918 if (err)
@@ -1000,15 +1004,21 @@ void phy_state_machine(struct work_struct *work)
1000 phy_state_to_str(old_state), 1004 phy_state_to_str(old_state),
1001 phy_state_to_str(phydev->state)); 1005 phy_state_to_str(phydev->state));
1002 1006
1003 queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 1007 /* Only re-schedule a PHY state machine change if we are polling the
1004 PHY_STATE_TIME * HZ); 1008 * PHY; if PHY_IGNORE_INTERRUPT is set, we will be moving
 1009 * between states from phy_mac_interrupt().
1010 */
1011 if (phydev->irq == PHY_POLL)
1012 queue_delayed_work(system_power_efficient_wq, &phydev->state_queue,
1013 PHY_STATE_TIME * HZ);
1005} 1014}
1006 1015
1007void phy_mac_interrupt(struct phy_device *phydev, int new_link) 1016void phy_mac_interrupt(struct phy_device *phydev, int new_link)
1008{ 1017{
1009 cancel_work_sync(&phydev->phy_queue);
1010 phydev->link = new_link; 1018 phydev->link = new_link;
1011 schedule_work(&phydev->phy_queue); 1019
1020 /* Trigger a state machine change */
1021 queue_work(system_power_efficient_wq, &phydev->phy_queue);
1012} 1022}
1013EXPORT_SYMBOL(phy_mac_interrupt); 1023EXPORT_SYMBOL(phy_mac_interrupt);
1014 1024
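
Note on the phy.c hunks above: phy_mac_interrupt() now only records the new link state and queues the PHY state machine, and phy_state_machine() no longer re-arms itself unless the PHY is polled (phydev->irq == PHY_POLL). A MAC driver that delivers its own link events (as the lan78xx hunks later in this series do) is therefore expected to mark the PHY with PHY_IGNORE_INTERRUPT and call phy_mac_interrupt() from its link-event path. The fragment below is a minimal sketch of that pattern with a hypothetical my_mac_handle_link_event() name; it is not code from this series.

#include <linux/phy.h>

/* At PHY setup time, before phy_start(), the MAC driver is expected to do:
 *	phydev->irq = PHY_IGNORE_INTERRUPT;
 *	phy_connect_direct(ndev, phydev, my_mac_link_change,
 *			   PHY_INTERFACE_MODE_GMII);
 */
static void my_mac_handle_link_event(struct phy_device *phydev, bool link_up)
{
	/* Record the new link state and queue the PHY state machine; with
	 * PHY_IGNORE_INTERRUPT set, this call is what drives state changes.
	 */
	phy_mac_interrupt(phydev, link_up ? 1 : 0);
}
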
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index bad3f005faee..e551f3a89cfd 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1410,7 +1410,7 @@ int genphy_config_init(struct phy_device *phydev)
1410 1410
1411 features = (SUPPORTED_TP | SUPPORTED_MII 1411 features = (SUPPORTED_TP | SUPPORTED_MII
1412 | SUPPORTED_AUI | SUPPORTED_FIBRE | 1412 | SUPPORTED_AUI | SUPPORTED_FIBRE |
1413 SUPPORTED_BNC); 1413 SUPPORTED_BNC | SUPPORTED_Pause | SUPPORTED_Asym_Pause);
1414 1414
1415 /* Do we support autonegotiation? */ 1415 /* Do we support autonegotiation? */
1416 val = phy_read(phydev, MII_BMSR); 1416 val = phy_read(phydev, MII_BMSR);
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index e485f2653c82..2e21e9366f76 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -24,6 +24,10 @@
24#include <linux/netdevice.h> 24#include <linux/netdevice.h>
25#include <linux/smscphy.h> 25#include <linux/smscphy.h>
26 26
27struct smsc_phy_priv {
28 bool energy_enable;
29};
30
27static int smsc_phy_config_intr(struct phy_device *phydev) 31static int smsc_phy_config_intr(struct phy_device *phydev)
28{ 32{
29 int rc = phy_write (phydev, MII_LAN83C185_IM, 33 int rc = phy_write (phydev, MII_LAN83C185_IM,
@@ -43,19 +47,14 @@ static int smsc_phy_ack_interrupt(struct phy_device *phydev)
43 47
44static int smsc_phy_config_init(struct phy_device *phydev) 48static int smsc_phy_config_init(struct phy_device *phydev)
45{ 49{
46 int __maybe_unused len; 50 struct smsc_phy_priv *priv = phydev->priv;
47 struct device *dev __maybe_unused = &phydev->mdio.dev; 51
48 struct device_node *of_node __maybe_unused = dev->of_node;
49 int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS); 52 int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
50 int enable_energy = 1;
51 53
52 if (rc < 0) 54 if (rc < 0)
53 return rc; 55 return rc;
54 56
55 if (of_find_property(of_node, "smsc,disable-energy-detect", &len)) 57 if (priv->energy_enable) {
56 enable_energy = 0;
57
58 if (enable_energy) {
59 /* Enable energy detect mode for these SMSC transceivers */ 58
60 rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS, 59 rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS,
61 rc | MII_LAN83C185_EDPWRDOWN); 60 rc | MII_LAN83C185_EDPWRDOWN);
@@ -110,10 +109,13 @@ static int lan911x_config_init(struct phy_device *phydev)
110 */ 109 */
111static int lan87xx_read_status(struct phy_device *phydev) 110static int lan87xx_read_status(struct phy_device *phydev)
112{ 111{
112 struct smsc_phy_priv *priv = phydev->priv;
113
113 int err = genphy_read_status(phydev); 114 int err = genphy_read_status(phydev);
114 int i;
115 115
116 if (!phydev->link) { 116 if (!phydev->link && priv->energy_enable) {
117 int i;
118
117 /* Disable EDPD to wake up PHY */ 119 /* Disable EDPD to wake up PHY */
118 int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS); 120 int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
119 if (rc < 0) 121 if (rc < 0)
@@ -149,6 +151,26 @@ static int lan87xx_read_status(struct phy_device *phydev)
149 return err; 151 return err;
150} 152}
151 153
154static int smsc_phy_probe(struct phy_device *phydev)
155{
156 struct device *dev = &phydev->mdio.dev;
157 struct device_node *of_node = dev->of_node;
158 struct smsc_phy_priv *priv;
159
160 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
161 if (!priv)
162 return -ENOMEM;
163
164 priv->energy_enable = true;
165
166 if (of_property_read_bool(of_node, "smsc,disable-energy-detect"))
167 priv->energy_enable = false;
168
169 phydev->priv = priv;
170
171 return 0;
172}
173
152static struct phy_driver smsc_phy_driver[] = { 174static struct phy_driver smsc_phy_driver[] = {
153{ 175{
154 .phy_id = 0x0007c0a0, /* OUI=0x00800f, Model#=0x0a */ 176 .phy_id = 0x0007c0a0, /* OUI=0x00800f, Model#=0x0a */
@@ -159,6 +181,8 @@ static struct phy_driver smsc_phy_driver[] = {
159 | SUPPORTED_Asym_Pause), 181 | SUPPORTED_Asym_Pause),
160 .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG, 182 .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
161 183
184 .probe = smsc_phy_probe,
185
162 /* basic functions */ 186 /* basic functions */
163 .config_aneg = genphy_config_aneg, 187 .config_aneg = genphy_config_aneg,
164 .read_status = genphy_read_status, 188 .read_status = genphy_read_status,
@@ -180,6 +204,8 @@ static struct phy_driver smsc_phy_driver[] = {
180 | SUPPORTED_Asym_Pause), 204 | SUPPORTED_Asym_Pause),
181 .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG, 205 .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
182 206
207 .probe = smsc_phy_probe,
208
183 /* basic functions */ 209 /* basic functions */
184 .config_aneg = genphy_config_aneg, 210 .config_aneg = genphy_config_aneg,
185 .read_status = genphy_read_status, 211 .read_status = genphy_read_status,
@@ -201,6 +227,8 @@ static struct phy_driver smsc_phy_driver[] = {
201 | SUPPORTED_Asym_Pause), 227 | SUPPORTED_Asym_Pause),
202 .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG, 228 .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
203 229
230 .probe = smsc_phy_probe,
231
204 /* basic functions */ 232 /* basic functions */
205 .config_aneg = genphy_config_aneg, 233 .config_aneg = genphy_config_aneg,
206 .read_status = lan87xx_read_status, 234 .read_status = lan87xx_read_status,
@@ -222,6 +250,8 @@ static struct phy_driver smsc_phy_driver[] = {
222 | SUPPORTED_Asym_Pause), 250 | SUPPORTED_Asym_Pause),
223 .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG, 251 .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
224 252
253 .probe = smsc_phy_probe,
254
225 /* basic functions */ 255 /* basic functions */
226 .config_aneg = genphy_config_aneg, 256 .config_aneg = genphy_config_aneg,
227 .read_status = genphy_read_status, 257 .read_status = genphy_read_status,
@@ -242,6 +272,8 @@ static struct phy_driver smsc_phy_driver[] = {
242 | SUPPORTED_Asym_Pause), 272 | SUPPORTED_Asym_Pause),
243 .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG, 273 .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
244 274
275 .probe = smsc_phy_probe,
276
245 /* basic functions */ 277 /* basic functions */
246 .config_aneg = genphy_config_aneg, 278 .config_aneg = genphy_config_aneg,
247 .read_status = lan87xx_read_status, 279 .read_status = lan87xx_read_status,
@@ -263,6 +295,8 @@ static struct phy_driver smsc_phy_driver[] = {
263 | SUPPORTED_Asym_Pause), 295 | SUPPORTED_Asym_Pause),
264 .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG, 296 .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
265 297
298 .probe = smsc_phy_probe,
299
266 /* basic functions */ 300 /* basic functions */
267 .config_aneg = genphy_config_aneg, 301 .config_aneg = genphy_config_aneg,
268 .read_status = lan87xx_read_status, 302 .read_status = lan87xx_read_status,
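
Taken together, the smsc.c hunks above move the "smsc,disable-energy-detect" device-tree lookup out of config_init() and into a probe() hook that caches the result in devm-allocated driver-private data, which config_init() and lan87xx_read_status() then consult. Restated in isolation, the pattern looks roughly like the sketch below; the foo_* names are placeholders, not part of this patch.

#include <linux/device.h>
#include <linux/of.h>
#include <linux/phy.h>

struct foo_phy_priv {
	bool energy_enable;	/* cached DT setting, read once at probe */
};

static int foo_phy_probe(struct phy_device *phydev)
{
	struct device *dev = &phydev->mdio.dev;
	struct foo_phy_priv *priv;

	/* devm allocation is released automatically with the PHY device */
	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* energy detect stays enabled unless DT explicitly disables it */
	priv->energy_enable = !of_property_read_bool(dev->of_node,
						     "smsc,disable-energy-detect");
	phydev->priv = priv;

	return 0;
}
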
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index fc8ad001bc94..d61da9ece3ba 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -443,9 +443,14 @@ static ssize_t ppp_read(struct file *file, char __user *buf,
443 * network traffic (demand mode). 443 * network traffic (demand mode).
444 */ 444 */
445 struct ppp *ppp = PF_TO_PPP(pf); 445 struct ppp *ppp = PF_TO_PPP(pf);
446
447 ppp_recv_lock(ppp);
446 if (ppp->n_channels == 0 && 448 if (ppp->n_channels == 0 &&
447 (ppp->flags & SC_LOOP_TRAFFIC) == 0) 449 (ppp->flags & SC_LOOP_TRAFFIC) == 0) {
450 ppp_recv_unlock(ppp);
448 break; 451 break;
452 }
453 ppp_recv_unlock(ppp);
449 } 454 }
450 ret = -EAGAIN; 455 ret = -EAGAIN;
451 if (file->f_flags & O_NONBLOCK) 456 if (file->f_flags & O_NONBLOCK)
@@ -532,9 +537,12 @@ static unsigned int ppp_poll(struct file *file, poll_table *wait)
532 else if (pf->kind == INTERFACE) { 537 else if (pf->kind == INTERFACE) {
533 /* see comment in ppp_read */ 538 /* see comment in ppp_read */
534 struct ppp *ppp = PF_TO_PPP(pf); 539 struct ppp *ppp = PF_TO_PPP(pf);
540
541 ppp_recv_lock(ppp);
535 if (ppp->n_channels == 0 && 542 if (ppp->n_channels == 0 &&
536 (ppp->flags & SC_LOOP_TRAFFIC) == 0) 543 (ppp->flags & SC_LOOP_TRAFFIC) == 0)
537 mask |= POLLIN | POLLRDNORM; 544 mask |= POLLIN | POLLRDNORM;
545 ppp_recv_unlock(ppp);
538 } 546 }
539 547
540 return mask; 548 return mask;
@@ -2808,6 +2816,7 @@ static struct ppp *ppp_create_interface(struct net *net, int unit,
2808 2816
2809out2: 2817out2:
2810 mutex_unlock(&pn->all_ppp_mutex); 2818 mutex_unlock(&pn->all_ppp_mutex);
2819 rtnl_unlock();
2811 free_netdev(dev); 2820 free_netdev(dev);
2812out1: 2821out1:
2813 *retp = ret; 2822 *retp = ret;
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index f3c63022eb3c..4ddae8118c85 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -395,6 +395,8 @@ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb)
395 395
396 if (!__pppoe_xmit(sk_pppox(relay_po), skb)) 396 if (!__pppoe_xmit(sk_pppox(relay_po), skb))
397 goto abort_put; 397 goto abort_put;
398
399 sock_put(sk_pppox(relay_po));
398 } else { 400 } else {
399 if (sock_queue_rcv_skb(sk, skb)) 401 if (sock_queue_rcv_skb(sk, skb))
400 goto abort_kfree; 402 goto abort_kfree;
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 90868ca5e341..ae0905ed4a32 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -129,24 +129,27 @@ static int lookup_chan_dst(u16 call_id, __be32 d_addr)
129 return i < MAX_CALLID; 129 return i < MAX_CALLID;
130} 130}
131 131
132static int add_chan(struct pppox_sock *sock) 132static int add_chan(struct pppox_sock *sock,
133 struct pptp_addr *sa)
133{ 134{
134 static int call_id; 135 static int call_id;
135 136
136 spin_lock(&chan_lock); 137 spin_lock(&chan_lock);
137 if (!sock->proto.pptp.src_addr.call_id) { 138 if (!sa->call_id) {
138 call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, call_id + 1); 139 call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, call_id + 1);
139 if (call_id == MAX_CALLID) { 140 if (call_id == MAX_CALLID) {
140 call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, 1); 141 call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, 1);
141 if (call_id == MAX_CALLID) 142 if (call_id == MAX_CALLID)
142 goto out_err; 143 goto out_err;
143 } 144 }
144 sock->proto.pptp.src_addr.call_id = call_id; 145 sa->call_id = call_id;
145 } else if (test_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap)) 146 } else if (test_bit(sa->call_id, callid_bitmap)) {
146 goto out_err; 147 goto out_err;
148 }
147 149
148 set_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap); 150 sock->proto.pptp.src_addr = *sa;
149 rcu_assign_pointer(callid_sock[sock->proto.pptp.src_addr.call_id], sock); 151 set_bit(sa->call_id, callid_bitmap);
152 rcu_assign_pointer(callid_sock[sa->call_id], sock);
150 spin_unlock(&chan_lock); 153 spin_unlock(&chan_lock);
151 154
152 return 0; 155 return 0;
@@ -416,7 +419,6 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
416 struct sock *sk = sock->sk; 419 struct sock *sk = sock->sk;
417 struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr; 420 struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
418 struct pppox_sock *po = pppox_sk(sk); 421 struct pppox_sock *po = pppox_sk(sk);
419 struct pptp_opt *opt = &po->proto.pptp;
420 int error = 0; 422 int error = 0;
421 423
422 if (sockaddr_len < sizeof(struct sockaddr_pppox)) 424 if (sockaddr_len < sizeof(struct sockaddr_pppox))
@@ -424,10 +426,22 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
424 426
425 lock_sock(sk); 427 lock_sock(sk);
426 428
427 opt->src_addr = sp->sa_addr.pptp; 429 if (sk->sk_state & PPPOX_DEAD) {
428 if (add_chan(po)) 430 error = -EALREADY;
431 goto out;
432 }
433
434 if (sk->sk_state & PPPOX_BOUND) {
429 error = -EBUSY; 435 error = -EBUSY;
436 goto out;
437 }
438
439 if (add_chan(po, &sp->sa_addr.pptp))
440 error = -EBUSY;
441 else
442 sk->sk_state |= PPPOX_BOUND;
430 443
444out:
431 release_sock(sk); 445 release_sock(sk);
432 return error; 446 return error;
433} 447}
@@ -498,7 +512,7 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
498 } 512 }
499 513
500 opt->dst_addr = sp->sa_addr.pptp; 514 opt->dst_addr = sp->sa_addr.pptp;
501 sk->sk_state = PPPOX_CONNECTED; 515 sk->sk_state |= PPPOX_CONNECTED;
502 516
503 end: 517 end:
504 release_sock(sk); 518 release_sock(sk);
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 7f83504dfa69..cdde59089f72 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -395,6 +395,10 @@ config USB_NET_RNDIS_HOST
395 The protocol specification is incomplete, and is controlled by 395 The protocol specification is incomplete, and is controlled by
396 (and for) Microsoft; it isn't an "Open" ecosystem or market. 396 (and for) Microsoft; it isn't an "Open" ecosystem or market.
397 397
398config USB_NET_CDC_SUBSET_ENABLE
399 tristate
400 depends on USB_NET_CDC_SUBSET
401
398config USB_NET_CDC_SUBSET 402config USB_NET_CDC_SUBSET
399 tristate "Simple USB Network Links (CDC Ethernet subset)" 403 tristate "Simple USB Network Links (CDC Ethernet subset)"
400 depends on USB_USBNET 404 depends on USB_USBNET
@@ -413,6 +417,7 @@ config USB_NET_CDC_SUBSET
413config USB_ALI_M5632 417config USB_ALI_M5632
414 bool "ALi M5632 based 'USB 2.0 Data Link' cables" 418 bool "ALi M5632 based 'USB 2.0 Data Link' cables"
415 depends on USB_NET_CDC_SUBSET 419 depends on USB_NET_CDC_SUBSET
420 select USB_NET_CDC_SUBSET_ENABLE
416 help 421 help
417 Choose this option if you're using a host-to-host cable 422 Choose this option if you're using a host-to-host cable
418 based on this design, which supports USB 2.0 high speed. 423 based on this design, which supports USB 2.0 high speed.
@@ -420,6 +425,7 @@ config USB_ALI_M5632
420config USB_AN2720 425config USB_AN2720
421 bool "AnchorChips 2720 based cables (Xircom PGUNET, ...)" 426 bool "AnchorChips 2720 based cables (Xircom PGUNET, ...)"
422 depends on USB_NET_CDC_SUBSET 427 depends on USB_NET_CDC_SUBSET
428 select USB_NET_CDC_SUBSET_ENABLE
423 help 429 help
424 Choose this option if you're using a host-to-host cable 430 Choose this option if you're using a host-to-host cable
425 based on this design. Note that AnchorChips is now a 431 based on this design. Note that AnchorChips is now a
@@ -428,6 +434,7 @@ config USB_AN2720
428config USB_BELKIN 434config USB_BELKIN
429 bool "eTEK based host-to-host cables (Advance, Belkin, ...)" 435 bool "eTEK based host-to-host cables (Advance, Belkin, ...)"
430 depends on USB_NET_CDC_SUBSET 436 depends on USB_NET_CDC_SUBSET
437 select USB_NET_CDC_SUBSET_ENABLE
431 default y 438 default y
432 help 439 help
433 Choose this option if you're using a host-to-host cable 440 Choose this option if you're using a host-to-host cable
@@ -437,6 +444,7 @@ config USB_BELKIN
437config USB_ARMLINUX 444config USB_ARMLINUX
438 bool "Embedded ARM Linux links (iPaq, ...)" 445 bool "Embedded ARM Linux links (iPaq, ...)"
439 depends on USB_NET_CDC_SUBSET 446 depends on USB_NET_CDC_SUBSET
447 select USB_NET_CDC_SUBSET_ENABLE
440 default y 448 default y
441 help 449 help
442 Choose this option to support the "usb-eth" networking driver 450 Choose this option to support the "usb-eth" networking driver
@@ -454,6 +462,7 @@ config USB_ARMLINUX
454config USB_EPSON2888 462config USB_EPSON2888
455 bool "Epson 2888 based firmware (DEVELOPMENT)" 463 bool "Epson 2888 based firmware (DEVELOPMENT)"
456 depends on USB_NET_CDC_SUBSET 464 depends on USB_NET_CDC_SUBSET
465 select USB_NET_CDC_SUBSET_ENABLE
457 help 466 help
458 Choose this option to support the usb networking links used 467 Choose this option to support the usb networking links used
459 by some sample firmware from Epson. 468 by some sample firmware from Epson.
@@ -461,6 +470,7 @@ config USB_EPSON2888
461config USB_KC2190 470config USB_KC2190
462 bool "KT Technology KC2190 based cables (InstaNet)" 471 bool "KT Technology KC2190 based cables (InstaNet)"
463 depends on USB_NET_CDC_SUBSET 472 depends on USB_NET_CDC_SUBSET
473 select USB_NET_CDC_SUBSET_ENABLE
464 help 474 help
465 Choose this option if you're using a host-to-host cable 475 Choose this option if you're using a host-to-host cable
466 with one of these chips. 476 with one of these chips.
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index b5f04068dbe4..37fb46aee341 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -23,7 +23,7 @@ obj-$(CONFIG_USB_NET_GL620A) += gl620a.o
23obj-$(CONFIG_USB_NET_NET1080) += net1080.o 23obj-$(CONFIG_USB_NET_NET1080) += net1080.o
24obj-$(CONFIG_USB_NET_PLUSB) += plusb.o 24obj-$(CONFIG_USB_NET_PLUSB) += plusb.o
25obj-$(CONFIG_USB_NET_RNDIS_HOST) += rndis_host.o 25obj-$(CONFIG_USB_NET_RNDIS_HOST) += rndis_host.o
26obj-$(CONFIG_USB_NET_CDC_SUBSET) += cdc_subset.o 26obj-$(CONFIG_USB_NET_CDC_SUBSET_ENABLE) += cdc_subset.o
27obj-$(CONFIG_USB_NET_ZAURUS) += zaurus.o 27obj-$(CONFIG_USB_NET_ZAURUS) += zaurus.o
28obj-$(CONFIG_USB_NET_MCS7830) += mcs7830.o 28obj-$(CONFIG_USB_NET_MCS7830) += mcs7830.o
29obj-$(CONFIG_USB_USBNET) += usbnet.o 29obj-$(CONFIG_USB_USBNET) += usbnet.o
diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c
index 224e7d82de6d..cf77f2dffa69 100644
--- a/drivers/net/usb/ax88172a.c
+++ b/drivers/net/usb/ax88172a.c
@@ -134,7 +134,6 @@ static void ax88172a_remove_mdio(struct usbnet *dev)
134 134
135 netdev_info(dev->net, "deregistering mdio bus %s\n", priv->mdio->id); 135 netdev_info(dev->net, "deregistering mdio bus %s\n", priv->mdio->id);
136 mdiobus_unregister(priv->mdio); 136 mdiobus_unregister(priv->mdio);
137 kfree(priv->mdio->irq);
138 mdiobus_free(priv->mdio); 137 mdiobus_free(priv->mdio);
139} 138}
140 139
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index dc0212c3cc28..86ba30ba35e8 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -837,7 +837,11 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
837 837
838 iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber; 838 iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber;
839 839
840 /* reset data interface */ 840 /* Reset data interface. Some devices will not reset properly
841 * unless they are configured first. Toggle the altsetting to
842 * force a reset
843 */
844 usb_set_interface(dev->udev, iface_no, data_altsetting);
841 temp = usb_set_interface(dev->udev, iface_no, 0); 845 temp = usb_set_interface(dev->udev, iface_no, 0);
842 if (temp) { 846 if (temp) {
843 dev_dbg(&intf->dev, "set interface failed\n"); 847 dev_dbg(&intf->dev, "set interface failed\n");
@@ -984,8 +988,6 @@ EXPORT_SYMBOL_GPL(cdc_ncm_select_altsetting);
984 988
985static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf) 989static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
986{ 990{
987 int ret;
988
989 /* MBIM backwards compatible function? */ 991 /* MBIM backwards compatible function? */
990 if (cdc_ncm_select_altsetting(intf) != CDC_NCM_COMM_ALTSETTING_NCM) 992 if (cdc_ncm_select_altsetting(intf) != CDC_NCM_COMM_ALTSETTING_NCM)
991 return -ENODEV; 993 return -ENODEV;
@@ -994,16 +996,7 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
994 * Additionally, generic NCM devices are assumed to accept arbitrarily 996 * Additionally, generic NCM devices are assumed to accept arbitrarily
995 * placed NDP. 997 * placed NDP.
996 */ 998 */
997 ret = cdc_ncm_bind_common(dev, intf, CDC_NCM_DATA_ALTSETTING_NCM, 0); 999 return cdc_ncm_bind_common(dev, intf, CDC_NCM_DATA_ALTSETTING_NCM, 0);
998
999 /*
1000 * We should get an event when network connection is "connected" or
1001 * "disconnected". Set network connection in "disconnected" state
1002 * (carrier is OFF) during attach, so the IP network stack does not
1003 * start IPv6 negotiation and more.
1004 */
1005 usbnet_link_change(dev, 0, 0);
1006 return ret;
1007} 1000}
1008 1001
1009static void cdc_ncm_align_tail(struct sk_buff *skb, size_t modulus, size_t remainder, size_t max) 1002static void cdc_ncm_align_tail(struct sk_buff *skb, size_t modulus, size_t remainder, size_t max)
@@ -1586,7 +1579,8 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
1586 1579
1587static const struct driver_info cdc_ncm_info = { 1580static const struct driver_info cdc_ncm_info = {
1588 .description = "CDC NCM", 1581 .description = "CDC NCM",
1589 .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET, 1582 .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
1583 | FLAG_LINK_INTR,
1590 .bind = cdc_ncm_bind, 1584 .bind = cdc_ncm_bind,
1591 .unbind = cdc_ncm_unbind, 1585 .unbind = cdc_ncm_unbind,
1592 .manage_power = usbnet_manage_power, 1586 .manage_power = usbnet_manage_power,
@@ -1599,7 +1593,7 @@ static const struct driver_info cdc_ncm_info = {
1599static const struct driver_info wwan_info = { 1593static const struct driver_info wwan_info = {
1600 .description = "Mobile Broadband Network Device", 1594 .description = "Mobile Broadband Network Device",
1601 .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET 1595 .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
1602 | FLAG_WWAN, 1596 | FLAG_LINK_INTR | FLAG_WWAN,
1603 .bind = cdc_ncm_bind, 1597 .bind = cdc_ncm_bind,
1604 .unbind = cdc_ncm_unbind, 1598 .unbind = cdc_ncm_unbind,
1605 .manage_power = usbnet_manage_power, 1599 .manage_power = usbnet_manage_power,
@@ -1612,7 +1606,7 @@ static const struct driver_info wwan_info = {
1612static const struct driver_info wwan_noarp_info = { 1606static const struct driver_info wwan_noarp_info = {
1613 .description = "Mobile Broadband Network Device (NO ARP)", 1607 .description = "Mobile Broadband Network Device (NO ARP)",
1614 .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET 1608 .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
1615 | FLAG_WWAN | FLAG_NOARP, 1609 | FLAG_LINK_INTR | FLAG_WWAN | FLAG_NOARP,
1616 .bind = cdc_ncm_bind, 1610 .bind = cdc_ncm_bind,
1617 .unbind = cdc_ncm_unbind, 1611 .unbind = cdc_ncm_unbind,
1618 .manage_power = usbnet_manage_power, 1612 .manage_power = usbnet_manage_power,
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 2ed53331bfb2..1c299b8a162d 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -36,7 +36,7 @@
36#define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>" 36#define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
37#define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices" 37#define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices"
38#define DRIVER_NAME "lan78xx" 38#define DRIVER_NAME "lan78xx"
39#define DRIVER_VERSION "1.0.1" 39#define DRIVER_VERSION "1.0.2"
40 40
41#define TX_TIMEOUT_JIFFIES (5 * HZ) 41#define TX_TIMEOUT_JIFFIES (5 * HZ)
42#define THROTTLE_JIFFIES (HZ / 8) 42#define THROTTLE_JIFFIES (HZ / 8)
@@ -462,32 +462,53 @@ static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
462 u32 length, u8 *data) 462 u32 length, u8 *data)
463{ 463{
464 u32 val; 464 u32 val;
465 u32 saved;
465 int i, ret; 466 int i, ret;
467 int retval;
466 468
467 ret = lan78xx_eeprom_confirm_not_busy(dev); 469 /* Depending on the chip, some EEPROM pins are muxed with the LED function.
 468 if (ret) 470 * Disable & restore the LED function to access the EEPROM.
469 return ret; 471 */
472 ret = lan78xx_read_reg(dev, HW_CFG, &val);
473 saved = val;
474 if ((dev->devid & ID_REV_CHIP_ID_MASK_) == 0x78000000) {
475 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
476 ret = lan78xx_write_reg(dev, HW_CFG, val);
477 }
478
479 retval = lan78xx_eeprom_confirm_not_busy(dev);
480 if (retval)
481 return retval;
470 482
471 for (i = 0; i < length; i++) { 483 for (i = 0; i < length; i++) {
472 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_; 484 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
473 val |= (offset & E2P_CMD_EPC_ADDR_MASK_); 485 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
474 ret = lan78xx_write_reg(dev, E2P_CMD, val); 486 ret = lan78xx_write_reg(dev, E2P_CMD, val);
475 if (unlikely(ret < 0)) 487 if (unlikely(ret < 0)) {
476 return -EIO; 488 retval = -EIO;
489 goto exit;
490 }
477 491
478 ret = lan78xx_wait_eeprom(dev); 492 retval = lan78xx_wait_eeprom(dev);
479 if (ret < 0) 493 if (retval < 0)
480 return ret; 494 goto exit;
481 495
482 ret = lan78xx_read_reg(dev, E2P_DATA, &val); 496 ret = lan78xx_read_reg(dev, E2P_DATA, &val);
483 if (unlikely(ret < 0)) 497 if (unlikely(ret < 0)) {
484 return -EIO; 498 retval = -EIO;
499 goto exit;
500 }
485 501
486 data[i] = val & 0xFF; 502 data[i] = val & 0xFF;
487 offset++; 503 offset++;
488 } 504 }
489 505
490 return 0; 506 retval = 0;
507exit:
508 if ((dev->devid & ID_REV_CHIP_ID_MASK_) == 0x78000000)
509 ret = lan78xx_write_reg(dev, HW_CFG, saved);
510
511 return retval;
491} 512}
492 513
493static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset, 514static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
@@ -509,44 +530,67 @@ static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
509 u32 length, u8 *data) 530 u32 length, u8 *data)
510{ 531{
511 u32 val; 532 u32 val;
533 u32 saved;
512 int i, ret; 534 int i, ret;
535 int retval;
513 536
514 ret = lan78xx_eeprom_confirm_not_busy(dev); 537 /* Depending on the chip, some EEPROM pins are muxed with the LED function.
 515 if (ret) 538 * Disable & restore the LED function to access the EEPROM.
516 return ret; 539 */
540 ret = lan78xx_read_reg(dev, HW_CFG, &val);
541 saved = val;
542 if ((dev->devid & ID_REV_CHIP_ID_MASK_) == 0x78000000) {
543 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
544 ret = lan78xx_write_reg(dev, HW_CFG, val);
545 }
546
547 retval = lan78xx_eeprom_confirm_not_busy(dev);
548 if (retval)
549 goto exit;
517 550
518 /* Issue write/erase enable command */ 551 /* Issue write/erase enable command */
519 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_; 552 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
520 ret = lan78xx_write_reg(dev, E2P_CMD, val); 553 ret = lan78xx_write_reg(dev, E2P_CMD, val);
521 if (unlikely(ret < 0)) 554 if (unlikely(ret < 0)) {
522 return -EIO; 555 retval = -EIO;
556 goto exit;
557 }
523 558
524 ret = lan78xx_wait_eeprom(dev); 559 retval = lan78xx_wait_eeprom(dev);
525 if (ret < 0) 560 if (retval < 0)
526 return ret; 561 goto exit;
527 562
528 for (i = 0; i < length; i++) { 563 for (i = 0; i < length; i++) {
529 /* Fill data register */ 564 /* Fill data register */
530 val = data[i]; 565 val = data[i];
531 ret = lan78xx_write_reg(dev, E2P_DATA, val); 566 ret = lan78xx_write_reg(dev, E2P_DATA, val);
532 if (ret < 0) 567 if (ret < 0) {
533 return ret; 568 retval = -EIO;
569 goto exit;
570 }
534 571
535 /* Send "write" command */ 572 /* Send "write" command */
536 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_; 573 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
537 val |= (offset & E2P_CMD_EPC_ADDR_MASK_); 574 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
538 ret = lan78xx_write_reg(dev, E2P_CMD, val); 575 ret = lan78xx_write_reg(dev, E2P_CMD, val);
539 if (ret < 0) 576 if (ret < 0) {
540 return ret; 577 retval = -EIO;
578 goto exit;
579 }
541 580
542 ret = lan78xx_wait_eeprom(dev); 581 retval = lan78xx_wait_eeprom(dev);
543 if (ret < 0) 582 if (retval < 0)
544 return ret; 583 goto exit;
545 584
546 offset++; 585 offset++;
547 } 586 }
548 587
549 return 0; 588 retval = 0;
589exit:
590 if ((dev->devid & ID_REV_CHIP_ID_MASK_) == 0x78000000)
591 ret = lan78xx_write_reg(dev, HW_CFG, saved);
592
593 return retval;
550} 594}
551 595
552static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset, 596static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
@@ -904,7 +948,6 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
904 948
905 if (!phydev->link && dev->link_on) { 949 if (!phydev->link && dev->link_on) {
906 dev->link_on = false; 950 dev->link_on = false;
907 netif_carrier_off(dev->net);
908 951
909 /* reset MAC */ 952 /* reset MAC */
910 ret = lan78xx_read_reg(dev, MAC_CR, &buf); 953 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
@@ -914,6 +957,8 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
914 ret = lan78xx_write_reg(dev, MAC_CR, buf); 957 ret = lan78xx_write_reg(dev, MAC_CR, buf);
915 if (unlikely(ret < 0)) 958 if (unlikely(ret < 0))
916 return -EIO; 959 return -EIO;
960
961 phy_mac_interrupt(phydev, 0);
917 } else if (phydev->link && !dev->link_on) { 962 } else if (phydev->link && !dev->link_on) {
918 dev->link_on = true; 963 dev->link_on = true;
919 964
@@ -953,7 +998,7 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
953 ethtool_cmd_speed(&ecmd), ecmd.duplex, ladv, radv); 998 ethtool_cmd_speed(&ecmd), ecmd.duplex, ladv, radv);
954 999
955 ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv); 1000 ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv);
956 netif_carrier_on(dev->net); 1001 phy_mac_interrupt(phydev, 1);
957 } 1002 }
958 1003
959 return ret; 1004 return ret;
@@ -1495,7 +1540,6 @@ done:
1495static int lan78xx_mdio_init(struct lan78xx_net *dev) 1540static int lan78xx_mdio_init(struct lan78xx_net *dev)
1496{ 1541{
1497 int ret; 1542 int ret;
1498 int i;
1499 1543
1500 dev->mdiobus = mdiobus_alloc(); 1544 dev->mdiobus = mdiobus_alloc();
1501 if (!dev->mdiobus) { 1545 if (!dev->mdiobus) {
@@ -1511,10 +1555,6 @@ static int lan78xx_mdio_init(struct lan78xx_net *dev)
1511 snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d", 1555 snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
1512 dev->udev->bus->busnum, dev->udev->devnum); 1556 dev->udev->bus->busnum, dev->udev->devnum);
1513 1557
1514 /* handle our own interrupt */
1515 for (i = 0; i < PHY_MAX_ADDR; i++)
1516 dev->mdiobus->irq[i] = PHY_IGNORE_INTERRUPT;
1517
1518 switch (dev->devid & ID_REV_CHIP_ID_MASK_) { 1558 switch (dev->devid & ID_REV_CHIP_ID_MASK_) {
1519 case 0x78000000: 1559 case 0x78000000:
1520 case 0x78500000: 1560 case 0x78500000:
@@ -1558,6 +1598,16 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
1558 return -EIO; 1598 return -EIO;
1559 } 1599 }
1560 1600
1601 /* Enable PHY interrupts.
1602 * We handle our own interrupt
1603 */
1604 ret = phy_read(phydev, LAN88XX_INT_STS);
1605 ret = phy_write(phydev, LAN88XX_INT_MASK,
1606 LAN88XX_INT_MASK_MDINTPIN_EN_ |
1607 LAN88XX_INT_MASK_LINK_CHANGE_);
1608
1609 phydev->irq = PHY_IGNORE_INTERRUPT;
1610
1561 ret = phy_connect_direct(dev->net, phydev, 1611 ret = phy_connect_direct(dev->net, phydev,
1562 lan78xx_link_status_change, 1612 lan78xx_link_status_change,
1563 PHY_INTERFACE_MODE_GMII); 1613 PHY_INTERFACE_MODE_GMII);
@@ -1580,14 +1630,6 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
1580 SUPPORTED_Pause | SUPPORTED_Asym_Pause); 1630 SUPPORTED_Pause | SUPPORTED_Asym_Pause);
1581 genphy_config_aneg(phydev); 1631 genphy_config_aneg(phydev);
1582 1632
1583 /* Workaround to enable PHY interrupt.
1584 * phy_start_interrupts() is API for requesting and enabling
1585 * PHY interrupt. However, USB-to-Ethernet device can't use
1586 * request_irq() called in phy_start_interrupts().
1587 * Set PHY to PHY_HALTED and call phy_start()
1588 * to make a call to phy_enable_interrupts()
1589 */
1590 phy_stop(phydev);
1591 phy_start(phydev); 1633 phy_start(phydev);
1592 1634
1593 netif_dbg(dev, ifup, dev->net, "phy initialised successfully"); 1635 netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
@@ -2221,7 +2263,9 @@ netdev_tx_t lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2221 if (skb2) { 2263 if (skb2) {
2222 skb_queue_tail(&dev->txq_pend, skb2); 2264 skb_queue_tail(&dev->txq_pend, skb2);
2223 2265
2224 if (skb_queue_len(&dev->txq_pend) > 10) 2266 /* throttle the TX path at speeds slower than SuperSpeed USB */
2267 if ((dev->udev->speed < USB_SPEED_SUPER) &&
2268 (skb_queue_len(&dev->txq_pend) > 10))
2225 netif_stop_queue(net); 2269 netif_stop_queue(net);
2226 } else { 2270 } else {
2227 netif_dbg(dev, tx_err, dev->net, 2271 netif_dbg(dev, tx_err, dev->net,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 23e9880791fc..a3a4ccf7cf52 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -637,6 +637,7 @@ static const struct usb_device_id products[] = {
637 637
638 /* 3. Combined interface devices matching on interface number */ 638 /* 3. Combined interface devices matching on interface number */
639 {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */ 639 {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
640 {QMI_FIXED_INTF(0x05c6, 0x6001, 3)}, /* 4G LTE usb-modem U901 */
640 {QMI_FIXED_INTF(0x05c6, 0x7000, 0)}, 641 {QMI_FIXED_INTF(0x05c6, 0x7000, 0)},
641 {QMI_FIXED_INTF(0x05c6, 0x7001, 1)}, 642 {QMI_FIXED_INTF(0x05c6, 0x7001, 1)},
642 {QMI_FIXED_INTF(0x05c6, 0x7002, 1)}, 643 {QMI_FIXED_INTF(0x05c6, 0x7002, 1)},
@@ -860,8 +861,10 @@ static const struct usb_device_id products[] = {
860 {QMI_FIXED_INTF(0x1199, 0x9056, 8)}, /* Sierra Wireless Modem */ 861 {QMI_FIXED_INTF(0x1199, 0x9056, 8)}, /* Sierra Wireless Modem */
861 {QMI_FIXED_INTF(0x1199, 0x9057, 8)}, 862 {QMI_FIXED_INTF(0x1199, 0x9057, 8)},
862 {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */ 863 {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */
863 {QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx/EM74xx */ 864 {QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx */
864 {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx/EM74xx */ 865 {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx */
866 {QMI_FIXED_INTF(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */
867 {QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */
865 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ 868 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
866 {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ 869 {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
867 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ 870 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
@@ -884,6 +887,7 @@ static const struct usb_device_id products[] = {
884 {QMI_FIXED_INTF(0x413c, 0x81a8, 8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */ 887 {QMI_FIXED_INTF(0x413c, 0x81a8, 8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
885 {QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ 888 {QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
886 {QMI_FIXED_INTF(0x413c, 0x81b1, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */ 889 {QMI_FIXED_INTF(0x413c, 0x81b1, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
890 {QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
887 {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */ 891 {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
888 {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */ 892 {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */
889 {QMI_FIXED_INTF(0x1e0e, 0x9001, 5)}, /* SIMCom 7230E */ 893 {QMI_FIXED_INTF(0x1e0e, 0x9001, 5)}, /* SIMCom 7230E */
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 0b0ba7ef14e4..10798128c03f 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1769,6 +1769,13 @@ out3:
1769 if (info->unbind) 1769 if (info->unbind)
1770 info->unbind (dev, udev); 1770 info->unbind (dev, udev);
1771out1: 1771out1:
1772 /* subdrivers must undo all they did in bind() if they
1773 * fail it, but we may fail later and a deferred kevent
1774 * may trigger an error resubmitting itself and, worse,
1775 * schedule a timer. So we kill it all just in case.
1776 */
1777 cancel_work_sync(&dev->kevent);
1778 del_timer_sync(&dev->delay);
1772 free_netdev(net); 1779 free_netdev(net);
1773out: 1780out:
1774 return status; 1781 return status;
diff --git a/drivers/net/vmxnet3/vmxnet3_defs.h b/drivers/net/vmxnet3/vmxnet3_defs.h
index 221a53025fd0..72ba8ae7f09a 100644
--- a/drivers/net/vmxnet3/vmxnet3_defs.h
+++ b/drivers/net/vmxnet3/vmxnet3_defs.h
@@ -377,7 +377,7 @@ union Vmxnet3_GenericDesc {
377#define VMXNET3_TX_RING_MAX_SIZE 4096 377#define VMXNET3_TX_RING_MAX_SIZE 4096
378#define VMXNET3_TC_RING_MAX_SIZE 4096 378#define VMXNET3_TC_RING_MAX_SIZE 4096
379#define VMXNET3_RX_RING_MAX_SIZE 4096 379#define VMXNET3_RX_RING_MAX_SIZE 4096
380#define VMXNET3_RX_RING2_MAX_SIZE 2048 380#define VMXNET3_RX_RING2_MAX_SIZE 4096
381#define VMXNET3_RC_RING_MAX_SIZE 8192 381#define VMXNET3_RC_RING_MAX_SIZE 8192
382 382
383/* a list of reasons for queue stop */ 383/* a list of reasons for queue stop */
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 0cbf520cea77..fc895d0e85d9 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -814,7 +814,7 @@ vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
814 814
815 815
816/* 816/*
817 * parse and copy relevant protocol headers: 817 * parse relevant protocol headers:
818 * For a tso pkt, relevant headers are L2/3/4 including options 818 * For a tso pkt, relevant headers are L2/3/4 including options
819 * For a pkt requesting csum offloading, they are L2/3 and may include L4 819 * For a pkt requesting csum offloading, they are L2/3 and may include L4
820 * if it's a TCP/UDP pkt 820 * if it's a TCP/UDP pkt
@@ -827,15 +827,14 @@ vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
827 * Other effects: 827 * Other effects:
828 * 1. related *ctx fields are updated. 828 * 1. related *ctx fields are updated.
829 * 2. ctx->copy_size is # of bytes copied 829 * 2. ctx->copy_size is # of bytes copied
830 * 3. the portion copied is guaranteed to be in the linear part 830 * 3. the portion to be copied is guaranteed to be in the linear part
831 * 831 *
832 */ 832 */
833static int 833static int
834vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, 834vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
835 struct vmxnet3_tx_ctx *ctx, 835 struct vmxnet3_tx_ctx *ctx,
836 struct vmxnet3_adapter *adapter) 836 struct vmxnet3_adapter *adapter)
837{ 837{
838 struct Vmxnet3_TxDataDesc *tdd;
839 u8 protocol = 0; 838 u8 protocol = 0;
840 839
841 if (ctx->mss) { /* TSO */ 840 if (ctx->mss) { /* TSO */
@@ -892,16 +891,34 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
892 return 0; 891 return 0;
893 } 892 }
894 893
894 return 1;
895err:
896 return -1;
897}
898
899/*
900 * copy relevant protocol headers to the transmit ring:
901 * For a tso pkt, relevant headers are L2/3/4 including options
902 * For a pkt requesting csum offloading, they are L2/3 and may include L4
903 * if it's a TCP/UDP pkt
904 *
905 *
906 * Note that this requires that vmxnet3_parse_hdr be called first to set the
907 * appropriate bits in ctx
908 */
909static void
910vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
911 struct vmxnet3_tx_ctx *ctx,
912 struct vmxnet3_adapter *adapter)
913{
914 struct Vmxnet3_TxDataDesc *tdd;
915
895 tdd = tq->data_ring.base + tq->tx_ring.next2fill; 916 tdd = tq->data_ring.base + tq->tx_ring.next2fill;
896 917
897 memcpy(tdd->data, skb->data, ctx->copy_size); 918 memcpy(tdd->data, skb->data, ctx->copy_size);
898 netdev_dbg(adapter->netdev, 919 netdev_dbg(adapter->netdev,
899 "copy %u bytes to dataRing[%u]\n", 920 "copy %u bytes to dataRing[%u]\n",
900 ctx->copy_size, tq->tx_ring.next2fill); 921 ctx->copy_size, tq->tx_ring.next2fill);
901 return 1;
902
903err:
904 return -1;
905} 922}
906 923
907 924
@@ -998,22 +1015,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
998 } 1015 }
999 } 1016 }
1000 1017
1001 spin_lock_irqsave(&tq->tx_lock, flags); 1018 ret = vmxnet3_parse_hdr(skb, tq, &ctx, adapter);
1002
1003 if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
1004 tq->stats.tx_ring_full++;
1005 netdev_dbg(adapter->netdev,
1006 "tx queue stopped on %s, next2comp %u"
1007 " next2fill %u\n", adapter->netdev->name,
1008 tq->tx_ring.next2comp, tq->tx_ring.next2fill);
1009
1010 vmxnet3_tq_stop(tq, adapter);
1011 spin_unlock_irqrestore(&tq->tx_lock, flags);
1012 return NETDEV_TX_BUSY;
1013 }
1014
1015
1016 ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter);
1017 if (ret >= 0) { 1019 if (ret >= 0) {
1018 BUG_ON(ret <= 0 && ctx.copy_size != 0); 1020 BUG_ON(ret <= 0 && ctx.copy_size != 0);
1019 /* hdrs parsed, check against other limits */ 1021 /* hdrs parsed, check against other limits */
@@ -1033,9 +1035,26 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
1033 } 1035 }
1034 } else { 1036 } else {
1035 tq->stats.drop_hdr_inspect_err++; 1037 tq->stats.drop_hdr_inspect_err++;
1036 goto unlock_drop_pkt; 1038 goto drop_pkt;
1037 } 1039 }
1038 1040
1041 spin_lock_irqsave(&tq->tx_lock, flags);
1042
1043 if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
1044 tq->stats.tx_ring_full++;
1045 netdev_dbg(adapter->netdev,
1046 "tx queue stopped on %s, next2comp %u"
1047 " next2fill %u\n", adapter->netdev->name,
1048 tq->tx_ring.next2comp, tq->tx_ring.next2fill);
1049
1050 vmxnet3_tq_stop(tq, adapter);
1051 spin_unlock_irqrestore(&tq->tx_lock, flags);
1052 return NETDEV_TX_BUSY;
1053 }
1054
1055
1056 vmxnet3_copy_hdr(skb, tq, &ctx, adapter);
1057
1039 /* fill tx descs related to addr & len */ 1058 /* fill tx descs related to addr & len */
1040 if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter)) 1059 if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter))
1041 goto unlock_drop_pkt; 1060 goto unlock_drop_pkt;
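The hunks above split vmxnet3_parse_and_copy_hdr() so that header parsing no longer happens under tq->tx_lock; only the ring-space check, the header copy and the descriptor fill still run with the lock held. Below is a minimal userspace sketch of that reordering, assuming made-up fake_* types and a pthread mutex in place of the driver's queue, lock and NETDEV_TX_BUSY handling; it illustrates the locking pattern, not the driver code.

#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct fake_txq {
    pthread_mutex_t lock;
    int desc_avail;
    char data_ring[128];
};

struct fake_ctx {
    size_t copy_size;               /* filled in by the parse step */
};

static struct fake_txq tq = { PTHREAD_MUTEX_INITIALIZER, 4, { 0 } };

/* models vmxnet3_parse_hdr(): inspects the packet only, no ring access */
static int fake_parse_hdr(const char *pkt, struct fake_ctx *ctx)
{
    size_t n = strlen(pkt);

    ctx->copy_size = n > 64 ? 64 : n;   /* pretend the headers end here */
    return ctx->copy_size ? 1 : -1;
}

/* models vmxnet3_copy_hdr(): touches the data ring, caller holds the lock */
static void fake_copy_hdr(struct fake_txq *q, const char *pkt,
                          const struct fake_ctx *ctx)
{
    memcpy(q->data_ring, pkt, ctx->copy_size);
}

static int fake_xmit(const char *pkt, int needed_descs)
{
    struct fake_ctx ctx;

    if (fake_parse_hdr(pkt, &ctx) < 0)  /* drop before taking the lock */
        return -1;

    pthread_mutex_lock(&tq.lock);
    if (needed_descs > tq.desc_avail) { /* ring full: stop the queue */
        pthread_mutex_unlock(&tq.lock);
        return 1;                       /* stands in for NETDEV_TX_BUSY */
    }
    fake_copy_hdr(&tq, pkt, &ctx);      /* only the copy runs locked */
    tq.desc_avail -= needed_descs;
    pthread_mutex_unlock(&tq.lock);
    return 0;
}

int main(void)
{
    printf("xmit -> %d\n", fake_xmit("ethhdr+iphdr+tcphdr", 2));
    return 0;
}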
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index bdb8a6c0f8aa..729c344e6774 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,10 @@
69/* 69/*
70 * Version numbers 70 * Version numbers
71 */ 71 */
72#define VMXNET3_DRIVER_VERSION_STRING "1.4.5.0-k" 72#define VMXNET3_DRIVER_VERSION_STRING "1.4.6.0-k"
73 73
 74/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */ 74/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
75#define VMXNET3_DRIVER_VERSION_NUM 0x01040500 75#define VMXNET3_DRIVER_VERSION_NUM 0x01040600
76 76
77#if defined(CONFIG_PCI_MSI) 77#if defined(CONFIG_PCI_MSI)
78 /* RSS only makes sense if MSI-X is supported. */ 78 /* RSS only makes sense if MSI-X is supported. */
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 66addb7a7911..bdcf617a9d52 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -104,20 +104,23 @@ static struct dst_ops vrf_dst_ops = {
104#if IS_ENABLED(CONFIG_IPV6) 104#if IS_ENABLED(CONFIG_IPV6)
105static bool check_ipv6_frame(const struct sk_buff *skb) 105static bool check_ipv6_frame(const struct sk_buff *skb)
106{ 106{
107 const struct ipv6hdr *ipv6h = (struct ipv6hdr *)skb->data; 107 const struct ipv6hdr *ipv6h;
108 size_t hlen = sizeof(*ipv6h); 108 struct ipv6hdr _ipv6h;
109 bool rc = true; 109 bool rc = true;
110 110
111 if (skb->len < hlen) 111 ipv6h = skb_header_pointer(skb, 0, sizeof(_ipv6h), &_ipv6h);
112 if (!ipv6h)
112 goto out; 113 goto out;
113 114
114 if (ipv6h->nexthdr == NEXTHDR_ICMP) { 115 if (ipv6h->nexthdr == NEXTHDR_ICMP) {
115 const struct icmp6hdr *icmph; 116 const struct icmp6hdr *icmph;
117 struct icmp6hdr _icmph;
116 118
117 if (skb->len < hlen + sizeof(*icmph)) 119 icmph = skb_header_pointer(skb, sizeof(_ipv6h),
120 sizeof(_icmph), &_icmph);
121 if (!icmph)
118 goto out; 122 goto out;
119 123
120 icmph = (struct icmp6hdr *)(skb->data + sizeof(*ipv6h));
121 switch (icmph->icmp6_type) { 124 switch (icmph->icmp6_type) {
122 case NDISC_ROUTER_SOLICITATION: 125 case NDISC_ROUTER_SOLICITATION:
123 case NDISC_ROUTER_ADVERTISEMENT: 126 case NDISC_ROUTER_ADVERTISEMENT:
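The vrf change above replaces direct casts into skb->data with skb_header_pointer(), which copies the requested bytes out of the packet (linear or not) and returns NULL when the packet is too short. Below is a self-contained userspace analogue of that pattern, assuming a flat byte buffer for the packet; header_pointer() and the mini_* structs are stand-ins, not kernel API.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct mini_ipv6hdr {               /* 40 bytes, laid out like an IPv6 header */
    uint8_t  vtc;
    uint8_t  rest[5];
    uint8_t  nexthdr;
    uint8_t  hop_limit;
    uint8_t  addrs[32];
};

struct mini_icmp6hdr {
    uint8_t  type;
    uint8_t  code;
    uint16_t cksum;
};

/* returns a pointer to a validated copy of [offset, offset + len), or NULL */
static const void *header_pointer(const uint8_t *pkt, size_t pkt_len,
                                  size_t offset, size_t len, void *buf)
{
    if (offset + len > pkt_len)
        return NULL;
    memcpy(buf, pkt + offset, len);
    return buf;
}

static int is_icmp6(const uint8_t *pkt, size_t pkt_len)
{
    struct mini_ipv6hdr _ip6h;
    struct mini_icmp6hdr _icmp6h;
    const struct mini_ipv6hdr *ip6h;
    const struct mini_icmp6hdr *icmp6h;

    ip6h = header_pointer(pkt, pkt_len, 0, sizeof(_ip6h), &_ip6h);
    if (!ip6h || ip6h->nexthdr != 58)   /* 58 == ICMPv6 */
        return 0;

    icmp6h = header_pointer(pkt, pkt_len, sizeof(_ip6h),
                            sizeof(_icmp6h), &_icmp6h);
    return icmp6h != NULL;
}

int main(void)
{
    uint8_t pkt[64] = { 0x60 };         /* version 6, rest zeroed */

    pkt[offsetof(struct mini_ipv6hdr, nexthdr)] = 58;
    printf("icmp6? %d\n", is_icmp6(pkt, sizeof(pkt)));
    return 0;
}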
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 2d88c799d2ac..1c32bd104797 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -73,7 +73,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
73static int vxlan_net_id; 73static int vxlan_net_id;
74static struct rtnl_link_ops vxlan_link_ops; 74static struct rtnl_link_ops vxlan_link_ops;
75 75
76static const u8 all_zeros_mac[ETH_ALEN]; 76static const u8 all_zeros_mac[ETH_ALEN + 2];
77 77
78static int vxlan_sock_add(struct vxlan_dev *vxlan); 78static int vxlan_sock_add(struct vxlan_dev *vxlan);
79 79
@@ -931,8 +931,10 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
931 cb->nlh->nlmsg_seq, 931 cb->nlh->nlmsg_seq,
932 RTM_NEWNEIGH, 932 RTM_NEWNEIGH,
933 NLM_F_MULTI, rd); 933 NLM_F_MULTI, rd);
934 if (err < 0) 934 if (err < 0) {
935 cb->args[1] = err;
935 goto out; 936 goto out;
937 }
936skip: 938skip:
937 ++idx; 939 ++idx;
938 } 940 }
@@ -1306,8 +1308,10 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
1306 gbp = (struct vxlanhdr_gbp *)vxh; 1308 gbp = (struct vxlanhdr_gbp *)vxh;
1307 md->gbp = ntohs(gbp->policy_id); 1309 md->gbp = ntohs(gbp->policy_id);
1308 1310
1309 if (tun_dst) 1311 if (tun_dst) {
1310 tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT; 1312 tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT;
1313 tun_dst->u.tun_info.options_len = sizeof(*md);
1314 }
1311 1315
1312 if (gbp->dont_learn) 1316 if (gbp->dont_learn)
1313 md->gbp |= VXLAN_GBP_DONT_LEARN; 1317 md->gbp |= VXLAN_GBP_DONT_LEARN;
@@ -1985,11 +1989,6 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
1985 vxlan->cfg.port_max, true); 1989 vxlan->cfg.port_max, true);
1986 1990
1987 if (info) { 1991 if (info) {
1988 if (info->key.tun_flags & TUNNEL_CSUM)
1989 flags |= VXLAN_F_UDP_CSUM;
1990 else
1991 flags &= ~VXLAN_F_UDP_CSUM;
1992
1993 ttl = info->key.ttl; 1992 ttl = info->key.ttl;
1994 tos = info->key.tos; 1993 tos = info->key.tos;
1995 1994
@@ -2004,8 +2003,15 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
2004 goto drop; 2003 goto drop;
2005 sk = vxlan->vn4_sock->sock->sk; 2004 sk = vxlan->vn4_sock->sock->sk;
2006 2005
2007 if (info && (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)) 2006 if (info) {
2008 df = htons(IP_DF); 2007 if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
2008 df = htons(IP_DF);
2009
2010 if (info->key.tun_flags & TUNNEL_CSUM)
2011 flags |= VXLAN_F_UDP_CSUM;
2012 else
2013 flags &= ~VXLAN_F_UDP_CSUM;
2014 }
2009 2015
2010 memset(&fl4, 0, sizeof(fl4)); 2016 memset(&fl4, 0, sizeof(fl4));
2011 fl4.flowi4_oif = rdst ? rdst->remote_ifindex : 0; 2017 fl4.flowi4_oif = rdst ? rdst->remote_ifindex : 0;
@@ -2101,6 +2107,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
2101 return; 2107 return;
2102 } 2108 }
2103 2109
2110 if (info) {
2111 if (info->key.tun_flags & TUNNEL_CSUM)
2112 flags &= ~VXLAN_F_UDP_ZERO_CSUM6_TX;
2113 else
2114 flags |= VXLAN_F_UDP_ZERO_CSUM6_TX;
2115 }
2116
2104 ttl = ttl ? : ip6_dst_hoplimit(ndst); 2117 ttl = ttl ? : ip6_dst_hoplimit(ndst);
2105 err = vxlan6_xmit_skb(ndst, sk, skb, dev, &saddr, &dst->sin6.sin6_addr, 2118 err = vxlan6_xmit_skb(ndst, sk, skb, dev, &saddr, &dst->sin6.sin6_addr,
2106 0, ttl, src_port, dst_port, htonl(vni << 8), md, 2119 0, ttl, src_port, dst_port, htonl(vni << 8), md,
@@ -2162,9 +2175,11 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
2162#endif 2175#endif
2163 } 2176 }
2164 2177
2165 if (vxlan->flags & VXLAN_F_COLLECT_METADATA && 2178 if (vxlan->flags & VXLAN_F_COLLECT_METADATA) {
2166 info && info->mode & IP_TUNNEL_INFO_TX) { 2179 if (info && info->mode & IP_TUNNEL_INFO_TX)
2167 vxlan_xmit_one(skb, dev, NULL, false); 2180 vxlan_xmit_one(skb, dev, NULL, false);
2181 else
2182 kfree_skb(skb);
2168 return NETDEV_TX_OK; 2183 return NETDEV_TX_OK;
2169 } 2184 }
2170 2185
@@ -2358,29 +2373,43 @@ static void vxlan_set_multicast_list(struct net_device *dev)
2358{ 2373{
2359} 2374}
2360 2375
2361static int vxlan_change_mtu(struct net_device *dev, int new_mtu) 2376static int __vxlan_change_mtu(struct net_device *dev,
2377 struct net_device *lowerdev,
2378 struct vxlan_rdst *dst, int new_mtu, bool strict)
2362{ 2379{
2363 struct vxlan_dev *vxlan = netdev_priv(dev); 2380 int max_mtu = IP_MAX_MTU;
2364 struct vxlan_rdst *dst = &vxlan->default_dst;
2365 struct net_device *lowerdev;
2366 int max_mtu;
2367 2381
2368 lowerdev = __dev_get_by_index(vxlan->net, dst->remote_ifindex); 2382 if (lowerdev)
2369 if (lowerdev == NULL) 2383 max_mtu = lowerdev->mtu;
2370 return eth_change_mtu(dev, new_mtu);
2371 2384
2372 if (dst->remote_ip.sa.sa_family == AF_INET6) 2385 if (dst->remote_ip.sa.sa_family == AF_INET6)
2373 max_mtu = lowerdev->mtu - VXLAN6_HEADROOM; 2386 max_mtu -= VXLAN6_HEADROOM;
2374 else 2387 else
2375 max_mtu = lowerdev->mtu - VXLAN_HEADROOM; 2388 max_mtu -= VXLAN_HEADROOM;
2376 2389
2377 if (new_mtu < 68 || new_mtu > max_mtu) 2390 if (new_mtu < 68)
2378 return -EINVAL; 2391 return -EINVAL;
2379 2392
2393 if (new_mtu > max_mtu) {
2394 if (strict)
2395 return -EINVAL;
2396
2397 new_mtu = max_mtu;
2398 }
2399
2380 dev->mtu = new_mtu; 2400 dev->mtu = new_mtu;
2381 return 0; 2401 return 0;
2382} 2402}
2383 2403
2404static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
2405{
2406 struct vxlan_dev *vxlan = netdev_priv(dev);
2407 struct vxlan_rdst *dst = &vxlan->default_dst;
2408 struct net_device *lowerdev = __dev_get_by_index(vxlan->net,
2409 dst->remote_ifindex);
2410 return __vxlan_change_mtu(dev, lowerdev, dst, new_mtu, true);
2411}
2412
2384static int egress_ipv4_tun_info(struct net_device *dev, struct sk_buff *skb, 2413static int egress_ipv4_tun_info(struct net_device *dev, struct sk_buff *skb,
2385 struct ip_tunnel_info *info, 2414 struct ip_tunnel_info *info,
2386 __be16 sport, __be16 dport) 2415 __be16 sport, __be16 dport)
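The refactor above moves the MTU bounds check into __vxlan_change_mtu() with a strict flag: runtime ndo_change_mtu() calls reject out-of-range values, while the new conf->mtu path at configure time clamps to the lower device's headroom-adjusted maximum. The following is a small standalone model of that policy; the FAKE_* headroom constants are illustrative stand-ins for the driver's VXLAN_HEADROOM/VXLAN6_HEADROOM.

#include <stdbool.h>
#include <stdio.h>

#define FAKE_IP_MAX_MTU      65535
#define FAKE_VXLAN_HEADROOM  50
#define FAKE_VXLAN6_HEADROOM 70

static int change_mtu(int *dev_mtu, int lowerdev_mtu, bool ipv6,
                      int new_mtu, bool strict)
{
    int max_mtu = lowerdev_mtu > 0 ? lowerdev_mtu : FAKE_IP_MAX_MTU;

    max_mtu -= ipv6 ? FAKE_VXLAN6_HEADROOM : FAKE_VXLAN_HEADROOM;

    if (new_mtu < 68)
        return -1;                  /* stands in for -EINVAL */
    if (new_mtu > max_mtu) {
        if (strict)
            return -1;
        new_mtu = max_mtu;          /* clamp at configure time */
    }
    *dev_mtu = new_mtu;
    return 0;
}

int main(void)
{
    int mtu = 1500;

    /* runtime ndo_change_mtu(): strict, over-size request is rejected */
    printf("strict 9000 over 1500 lower -> %d\n",
           change_mtu(&mtu, 1500, false, 9000, true));

    /* conf->mtu at device setup: clamped instead of rejected */
    change_mtu(&mtu, 1500, false, 9000, false);
    printf("clamped mtu = %d\n", mtu);
    return 0;
}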
@@ -2514,6 +2543,7 @@ static void vxlan_setup(struct net_device *dev)
2514 dev->hw_features |= NETIF_F_GSO_SOFTWARE; 2543 dev->hw_features |= NETIF_F_GSO_SOFTWARE;
2515 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; 2544 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
2516 netif_keep_dst(dev); 2545 netif_keep_dst(dev);
2546 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
2517 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE; 2547 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
2518 2548
2519 INIT_LIST_HEAD(&vxlan->next); 2549 INIT_LIST_HEAD(&vxlan->next);
@@ -2756,6 +2786,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
2756 int err; 2786 int err;
2757 bool use_ipv6 = false; 2787 bool use_ipv6 = false;
2758 __be16 default_port = vxlan->cfg.dst_port; 2788 __be16 default_port = vxlan->cfg.dst_port;
2789 struct net_device *lowerdev = NULL;
2759 2790
2760 vxlan->net = src_net; 2791 vxlan->net = src_net;
2761 2792
@@ -2776,9 +2807,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
2776 } 2807 }
2777 2808
2778 if (conf->remote_ifindex) { 2809 if (conf->remote_ifindex) {
2779 struct net_device *lowerdev 2810 lowerdev = __dev_get_by_index(src_net, conf->remote_ifindex);
2780 = __dev_get_by_index(src_net, conf->remote_ifindex);
2781
2782 dst->remote_ifindex = conf->remote_ifindex; 2811 dst->remote_ifindex = conf->remote_ifindex;
2783 2812
2784 if (!lowerdev) { 2813 if (!lowerdev) {
@@ -2802,6 +2831,12 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
2802 needed_headroom = lowerdev->hard_header_len; 2831 needed_headroom = lowerdev->hard_header_len;
2803 } 2832 }
2804 2833
2834 if (conf->mtu) {
2835 err = __vxlan_change_mtu(dev, lowerdev, dst, conf->mtu, false);
2836 if (err)
2837 return err;
2838 }
2839
2805 if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA) 2840 if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA)
2806 needed_headroom += VXLAN6_HEADROOM; 2841 needed_headroom += VXLAN6_HEADROOM;
2807 else 2842 else
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 7a72407208b1..629225980463 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -1626,7 +1626,7 @@ try:
1626 if (state & Xpr) { 1626 if (state & Xpr) {
1627 void __iomem *scc_addr; 1627 void __iomem *scc_addr;
1628 unsigned long ring; 1628 unsigned long ring;
1629 int i; 1629 unsigned int i;
1630 1630
1631 /* 1631 /*
1632 * - the busy condition happens (sometimes); 1632 * - the busy condition happens (sometimes);
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index a7afdeee698c..73fb4232f9f2 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -150,18 +150,18 @@ int ath9k_hw_nvram_swap_data(struct ath_hw *ah, bool *swap_needed, int size)
150 return -EIO; 150 return -EIO;
151 } 151 }
152 152
153 if (magic == AR5416_EEPROM_MAGIC) { 153 *swap_needed = false;
154 *swap_needed = false; 154 if (swab16(magic) == AR5416_EEPROM_MAGIC) {
155 } else if (swab16(magic) == AR5416_EEPROM_MAGIC) {
156 if (ah->ah_flags & AH_NO_EEP_SWAP) { 155 if (ah->ah_flags & AH_NO_EEP_SWAP) {
157 ath_info(common, 156 ath_info(common,
158 "Ignoring endianness difference in EEPROM magic bytes.\n"); 157 "Ignoring endianness difference in EEPROM magic bytes.\n");
159
160 *swap_needed = false;
161 } else { 158 } else {
162 *swap_needed = true; 159 *swap_needed = true;
163 } 160 }
164 } else { 161 } else if (magic != AR5416_EEPROM_MAGIC) {
162 if (ath9k_hw_use_flash(ah))
163 return 0;
164
165 ath_err(common, 165 ath_err(common,
166 "Invalid EEPROM Magic (0x%04x).\n", magic); 166 "Invalid EEPROM Magic (0x%04x).\n", magic);
167 return -EINVAL; 167 return -EINVAL;
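The ath9k hunk above reworks the EEPROM magic check: swapping now defaults to off, a byte-swapped magic either enables swapping or is ignored when AH_NO_EEP_SWAP is set, and a wrong magic is tolerated on flash-based EEPROM. The sketch below models that decision tree in plain C; the magic value, flag names and return codes are simplified stand-ins.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FAKE_EEPROM_MAGIC 0xa55a

static uint16_t swab16_u(uint16_t v)
{
    return (uint16_t)((v << 8) | (v >> 8));
}

static int check_magic(uint16_t magic, bool no_swap_allowed, bool on_flash,
                       bool *swap_needed)
{
    *swap_needed = false;
    if (swab16_u(magic) == FAKE_EEPROM_MAGIC) {
        if (no_swap_allowed)
            printf("ignoring endianness difference\n");
        else
            *swap_needed = true;
    } else if (magic != FAKE_EEPROM_MAGIC) {
        if (on_flash)
            return 0;               /* flash parts may carry no magic */
        printf("invalid EEPROM magic 0x%04x\n", magic);
        return -1;
    }
    return 0;
}

int main(void)
{
    bool swap;

    check_magic(0x5aa5, false, false, &swap);   /* byte-swapped magic */
    printf("swap_needed = %d\n", swap);
    return 0;
}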
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index 53637399bb99..b98db8a0a069 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -879,11 +879,24 @@ int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
879 return 0; 879 return 0;
880} 880}
881 881
882static void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev) 882void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev)
883{ 883{
884 struct sdio_func *func;
885 struct mmc_host *host;
886 uint max_blocks;
884 uint nents; 887 uint nents;
885 int err; 888 int err;
886 889
890 func = sdiodev->func[2];
891 host = func->card->host;
892 sdiodev->sg_support = host->max_segs > 1;
893 max_blocks = min_t(uint, host->max_blk_count, 511u);
894 sdiodev->max_request_size = min_t(uint, host->max_req_size,
895 max_blocks * func->cur_blksize);
896 sdiodev->max_segment_count = min_t(uint, host->max_segs,
897 SG_MAX_SINGLE_ALLOC);
898 sdiodev->max_segment_size = host->max_seg_size;
899
887 if (!sdiodev->sg_support) 900 if (!sdiodev->sg_support)
888 return; 901 return;
889 902
@@ -1021,9 +1034,6 @@ static void brcmf_sdiod_host_fixup(struct mmc_host *host)
1021 1034
1022static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev) 1035static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
1023{ 1036{
1024 struct sdio_func *func;
1025 struct mmc_host *host;
1026 uint max_blocks;
1027 int ret = 0; 1037 int ret = 0;
1028 1038
1029 sdiodev->num_funcs = 2; 1039 sdiodev->num_funcs = 2;
@@ -1054,26 +1064,6 @@ static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
1054 goto out; 1064 goto out;
1055 } 1065 }
1056 1066
1057 /*
1058 * determine host related variables after brcmf_sdiod_probe()
1059 * as func->cur_blksize is properly set and F2 init has been
1060 * completed successfully.
1061 */
1062 func = sdiodev->func[2];
1063 host = func->card->host;
1064 sdiodev->sg_support = host->max_segs > 1;
1065 max_blocks = min_t(uint, host->max_blk_count, 511u);
1066 sdiodev->max_request_size = min_t(uint, host->max_req_size,
1067 max_blocks * func->cur_blksize);
1068 sdiodev->max_segment_count = min_t(uint, host->max_segs,
1069 SG_MAX_SINGLE_ALLOC);
1070 sdiodev->max_segment_size = host->max_seg_size;
1071
1072 /* allocate scatter-gather table. sg support
1073 * will be disabled upon allocation failure.
1074 */
1075 brcmf_sdiod_sgtable_alloc(sdiodev);
1076
1077 ret = brcmf_sdiod_freezer_attach(sdiodev); 1067 ret = brcmf_sdiod_freezer_attach(sdiodev);
1078 if (ret) 1068 if (ret)
1079 goto out; 1069 goto out;
@@ -1084,7 +1074,7 @@ static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
1084 ret = -ENODEV; 1074 ret = -ENODEV;
1085 goto out; 1075 goto out;
1086 } 1076 }
1087 brcmf_sdiod_host_fixup(host); 1077 brcmf_sdiod_host_fixup(sdiodev->func[2]->card->host);
1088out: 1078out:
1089 if (ret) 1079 if (ret)
1090 brcmf_sdiod_remove(sdiodev); 1080 brcmf_sdiod_remove(sdiodev);
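brcmf_sdiod_sgtable_alloc() now also derives the host scatter-gather limits; the removed comment explains why this has to run after probe, once func->cur_blksize is valid. The standalone sketch below reproduces the clamping arithmetic with made-up host capability numbers; FAKE_SG_MAX_SINGLE_ALLOC merely stands in for the kernel's SG_MAX_SINGLE_ALLOC, whose real value is page-size dependent.

#include <stdio.h>

#define FAKE_SG_MAX_SINGLE_ALLOC 168u

static unsigned int min_u(unsigned int a, unsigned int b)
{
    return a < b ? a : b;
}

int main(void)
{
    unsigned int host_max_blk_count = 1024, host_max_req_size = 1u << 20;
    unsigned int host_max_segs = 256, cur_blksize = 512;

    /* request size is bounded by the host byte limit and 511 blocks */
    unsigned int max_blocks = min_u(host_max_blk_count, 511u);
    unsigned int max_request_size =
        min_u(host_max_req_size, max_blocks * cur_blksize);
    unsigned int max_segment_count =
        min_u(host_max_segs, FAKE_SG_MAX_SINGLE_ALLOC);
    int sg_support = host_max_segs > 1;

    printf("sg=%d req=%u segs=%u\n",
           sg_support, max_request_size, max_segment_count);
    return 0;
}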
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
index 4265b50faa98..cfee477a6eb1 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
@@ -17,6 +17,7 @@
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/string.h> 18#include <linux/string.h>
19#include <linux/netdevice.h> 19#include <linux/netdevice.h>
20#include <linux/module.h>
20#include <brcmu_wifi.h> 21#include <brcmu_wifi.h>
21#include <brcmu_utils.h> 22#include <brcmu_utils.h>
22#include "core.h" 23#include "core.h"
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index dd6614332836..a14d9d9da094 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -4114,6 +4114,11 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
4114 goto fail; 4114 goto fail;
4115 } 4115 }
4116 4116
4117 /* allocate scatter-gather table. sg support
4118 * will be disabled upon allocation failure.
4119 */
4120 brcmf_sdiod_sgtable_alloc(bus->sdiodev);
4121
4117 /* Query the F2 block size, set roundup accordingly */ 4122 /* Query the F2 block size, set roundup accordingly */
4118 bus->blocksize = bus->sdiodev->func[2]->cur_blksize; 4123 bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
4119 bus->roundup = min(max_roundup, bus->blocksize); 4124 bus->roundup = min(max_roundup, bus->blocksize);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
index 5ec7a6d87672..23f223150cef 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
@@ -342,6 +342,7 @@ int brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
342 342
343/* Issue an abort to the specified function */ 343/* Issue an abort to the specified function */
344int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn); 344int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn);
345void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev);
345void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev, 346void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev,
346 enum brcmf_sdiod_state state); 347 enum brcmf_sdiod_state state);
347#ifdef CONFIG_PM_SLEEP 348#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig
index 866067789330..7438fbeef744 100644
--- a/drivers/net/wireless/intel/iwlwifi/Kconfig
+++ b/drivers/net/wireless/intel/iwlwifi/Kconfig
@@ -53,7 +53,6 @@ config IWLWIFI_LEDS
53 53
54config IWLDVM 54config IWLDVM
55 tristate "Intel Wireless WiFi DVM Firmware support" 55 tristate "Intel Wireless WiFi DVM Firmware support"
56 depends on m
57 help 56 help
58 This is the driver that supports the DVM firmware. The list 57 This is the driver that supports the DVM firmware. The list
59 of the devices that use this firmware is available here: 58 of the devices that use this firmware is available here:
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
index e60cf141ed79..fa41a5e1c890 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
@@ -74,16 +74,19 @@
74#define IWL7260_UCODE_API_MAX 17 74#define IWL7260_UCODE_API_MAX 17
75#define IWL7265_UCODE_API_MAX 17 75#define IWL7265_UCODE_API_MAX 17
76#define IWL7265D_UCODE_API_MAX 20 76#define IWL7265D_UCODE_API_MAX 20
77#define IWL3168_UCODE_API_MAX 20
77 78
78/* Oldest version we won't warn about */ 79/* Oldest version we won't warn about */
79#define IWL7260_UCODE_API_OK 13 80#define IWL7260_UCODE_API_OK 13
80#define IWL7265_UCODE_API_OK 13 81#define IWL7265_UCODE_API_OK 13
81#define IWL7265D_UCODE_API_OK 13 82#define IWL7265D_UCODE_API_OK 13
83#define IWL3168_UCODE_API_OK 20
82 84
83/* Lowest firmware API version supported */ 85/* Lowest firmware API version supported */
84#define IWL7260_UCODE_API_MIN 13 86#define IWL7260_UCODE_API_MIN 13
85#define IWL7265_UCODE_API_MIN 13 87#define IWL7265_UCODE_API_MIN 13
86#define IWL7265D_UCODE_API_MIN 13 88#define IWL7265D_UCODE_API_MIN 13
89#define IWL3168_UCODE_API_MIN 20
87 90
88/* NVM versions */ 91/* NVM versions */
89#define IWL7260_NVM_VERSION 0x0a1d 92#define IWL7260_NVM_VERSION 0x0a1d
@@ -92,6 +95,8 @@
92#define IWL3160_TX_POWER_VERSION 0xffff /* meaningless */ 95#define IWL3160_TX_POWER_VERSION 0xffff /* meaningless */
93#define IWL3165_NVM_VERSION 0x709 96#define IWL3165_NVM_VERSION 0x709
94#define IWL3165_TX_POWER_VERSION 0xffff /* meaningless */ 97#define IWL3165_TX_POWER_VERSION 0xffff /* meaningless */
98#define IWL3168_NVM_VERSION 0xd01
99#define IWL3168_TX_POWER_VERSION 0xffff /* meaningless */
95#define IWL7265_NVM_VERSION 0x0a1d 100#define IWL7265_NVM_VERSION 0x0a1d
96#define IWL7265_TX_POWER_VERSION 0xffff /* meaningless */ 101#define IWL7265_TX_POWER_VERSION 0xffff /* meaningless */
97#define IWL7265D_NVM_VERSION 0x0c11 102#define IWL7265D_NVM_VERSION 0x0c11
@@ -109,6 +114,9 @@
109#define IWL3160_FW_PRE "iwlwifi-3160-" 114#define IWL3160_FW_PRE "iwlwifi-3160-"
110#define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode" 115#define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode"
111 116
117#define IWL3168_FW_PRE "iwlwifi-3168-"
118#define IWL3168_MODULE_FIRMWARE(api) IWL3168_FW_PRE __stringify(api) ".ucode"
119
112#define IWL7265_FW_PRE "iwlwifi-7265-" 120#define IWL7265_FW_PRE "iwlwifi-7265-"
113#define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode" 121#define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
114 122
@@ -180,6 +188,12 @@ static const struct iwl_ht_params iwl7000_ht_params = {
180 .ucode_api_ok = IWL7265_UCODE_API_OK, \ 188 .ucode_api_ok = IWL7265_UCODE_API_OK, \
181 .ucode_api_min = IWL7265_UCODE_API_MIN 189 .ucode_api_min = IWL7265_UCODE_API_MIN
182 190
191#define IWL_DEVICE_3008 \
192 IWL_DEVICE_7000_COMMON, \
193 .ucode_api_max = IWL3168_UCODE_API_MAX, \
194 .ucode_api_ok = IWL3168_UCODE_API_OK, \
195 .ucode_api_min = IWL3168_UCODE_API_MIN
196
183#define IWL_DEVICE_7005D \ 197#define IWL_DEVICE_7005D \
184 IWL_DEVICE_7000_COMMON, \ 198 IWL_DEVICE_7000_COMMON, \
185 .ucode_api_max = IWL7265D_UCODE_API_MAX, \ 199 .ucode_api_max = IWL7265D_UCODE_API_MAX, \
@@ -299,11 +313,11 @@ const struct iwl_cfg iwl3165_2ac_cfg = {
299 313
300const struct iwl_cfg iwl3168_2ac_cfg = { 314const struct iwl_cfg iwl3168_2ac_cfg = {
301 .name = "Intel(R) Dual Band Wireless AC 3168", 315 .name = "Intel(R) Dual Band Wireless AC 3168",
302 .fw_name_pre = IWL7265D_FW_PRE, 316 .fw_name_pre = IWL3168_FW_PRE,
303 IWL_DEVICE_7000, 317 IWL_DEVICE_3008,
304 .ht_params = &iwl7000_ht_params, 318 .ht_params = &iwl7000_ht_params,
305 .nvm_ver = IWL3165_NVM_VERSION, 319 .nvm_ver = IWL3168_NVM_VERSION,
306 .nvm_calib_ver = IWL3165_TX_POWER_VERSION, 320 .nvm_calib_ver = IWL3168_TX_POWER_VERSION,
307 .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, 321 .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
308 .dccm_len = IWL7265_DCCM_LEN, 322 .dccm_len = IWL7265_DCCM_LEN,
309}; 323};
@@ -376,5 +390,6 @@ const struct iwl_cfg iwl7265d_n_cfg = {
376 390
377MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); 391MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
378MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); 392MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
393MODULE_FIRMWARE(IWL3168_MODULE_FIRMWARE(IWL3168_UCODE_API_OK));
379MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7265_UCODE_API_OK)); 394MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7265_UCODE_API_OK));
380MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7265D_UCODE_API_OK)); 395MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7265D_UCODE_API_OK));
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
index c84a0299d43e..bce9b3420a13 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
@@ -7,6 +7,7 @@
7 * 7 *
8 * Copyright(c) 2014 Intel Corporation. All rights reserved. 8 * Copyright(c) 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH 9 * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
10 * Copyright(c) 2016 Intel Deutschland GmbH
10 * 11 *
11 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as 13 * it under the terms of version 2 of the GNU General Public License as
@@ -70,12 +71,15 @@
70 71
71/* Highest firmware API version supported */ 72/* Highest firmware API version supported */
72#define IWL8000_UCODE_API_MAX 20 73#define IWL8000_UCODE_API_MAX 20
74#define IWL8265_UCODE_API_MAX 20
73 75
74/* Oldest version we won't warn about */ 76/* Oldest version we won't warn about */
75#define IWL8000_UCODE_API_OK 13 77#define IWL8000_UCODE_API_OK 13
78#define IWL8265_UCODE_API_OK 20
76 79
77/* Lowest firmware API version supported */ 80/* Lowest firmware API version supported */
78#define IWL8000_UCODE_API_MIN 13 81#define IWL8000_UCODE_API_MIN 13
82#define IWL8265_UCODE_API_MIN 20
79 83
80/* NVM versions */ 84/* NVM versions */
81#define IWL8000_NVM_VERSION 0x0a1d 85#define IWL8000_NVM_VERSION 0x0a1d
@@ -93,6 +97,10 @@
93#define IWL8000_MODULE_FIRMWARE(api) \ 97#define IWL8000_MODULE_FIRMWARE(api) \
94 IWL8000_FW_PRE "-" __stringify(api) ".ucode" 98 IWL8000_FW_PRE "-" __stringify(api) ".ucode"
95 99
100#define IWL8265_FW_PRE "iwlwifi-8265-"
101#define IWL8265_MODULE_FIRMWARE(api) \
102 IWL8265_FW_PRE __stringify(api) ".ucode"
103
96#define NVM_HW_SECTION_NUM_FAMILY_8000 10 104#define NVM_HW_SECTION_NUM_FAMILY_8000 10
97#define DEFAULT_NVM_FILE_FAMILY_8000B "nvmData-8000B" 105#define DEFAULT_NVM_FILE_FAMILY_8000B "nvmData-8000B"
98#define DEFAULT_NVM_FILE_FAMILY_8000C "nvmData-8000C" 106#define DEFAULT_NVM_FILE_FAMILY_8000C "nvmData-8000C"
@@ -144,10 +152,7 @@ static const struct iwl_tt_params iwl8000_tt_params = {
144 .support_tx_backoff = true, 152 .support_tx_backoff = true,
145}; 153};
146 154
147#define IWL_DEVICE_8000 \ 155#define IWL_DEVICE_8000_COMMON \
148 .ucode_api_max = IWL8000_UCODE_API_MAX, \
149 .ucode_api_ok = IWL8000_UCODE_API_OK, \
150 .ucode_api_min = IWL8000_UCODE_API_MIN, \
151 .device_family = IWL_DEVICE_FAMILY_8000, \ 156 .device_family = IWL_DEVICE_FAMILY_8000, \
152 .max_inst_size = IWL60_RTC_INST_SIZE, \ 157 .max_inst_size = IWL60_RTC_INST_SIZE, \
153 .max_data_size = IWL60_RTC_DATA_SIZE, \ 158 .max_data_size = IWL60_RTC_DATA_SIZE, \
@@ -167,10 +172,28 @@ static const struct iwl_tt_params iwl8000_tt_params = {
167 .thermal_params = &iwl8000_tt_params, \ 172 .thermal_params = &iwl8000_tt_params, \
168 .apmg_not_supported = true 173 .apmg_not_supported = true
169 174
175#define IWL_DEVICE_8000 \
176 IWL_DEVICE_8000_COMMON, \
177 .ucode_api_max = IWL8000_UCODE_API_MAX, \
178 .ucode_api_ok = IWL8000_UCODE_API_OK, \
179 .ucode_api_min = IWL8000_UCODE_API_MIN \
180
181#define IWL_DEVICE_8260 \
182 IWL_DEVICE_8000_COMMON, \
183 .ucode_api_max = IWL8000_UCODE_API_MAX, \
184 .ucode_api_ok = IWL8000_UCODE_API_OK, \
185 .ucode_api_min = IWL8000_UCODE_API_MIN \
186
187#define IWL_DEVICE_8265 \
188 IWL_DEVICE_8000_COMMON, \
189 .ucode_api_max = IWL8265_UCODE_API_MAX, \
190 .ucode_api_ok = IWL8265_UCODE_API_OK, \
191 .ucode_api_min = IWL8265_UCODE_API_MIN \
192
170const struct iwl_cfg iwl8260_2n_cfg = { 193const struct iwl_cfg iwl8260_2n_cfg = {
171 .name = "Intel(R) Dual Band Wireless N 8260", 194 .name = "Intel(R) Dual Band Wireless N 8260",
172 .fw_name_pre = IWL8000_FW_PRE, 195 .fw_name_pre = IWL8000_FW_PRE,
173 IWL_DEVICE_8000, 196 IWL_DEVICE_8260,
174 .ht_params = &iwl8000_ht_params, 197 .ht_params = &iwl8000_ht_params,
175 .nvm_ver = IWL8000_NVM_VERSION, 198 .nvm_ver = IWL8000_NVM_VERSION,
176 .nvm_calib_ver = IWL8000_TX_POWER_VERSION, 199 .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
@@ -179,7 +202,7 @@ const struct iwl_cfg iwl8260_2n_cfg = {
179const struct iwl_cfg iwl8260_2ac_cfg = { 202const struct iwl_cfg iwl8260_2ac_cfg = {
180 .name = "Intel(R) Dual Band Wireless AC 8260", 203 .name = "Intel(R) Dual Band Wireless AC 8260",
181 .fw_name_pre = IWL8000_FW_PRE, 204 .fw_name_pre = IWL8000_FW_PRE,
182 IWL_DEVICE_8000, 205 IWL_DEVICE_8260,
183 .ht_params = &iwl8000_ht_params, 206 .ht_params = &iwl8000_ht_params,
184 .nvm_ver = IWL8000_NVM_VERSION, 207 .nvm_ver = IWL8000_NVM_VERSION,
185 .nvm_calib_ver = IWL8000_TX_POWER_VERSION, 208 .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
@@ -188,8 +211,8 @@ const struct iwl_cfg iwl8260_2ac_cfg = {
188 211
189const struct iwl_cfg iwl8265_2ac_cfg = { 212const struct iwl_cfg iwl8265_2ac_cfg = {
190 .name = "Intel(R) Dual Band Wireless AC 8265", 213 .name = "Intel(R) Dual Band Wireless AC 8265",
191 .fw_name_pre = IWL8000_FW_PRE, 214 .fw_name_pre = IWL8265_FW_PRE,
192 IWL_DEVICE_8000, 215 IWL_DEVICE_8265,
193 .ht_params = &iwl8000_ht_params, 216 .ht_params = &iwl8000_ht_params,
194 .nvm_ver = IWL8000_NVM_VERSION, 217 .nvm_ver = IWL8000_NVM_VERSION,
195 .nvm_calib_ver = IWL8000_TX_POWER_VERSION, 218 .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
@@ -209,7 +232,7 @@ const struct iwl_cfg iwl4165_2ac_cfg = {
209const struct iwl_cfg iwl8260_2ac_sdio_cfg = { 232const struct iwl_cfg iwl8260_2ac_sdio_cfg = {
210 .name = "Intel(R) Dual Band Wireless-AC 8260", 233 .name = "Intel(R) Dual Band Wireless-AC 8260",
211 .fw_name_pre = IWL8000_FW_PRE, 234 .fw_name_pre = IWL8000_FW_PRE,
212 IWL_DEVICE_8000, 235 IWL_DEVICE_8260,
213 .ht_params = &iwl8000_ht_params, 236 .ht_params = &iwl8000_ht_params,
214 .nvm_ver = IWL8000_NVM_VERSION, 237 .nvm_ver = IWL8000_NVM_VERSION,
215 .nvm_calib_ver = IWL8000_TX_POWER_VERSION, 238 .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
@@ -236,3 +259,4 @@ const struct iwl_cfg iwl4165_2ac_sdio_cfg = {
236}; 259};
237 260
238MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_OK)); 261MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_OK));
262MODULE_FIRMWARE(IWL8265_MODULE_FIRMWARE(IWL8265_UCODE_API_OK));
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 7acb49075683..ab4c2a0470b2 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -243,8 +243,10 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
243 if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) { 243 if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
244 char rev_step = 'A' + CSR_HW_REV_STEP(drv->trans->hw_rev); 244 char rev_step = 'A' + CSR_HW_REV_STEP(drv->trans->hw_rev);
245 245
246 snprintf(drv->firmware_name, sizeof(drv->firmware_name), 246 if (rev_step != 'A')
247 "%s%c-%s.ucode", name_pre, rev_step, tag); 247 snprintf(drv->firmware_name,
248 sizeof(drv->firmware_name), "%s%c-%s.ucode",
249 name_pre, rev_step, tag);
248 } 250 }
249 251
250 IWL_DEBUG_INFO(drv, "attempting to load firmware %s'%s'\n", 252 IWL_DEBUG_INFO(drv, "attempting to load firmware %s'%s'\n",
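The iwl-drv change above only appends the silicon step to the firmware file name when it is not 'A', so A-step parts keep the plain "<prefix><api>.ucode" name built earlier in the function. A hedged sketch of that naming logic follows; the prefix and API tag used here are examples only, not a statement about which files ship for real hardware.

#include <stddef.h>
#include <stdio.h>

static void build_fw_name(char *buf, size_t len, const char *name_pre,
                          char rev_step, const char *tag)
{
    /* default name, as built before the family-8000 special case */
    snprintf(buf, len, "%s%s.ucode", name_pre, tag);

    /* only non-A steps get the "<step>-" infix */
    if (rev_step != 'A')
        snprintf(buf, len, "%s%c-%s.ucode", name_pre, rev_step, tag);
}

int main(void)
{
    char name[64];

    build_fw_name(name, sizeof(name), "iwlwifi-8000C-", 'B', "20");
    printf("%s\n", name);       /* iwlwifi-8000C-B-20.ucode */
    build_fw_name(name, sizeof(name), "iwlwifi-8000C-", 'A', "20");
    printf("%s\n", name);       /* iwlwifi-8000C-20.ucode */
    return 0;
}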
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
index 0036d18334af..ba3f0bbddde8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
@@ -510,6 +510,9 @@ struct iwl_mvm_tx_resp {
510 * @scd_ssn: the index of the last contiguously sent packet 510 * @scd_ssn: the index of the last contiguously sent packet
511 * @txed: number of Txed frames in this batch 511 * @txed: number of Txed frames in this batch
512 * @txed_2_done: number of Acked frames in this batch 512 * @txed_2_done: number of Acked frames in this batch
513 * @reduced_txp: power reduced according to TPC. This is the actual value and
 514 * not a copy from the LQ command. Thus, if a rate other than the first was
 515 * used for Tx-ing then this value will be set to 0 by FW.
513 */ 516 */
514struct iwl_mvm_ba_notif { 517struct iwl_mvm_ba_notif {
515 __le32 sta_addr_lo32; 518 __le32 sta_addr_lo32;
@@ -524,7 +527,8 @@ struct iwl_mvm_ba_notif {
524 __le16 scd_ssn; 527 __le16 scd_ssn;
525 u8 txed; 528 u8 txed;
526 u8 txed_2_done; 529 u8 txed_2_done;
527 __le16 reserved1; 530 u8 reduced_txp;
531 u8 reserved1;
528} __packed; 532} __packed;
529 533
530/* 534/*
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 4ed5180c547b..0ccc697fef76 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -107,7 +107,7 @@ static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
107 sizeof(tx_ant_cmd), &tx_ant_cmd); 107 sizeof(tx_ant_cmd), &tx_ant_cmd);
108} 108}
109 109
110static void iwl_free_fw_paging(struct iwl_mvm *mvm) 110void iwl_free_fw_paging(struct iwl_mvm *mvm)
111{ 111{
112 int i; 112 int i;
113 113
@@ -127,6 +127,8 @@ static void iwl_free_fw_paging(struct iwl_mvm *mvm)
127 get_order(mvm->fw_paging_db[i].fw_paging_size)); 127 get_order(mvm->fw_paging_db[i].fw_paging_size));
128 } 128 }
129 kfree(mvm->trans->paging_download_buf); 129 kfree(mvm->trans->paging_download_buf);
130 mvm->trans->paging_download_buf = NULL;
131
130 memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db)); 132 memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
131} 133}
132 134
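iwl_free_fw_paging() is made callable from the op-mode stop path (see the mvm.h and ops.c hunks below) and now clears paging_download_buf after freeing it, so a repeated call cannot free the same buffer twice. The toy below shows that free-and-NULL idiom, with plain malloc/free standing in for the kernel allocators.

#include <stdio.h>
#include <stdlib.h>

struct fake_trans {
    void *paging_download_buf;
};

static void fake_free_fw_paging(struct fake_trans *trans)
{
    free(trans->paging_download_buf);
    trans->paging_download_buf = NULL;  /* make the call idempotent */
}

int main(void)
{
    struct fake_trans trans = { malloc(32) };

    fake_free_fw_paging(&trans);        /* normal teardown */
    fake_free_fw_paging(&trans);        /* second call is now harmless */
    printf("buf = %p\n", trans.paging_download_buf);
    return 0;
}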
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 5f3ac8cccf49..ff7c6df9f941 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1225,6 +1225,9 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
1225void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm, 1225void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
1226 struct iwl_rx_cmd_buffer *rxb); 1226 struct iwl_rx_cmd_buffer *rxb);
1227 1227
1228/* Paging */
1229void iwl_free_fw_paging(struct iwl_mvm *mvm);
1230
1228/* MVM debugfs */ 1231/* MVM debugfs */
1229#ifdef CONFIG_IWLWIFI_DEBUGFS 1232#ifdef CONFIG_IWLWIFI_DEBUGFS
1230int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir); 1233int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 89ea70deeb84..e80be9a59520 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -684,6 +684,8 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
684 for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++) 684 for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
685 kfree(mvm->nvm_sections[i].data); 685 kfree(mvm->nvm_sections[i].data);
686 686
687 iwl_free_fw_paging(mvm);
688
687 iwl_mvm_tof_clean(mvm); 689 iwl_mvm_tof_clean(mvm);
688 690
689 ieee80211_free_hw(mvm->hw); 691 ieee80211_free_hw(mvm->hw);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 7bb6fd0e4391..94caa88df442 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -2,6 +2,7 @@
2 * 2 *
3 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. 3 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
4 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 4 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
5 * Copyright(c) 2016 Intel Deutschland GmbH
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify it 7 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of version 2 of the GNU General Public License as 8 * under the terms of version 2 of the GNU General Public License as
@@ -724,14 +725,28 @@ static int _rs_collect_tx_data(struct iwl_mvm *mvm,
724 return 0; 725 return 0;
725} 726}
726 727
727static int rs_collect_tx_data(struct iwl_mvm *mvm, 728static int rs_collect_tpc_data(struct iwl_mvm *mvm,
728 struct iwl_lq_sta *lq_sta, 729 struct iwl_lq_sta *lq_sta,
729 struct iwl_scale_tbl_info *tbl, 730 struct iwl_scale_tbl_info *tbl,
730 int scale_index, int attempts, int successes, 731 int scale_index, int attempts, int successes,
731 u8 reduced_txp) 732 u8 reduced_txp)
733{
734 struct iwl_rate_scale_data *window = NULL;
735
736 if (WARN_ON_ONCE(reduced_txp > TPC_MAX_REDUCTION))
737 return -EINVAL;
738
739 window = &tbl->tpc_win[reduced_txp];
740 return _rs_collect_tx_data(mvm, tbl, scale_index, attempts, successes,
741 window);
742}
743
744static int rs_collect_tlc_data(struct iwl_mvm *mvm,
745 struct iwl_lq_sta *lq_sta,
746 struct iwl_scale_tbl_info *tbl,
747 int scale_index, int attempts, int successes)
732{ 748{
733 struct iwl_rate_scale_data *window = NULL; 749 struct iwl_rate_scale_data *window = NULL;
734 int ret;
735 750
736 if (scale_index < 0 || scale_index >= IWL_RATE_COUNT) 751 if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
737 return -EINVAL; 752 return -EINVAL;
@@ -745,16 +760,6 @@ static int rs_collect_tx_data(struct iwl_mvm *mvm,
745 760
746 /* Select window for current tx bit rate */ 761 /* Select window for current tx bit rate */
747 window = &(tbl->win[scale_index]); 762 window = &(tbl->win[scale_index]);
748
749 ret = _rs_collect_tx_data(mvm, tbl, scale_index, attempts, successes,
750 window);
751 if (ret)
752 return ret;
753
754 if (WARN_ON_ONCE(reduced_txp > TPC_MAX_REDUCTION))
755 return -EINVAL;
756
757 window = &tbl->tpc_win[reduced_txp];
758 return _rs_collect_tx_data(mvm, tbl, scale_index, attempts, successes, 763 return _rs_collect_tx_data(mvm, tbl, scale_index, attempts, successes,
759 window); 764 window);
760} 765}
@@ -1301,17 +1306,30 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
1301 * first index into rate scale table. 1306 * first index into rate scale table.
1302 */ 1307 */
1303 if (info->flags & IEEE80211_TX_STAT_AMPDU) { 1308 if (info->flags & IEEE80211_TX_STAT_AMPDU) {
1304 /* ampdu_ack_len = 0 marks no BA was received. In this case 1309 rs_collect_tpc_data(mvm, lq_sta, curr_tbl, lq_rate.index,
1305 * treat it as a single frame loss as we don't want the success 1310 info->status.ampdu_len,
1306 * ratio to dip too quickly because a BA wasn't received 1311 info->status.ampdu_ack_len,
1312 reduced_txp);
1313
1314 /* ampdu_ack_len = 0 marks no BA was received. For TLC, treat
1315 * it as a single frame loss as we don't want the success ratio
1316 * to dip too quickly because a BA wasn't received.
1317 * For TPC, there's no need for this optimisation since we want
1318 * to recover very quickly from a bad power reduction and,
1319 * therefore we'd like the success ratio to get an immediate hit
1320 * when failing to get a BA, so we'd switch back to a lower or
1321 * zero power reduction. When FW transmits agg with a rate
1322 * different from the initial rate, it will not use reduced txp
1323 * and will send BA notification twice (one empty with reduced
1324 * txp equal to the value from LQ and one with reduced txp 0).
1325 * We need to update counters for each txp level accordingly.
1307 */ 1326 */
1308 if (info->status.ampdu_ack_len == 0) 1327 if (info->status.ampdu_ack_len == 0)
1309 info->status.ampdu_len = 1; 1328 info->status.ampdu_len = 1;
1310 1329
1311 rs_collect_tx_data(mvm, lq_sta, curr_tbl, lq_rate.index, 1330 rs_collect_tlc_data(mvm, lq_sta, curr_tbl, lq_rate.index,
1312 info->status.ampdu_len, 1331 info->status.ampdu_len,
1313 info->status.ampdu_ack_len, 1332 info->status.ampdu_ack_len);
1314 reduced_txp);
1315 1333
1316 /* Update success/fail counts if not searching for new mode */ 1334 /* Update success/fail counts if not searching for new mode */
1317 if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) { 1335 if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
@@ -1344,9 +1362,13 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
1344 else 1362 else
1345 continue; 1363 continue;
1346 1364
1347 rs_collect_tx_data(mvm, lq_sta, tmp_tbl, lq_rate.index, 1365 rs_collect_tpc_data(mvm, lq_sta, tmp_tbl,
1348 1, i < retries ? 0 : legacy_success, 1366 lq_rate.index, 1,
1349 reduced_txp); 1367 i < retries ? 0 : legacy_success,
1368 reduced_txp);
1369 rs_collect_tlc_data(mvm, lq_sta, tmp_tbl,
1370 lq_rate.index, 1,
1371 i < retries ? 0 : legacy_success);
1350 } 1372 }
1351 1373
1352 /* Update success/fail counts if not searching for new mode */ 1374 /* Update success/fail counts if not searching for new mode */
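The rs.c hunks above split rs_collect_tx_data() into rs_collect_tpc_data() and rs_collect_tlc_data() so that, when no BA arrives, TPC statistics still see the full A-MPDU length while TLC statistics count a single lost frame, as the long comment explains. The userspace sketch below just replays that bookkeeping difference; fake_window is a stand-in for iwl_rate_scale_data, not the driver structure.

#include <stdio.h>

struct fake_window {
    unsigned int attempts;
    unsigned int successes;
};

static void collect(struct fake_window *w, unsigned int attempts,
                    unsigned int successes)
{
    w->attempts += attempts;
    w->successes += successes;
}

int main(void)
{
    struct fake_window tpc = { 0, 0 }, tlc = { 0, 0 };
    unsigned int ampdu_len = 24, ampdu_ack_len = 0; /* BA never arrived */

    /* TPC first, with the unmodified counts: a bad power reduction
     * should hurt the success ratio immediately */
    collect(&tpc, ampdu_len, ampdu_ack_len);

    /* TLC: a missing BA counts as one lost frame, not 24 */
    if (ampdu_ack_len == 0)
        ampdu_len = 1;
    collect(&tlc, ampdu_len, ampdu_ack_len);

    printf("tpc %u/%u, tlc %u/%u\n",
           tpc.successes, tpc.attempts, tlc.successes, tlc.attempts);
    return 0;
}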
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 9a15642f80dd..ea1e177c2ea1 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -1298,6 +1298,10 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
1298 return -EBUSY; 1298 return -EBUSY;
1299 } 1299 }
1300 1300
1301 /* we don't support "match all" in the firmware */
1302 if (!req->n_match_sets)
1303 return -EOPNOTSUPP;
1304
1301 ret = iwl_mvm_check_running_scans(mvm, type); 1305 ret = iwl_mvm_check_running_scans(mvm, type);
1302 if (ret) 1306 if (ret)
1303 return ret; 1307 return ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 8bf48a7d0f4e..a040edc55057 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -423,6 +423,15 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
423 return -1; 423 return -1;
424 } 424 }
425 425
426 /*
427 * Increase the pending frames counter, so that later when a reply comes
428 * in and the counter is decreased - we don't start getting negative
429 * values.
430 * Note that we don't need to make sure it isn't agg'd, since we're
431 * TXing non-sta
432 */
433 atomic_inc(&mvm->pending_frames[sta_id]);
434
426 return 0; 435 return 0;
427} 436}
428 437
@@ -1029,7 +1038,6 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
1029 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 1038 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1030 mvmsta->tid_data[tid].rate_n_flags = 1039 mvmsta->tid_data[tid].rate_n_flags =
1031 le32_to_cpu(tx_resp->initial_rate); 1040 le32_to_cpu(tx_resp->initial_rate);
1032 mvmsta->tid_data[tid].reduced_tpc = tx_resp->reduced_tpc;
1033 mvmsta->tid_data[tid].tx_time = 1041 mvmsta->tid_data[tid].tx_time =
1034 le16_to_cpu(tx_resp->wireless_media_time); 1042 le16_to_cpu(tx_resp->wireless_media_time);
1035 } 1043 }
@@ -1060,7 +1068,7 @@ static void iwl_mvm_tx_info_from_ba_notif(struct ieee80211_tx_info *info,
1060 /* TODO: not accounted if the whole A-MPDU failed */ 1068 /* TODO: not accounted if the whole A-MPDU failed */
1061 info->status.tx_time = tid_data->tx_time; 1069 info->status.tx_time = tid_data->tx_time;
1062 info->status.status_driver_data[0] = 1070 info->status.status_driver_data[0] =
1063 (void *)(uintptr_t)tid_data->reduced_tpc; 1071 (void *)(uintptr_t)ba_notif->reduced_txp;
1064 info->status.status_driver_data[1] = 1072 info->status.status_driver_data[1] =
1065 (void *)(uintptr_t)tid_data->rate_n_flags; 1073 (void *)(uintptr_t)tid_data->rate_n_flags;
1066} 1074}
@@ -1133,6 +1141,8 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
1133 scd_flow, ba_resp_scd_ssn, ba_notif->txed, 1141 scd_flow, ba_resp_scd_ssn, ba_notif->txed,
1134 ba_notif->txed_2_done); 1142 ba_notif->txed_2_done);
1135 1143
1144 IWL_DEBUG_TX_REPLY(mvm, "reduced txp from ba notif %d\n",
1145 ba_notif->reduced_txp);
1136 tid_data->next_reclaimed = ba_resp_scd_ssn; 1146 tid_data->next_reclaimed = ba_resp_scd_ssn;
1137 1147
1138 iwl_mvm_check_ratid_empty(mvm, sta, tid); 1148 iwl_mvm_check_ratid_empty(mvm, sta, tid);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 6261a68cae90..00335ea6b3eb 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -378,7 +378,10 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
378 {IWL_PCI_DEVICE(0x3165, 0x8110, iwl3165_2ac_cfg)}, 378 {IWL_PCI_DEVICE(0x3165, 0x8110, iwl3165_2ac_cfg)},
379 379
380/* 3168 Series */ 380/* 3168 Series */
381 {IWL_PCI_DEVICE(0x24FB, 0x2010, iwl3168_2ac_cfg)},
381 {IWL_PCI_DEVICE(0x24FB, 0x2110, iwl3168_2ac_cfg)}, 382 {IWL_PCI_DEVICE(0x24FB, 0x2110, iwl3168_2ac_cfg)},
383 {IWL_PCI_DEVICE(0x24FB, 0x2050, iwl3168_2ac_cfg)},
384 {IWL_PCI_DEVICE(0x24FB, 0x2150, iwl3168_2ac_cfg)},
382 {IWL_PCI_DEVICE(0x24FB, 0x0000, iwl3168_2ac_cfg)}, 385 {IWL_PCI_DEVICE(0x24FB, 0x0000, iwl3168_2ac_cfg)},
383 386
384/* 7265 Series */ 387/* 7265 Series */
@@ -475,6 +478,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
475 {IWL_PCI_DEVICE(0x24F3, 0x0000, iwl8265_2ac_cfg)}, 478 {IWL_PCI_DEVICE(0x24F3, 0x0000, iwl8265_2ac_cfg)},
476 {IWL_PCI_DEVICE(0x24FD, 0x0010, iwl8265_2ac_cfg)}, 479 {IWL_PCI_DEVICE(0x24FD, 0x0010, iwl8265_2ac_cfg)},
477 {IWL_PCI_DEVICE(0x24FD, 0x8010, iwl8265_2ac_cfg)}, 480 {IWL_PCI_DEVICE(0x24FD, 0x8010, iwl8265_2ac_cfg)},
481 {IWL_PCI_DEVICE(0x24FD, 0x0810, iwl8265_2ac_cfg)},
478 482
479/* 9000 Series */ 483/* 9000 Series */
480 {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl5165_2ac_cfg)}, 484 {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl5165_2ac_cfg)},
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index cc3888e2700d..73c95594eabe 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -490,6 +490,15 @@ static inline void iwl_enable_interrupts(struct iwl_trans *trans)
490 iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask); 490 iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
491} 491}
492 492
493static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
494{
495 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
496
497 IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
498 trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
499 iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
500}
501
493static inline void iwl_enable_rfkill_int(struct iwl_trans *trans) 502static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
494{ 503{
495 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 504 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index ccafbd8cf4b3..152cf9ad9566 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -1438,9 +1438,11 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
1438 inta & ~trans_pcie->inta_mask); 1438 inta & ~trans_pcie->inta_mask);
1439 } 1439 }
1440 1440
1441 /* Re-enable all interrupts */ 1441 /* we are loading the firmware, enable FH_TX interrupt only */
1442 /* only Re-enable if disabled by irq */ 1442 if (handled & CSR_INT_BIT_FH_TX)
1443 if (test_bit(STATUS_INT_ENABLED, &trans->status)) 1443 iwl_enable_fw_load_int(trans);
1444 /* only Re-enable all interrupt if disabled by irq */
1445 else if (test_bit(STATUS_INT_ENABLED, &trans->status))
1444 iwl_enable_interrupts(trans); 1446 iwl_enable_interrupts(trans);
1445 /* Re-enable RF_KILL if it occurred */ 1447 /* Re-enable RF_KILL if it occurred */
1446 else if (handled & CSR_INT_BIT_RF_KILL) 1448 else if (handled & CSR_INT_BIT_RF_KILL)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index d60a467a983c..5a854c609477 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -1021,82 +1021,6 @@ static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
1021 &first_ucode_section); 1021 &first_ucode_section);
1022} 1022}
1023 1023
1024static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
1025 const struct fw_img *fw, bool run_in_rfkill)
1026{
1027 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1028 bool hw_rfkill;
1029 int ret;
1030
1031 mutex_lock(&trans_pcie->mutex);
1032
1033 /* Someone called stop_device, don't try to start_fw */
1034 if (trans_pcie->is_down) {
1035 IWL_WARN(trans,
1036 "Can't start_fw since the HW hasn't been started\n");
1037 ret = EIO;
1038 goto out;
1039 }
1040
1041 /* This may fail if AMT took ownership of the device */
1042 if (iwl_pcie_prepare_card_hw(trans)) {
1043 IWL_WARN(trans, "Exit HW not ready\n");
1044 ret = -EIO;
1045 goto out;
1046 }
1047
1048 iwl_enable_rfkill_int(trans);
1049
1050 /* If platform's RF_KILL switch is NOT set to KILL */
1051 hw_rfkill = iwl_is_rfkill_set(trans);
1052 if (hw_rfkill)
1053 set_bit(STATUS_RFKILL, &trans->status);
1054 else
1055 clear_bit(STATUS_RFKILL, &trans->status);
1056 iwl_trans_pcie_rf_kill(trans, hw_rfkill);
1057 if (hw_rfkill && !run_in_rfkill) {
1058 ret = -ERFKILL;
1059 goto out;
1060 }
1061
1062 iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
1063
1064 ret = iwl_pcie_nic_init(trans);
1065 if (ret) {
1066 IWL_ERR(trans, "Unable to init nic\n");
1067 goto out;
1068 }
1069
1070 /* make sure rfkill handshake bits are cleared */
1071 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1072 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
1073 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
1074
1075 /* clear (again), then enable host interrupts */
1076 iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
1077 iwl_enable_interrupts(trans);
1078
1079 /* really make sure rfkill handshake bits are cleared */
1080 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1081 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1082
1083 /* Load the given image to the HW */
1084 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
1085 ret = iwl_pcie_load_given_ucode_8000(trans, fw);
1086 else
1087 ret = iwl_pcie_load_given_ucode(trans, fw);
1088
1089out:
1090 mutex_unlock(&trans_pcie->mutex);
1091 return ret;
1092}
1093
1094static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
1095{
1096 iwl_pcie_reset_ict(trans);
1097 iwl_pcie_tx_start(trans, scd_addr);
1098}
1099
1100static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) 1024static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
1101{ 1025{
1102 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1026 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1127,7 +1051,8 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
1127 * already dead. 1051 * already dead.
1128 */ 1052 */
1129 if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) { 1053 if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
1130 IWL_DEBUG_INFO(trans, "DEVICE_ENABLED bit was set and is now cleared\n"); 1054 IWL_DEBUG_INFO(trans,
1055 "DEVICE_ENABLED bit was set and is now cleared\n");
1131 iwl_pcie_tx_stop(trans); 1056 iwl_pcie_tx_stop(trans);
1132 iwl_pcie_rx_stop(trans); 1057 iwl_pcie_rx_stop(trans);
1133 1058
@@ -1161,7 +1086,6 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
1161 iwl_disable_interrupts(trans); 1086 iwl_disable_interrupts(trans);
1162 spin_unlock(&trans_pcie->irq_lock); 1087 spin_unlock(&trans_pcie->irq_lock);
1163 1088
1164
1165 /* clear all status bits */ 1089 /* clear all status bits */
1166 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); 1090 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1167 clear_bit(STATUS_INT_ENABLED, &trans->status); 1091 clear_bit(STATUS_INT_ENABLED, &trans->status);
@@ -1194,10 +1118,116 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
1194 if (hw_rfkill != was_hw_rfkill) 1118 if (hw_rfkill != was_hw_rfkill)
1195 iwl_trans_pcie_rf_kill(trans, hw_rfkill); 1119 iwl_trans_pcie_rf_kill(trans, hw_rfkill);
1196 1120
1197 /* re-take ownership to prevent other users from stealing the deivce */ 1121 /* re-take ownership to prevent other users from stealing the device */
1198 iwl_pcie_prepare_card_hw(trans); 1122 iwl_pcie_prepare_card_hw(trans);
1199} 1123}
1200 1124
1125static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
1126 const struct fw_img *fw, bool run_in_rfkill)
1127{
1128 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1129 bool hw_rfkill;
1130 int ret;
1131
1132 /* This may fail if AMT took ownership of the device */
1133 if (iwl_pcie_prepare_card_hw(trans)) {
1134 IWL_WARN(trans, "Exit HW not ready\n");
1135 ret = -EIO;
1136 goto out;
1137 }
1138
1139 iwl_enable_rfkill_int(trans);
1140
1141 iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
1142
1143 /*
1144 * We enabled the RF-Kill interrupt and the handler may very
1145 * well be running. Disable the interrupts to make sure no other
1146 * interrupt can be fired.
1147 */
1148 iwl_disable_interrupts(trans);
1149
1150 /* Make sure it finished running */
1151 synchronize_irq(trans_pcie->pci_dev->irq);
1152
1153 mutex_lock(&trans_pcie->mutex);
1154
1155 /* If platform's RF_KILL switch is NOT set to KILL */
1156 hw_rfkill = iwl_is_rfkill_set(trans);
1157 if (hw_rfkill)
1158 set_bit(STATUS_RFKILL, &trans->status);
1159 else
1160 clear_bit(STATUS_RFKILL, &trans->status);
1161 iwl_trans_pcie_rf_kill(trans, hw_rfkill);
1162 if (hw_rfkill && !run_in_rfkill) {
1163 ret = -ERFKILL;
1164 goto out;
1165 }
1166
1167 /* Someone called stop_device, don't try to start_fw */
1168 if (trans_pcie->is_down) {
1169 IWL_WARN(trans,
1170 "Can't start_fw since the HW hasn't been started\n");
1171 ret = -EIO;
1172 goto out;
1173 }
1174
1175 /* make sure rfkill handshake bits are cleared */
1176 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1177 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
1178 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
1179
1180 /* clear (again), then enable host interrupts */
1181 iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
1182
1183 ret = iwl_pcie_nic_init(trans);
1184 if (ret) {
1185 IWL_ERR(trans, "Unable to init nic\n");
1186 goto out;
1187 }
1188
1189 /*
1190 * Now, we load the firmware and don't want to be interrupted, even
 1191 * by the RF-Kill interrupt (hence mask all the interrupts besides the
1192 * FH_TX interrupt which is needed to load the firmware). If the
1193 * RF-Kill switch is toggled, we will find out after having loaded
1194 * the firmware and return the proper value to the caller.
1195 */
1196 iwl_enable_fw_load_int(trans);
1197
1198 /* really make sure rfkill handshake bits are cleared */
1199 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1200 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1201
1202 /* Load the given image to the HW */
1203 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
1204 ret = iwl_pcie_load_given_ucode_8000(trans, fw);
1205 else
1206 ret = iwl_pcie_load_given_ucode(trans, fw);
1207 iwl_enable_interrupts(trans);
1208
1209 /* re-check RF-Kill state since we may have missed the interrupt */
1210 hw_rfkill = iwl_is_rfkill_set(trans);
1211 if (hw_rfkill)
1212 set_bit(STATUS_RFKILL, &trans->status);
1213 else
1214 clear_bit(STATUS_RFKILL, &trans->status);
1215
1216 iwl_trans_pcie_rf_kill(trans, hw_rfkill);
1217 if (hw_rfkill && !run_in_rfkill)
1218 ret = -ERFKILL;
1219
1220out:
1221 mutex_unlock(&trans_pcie->mutex);
1222 return ret;
1223}
1224
1225static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
1226{
1227 iwl_pcie_reset_ict(trans);
1228 iwl_pcie_tx_start(trans, scd_addr);
1229}
1230
1201static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) 1231static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
1202{ 1232{
1203 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1233 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
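The reworked iwl_trans_pcie_start_fw() above masks everything except FH_TX while the ucode is loaded (per the in-code comment), then restores the full interrupt mask and re-reads the RF-kill state it may have missed in the meantime. The sketch below captures that mask/restore/re-check sequence; the FAKE_INT_* bit values and helper names are arbitrary stand-ins, not the transport code itself.

#include <stdbool.h>
#include <stdio.h>

#define FAKE_INT_FH_TX   (1u << 27)
#define FAKE_INT_RF_KILL (1u << 7)
#define FAKE_INT_ALL     (FAKE_INT_FH_TX | FAKE_INT_RF_KILL | 0x3fu)

static unsigned int inta_mask;

static void enable_fw_load_int(void) { inta_mask = FAKE_INT_FH_TX; }
static void enable_interrupts(void)  { inta_mask = FAKE_INT_ALL; }

static bool rfkill_asserted(void)    { return false; }  /* pretend switch is off */

static int start_fw(void)
{
    enable_fw_load_int();       /* only FH_TX can fire during the load */
    /* ... DMA the ucode sections, waiting on FH_TX completions ... */
    enable_interrupts();        /* restore the full mask */

    /* we may have missed an RF-kill interrupt while it was masked */
    if (rfkill_asserted())
        return -1;              /* stands in for -ERFKILL */
    return 0;
}

int main(void)
{
    printf("start_fw -> %d, mask = 0x%x\n", start_fw(), inta_mask);
    return 0;
}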
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index c32889a1e39c..a28414c50edf 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -991,7 +991,8 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
991 goto nla_put_failure; 991 goto nla_put_failure;
992 } 992 }
993 993
994 if (nla_put(skb, HWSIM_ATTR_ADDR_TRANSMITTER, ETH_ALEN, hdr->addr2)) 994 if (nla_put(skb, HWSIM_ATTR_ADDR_TRANSMITTER,
995 ETH_ALEN, data->addresses[1].addr))
995 goto nla_put_failure; 996 goto nla_put_failure;
996 997
997 /* We get the skb->data */ 998 /* We get the skb->data */
@@ -2736,7 +2737,7 @@ static struct mac80211_hwsim_data *get_hwsim_data_ref_from_addr(const u8 *addr)
2736 2737
2737 spin_lock_bh(&hwsim_radio_lock); 2738 spin_lock_bh(&hwsim_radio_lock);
2738 list_for_each_entry(data, &hwsim_radios, list) { 2739 list_for_each_entry(data, &hwsim_radios, list) {
2739 if (mac80211_hwsim_addr_match(data, addr)) { 2740 if (memcmp(data->addresses[1].addr, addr, ETH_ALEN) == 0) {
2740 _found = true; 2741 _found = true;
2741 break; 2742 break;
2742 } 2743 }
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
index 9a3966cd6fbe..155f343981fe 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
@@ -273,8 +273,10 @@ static void rt2400pci_config_filter(struct rt2x00_dev *rt2x00dev,
273 !(filter_flags & FIF_PLCPFAIL)); 273 !(filter_flags & FIF_PLCPFAIL));
274 rt2x00_set_field32(&reg, RXCSR0_DROP_CONTROL, 274 rt2x00_set_field32(&reg, RXCSR0_DROP_CONTROL,
275 !(filter_flags & FIF_CONTROL)); 275 !(filter_flags & FIF_CONTROL));
276 rt2x00_set_field32(&reg, RXCSR0_DROP_NOT_TO_ME, 1); 276 rt2x00_set_field32(&reg, RXCSR0_DROP_NOT_TO_ME,
277 !test_bit(CONFIG_MONITORING, &rt2x00dev->flags));
277 rt2x00_set_field32(&reg, RXCSR0_DROP_TODS, 278 rt2x00_set_field32(&reg, RXCSR0_DROP_TODS,
279 !test_bit(CONFIG_MONITORING, &rt2x00dev->flags) &&
278 !rt2x00dev->intf_ap_count); 280 !rt2x00dev->intf_ap_count);
279 rt2x00_set_field32(&reg, RXCSR0_DROP_VERSION_ERROR, 1); 281 rt2x00_set_field32(&reg, RXCSR0_DROP_VERSION_ERROR, 1);
280 rt2x00mmio_register_write(rt2x00dev, RXCSR0, reg); 282 rt2x00mmio_register_write(rt2x00dev, RXCSR0, reg);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c
index 1a6740b4d396..2553cdd74066 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c
@@ -274,8 +274,10 @@ static void rt2500pci_config_filter(struct rt2x00_dev *rt2x00dev,
274 !(filter_flags & FIF_PLCPFAIL)); 274 !(filter_flags & FIF_PLCPFAIL));
275 rt2x00_set_field32(&reg, RXCSR0_DROP_CONTROL, 275 rt2x00_set_field32(&reg, RXCSR0_DROP_CONTROL,
276 !(filter_flags & FIF_CONTROL)); 276 !(filter_flags & FIF_CONTROL));
277 rt2x00_set_field32(&reg, RXCSR0_DROP_NOT_TO_ME, 1); 277 rt2x00_set_field32(&reg, RXCSR0_DROP_NOT_TO_ME,
278 !test_bit(CONFIG_MONITORING, &rt2x00dev->flags));
278 rt2x00_set_field32(&reg, RXCSR0_DROP_TODS, 279 rt2x00_set_field32(&reg, RXCSR0_DROP_TODS,
280 !test_bit(CONFIG_MONITORING, &rt2x00dev->flags) &&
279 !rt2x00dev->intf_ap_count); 281 !rt2x00dev->intf_ap_count);
280 rt2x00_set_field32(&reg, RXCSR0_DROP_VERSION_ERROR, 1); 282 rt2x00_set_field32(&reg, RXCSR0_DROP_VERSION_ERROR, 1);
281 rt2x00_set_field32(&reg, RXCSR0_DROP_MCAST, 283 rt2x00_set_field32(&reg, RXCSR0_DROP_MCAST,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c
index d26018f30b7d..2d64611de300 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c
@@ -437,8 +437,10 @@ static void rt2500usb_config_filter(struct rt2x00_dev *rt2x00dev,
437 !(filter_flags & FIF_PLCPFAIL)); 437 !(filter_flags & FIF_PLCPFAIL));
438 rt2x00_set_field16(&reg, TXRX_CSR2_DROP_CONTROL, 438 rt2x00_set_field16(&reg, TXRX_CSR2_DROP_CONTROL,
439 !(filter_flags & FIF_CONTROL)); 439 !(filter_flags & FIF_CONTROL));
440 rt2x00_set_field16(&reg, TXRX_CSR2_DROP_NOT_TO_ME, 1); 440 rt2x00_set_field16(&reg, TXRX_CSR2_DROP_NOT_TO_ME,
441 !test_bit(CONFIG_MONITORING, &rt2x00dev->flags));
441 rt2x00_set_field16(&reg, TXRX_CSR2_DROP_TODS, 442 rt2x00_set_field16(&reg, TXRX_CSR2_DROP_TODS,
443 !test_bit(CONFIG_MONITORING, &rt2x00dev->flags) &&
442 !rt2x00dev->intf_ap_count); 444 !rt2x00dev->intf_ap_count);
443 rt2x00_set_field16(&reg, TXRX_CSR2_DROP_VERSION_ERROR, 1); 445 rt2x00_set_field16(&reg, TXRX_CSR2_DROP_VERSION_ERROR, 1);
444 rt2x00_set_field16(&reg, TXRX_CSR2_DROP_MULTICAST, 446 rt2x00_set_field16(&reg, TXRX_CSR2_DROP_MULTICAST,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index 9733b31a780d..a26afcab03ed 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -1490,7 +1490,8 @@ void rt2800_config_filter(struct rt2x00_dev *rt2x00dev,
1490 !(filter_flags & FIF_FCSFAIL)); 1490 !(filter_flags & FIF_FCSFAIL));
1491 rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_PHY_ERROR, 1491 rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_PHY_ERROR,
1492 !(filter_flags & FIF_PLCPFAIL)); 1492 !(filter_flags & FIF_PLCPFAIL));
1493 rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_NOT_TO_ME, 1); 1493 rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_NOT_TO_ME,
1494 !test_bit(CONFIG_MONITORING, &rt2x00dev->flags));
1494 rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_NOT_MY_BSSD, 0); 1495 rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_NOT_MY_BSSD, 0);
1495 rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_VER_ERROR, 1); 1496 rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_VER_ERROR, 1);
1496 rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_MULTICAST, 1497 rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_MULTICAST,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
index 3282ddb766f4..26427140a963 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
@@ -669,6 +669,7 @@ enum rt2x00_state_flags {
669 CONFIG_POWERSAVING, 669 CONFIG_POWERSAVING,
670 CONFIG_HT_DISABLED, 670 CONFIG_HT_DISABLED,
671 CONFIG_QOS_DISABLED, 671 CONFIG_QOS_DISABLED,
672 CONFIG_MONITORING,
672 673
673 /* 674 /*
674 * Mark we currently are sequentially reading TX_STA_FIFO register 675 * Mark we currently are sequentially reading TX_STA_FIFO register
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00config.c b/drivers/net/wireless/ralink/rt2x00/rt2x00config.c
index 7e8bb1198ae9..6a1f508d472f 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00config.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00config.c
@@ -277,6 +277,11 @@ void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
277 else 277 else
278 clear_bit(CONFIG_POWERSAVING, &rt2x00dev->flags); 278 clear_bit(CONFIG_POWERSAVING, &rt2x00dev->flags);
279 279
280 if (conf->flags & IEEE80211_CONF_MONITOR)
281 set_bit(CONFIG_MONITORING, &rt2x00dev->flags);
282 else
283 clear_bit(CONFIG_MONITORING, &rt2x00dev->flags);
284
280 rt2x00dev->curr_band = conf->chandef.chan->band; 285 rt2x00dev->curr_band = conf->chandef.chan->band;
281 rt2x00dev->curr_freq = conf->chandef.chan->center_freq; 286 rt2x00dev->curr_freq = conf->chandef.chan->center_freq;
282 rt2x00dev->tx_power = conf->power_level; 287 rt2x00dev->tx_power = conf->power_level;
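
The rt2x00 hunks above wire the new CONFIG_MONITORING flag (set from IEEE80211_CONF_MONITOR in rt2x00lib_config()) into each chipset's receive filter, so unicast and to-DS frames are no longer dropped in hardware while a monitor interface is active. A minimal standalone sketch of that filter decision, using made-up names rather than the rt2x00 register helpers:

/* Minimal sketch (not the rt2x00 API): how CONFIG_MONITORING is meant
 * to influence the "drop frames not addressed to me" and "drop frames
 * to the DS" receive-filter bits.  All names here are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

struct filter_state {
        bool monitoring;        /* IEEE80211_CONF_MONITOR requested */
        int  intf_ap_count;     /* number of AP interfaces */
};

static void compute_rx_filter(const struct filter_state *s,
                              bool *drop_not_to_me, bool *drop_to_ds)
{
        /* In monitor mode the hardware must not drop anything. */
        *drop_not_to_me = !s->monitoring;
        *drop_to_ds = !s->monitoring && !s->intf_ap_count;
}

int main(void)
{
        struct filter_state s = { .monitoring = true, .intf_ap_count = 0 };
        bool not_to_me, to_ds;

        compute_rx_filter(&s, &not_to_me, &to_ds);
        printf("drop_not_to_me=%d drop_to_ds=%d\n", not_to_me, to_ds);
        return 0;
}
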
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
index 3c26ee65a415..13da95a24cf7 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
@@ -385,11 +385,6 @@ void rt2x00mac_configure_filter(struct ieee80211_hw *hw,
385 *total_flags |= FIF_PSPOLL; 385 *total_flags |= FIF_PSPOLL;
386 } 386 }
387 387
388 /*
389 * Check if there is any work left for us.
390 */
391 if (rt2x00dev->packet_filter == *total_flags)
392 return;
393 rt2x00dev->packet_filter = *total_flags; 388 rt2x00dev->packet_filter = *total_flags;
394 389
395 rt2x00dev->ops->lib->config_filter(rt2x00dev, *total_flags); 390 rt2x00dev->ops->lib->config_filter(rt2x00dev, *total_flags);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt61pci.c b/drivers/net/wireless/ralink/rt2x00/rt61pci.c
index c0e730ea1b69..24a3436ef952 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt61pci.c
@@ -530,8 +530,10 @@ static void rt61pci_config_filter(struct rt2x00_dev *rt2x00dev,
530 !(filter_flags & FIF_PLCPFAIL)); 530 !(filter_flags & FIF_PLCPFAIL));
531 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_CONTROL, 531 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_CONTROL,
532 !(filter_flags & (FIF_CONTROL | FIF_PSPOLL))); 532 !(filter_flags & (FIF_CONTROL | FIF_PSPOLL)));
533 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_NOT_TO_ME, 1); 533 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_NOT_TO_ME,
534 !test_bit(CONFIG_MONITORING, &rt2x00dev->flags));
534 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_TO_DS, 535 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_TO_DS,
536 !test_bit(CONFIG_MONITORING, &rt2x00dev->flags) &&
535 !rt2x00dev->intf_ap_count); 537 !rt2x00dev->intf_ap_count);
536 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_VERSION_ERROR, 1); 538 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_VERSION_ERROR, 1);
537 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_MULTICAST, 539 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_MULTICAST,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt73usb.c b/drivers/net/wireless/ralink/rt2x00/rt73usb.c
index 7081e13b4fd6..7bbc86931168 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt73usb.c
@@ -480,8 +480,10 @@ static void rt73usb_config_filter(struct rt2x00_dev *rt2x00dev,
480 !(filter_flags & FIF_PLCPFAIL)); 480 !(filter_flags & FIF_PLCPFAIL));
481 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_CONTROL, 481 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_CONTROL,
482 !(filter_flags & (FIF_CONTROL | FIF_PSPOLL))); 482 !(filter_flags & (FIF_CONTROL | FIF_PSPOLL)));
483 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_NOT_TO_ME, 1); 483 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_NOT_TO_ME,
484 !test_bit(CONFIG_MONITORING, &rt2x00dev->flags));
484 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_TO_DS, 485 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_TO_DS,
486 !test_bit(CONFIG_MONITORING, &rt2x00dev->flags) &&
485 !rt2x00dev->intf_ap_count); 487 !rt2x00dev->intf_ap_count);
486 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_VERSION_ERROR, 1); 488 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_VERSION_ERROR, 1);
487 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_MULTICAST, 489 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_MULTICAST,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rc.c b/drivers/net/wireless/realtek/rtlwifi/rc.c
index 74c14ce28238..28f7010e7108 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rc.c
@@ -138,6 +138,11 @@ static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv,
138 ((wireless_mode == WIRELESS_MODE_N_5G) || 138 ((wireless_mode == WIRELESS_MODE_N_5G) ||
139 (wireless_mode == WIRELESS_MODE_N_24G))) 139 (wireless_mode == WIRELESS_MODE_N_24G)))
140 rate->flags |= IEEE80211_TX_RC_MCS; 140 rate->flags |= IEEE80211_TX_RC_MCS;
141 if (sta && sta->vht_cap.vht_supported &&
142 (wireless_mode == WIRELESS_MODE_AC_5G ||
143 wireless_mode == WIRELESS_MODE_AC_24G ||
144 wireless_mode == WIRELESS_MODE_AC_ONLY))
145 rate->flags |= IEEE80211_TX_RC_VHT_MCS;
141 } 146 }
142} 147}
143 148
diff --git a/drivers/net/wireless/realtek/rtlwifi/regd.c b/drivers/net/wireless/realtek/rtlwifi/regd.c
index a62bf0a65c32..5be34118e0af 100644
--- a/drivers/net/wireless/realtek/rtlwifi/regd.c
+++ b/drivers/net/wireless/realtek/rtlwifi/regd.c
@@ -351,7 +351,6 @@ static const struct ieee80211_regdomain *_rtl_regdomain_select(
351 case COUNTRY_CODE_SPAIN: 351 case COUNTRY_CODE_SPAIN:
352 case COUNTRY_CODE_FRANCE: 352 case COUNTRY_CODE_FRANCE:
353 case COUNTRY_CODE_ISRAEL: 353 case COUNTRY_CODE_ISRAEL:
354 case COUNTRY_CODE_WORLD_WIDE_13:
355 return &rtl_regdom_12_13; 354 return &rtl_regdom_12_13;
356 case COUNTRY_CODE_MKK: 355 case COUNTRY_CODE_MKK:
357 case COUNTRY_CODE_MKK1: 356 case COUNTRY_CODE_MKK1:
@@ -360,6 +359,7 @@ static const struct ieee80211_regdomain *_rtl_regdomain_select(
360 return &rtl_regdom_14_60_64; 359 return &rtl_regdom_14_60_64;
361 case COUNTRY_CODE_GLOBAL_DOMAIN: 360 case COUNTRY_CODE_GLOBAL_DOMAIN:
362 return &rtl_regdom_14; 361 return &rtl_regdom_14;
362 case COUNTRY_CODE_WORLD_WIDE_13:
363 case COUNTRY_CODE_WORLD_WIDE_13_5G_ALL: 363 case COUNTRY_CODE_WORLD_WIDE_13_5G_ALL:
364 return &rtl_regdom_12_13_5g_all; 364 return &rtl_regdom_12_13_5g_all;
365 default: 365 default:
diff --git a/drivers/net/wireless/ti/wlcore/io.c b/drivers/net/wireless/ti/wlcore/io.c
index 9ac118e727e9..564ca750c5ee 100644
--- a/drivers/net/wireless/ti/wlcore/io.c
+++ b/drivers/net/wireless/ti/wlcore/io.c
@@ -175,14 +175,14 @@ int wlcore_set_partition(struct wl1271 *wl,
175 if (ret < 0) 175 if (ret < 0)
176 goto out; 176 goto out;
177 177
178 /* We don't need the size of the last partition, as it is
179 * automatically calculated based on the total memory size and
180 * the sizes of the previous partitions.
181 */
178 ret = wlcore_raw_write32(wl, HW_PART3_START_ADDR, p->mem3.start); 182 ret = wlcore_raw_write32(wl, HW_PART3_START_ADDR, p->mem3.start);
179 if (ret < 0) 183 if (ret < 0)
180 goto out; 184 goto out;
181 185
182 ret = wlcore_raw_write32(wl, HW_PART3_SIZE_ADDR, p->mem3.size);
183 if (ret < 0)
184 goto out;
185
186out: 186out:
187 return ret; 187 return ret;
188} 188}
diff --git a/drivers/net/wireless/ti/wlcore/io.h b/drivers/net/wireless/ti/wlcore/io.h
index 6c257b54f415..10cf3747694d 100644
--- a/drivers/net/wireless/ti/wlcore/io.h
+++ b/drivers/net/wireless/ti/wlcore/io.h
@@ -36,8 +36,8 @@
36#define HW_PART1_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 12) 36#define HW_PART1_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 12)
37#define HW_PART2_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 16) 37#define HW_PART2_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 16)
38#define HW_PART2_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 20) 38#define HW_PART2_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 20)
39#define HW_PART3_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 24) 39#define HW_PART3_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 24)
40#define HW_PART3_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 28) 40
41#define HW_ACCESS_REGISTER_SIZE 4 41#define HW_ACCESS_REGISTER_SIZE 4
42 42
43#define HW_ACCESS_PRAM_MAX_RANGE 0x3c000 43#define HW_ACCESS_PRAM_MAX_RANGE 0x3c000
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index d6abf191122a..96ccd4e943db 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -364,6 +364,7 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
364 RING_IDX cons, prod; 364 RING_IDX cons, prod;
365 unsigned short id; 365 unsigned short id;
366 struct sk_buff *skb; 366 struct sk_buff *skb;
367 bool more_to_do;
367 368
368 BUG_ON(!netif_carrier_ok(queue->info->netdev)); 369 BUG_ON(!netif_carrier_ok(queue->info->netdev));
369 370
@@ -398,18 +399,8 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
398 399
399 queue->tx.rsp_cons = prod; 400 queue->tx.rsp_cons = prod;
400 401
401 /* 402 RING_FINAL_CHECK_FOR_RESPONSES(&queue->tx, more_to_do);
402 * Set a new event, then check for race with update of tx_cons. 403 } while (more_to_do);
403 * Note that it is essential to schedule a callback, no matter
404 * how few buffers are pending. Even if there is space in the
405 * transmit ring, higher layers may be blocked because too much
406 * data is outstanding: in such cases notification from Xen is
407 * likely to be the only kick that we'll get.
408 */
409 queue->tx.sring->rsp_event =
410 prod + ((queue->tx.sring->req_prod - prod) >> 1) + 1;
411 mb(); /* update shared area */
412 } while ((cons == prod) && (prod != queue->tx.sring->rsp_prod));
413 404
414 xennet_maybe_wake_tx(queue); 405 xennet_maybe_wake_tx(queue);
415} 406}
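
The xen-netfront change above replaces the open-coded event re-arm and race check with RING_FINAL_CHECK_FOR_RESPONSES(). Roughly, that idiom re-arms the response event only once the ring looks empty and then checks one more time, so a response produced in that window is never missed. A loose userspace paraphrase of the pattern (field names simplified; see xen/interface/io/ring.h for the real macros):

/* Rough paraphrase of the Xen shared-ring "final check" idiom used
 * above.  The point: after draining, re-arm the event *before* the
 * final emptiness check, so a racing response still gets noticed.
 */
#include <stdbool.h>

struct sring {
        unsigned int rsp_prod;  /* written by the backend */
        unsigned int rsp_event; /* backend notifies when rsp_prod reaches this */
};

struct ring_front {
        struct sring *sring;
        unsigned int rsp_cons;  /* our consumer index */
};

static bool final_check_for_responses(struct ring_front *r)
{
        bool more = r->sring->rsp_prod != r->rsp_cons;

        if (more)
                return true;
        /* Nothing pending: ask for an event at the next response ... */
        r->sring->rsp_event = r->rsp_cons + 1;
        __sync_synchronize();           /* stand-in for mb() */
        /* ... then look again in case one slipped in meanwhile. */
        return r->sring->rsp_prod != r->rsp_cons;
}

int main(void)
{
        struct sring s = { .rsp_prod = 3, .rsp_event = 0 };
        struct ring_front r = { .sring = &s, .rsp_cons = 3 };

        /* ring drained: re-arms the event and reports no more work */
        return final_check_for_responses(&r) ? 1 : 0;
}
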
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 7e2c43f701bc..5d28e9405f32 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -382,18 +382,18 @@ static const struct nd_cmd_desc __nd_cmd_bus_descs[] = {
382 [ND_CMD_ARS_CAP] = { 382 [ND_CMD_ARS_CAP] = {
383 .in_num = 2, 383 .in_num = 2,
384 .in_sizes = { 8, 8, }, 384 .in_sizes = { 8, 8, },
385 .out_num = 2, 385 .out_num = 4,
386 .out_sizes = { 4, 4, }, 386 .out_sizes = { 4, 4, 4, 4, },
387 }, 387 },
388 [ND_CMD_ARS_START] = { 388 [ND_CMD_ARS_START] = {
389 .in_num = 4, 389 .in_num = 5,
390 .in_sizes = { 8, 8, 2, 6, }, 390 .in_sizes = { 8, 8, 2, 1, 5, },
391 .out_num = 1, 391 .out_num = 2,
392 .out_sizes = { 4, }, 392 .out_sizes = { 4, 4, },
393 }, 393 },
394 [ND_CMD_ARS_STATUS] = { 394 [ND_CMD_ARS_STATUS] = {
395 .out_num = 2, 395 .out_num = 3,
396 .out_sizes = { 4, UINT_MAX, }, 396 .out_sizes = { 4, 4, UINT_MAX, },
397 }, 397 },
398}; 398};
399 399
@@ -442,8 +442,8 @@ u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd,
442 return in_field[1]; 442 return in_field[1];
443 else if (nvdimm && cmd == ND_CMD_VENDOR && idx == 2) 443 else if (nvdimm && cmd == ND_CMD_VENDOR && idx == 2)
444 return out_field[1]; 444 return out_field[1];
445 else if (!nvdimm && cmd == ND_CMD_ARS_STATUS && idx == 1) 445 else if (!nvdimm && cmd == ND_CMD_ARS_STATUS && idx == 2)
446 return ND_CMD_ARS_STATUS_MAX; 446 return out_field[1] - 8;
447 447
448 return UINT_MAX; 448 return UINT_MAX;
449} 449}
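
The ND_CMD_ARS_STATUS descriptor above now declares three output fields, the last of variable size, and nd_cmd_out_size() derives that size from the device-reported total (out_field[1] - 8). A small sketch of this descriptor-driven sizing, with an illustrative field layout rather than the exact libnvdimm/ACPI ARS format:

/* Sketch of descriptor-driven sizing: fixed fields contribute their
 * declared size, and a UINT32_MAX entry means "variable, derived from
 * an earlier output field".  Offsets and semantics are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

struct cmd_desc {
        int out_num;
        uint32_t out_sizes[4];
};

/* ARS_STATUS-like shape: status (4), total size (4), payload (variable) */
static const struct cmd_desc ars_status_desc = {
        .out_num = 3,
        .out_sizes = { 4, 4, UINT32_MAX },
};

static uint32_t out_size(const struct cmd_desc *desc, int idx,
                         const uint32_t *out_field)
{
        if (idx >= desc->out_num)
                return UINT32_MAX;
        if (desc->out_sizes[idx] != UINT32_MAX)
                return desc->out_sizes[idx];
        /* Variable part: device-reported total minus the fixed header
         * that the first two fields already account for. */
        return out_field[1] - 8;
}

int main(void)
{
        uint32_t out_field[2] = { 0, 256 };     /* device reports 256 bytes total */

        printf("payload bytes: %u\n",
               (unsigned)out_size(&ars_status_desc, 2, out_field));
        return 0;
}
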
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 8ebfcaae3f5a..9edf7eb7d17c 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -1277,10 +1277,12 @@ static ssize_t mode_show(struct device *dev,
1277 1277
1278 device_lock(dev); 1278 device_lock(dev);
1279 claim = ndns->claim; 1279 claim = ndns->claim;
1280 if (pmem_should_map_pages(dev) || (claim && is_nd_pfn(claim))) 1280 if (claim && is_nd_btt(claim))
1281 mode = "memory";
1282 else if (claim && is_nd_btt(claim))
1283 mode = "safe"; 1281 mode = "safe";
1282 else if (claim && is_nd_pfn(claim))
1283 mode = "memory";
1284 else if (!claim && pmem_should_map_pages(dev))
1285 mode = "memory";
1284 else 1286 else
1285 mode = "raw"; 1287 mode = "raw";
1286 rc = sprintf(buf, "%s\n", mode); 1288 rc = sprintf(buf, "%s\n", mode);
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 0cc9048b86e2..ae81a2f1da50 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -301,10 +301,8 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn)
301 301
302 switch (le32_to_cpu(pfn_sb->mode)) { 302 switch (le32_to_cpu(pfn_sb->mode)) {
303 case PFN_MODE_RAM: 303 case PFN_MODE_RAM:
304 break;
305 case PFN_MODE_PMEM: 304 case PFN_MODE_PMEM:
306 /* TODO: allocate from PMEM support */ 305 break;
307 return -ENOTTY;
308 default: 306 default:
309 return -ENXIO; 307 return -ENXIO;
310 } 308 }
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 7edf31671dab..8d0b54670184 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -41,7 +41,7 @@ struct pmem_device {
41 phys_addr_t phys_addr; 41 phys_addr_t phys_addr;
42 /* when non-zero this device is hosting a 'pfn' instance */ 42 /* when non-zero this device is hosting a 'pfn' instance */
43 phys_addr_t data_offset; 43 phys_addr_t data_offset;
44 unsigned long pfn_flags; 44 u64 pfn_flags;
45 void __pmem *virt_addr; 45 void __pmem *virt_addr;
46 size_t size; 46 size_t size;
47 struct badblocks bb; 47 struct badblocks bb;
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index 5d6237391dcd..b586d84f2518 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -17,5 +17,6 @@ config BLK_DEV_NVME_SCSI
17 and block devices nodes, as well a a translation for a small 17 and block devices nodes, as well a a translation for a small
18 number of selected SCSI commands to NVMe commands to the NVMe 18 number of selected SCSI commands to NVMe commands to the NVMe
19 driver. If you don't know what this means you probably want 19 driver. If you don't know what this means you probably want
20 to say N here, and if you know what it means you probably 20 to say N here, unless you run a distro that abuses the SCSI
21 want to say N as well. 21 emulation to provide stable device names for mount by id, like
22 some OpenSuSE and SLES versions.
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index c5bf001af559..03c46412fff4 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -55,8 +55,9 @@ static void nvme_free_ns(struct kref *kref)
55 ns->disk->private_data = NULL; 55 ns->disk->private_data = NULL;
56 spin_unlock(&dev_list_lock); 56 spin_unlock(&dev_list_lock);
57 57
58 nvme_put_ctrl(ns->ctrl);
59 put_disk(ns->disk); 58 put_disk(ns->disk);
59 ida_simple_remove(&ns->ctrl->ns_ida, ns->instance);
60 nvme_put_ctrl(ns->ctrl);
60 kfree(ns); 61 kfree(ns);
61} 62}
62 63
@@ -183,7 +184,7 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
183 goto out_unmap; 184 goto out_unmap;
184 } 185 }
185 186
186 if (meta_buffer) { 187 if (meta_buffer && meta_len) {
187 struct bio_integrity_payload *bip; 188 struct bio_integrity_payload *bip;
188 189
189 meta = kmalloc(meta_len, GFP_KERNEL); 190 meta = kmalloc(meta_len, GFP_KERNEL);
@@ -373,6 +374,8 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
373 374
374 if (copy_from_user(&io, uio, sizeof(io))) 375 if (copy_from_user(&io, uio, sizeof(io)))
375 return -EFAULT; 376 return -EFAULT;
377 if (io.flags)
378 return -EINVAL;
376 379
377 switch (io.opcode) { 380 switch (io.opcode) {
378 case nvme_cmd_write: 381 case nvme_cmd_write:
@@ -424,6 +427,8 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
424 return -EACCES; 427 return -EACCES;
425 if (copy_from_user(&cmd, ucmd, sizeof(cmd))) 428 if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
426 return -EFAULT; 429 return -EFAULT;
430 if (cmd.flags)
431 return -EINVAL;
427 432
428 memset(&c, 0, sizeof(c)); 433 memset(&c, 0, sizeof(c));
429 c.common.opcode = cmd.opcode; 434 c.common.opcode = cmd.opcode;
@@ -556,6 +561,10 @@ static int nvme_revalidate_disk(struct gendisk *disk)
556 u16 old_ms; 561 u16 old_ms;
557 unsigned short bs; 562 unsigned short bs;
558 563
564 if (test_bit(NVME_NS_DEAD, &ns->flags)) {
565 set_capacity(disk, 0);
566 return -ENODEV;
567 }
559 if (nvme_identify_ns(ns->ctrl, ns->ns_id, &id)) { 568 if (nvme_identify_ns(ns->ctrl, ns->ns_id, &id)) {
560 dev_warn(ns->ctrl->dev, "%s: Identify failure nvme%dn%d\n", 569 dev_warn(ns->ctrl->dev, "%s: Identify failure nvme%dn%d\n",
561 __func__, ns->ctrl->instance, ns->ns_id); 570 __func__, ns->ctrl->instance, ns->ns_id);
@@ -831,6 +840,23 @@ int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
831 return ret; 840 return ret;
832} 841}
833 842
843static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
844 struct request_queue *q)
845{
846 if (ctrl->max_hw_sectors) {
847 u32 max_segments =
848 (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;
849
850 blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
851 blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
852 }
853 if (ctrl->stripe_size)
854 blk_queue_chunk_sectors(q, ctrl->stripe_size >> 9);
855 if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
856 blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
857 blk_queue_virt_boundary(q, ctrl->page_size - 1);
858}
859
834/* 860/*
835 * Initialize the cached copies of the Identify data and various controller 861 * Initialize the cached copies of the Identify data and various controller
836 * register in our nvme_ctrl structure. This should be called as soon as 862 * register in our nvme_ctrl structure. This should be called as soon as
@@ -888,6 +914,8 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
888 } 914 }
889 } 915 }
890 916
917 nvme_set_queue_limits(ctrl, ctrl->admin_q);
918
891 kfree(id); 919 kfree(id);
892 return 0; 920 return 0;
893} 921}
@@ -1118,10 +1146,13 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
1118 if (!ns) 1146 if (!ns)
1119 return; 1147 return;
1120 1148
1149 ns->instance = ida_simple_get(&ctrl->ns_ida, 1, 0, GFP_KERNEL);
1150 if (ns->instance < 0)
1151 goto out_free_ns;
1152
1121 ns->queue = blk_mq_init_queue(ctrl->tagset); 1153 ns->queue = blk_mq_init_queue(ctrl->tagset);
1122 if (IS_ERR(ns->queue)) 1154 if (IS_ERR(ns->queue))
1123 goto out_free_ns; 1155 goto out_release_instance;
1124 queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
1125 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue); 1156 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
1126 ns->queue->queuedata = ns; 1157 ns->queue->queuedata = ns;
1127 ns->ctrl = ctrl; 1158 ns->ctrl = ctrl;
@@ -1135,17 +1166,9 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
1135 ns->disk = disk; 1166 ns->disk = disk;
1136 ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */ 1167 ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */
1137 1168
1169
1138 blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift); 1170 blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
1139 if (ctrl->max_hw_sectors) { 1171 nvme_set_queue_limits(ctrl, ns->queue);
1140 blk_queue_max_hw_sectors(ns->queue, ctrl->max_hw_sectors);
1141 blk_queue_max_segments(ns->queue,
1142 (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1);
1143 }
1144 if (ctrl->stripe_size)
1145 blk_queue_chunk_sectors(ns->queue, ctrl->stripe_size >> 9);
1146 if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
1147 blk_queue_flush(ns->queue, REQ_FLUSH | REQ_FUA);
1148 blk_queue_virt_boundary(ns->queue, ctrl->page_size - 1);
1149 1172
1150 disk->major = nvme_major; 1173 disk->major = nvme_major;
1151 disk->first_minor = 0; 1174 disk->first_minor = 0;
@@ -1154,7 +1177,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
1154 disk->queue = ns->queue; 1177 disk->queue = ns->queue;
1155 disk->driverfs_dev = ctrl->device; 1178 disk->driverfs_dev = ctrl->device;
1156 disk->flags = GENHD_FL_EXT_DEVT; 1179 disk->flags = GENHD_FL_EXT_DEVT;
1157 sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance, nsid); 1180 sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance, ns->instance);
1158 1181
1159 if (nvme_revalidate_disk(ns->disk)) 1182 if (nvme_revalidate_disk(ns->disk))
1160 goto out_free_disk; 1183 goto out_free_disk;
@@ -1174,40 +1197,29 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
1174 kfree(disk); 1197 kfree(disk);
1175 out_free_queue: 1198 out_free_queue:
1176 blk_cleanup_queue(ns->queue); 1199 blk_cleanup_queue(ns->queue);
1200 out_release_instance:
1201 ida_simple_remove(&ctrl->ns_ida, ns->instance);
1177 out_free_ns: 1202 out_free_ns:
1178 kfree(ns); 1203 kfree(ns);
1179} 1204}
1180 1205
1181static void nvme_ns_remove(struct nvme_ns *ns) 1206static void nvme_ns_remove(struct nvme_ns *ns)
1182{ 1207{
1183 bool kill = nvme_io_incapable(ns->ctrl) && 1208 if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
1184 !blk_queue_dying(ns->queue); 1209 return;
1185
1186 lockdep_assert_held(&ns->ctrl->namespaces_mutex);
1187
1188 if (kill) {
1189 blk_set_queue_dying(ns->queue);
1190 1210
1191 /*
1192 * The controller was shutdown first if we got here through
1193 * device removal. The shutdown may requeue outstanding
1194 * requests. These need to be aborted immediately so
1195 * del_gendisk doesn't block indefinitely for their completion.
1196 */
1197 blk_mq_abort_requeue_list(ns->queue);
1198 }
1199 if (ns->disk->flags & GENHD_FL_UP) { 1211 if (ns->disk->flags & GENHD_FL_UP) {
1200 if (blk_get_integrity(ns->disk)) 1212 if (blk_get_integrity(ns->disk))
1201 blk_integrity_unregister(ns->disk); 1213 blk_integrity_unregister(ns->disk);
1202 sysfs_remove_group(&disk_to_dev(ns->disk)->kobj, 1214 sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
1203 &nvme_ns_attr_group); 1215 &nvme_ns_attr_group);
1204 del_gendisk(ns->disk); 1216 del_gendisk(ns->disk);
1205 }
1206 if (kill || !blk_queue_dying(ns->queue)) {
1207 blk_mq_abort_requeue_list(ns->queue); 1217 blk_mq_abort_requeue_list(ns->queue);
1208 blk_cleanup_queue(ns->queue); 1218 blk_cleanup_queue(ns->queue);
1209 } 1219 }
1220 mutex_lock(&ns->ctrl->namespaces_mutex);
1210 list_del_init(&ns->list); 1221 list_del_init(&ns->list);
1222 mutex_unlock(&ns->ctrl->namespaces_mutex);
1211 nvme_put_ns(ns); 1223 nvme_put_ns(ns);
1212} 1224}
1213 1225
@@ -1301,10 +1313,8 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
1301{ 1313{
1302 struct nvme_ns *ns, *next; 1314 struct nvme_ns *ns, *next;
1303 1315
1304 mutex_lock(&ctrl->namespaces_mutex);
1305 list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) 1316 list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
1306 nvme_ns_remove(ns); 1317 nvme_ns_remove(ns);
1307 mutex_unlock(&ctrl->namespaces_mutex);
1308} 1318}
1309 1319
1310static DEFINE_IDA(nvme_instance_ida); 1320static DEFINE_IDA(nvme_instance_ida);
@@ -1351,6 +1361,7 @@ static void nvme_free_ctrl(struct kref *kref)
1351 1361
1352 put_device(ctrl->device); 1362 put_device(ctrl->device);
1353 nvme_release_instance(ctrl); 1363 nvme_release_instance(ctrl);
1364 ida_destroy(&ctrl->ns_ida);
1354 1365
1355 ctrl->ops->free_ctrl(ctrl); 1366 ctrl->ops->free_ctrl(ctrl);
1356} 1367}
@@ -1391,6 +1402,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
1391 } 1402 }
1392 get_device(ctrl->device); 1403 get_device(ctrl->device);
1393 dev_set_drvdata(ctrl->device, ctrl); 1404 dev_set_drvdata(ctrl->device, ctrl);
1405 ida_init(&ctrl->ns_ida);
1394 1406
1395 spin_lock(&dev_list_lock); 1407 spin_lock(&dev_list_lock);
1396 list_add_tail(&ctrl->node, &nvme_ctrl_list); 1408 list_add_tail(&ctrl->node, &nvme_ctrl_list);
@@ -1403,6 +1415,38 @@ out:
1403 return ret; 1415 return ret;
1404} 1416}
1405 1417
1418/**
1419 * nvme_kill_queues(): Ends all namespace queues
1420 * @ctrl: the dead controller that needs to end
1421 *
1422 * Call this function when the driver determines it is unable to get the
1423 * controller in a state capable of servicing IO.
1424 */
1425void nvme_kill_queues(struct nvme_ctrl *ctrl)
1426{
1427 struct nvme_ns *ns;
1428
1429 mutex_lock(&ctrl->namespaces_mutex);
1430 list_for_each_entry(ns, &ctrl->namespaces, list) {
1431 if (!kref_get_unless_zero(&ns->kref))
1432 continue;
1433
1434 /*
1435 * Revalidating a dead namespace sets capacity to 0. This will
1436 * end buffered writers dirtying pages that can't be synced.
1437 */
1438 if (!test_and_set_bit(NVME_NS_DEAD, &ns->flags))
1439 revalidate_disk(ns->disk);
1440
1441 blk_set_queue_dying(ns->queue);
1442 blk_mq_abort_requeue_list(ns->queue);
1443 blk_mq_start_stopped_hw_queues(ns->queue, true);
1444
1445 nvme_put_ns(ns);
1446 }
1447 mutex_unlock(&ctrl->namespaces_mutex);
1448}
1449
1406void nvme_stop_queues(struct nvme_ctrl *ctrl) 1450void nvme_stop_queues(struct nvme_ctrl *ctrl)
1407{ 1451{
1408 struct nvme_ns *ns; 1452 struct nvme_ns *ns;
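
The nvme core changes above name the gendisk after a controller-scoped instance number taken from ctrl->ns_ida instead of the raw namespace ID, and release it only in nvme_free_ns(), so a re-added namespace cannot collide with a disk that is still being torn down. A toy stand-in for the IDA semantics this relies on (the kernel IDA is lock-safe and unbounded; this only shows the allocate-lowest-free idea):

/* Toy stand-in for the ida_simple_get()/ida_simple_remove() pair used
 * above to give each namespace a controller-unique 'instance' for its
 * disk name.  Illustration only, not the kernel implementation.
 */
#include <stdio.h>

#define MAX_IDS 64

struct toy_ida {
        unsigned long long used;        /* bit i set => id i allocated */
};

static int toy_ida_get(struct toy_ida *ida, int start)
{
        for (int id = start; id < MAX_IDS; id++) {
                if (!(ida->used & (1ULL << id))) {
                        ida->used |= 1ULL << id;
                        return id;
                }
        }
        return -1;      /* exhausted */
}

static void toy_ida_remove(struct toy_ida *ida, int id)
{
        ida->used &= ~(1ULL << id);
}

int main(void)
{
        struct toy_ida ida = { 0 };
        int a = toy_ida_get(&ida, 1);   /* first namespace  -> "n1" */
        int b = toy_ida_get(&ida, 1);   /* second namespace -> "n2" */

        toy_ida_remove(&ida, a);        /* namespace 1 fully released ... */
        printf("a=%d b=%d next=%d\n", a, b, toy_ida_get(&ida, 1));
        return 0;
}
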
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 5cd3725e2fa4..6bb15e4926dc 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -146,9 +146,10 @@ struct nvme_nvm_command {
146 }; 146 };
147}; 147};
148 148
149#define NVME_NVM_LP_MLC_PAIRS 886
149struct nvme_nvm_lp_mlc { 150struct nvme_nvm_lp_mlc {
150 __u16 num_pairs; 151 __u16 num_pairs;
151 __u8 pairs[886]; 152 __u8 pairs[NVME_NVM_LP_MLC_PAIRS];
152}; 153};
153 154
154struct nvme_nvm_lp_tbl { 155struct nvme_nvm_lp_tbl {
@@ -282,9 +283,14 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
282 memcpy(dst->lptbl.id, src->lptbl.id, 8); 283 memcpy(dst->lptbl.id, src->lptbl.id, 8);
283 dst->lptbl.mlc.num_pairs = 284 dst->lptbl.mlc.num_pairs =
284 le16_to_cpu(src->lptbl.mlc.num_pairs); 285 le16_to_cpu(src->lptbl.mlc.num_pairs);
285 /* 4 bits per pair */ 286
287 if (dst->lptbl.mlc.num_pairs > NVME_NVM_LP_MLC_PAIRS) {
288 pr_err("nvm: number of MLC pairs not supported\n");
289 return -EINVAL;
290 }
291
286 memcpy(dst->lptbl.mlc.pairs, src->lptbl.mlc.pairs, 292 memcpy(dst->lptbl.mlc.pairs, src->lptbl.mlc.pairs,
287 dst->lptbl.mlc.num_pairs >> 1); 293 dst->lptbl.mlc.num_pairs);
288 } 294 }
289 } 295 }
290 296
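
The lightnvm fix above bounds-checks the device-reported MLC pair count against NVME_NVM_LP_MLC_PAIRS before copying it byte-for-byte. The general pattern, sketched standalone with an illustrative structure layout:

/* Sketch of the fix pattern: never trust a device-reported element
 * count; validate it against the destination capacity before copying.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define LP_MLC_PAIRS 886

struct mlc_tbl {
        uint16_t num_pairs;
        uint8_t pairs[LP_MLC_PAIRS];
};

static int copy_mlc_pairs(struct mlc_tbl *dst, const uint8_t *src,
                          uint16_t reported_pairs)
{
        if (reported_pairs > LP_MLC_PAIRS)
                return -EINVAL; /* would overflow dst->pairs */

        dst->num_pairs = reported_pairs;
        memcpy(dst->pairs, src, reported_pairs);
        return 0;
}

int main(void)
{
        static uint8_t raw[LP_MLC_PAIRS];
        struct mlc_tbl tbl;

        printf("ok=%d bad=%d\n",
               copy_mlc_pairs(&tbl, raw, 886),
               copy_mlc_pairs(&tbl, raw, 1000));
        return 0;
}
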
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 4fb5bb737868..fb15ba5f5d19 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -72,6 +72,7 @@ struct nvme_ctrl {
72 struct mutex namespaces_mutex; 72 struct mutex namespaces_mutex;
73 struct device *device; /* char device */ 73 struct device *device; /* char device */
74 struct list_head node; 74 struct list_head node;
75 struct ida ns_ida;
75 76
76 char name[12]; 77 char name[12];
77 char serial[20]; 78 char serial[20];
@@ -102,6 +103,7 @@ struct nvme_ns {
102 struct request_queue *queue; 103 struct request_queue *queue;
103 struct gendisk *disk; 104 struct gendisk *disk;
104 struct kref kref; 105 struct kref kref;
106 int instance;
105 107
106 u8 eui[8]; 108 u8 eui[8];
107 u8 uuid[16]; 109 u8 uuid[16];
@@ -112,6 +114,11 @@ struct nvme_ns {
112 bool ext; 114 bool ext;
113 u8 pi_type; 115 u8 pi_type;
114 int type; 116 int type;
117 unsigned long flags;
118
119#define NVME_NS_REMOVING 0
120#define NVME_NS_DEAD 1
121
115 u64 mode_select_num_blocks; 122 u64 mode_select_num_blocks;
116 u32 mode_select_block_len; 123 u32 mode_select_block_len;
117}; 124};
@@ -139,9 +146,9 @@ static inline bool nvme_io_incapable(struct nvme_ctrl *ctrl)
139 u32 val = 0; 146 u32 val = 0;
140 147
141 if (ctrl->ops->io_incapable(ctrl)) 148 if (ctrl->ops->io_incapable(ctrl))
142 return false; 149 return true;
143 if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val)) 150 if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
144 return false; 151 return true;
145 return val & NVME_CSTS_CFS; 152 return val & NVME_CSTS_CFS;
146} 153}
147 154
@@ -240,6 +247,7 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
240 247
241void nvme_stop_queues(struct nvme_ctrl *ctrl); 248void nvme_stop_queues(struct nvme_ctrl *ctrl);
242void nvme_start_queues(struct nvme_ctrl *ctrl); 249void nvme_start_queues(struct nvme_ctrl *ctrl);
250void nvme_kill_queues(struct nvme_ctrl *ctrl);
243 251
244struct request *nvme_alloc_request(struct request_queue *q, 252struct request *nvme_alloc_request(struct request_queue *q,
245 struct nvme_command *cmd, unsigned int flags); 253 struct nvme_command *cmd, unsigned int flags);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 72ef8322d32a..680f5780750c 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -86,7 +86,6 @@ struct nvme_queue;
86 86
87static int nvme_reset(struct nvme_dev *dev); 87static int nvme_reset(struct nvme_dev *dev);
88static void nvme_process_cq(struct nvme_queue *nvmeq); 88static void nvme_process_cq(struct nvme_queue *nvmeq);
89static void nvme_remove_dead_ctrl(struct nvme_dev *dev);
90static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown); 89static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
91 90
92/* 91/*
@@ -120,6 +119,7 @@ struct nvme_dev {
120 unsigned long flags; 119 unsigned long flags;
121 120
122#define NVME_CTRL_RESETTING 0 121#define NVME_CTRL_RESETTING 0
122#define NVME_CTRL_REMOVING 1
123 123
124 struct nvme_ctrl ctrl; 124 struct nvme_ctrl ctrl;
125 struct completion ioq_wait; 125 struct completion ioq_wait;
@@ -286,6 +286,17 @@ static int nvme_init_request(void *data, struct request *req,
286 return 0; 286 return 0;
287} 287}
288 288
289static void nvme_queue_scan(struct nvme_dev *dev)
290{
291 /*
292 * Do not queue new scan work when a controller is reset during
293 * removal.
294 */
295 if (test_bit(NVME_CTRL_REMOVING, &dev->flags))
296 return;
297 queue_work(nvme_workq, &dev->scan_work);
298}
299
289static void nvme_complete_async_event(struct nvme_dev *dev, 300static void nvme_complete_async_event(struct nvme_dev *dev,
290 struct nvme_completion *cqe) 301 struct nvme_completion *cqe)
291{ 302{
@@ -300,7 +311,7 @@ static void nvme_complete_async_event(struct nvme_dev *dev,
300 switch (result & 0xff07) { 311 switch (result & 0xff07) {
301 case NVME_AER_NOTICE_NS_CHANGED: 312 case NVME_AER_NOTICE_NS_CHANGED:
302 dev_info(dev->dev, "rescanning\n"); 313 dev_info(dev->dev, "rescanning\n");
303 queue_work(nvme_workq, &dev->scan_work); 314 nvme_queue_scan(dev);
304 default: 315 default:
305 dev_warn(dev->dev, "async event result %08x\n", result); 316 dev_warn(dev->dev, "async event result %08x\n", result);
306 } 317 }
@@ -678,6 +689,14 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
678 blk_mq_start_request(req); 689 blk_mq_start_request(req);
679 690
680 spin_lock_irq(&nvmeq->q_lock); 691 spin_lock_irq(&nvmeq->q_lock);
692 if (unlikely(nvmeq->cq_vector < 0)) {
693 if (ns && !test_bit(NVME_NS_DEAD, &ns->flags))
694 ret = BLK_MQ_RQ_QUEUE_BUSY;
695 else
696 ret = BLK_MQ_RQ_QUEUE_ERROR;
697 spin_unlock_irq(&nvmeq->q_lock);
698 goto out;
699 }
681 __nvme_submit_cmd(nvmeq, &cmnd); 700 __nvme_submit_cmd(nvmeq, &cmnd);
682 nvme_process_cq(nvmeq); 701 nvme_process_cq(nvmeq);
683 spin_unlock_irq(&nvmeq->q_lock); 702 spin_unlock_irq(&nvmeq->q_lock);
@@ -999,7 +1018,7 @@ static void nvme_cancel_queue_ios(struct request *req, void *data, bool reserved
999 if (!blk_mq_request_started(req)) 1018 if (!blk_mq_request_started(req))
1000 return; 1019 return;
1001 1020
1002 dev_warn(nvmeq->q_dmadev, 1021 dev_dbg_ratelimited(nvmeq->q_dmadev,
1003 "Cancelling I/O %d QID %d\n", req->tag, nvmeq->qid); 1022 "Cancelling I/O %d QID %d\n", req->tag, nvmeq->qid);
1004 1023
1005 status = NVME_SC_ABORT_REQ; 1024 status = NVME_SC_ABORT_REQ;
@@ -1245,6 +1264,12 @@ static struct blk_mq_ops nvme_mq_ops = {
1245static void nvme_dev_remove_admin(struct nvme_dev *dev) 1264static void nvme_dev_remove_admin(struct nvme_dev *dev)
1246{ 1265{
1247 if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) { 1266 if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) {
1267 /*
1268 * If the controller was reset during removal, it's possible
1269 * user requests may be waiting on a stopped queue. Start the
1270 * queue to flush these to completion.
1271 */
1272 blk_mq_start_stopped_hw_queues(dev->ctrl.admin_q, true);
1248 blk_cleanup_queue(dev->ctrl.admin_q); 1273 blk_cleanup_queue(dev->ctrl.admin_q);
1249 blk_mq_free_tag_set(&dev->admin_tagset); 1274 blk_mq_free_tag_set(&dev->admin_tagset);
1250 } 1275 }
@@ -1685,14 +1710,14 @@ static int nvme_dev_add(struct nvme_dev *dev)
1685 return 0; 1710 return 0;
1686 dev->ctrl.tagset = &dev->tagset; 1711 dev->ctrl.tagset = &dev->tagset;
1687 } 1712 }
1688 queue_work(nvme_workq, &dev->scan_work); 1713 nvme_queue_scan(dev);
1689 return 0; 1714 return 0;
1690} 1715}
1691 1716
1692static int nvme_dev_map(struct nvme_dev *dev) 1717static int nvme_pci_enable(struct nvme_dev *dev)
1693{ 1718{
1694 u64 cap; 1719 u64 cap;
1695 int bars, result = -ENOMEM; 1720 int result = -ENOMEM;
1696 struct pci_dev *pdev = to_pci_dev(dev->dev); 1721 struct pci_dev *pdev = to_pci_dev(dev->dev);
1697 1722
1698 if (pci_enable_device_mem(pdev)) 1723 if (pci_enable_device_mem(pdev))
@@ -1700,24 +1725,14 @@ static int nvme_dev_map(struct nvme_dev *dev)
1700 1725
1701 dev->entry[0].vector = pdev->irq; 1726 dev->entry[0].vector = pdev->irq;
1702 pci_set_master(pdev); 1727 pci_set_master(pdev);
1703 bars = pci_select_bars(pdev, IORESOURCE_MEM);
1704 if (!bars)
1705 goto disable_pci;
1706
1707 if (pci_request_selected_regions(pdev, bars, "nvme"))
1708 goto disable_pci;
1709 1728
1710 if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)) && 1729 if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)) &&
1711 dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(32))) 1730 dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(32)))
1712 goto disable; 1731 goto disable;
1713 1732
1714 dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
1715 if (!dev->bar)
1716 goto disable;
1717
1718 if (readl(dev->bar + NVME_REG_CSTS) == -1) { 1733 if (readl(dev->bar + NVME_REG_CSTS) == -1) {
1719 result = -ENODEV; 1734 result = -ENODEV;
1720 goto unmap; 1735 goto disable;
1721 } 1736 }
1722 1737
1723 /* 1738 /*
@@ -1727,7 +1742,7 @@ static int nvme_dev_map(struct nvme_dev *dev)
1727 if (!pdev->irq) { 1742 if (!pdev->irq) {
1728 result = pci_enable_msix(pdev, dev->entry, 1); 1743 result = pci_enable_msix(pdev, dev->entry, 1);
1729 if (result < 0) 1744 if (result < 0)
1730 goto unmap; 1745 goto disable;
1731 } 1746 }
1732 1747
1733 cap = lo_hi_readq(dev->bar + NVME_REG_CAP); 1748 cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
@@ -1754,18 +1769,20 @@ static int nvme_dev_map(struct nvme_dev *dev)
1754 pci_save_state(pdev); 1769 pci_save_state(pdev);
1755 return 0; 1770 return 0;
1756 1771
1757 unmap:
1758 iounmap(dev->bar);
1759 dev->bar = NULL;
1760 disable: 1772 disable:
1761 pci_release_regions(pdev);
1762 disable_pci:
1763 pci_disable_device(pdev); 1773 pci_disable_device(pdev);
1764 return result; 1774 return result;
1765} 1775}
1766 1776
1767static void nvme_dev_unmap(struct nvme_dev *dev) 1777static void nvme_dev_unmap(struct nvme_dev *dev)
1768{ 1778{
1779 if (dev->bar)
1780 iounmap(dev->bar);
1781 pci_release_regions(to_pci_dev(dev->dev));
1782}
1783
1784static void nvme_pci_disable(struct nvme_dev *dev)
1785{
1769 struct pci_dev *pdev = to_pci_dev(dev->dev); 1786 struct pci_dev *pdev = to_pci_dev(dev->dev);
1770 1787
1771 if (pdev->msi_enabled) 1788 if (pdev->msi_enabled)
@@ -1773,12 +1790,6 @@ static void nvme_dev_unmap(struct nvme_dev *dev)
1773 else if (pdev->msix_enabled) 1790 else if (pdev->msix_enabled)
1774 pci_disable_msix(pdev); 1791 pci_disable_msix(pdev);
1775 1792
1776 if (dev->bar) {
1777 iounmap(dev->bar);
1778 dev->bar = NULL;
1779 pci_release_regions(pdev);
1780 }
1781
1782 if (pci_is_enabled(pdev)) { 1793 if (pci_is_enabled(pdev)) {
1783 pci_disable_pcie_error_reporting(pdev); 1794 pci_disable_pcie_error_reporting(pdev);
1784 pci_disable_device(pdev); 1795 pci_disable_device(pdev);
@@ -1837,7 +1848,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
1837 nvme_dev_list_remove(dev); 1848 nvme_dev_list_remove(dev);
1838 1849
1839 mutex_lock(&dev->shutdown_lock); 1850 mutex_lock(&dev->shutdown_lock);
1840 if (dev->bar) { 1851 if (pci_is_enabled(to_pci_dev(dev->dev))) {
1841 nvme_stop_queues(&dev->ctrl); 1852 nvme_stop_queues(&dev->ctrl);
1842 csts = readl(dev->bar + NVME_REG_CSTS); 1853 csts = readl(dev->bar + NVME_REG_CSTS);
1843 } 1854 }
@@ -1850,7 +1861,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
1850 nvme_disable_io_queues(dev); 1861 nvme_disable_io_queues(dev);
1851 nvme_disable_admin_queue(dev, shutdown); 1862 nvme_disable_admin_queue(dev, shutdown);
1852 } 1863 }
1853 nvme_dev_unmap(dev); 1864 nvme_pci_disable(dev);
1854 1865
1855 for (i = dev->queue_count - 1; i >= 0; i--) 1866 for (i = dev->queue_count - 1; i >= 0; i--)
1856 nvme_clear_queue(dev->queues[i]); 1867 nvme_clear_queue(dev->queues[i]);
@@ -1894,10 +1905,20 @@ static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
1894 kfree(dev); 1905 kfree(dev);
1895} 1906}
1896 1907
1908static void nvme_remove_dead_ctrl(struct nvme_dev *dev, int status)
1909{
1910 dev_warn(dev->dev, "Removing after probe failure status: %d\n", status);
1911
1912 kref_get(&dev->ctrl.kref);
1913 nvme_dev_disable(dev, false);
1914 if (!schedule_work(&dev->remove_work))
1915 nvme_put_ctrl(&dev->ctrl);
1916}
1917
1897static void nvme_reset_work(struct work_struct *work) 1918static void nvme_reset_work(struct work_struct *work)
1898{ 1919{
1899 struct nvme_dev *dev = container_of(work, struct nvme_dev, reset_work); 1920 struct nvme_dev *dev = container_of(work, struct nvme_dev, reset_work);
1900 int result; 1921 int result = -ENODEV;
1901 1922
1902 if (WARN_ON(test_bit(NVME_CTRL_RESETTING, &dev->flags))) 1923 if (WARN_ON(test_bit(NVME_CTRL_RESETTING, &dev->flags)))
1903 goto out; 1924 goto out;
@@ -1906,37 +1927,37 @@ static void nvme_reset_work(struct work_struct *work)
1906 * If we're called to reset a live controller first shut it down before 1927 * If we're called to reset a live controller first shut it down before
1907 * moving on. 1928 * moving on.
1908 */ 1929 */
1909 if (dev->bar) 1930 if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
1910 nvme_dev_disable(dev, false); 1931 nvme_dev_disable(dev, false);
1911 1932
1912 set_bit(NVME_CTRL_RESETTING, &dev->flags); 1933 set_bit(NVME_CTRL_RESETTING, &dev->flags);
1913 1934
1914 result = nvme_dev_map(dev); 1935 result = nvme_pci_enable(dev);
1915 if (result) 1936 if (result)
1916 goto out; 1937 goto out;
1917 1938
1918 result = nvme_configure_admin_queue(dev); 1939 result = nvme_configure_admin_queue(dev);
1919 if (result) 1940 if (result)
1920 goto unmap; 1941 goto out;
1921 1942
1922 nvme_init_queue(dev->queues[0], 0); 1943 nvme_init_queue(dev->queues[0], 0);
1923 result = nvme_alloc_admin_tags(dev); 1944 result = nvme_alloc_admin_tags(dev);
1924 if (result) 1945 if (result)
1925 goto disable; 1946 goto out;
1926 1947
1927 result = nvme_init_identify(&dev->ctrl); 1948 result = nvme_init_identify(&dev->ctrl);
1928 if (result) 1949 if (result)
1929 goto free_tags; 1950 goto out;
1930 1951
1931 result = nvme_setup_io_queues(dev); 1952 result = nvme_setup_io_queues(dev);
1932 if (result) 1953 if (result)
1933 goto free_tags; 1954 goto out;
1934 1955
1935 dev->ctrl.event_limit = NVME_NR_AEN_COMMANDS; 1956 dev->ctrl.event_limit = NVME_NR_AEN_COMMANDS;
1936 1957
1937 result = nvme_dev_list_add(dev); 1958 result = nvme_dev_list_add(dev);
1938 if (result) 1959 if (result)
1939 goto remove; 1960 goto out;
1940 1961
1941 /* 1962 /*
1942 * Keep the controller around but remove all namespaces if we don't have 1963 * Keep the controller around but remove all namespaces if we don't have
@@ -1953,19 +1974,8 @@ static void nvme_reset_work(struct work_struct *work)
1953 clear_bit(NVME_CTRL_RESETTING, &dev->flags); 1974 clear_bit(NVME_CTRL_RESETTING, &dev->flags);
1954 return; 1975 return;
1955 1976
1956 remove:
1957 nvme_dev_list_remove(dev);
1958 free_tags:
1959 nvme_dev_remove_admin(dev);
1960 blk_put_queue(dev->ctrl.admin_q);
1961 dev->ctrl.admin_q = NULL;
1962 dev->queues[0]->tags = NULL;
1963 disable:
1964 nvme_disable_admin_queue(dev, false);
1965 unmap:
1966 nvme_dev_unmap(dev);
1967 out: 1977 out:
1968 nvme_remove_dead_ctrl(dev); 1978 nvme_remove_dead_ctrl(dev, result);
1969} 1979}
1970 1980
1971static void nvme_remove_dead_ctrl_work(struct work_struct *work) 1981static void nvme_remove_dead_ctrl_work(struct work_struct *work)
@@ -1973,19 +1983,12 @@ static void nvme_remove_dead_ctrl_work(struct work_struct *work)
1973 struct nvme_dev *dev = container_of(work, struct nvme_dev, remove_work); 1983 struct nvme_dev *dev = container_of(work, struct nvme_dev, remove_work);
1974 struct pci_dev *pdev = to_pci_dev(dev->dev); 1984 struct pci_dev *pdev = to_pci_dev(dev->dev);
1975 1985
1986 nvme_kill_queues(&dev->ctrl);
1976 if (pci_get_drvdata(pdev)) 1987 if (pci_get_drvdata(pdev))
1977 pci_stop_and_remove_bus_device_locked(pdev); 1988 pci_stop_and_remove_bus_device_locked(pdev);
1978 nvme_put_ctrl(&dev->ctrl); 1989 nvme_put_ctrl(&dev->ctrl);
1979} 1990}
1980 1991
1981static void nvme_remove_dead_ctrl(struct nvme_dev *dev)
1982{
1983 dev_warn(dev->dev, "Removing after probe failure\n");
1984 kref_get(&dev->ctrl.kref);
1985 if (!schedule_work(&dev->remove_work))
1986 nvme_put_ctrl(&dev->ctrl);
1987}
1988
1989static int nvme_reset(struct nvme_dev *dev) 1992static int nvme_reset(struct nvme_dev *dev)
1990{ 1993{
1991 if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q)) 1994 if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q))
@@ -2037,6 +2040,27 @@ static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
2037 .free_ctrl = nvme_pci_free_ctrl, 2040 .free_ctrl = nvme_pci_free_ctrl,
2038}; 2041};
2039 2042
2043static int nvme_dev_map(struct nvme_dev *dev)
2044{
2045 int bars;
2046 struct pci_dev *pdev = to_pci_dev(dev->dev);
2047
2048 bars = pci_select_bars(pdev, IORESOURCE_MEM);
2049 if (!bars)
2050 return -ENODEV;
2051 if (pci_request_selected_regions(pdev, bars, "nvme"))
2052 return -ENODEV;
2053
2054 dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
2055 if (!dev->bar)
2056 goto release;
2057
2058 return 0;
2059 release:
2060 pci_release_regions(pdev);
2061 return -ENODEV;
2062}
2063
2040static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) 2064static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2041{ 2065{
2042 int node, result = -ENOMEM; 2066 int node, result = -ENOMEM;
@@ -2061,6 +2085,10 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2061 dev->dev = get_device(&pdev->dev); 2085 dev->dev = get_device(&pdev->dev);
2062 pci_set_drvdata(pdev, dev); 2086 pci_set_drvdata(pdev, dev);
2063 2087
2088 result = nvme_dev_map(dev);
2089 if (result)
2090 goto free;
2091
2064 INIT_LIST_HEAD(&dev->node); 2092 INIT_LIST_HEAD(&dev->node);
2065 INIT_WORK(&dev->scan_work, nvme_dev_scan); 2093 INIT_WORK(&dev->scan_work, nvme_dev_scan);
2066 INIT_WORK(&dev->reset_work, nvme_reset_work); 2094 INIT_WORK(&dev->reset_work, nvme_reset_work);
@@ -2084,6 +2112,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2084 nvme_release_prp_pools(dev); 2112 nvme_release_prp_pools(dev);
2085 put_pci: 2113 put_pci:
2086 put_device(dev->dev); 2114 put_device(dev->dev);
2115 nvme_dev_unmap(dev);
2087 free: 2116 free:
2088 kfree(dev->queues); 2117 kfree(dev->queues);
2089 kfree(dev->entry); 2118 kfree(dev->entry);
@@ -2107,24 +2136,27 @@ static void nvme_shutdown(struct pci_dev *pdev)
2107 nvme_dev_disable(dev, true); 2136 nvme_dev_disable(dev, true);
2108} 2137}
2109 2138
2139/*
2140 * The driver's remove may be called on a device in a partially initialized
2141 * state. This function must not have any dependencies on the device state in
2142 * order to proceed.
2143 */
2110static void nvme_remove(struct pci_dev *pdev) 2144static void nvme_remove(struct pci_dev *pdev)
2111{ 2145{
2112 struct nvme_dev *dev = pci_get_drvdata(pdev); 2146 struct nvme_dev *dev = pci_get_drvdata(pdev);
2113 2147
2114 spin_lock(&dev_list_lock); 2148 set_bit(NVME_CTRL_REMOVING, &dev->flags);
2115 list_del_init(&dev->node);
2116 spin_unlock(&dev_list_lock);
2117
2118 pci_set_drvdata(pdev, NULL); 2149 pci_set_drvdata(pdev, NULL);
2119 flush_work(&dev->reset_work);
2120 flush_work(&dev->scan_work); 2150 flush_work(&dev->scan_work);
2121 nvme_remove_namespaces(&dev->ctrl); 2151 nvme_remove_namespaces(&dev->ctrl);
2122 nvme_uninit_ctrl(&dev->ctrl); 2152 nvme_uninit_ctrl(&dev->ctrl);
2123 nvme_dev_disable(dev, true); 2153 nvme_dev_disable(dev, true);
2154 flush_work(&dev->reset_work);
2124 nvme_dev_remove_admin(dev); 2155 nvme_dev_remove_admin(dev);
2125 nvme_free_queues(dev, 0); 2156 nvme_free_queues(dev, 0);
2126 nvme_release_cmb(dev); 2157 nvme_release_cmb(dev);
2127 nvme_release_prp_pools(dev); 2158 nvme_release_prp_pools(dev);
2159 nvme_dev_unmap(dev);
2128 nvme_put_ctrl(&dev->ctrl); 2160 nvme_put_ctrl(&dev->ctrl);
2129} 2161}
2130 2162
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 6fd4e5a5ef4a..9d11d9837312 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -70,6 +70,9 @@ static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
70 if (pos >= nvmem->size) 70 if (pos >= nvmem->size)
71 return 0; 71 return 0;
72 72
73 if (count < nvmem->word_size)
74 return -EINVAL;
75
73 if (pos + count > nvmem->size) 76 if (pos + count > nvmem->size)
74 count = nvmem->size - pos; 77 count = nvmem->size - pos;
75 78
@@ -95,6 +98,9 @@ static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
95 if (pos >= nvmem->size) 98 if (pos >= nvmem->size)
96 return 0; 99 return 0;
97 100
101 if (count < nvmem->word_size)
102 return -EINVAL;
103
98 if (pos + count > nvmem->size) 104 if (pos + count > nvmem->size)
99 count = nvmem->size - pos; 105 count = nvmem->size - pos;
100 106
diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c
index afb67e7eeee4..3829e5fbf8c3 100644
--- a/drivers/nvmem/qfprom.c
+++ b/drivers/nvmem/qfprom.c
@@ -21,6 +21,7 @@ static struct regmap_config qfprom_regmap_config = {
21 .reg_bits = 32, 21 .reg_bits = 32,
22 .val_bits = 8, 22 .val_bits = 8,
23 .reg_stride = 1, 23 .reg_stride = 1,
24 .val_format_endian = REGMAP_ENDIAN_LITTLE,
24}; 25};
25 26
26static struct nvmem_config econfig = { 27static struct nvmem_config econfig = {
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 706e3ff67f8b..e7bfc175b8e1 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -635,6 +635,13 @@ static u32 __of_msi_map_rid(struct device *dev, struct device_node **np,
635 msi_base = be32_to_cpup(msi_map + 2); 635 msi_base = be32_to_cpup(msi_map + 2);
636 rid_len = be32_to_cpup(msi_map + 3); 636 rid_len = be32_to_cpup(msi_map + 3);
637 637
638 if (rid_base & ~map_mask) {
639 dev_err(parent_dev,
640 "Invalid msi-map translation - msi-map-mask (0x%x) ignores rid-base (0x%x)\n",
641 map_mask, rid_base);
642 return rid_out;
643 }
644
638 msi_controller_node = of_find_node_by_phandle(phandle); 645 msi_controller_node = of_find_node_by_phandle(phandle);
639 646
640 matched = (masked_rid >= rid_base && 647 matched = (masked_rid >= rid_base &&
@@ -654,7 +661,7 @@ static u32 __of_msi_map_rid(struct device *dev, struct device_node **np,
654 if (!matched) 661 if (!matched)
655 return rid_out; 662 return rid_out;
656 663
657 rid_out = masked_rid + msi_base; 664 rid_out = masked_rid - rid_base + msi_base;
658 dev_dbg(dev, 665 dev_dbg(dev,
659 "msi-map at: %s, using mask %08x, rid-base: %08x, msi-base: %08x, length: %08x, rid: %08x -> %08x\n", 666 "msi-map at: %s, using mask %08x, rid-base: %08x, msi-base: %08x, length: %08x, rid: %08x -> %08x\n",
660 dev_name(parent_dev), map_mask, rid_base, msi_base, 667 dev_name(parent_dev), map_mask, rid_base, msi_base,
@@ -679,18 +686,6 @@ u32 of_msi_map_rid(struct device *dev, struct device_node *msi_np, u32 rid_in)
679 return __of_msi_map_rid(dev, &msi_np, rid_in); 686 return __of_msi_map_rid(dev, &msi_np, rid_in);
680} 687}
681 688
682static struct irq_domain *__of_get_msi_domain(struct device_node *np,
683 enum irq_domain_bus_token token)
684{
685 struct irq_domain *d;
686
687 d = irq_find_matching_host(np, token);
688 if (!d)
689 d = irq_find_host(np);
690
691 return d;
692}
693
694/** 689/**
695 * of_msi_map_get_device_domain - Use msi-map to find the relevant MSI domain 690 * of_msi_map_get_device_domain - Use msi-map to find the relevant MSI domain
696 * @dev: device for which the mapping is to be done. 691 * @dev: device for which the mapping is to be done.
@@ -706,7 +701,7 @@ struct irq_domain *of_msi_map_get_device_domain(struct device *dev, u32 rid)
706 struct device_node *np = NULL; 701 struct device_node *np = NULL;
707 702
708 __of_msi_map_rid(dev, &np, rid); 703 __of_msi_map_rid(dev, &np, rid);
709 return __of_get_msi_domain(np, DOMAIN_BUS_PCI_MSI); 704 return irq_find_matching_host(np, DOMAIN_BUS_PCI_MSI);
710} 705}
711 706
712/** 707/**
@@ -730,7 +725,7 @@ struct irq_domain *of_msi_get_domain(struct device *dev,
730 /* Check for a single msi-parent property */ 725 /* Check for a single msi-parent property */
731 msi_np = of_parse_phandle(np, "msi-parent", 0); 726 msi_np = of_parse_phandle(np, "msi-parent", 0);
732 if (msi_np && !of_property_read_bool(msi_np, "#msi-cells")) { 727 if (msi_np && !of_property_read_bool(msi_np, "#msi-cells")) {
733 d = __of_get_msi_domain(msi_np, token); 728 d = irq_find_matching_host(msi_np, token);
734 if (!d) 729 if (!d)
735 of_node_put(msi_np); 730 of_node_put(msi_np);
736 return d; 731 return d;
@@ -744,7 +739,7 @@ struct irq_domain *of_msi_get_domain(struct device *dev,
744 while (!of_parse_phandle_with_args(np, "msi-parent", 739 while (!of_parse_phandle_with_args(np, "msi-parent",
745 "#msi-cells", 740 "#msi-cells",
746 index, &args)) { 741 index, &args)) {
747 d = __of_get_msi_domain(args.np, token); 742 d = irq_find_matching_host(args.np, token);
748 if (d) 743 if (d)
749 return d; 744 return d;
750 745
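
The of/irq.c changes above tighten msi-map translation: an entry whose rid-base has bits outside msi-map-mask is rejected, and a matching RID is remapped by its offset into the window (masked_rid - rid_base + msi_base) rather than by simply adding msi_base. A worked standalone sketch with made-up table values:

/* Worked sketch of the msi-map translation: mask the requester ID,
 * match it against [rid_base, rid_base + rid_len), and remap by its
 * offset into the window.  Table contents are invented for the demo.
 */
#include <stdint.h>
#include <stdio.h>

struct msi_map_entry {
        uint32_t rid_base;
        uint32_t msi_base;
        uint32_t rid_len;
};

static uint32_t msi_map_rid(uint32_t rid_in, uint32_t map_mask,
                            const struct msi_map_entry *map, int n)
{
        uint32_t masked = rid_in & map_mask;

        for (int i = 0; i < n; i++) {
                /* rid-base bits outside the mask can never match */
                if (map[i].rid_base & ~map_mask)
                        continue;
                if (masked >= map[i].rid_base &&
                    masked < map[i].rid_base + map[i].rid_len)
                        return masked - map[i].rid_base + map[i].msi_base;
        }
        return rid_in;  /* untranslated when nothing matches */
}

int main(void)
{
        const struct msi_map_entry map[] = {
                { .rid_base = 0x100, .msi_base = 0x2000, .rid_len = 0x100 },
        };

        /* A mask that hides the rid-base bits rejects the entry, so the
         * RID passes through; with the full mask 0x0142 maps to 0x2042. */
        printf("%#x %#x\n",
               (unsigned)msi_map_rid(0x8142, 0x00ff, map, 1),
               (unsigned)msi_map_rid(0x0142, 0xffff, map, 1));
        return 0;
}
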
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 86829f8064a6..365dc7e83ab4 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -143,11 +143,32 @@ int of_mdio_parse_addr(struct device *dev, const struct device_node *np)
143} 143}
144EXPORT_SYMBOL(of_mdio_parse_addr); 144EXPORT_SYMBOL(of_mdio_parse_addr);
145 145
146/* The following is a list of PHY compatible strings which appear in
147 * some DTBs. The compatible string is never matched against a PHY
148 * driver, so is pointless. We only expect devices which are not PHYs
149 * to have a compatible string, so they can be matched to an MDIO
150 * driver. Encourage users to upgrade their DT blobs to remove these.
151 */
152static const struct of_device_id whitelist_phys[] = {
153 { .compatible = "brcm,40nm-ephy" },
154 { .compatible = "marvell,88E1111", },
155 { .compatible = "marvell,88e1116", },
156 { .compatible = "marvell,88e1118", },
157 { .compatible = "marvell,88e1145", },
158 { .compatible = "marvell,88e1149r", },
159 { .compatible = "marvell,88e1310", },
160 { .compatible = "marvell,88E1510", },
161 { .compatible = "marvell,88E1514", },
162 { .compatible = "moxa,moxart-rtl8201cp", },
163 {}
164};
165
146/* 166/*
147 * Return true if the child node is for a phy. It must either: 167 * Return true if the child node is for a phy. It must either:
148 * o Compatible string of "ethernet-phy-idX.X" 168 * o Compatible string of "ethernet-phy-idX.X"
149 * o Compatible string of "ethernet-phy-ieee802.3-c45" 169 * o Compatible string of "ethernet-phy-ieee802.3-c45"
150 * o Compatible string of "ethernet-phy-ieee802.3-c22" 170 * o Compatible string of "ethernet-phy-ieee802.3-c22"
171 * o In the white list above (and issue a warning)
151 * o No compatibility string 172 * o No compatibility string
152 * 173 *
153 * A device which is not a phy is expected to have a compatible string 174 * A device which is not a phy is expected to have a compatible string
@@ -166,6 +187,13 @@ static bool of_mdiobus_child_is_phy(struct device_node *child)
166 if (of_device_is_compatible(child, "ethernet-phy-ieee802.3-c22")) 187 if (of_device_is_compatible(child, "ethernet-phy-ieee802.3-c22"))
167 return true; 188 return true;
168 189
190 if (of_match_node(whitelist_phys, child)) {
191 pr_warn(FW_WARN
192 "%s: Whitelisted compatible string. Please remove\n",
193 child->full_name);
194 return true;
195 }
196
169 if (!of_find_property(child, "compatible", NULL)) 197 if (!of_find_property(child, "compatible", NULL))
170 return true; 198 return true;
171 199
@@ -256,11 +284,19 @@ static int of_phy_match(struct device *dev, void *phy_np)
256struct phy_device *of_phy_find_device(struct device_node *phy_np) 284struct phy_device *of_phy_find_device(struct device_node *phy_np)
257{ 285{
258 struct device *d; 286 struct device *d;
287 struct mdio_device *mdiodev;
288
259 if (!phy_np) 289 if (!phy_np)
260 return NULL; 290 return NULL;
261 291
262 d = bus_find_device(&mdio_bus_type, NULL, phy_np, of_phy_match); 292 d = bus_find_device(&mdio_bus_type, NULL, phy_np, of_phy_match);
263 return d ? to_phy_device(d) : NULL; 293 if (d) {
294 mdiodev = to_mdio_device(d);
295 if (mdiodev->flags & MDIO_DEVICE_FLAG_PHY)
296 return to_phy_device(d);
297 }
298
299 return NULL;
264} 300}
265EXPORT_SYMBOL(of_phy_find_device); 301EXPORT_SYMBOL(of_phy_find_device);
266 302
@@ -269,6 +305,7 @@ EXPORT_SYMBOL(of_phy_find_device);
269 * @dev: pointer to net_device claiming the phy 305 * @dev: pointer to net_device claiming the phy
270 * @phy_np: Pointer to device tree node for the PHY 306 * @phy_np: Pointer to device tree node for the PHY
271 * @hndlr: Link state callback for the network device 307 * @hndlr: Link state callback for the network device
308 * @flags: flags to pass to the PHY
272 * @iface: PHY data interface type 309 * @iface: PHY data interface type
273 * 310 *
274 * If successful, returns a pointer to the phy_device with the embedded 311 * If successful, returns a pointer to the phy_device with the embedded
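
The of_phy_find_device() change above only downcasts the found bus device to a phy_device after checking MDIO_DEVICE_FLAG_PHY. The same guard-the-downcast idea, reduced to a runnable user-space sketch with stand-in types that are not the kernel's:

#include <stdio.h>
#include <stddef.h>

/* Simplified analogue of the guarded downcast: a generic MDIO device is
 * only treated as a PHY when its flag says so.
 */
#define FLAG_IS_PHY 0x1

struct mdio_dev { unsigned int flags; };
struct phy_dev  { struct mdio_dev mdio; int addr; };

static struct phy_dev *as_phy(struct mdio_dev *m)
{
	if (!m || !(m->flags & FLAG_IS_PHY))
		return NULL;			/* not a PHY: refuse the cast */
	return (struct phy_dev *)((char *)m - offsetof(struct phy_dev, mdio));
}

int main(void)
{
	struct phy_dev phy = { .mdio = { .flags = FLAG_IS_PHY }, .addr = 3 };
	struct mdio_dev plain = { .flags = 0 };

	printf("%p %p\n", (void *)as_phy(&phy.mdio), (void *)as_phy(&plain));
	return 0;
}
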
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index 75a605426538..d1cdd9c992ac 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -14,6 +14,7 @@ config PCI_DRA7XX
14config PCI_MVEBU 14config PCI_MVEBU
15 bool "Marvell EBU PCIe controller" 15 bool "Marvell EBU PCIe controller"
16 depends on ARCH_MVEBU || ARCH_DOVE 16 depends on ARCH_MVEBU || ARCH_DOVE
17 depends on ARM
17 depends on OF 18 depends on OF
18 19
19config PCIE_DW 20config PCIE_DW
diff --git a/drivers/pci/host/pci-keystone-dw.c b/drivers/pci/host/pci-keystone-dw.c
index ed34c9520a02..6153853ca9c3 100644
--- a/drivers/pci/host/pci-keystone-dw.c
+++ b/drivers/pci/host/pci-keystone-dw.c
@@ -58,11 +58,6 @@
58 58
59#define to_keystone_pcie(x) container_of(x, struct keystone_pcie, pp) 59#define to_keystone_pcie(x) container_of(x, struct keystone_pcie, pp)
60 60
61static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys)
62{
63 return sys->private_data;
64}
65
66static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset, 61static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset,
67 u32 *bit_pos) 62 u32 *bit_pos)
68{ 63{
@@ -108,7 +103,7 @@ static void ks_dw_pcie_msi_irq_ack(struct irq_data *d)
108 struct pcie_port *pp; 103 struct pcie_port *pp;
109 104
110 msi = irq_data_get_msi_desc(d); 105 msi = irq_data_get_msi_desc(d);
111 pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi)); 106 pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
112 ks_pcie = to_keystone_pcie(pp); 107 ks_pcie = to_keystone_pcie(pp);
113 offset = d->irq - irq_linear_revmap(pp->irq_domain, 0); 108 offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
114 update_reg_offset_bit_pos(offset, &reg_offset, &bit_pos); 109 update_reg_offset_bit_pos(offset, &reg_offset, &bit_pos);
@@ -146,7 +141,7 @@ static void ks_dw_pcie_msi_irq_mask(struct irq_data *d)
146 u32 offset; 141 u32 offset;
147 142
148 msi = irq_data_get_msi_desc(d); 143 msi = irq_data_get_msi_desc(d);
149 pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi)); 144 pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
150 ks_pcie = to_keystone_pcie(pp); 145 ks_pcie = to_keystone_pcie(pp);
151 offset = d->irq - irq_linear_revmap(pp->irq_domain, 0); 146 offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
152 147
@@ -167,7 +162,7 @@ static void ks_dw_pcie_msi_irq_unmask(struct irq_data *d)
167 u32 offset; 162 u32 offset;
168 163
169 msi = irq_data_get_msi_desc(d); 164 msi = irq_data_get_msi_desc(d);
170 pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi)); 165 pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
171 ks_pcie = to_keystone_pcie(pp); 166 ks_pcie = to_keystone_pcie(pp);
172 offset = d->irq - irq_linear_revmap(pp->irq_domain, 0); 167 offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
173 168
diff --git a/drivers/pci/host/pci-layerscape.c b/drivers/pci/host/pci-layerscape.c
index 3923bed93c7e..f39961bcf7aa 100644
--- a/drivers/pci/host/pci-layerscape.c
+++ b/drivers/pci/host/pci-layerscape.c
@@ -77,6 +77,16 @@ static void ls_pcie_fix_class(struct ls_pcie *pcie)
77 iowrite16(PCI_CLASS_BRIDGE_PCI, pcie->dbi + PCI_CLASS_DEVICE); 77 iowrite16(PCI_CLASS_BRIDGE_PCI, pcie->dbi + PCI_CLASS_DEVICE);
78} 78}
79 79
80/* Drop MSG TLP except for Vendor MSG */
81static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie)
82{
83 u32 val;
84
85 val = ioread32(pcie->dbi + PCIE_STRFMR1);
86 val &= 0xDFFFFFFF;
87 iowrite32(val, pcie->dbi + PCIE_STRFMR1);
88}
89
80static int ls1021_pcie_link_up(struct pcie_port *pp) 90static int ls1021_pcie_link_up(struct pcie_port *pp)
81{ 91{
82 u32 state; 92 u32 state;
@@ -97,7 +107,7 @@ static int ls1021_pcie_link_up(struct pcie_port *pp)
97static void ls1021_pcie_host_init(struct pcie_port *pp) 107static void ls1021_pcie_host_init(struct pcie_port *pp)
98{ 108{
99 struct ls_pcie *pcie = to_ls_pcie(pp); 109 struct ls_pcie *pcie = to_ls_pcie(pp);
100 u32 val, index[2]; 110 u32 index[2];
101 111
102 pcie->scfg = syscon_regmap_lookup_by_phandle(pp->dev->of_node, 112 pcie->scfg = syscon_regmap_lookup_by_phandle(pp->dev->of_node,
103 "fsl,pcie-scfg"); 113 "fsl,pcie-scfg");
@@ -116,13 +126,7 @@ static void ls1021_pcie_host_init(struct pcie_port *pp)
116 126
117 dw_pcie_setup_rc(pp); 127 dw_pcie_setup_rc(pp);
118 128
119 /* 129 ls_pcie_drop_msg_tlp(pcie);
120 * LS1021A Workaround for internal TKT228622
121 * to fix the INTx hang issue
122 */
123 val = ioread32(pcie->dbi + PCIE_STRFMR1);
124 val &= 0xffff;
125 iowrite32(val, pcie->dbi + PCIE_STRFMR1);
126} 130}
127 131
128static int ls_pcie_link_up(struct pcie_port *pp) 132static int ls_pcie_link_up(struct pcie_port *pp)
@@ -147,6 +151,7 @@ static void ls_pcie_host_init(struct pcie_port *pp)
147 iowrite32(1, pcie->dbi + PCIE_DBI_RO_WR_EN); 151 iowrite32(1, pcie->dbi + PCIE_DBI_RO_WR_EN);
148 ls_pcie_fix_class(pcie); 152 ls_pcie_fix_class(pcie);
149 ls_pcie_clear_multifunction(pcie); 153 ls_pcie_clear_multifunction(pcie);
154 ls_pcie_drop_msg_tlp(pcie);
150 iowrite32(0, pcie->dbi + PCIE_DBI_RO_WR_EN); 155 iowrite32(0, pcie->dbi + PCIE_DBI_RO_WR_EN);
151} 156}
152 157
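
The new ls_pcie_drop_msg_tlp() helper replaces the old val &= 0xffff workaround, which cleared the entire upper half of PCIE_STRFMR1, with a mask (0xDFFFFFFF) that clears only bit 29, and both host-init paths now share it. A tiny compilable sketch of that read-modify-write, with no real MMIO involved:

#include <stdint.h>
#include <stdio.h>

/* Clear one bit of a 32-bit control word and leave the rest intact;
 * 0xDFFFFFFF is ~(1u << 29).  Purely illustrative, no register access.
 */
static uint32_t drop_msg_tlp(uint32_t strfmr1)
{
	return strfmr1 & ~(1u << 29);
}

int main(void)
{
	uint32_t before = 0xFFFFFFFFu;

	printf("0x%08x -> 0x%08x\n", (unsigned)before,
	       (unsigned)drop_msg_tlp(before));
	return 0;
}
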
diff --git a/drivers/pci/host/pcie-iproc.c b/drivers/pci/host/pcie-iproc.c
index 5816bceddb65..a576aeeb22da 100644
--- a/drivers/pci/host/pcie-iproc.c
+++ b/drivers/pci/host/pcie-iproc.c
@@ -64,7 +64,6 @@
64#define OARR_SIZE_CFG BIT(OARR_SIZE_CFG_SHIFT) 64#define OARR_SIZE_CFG BIT(OARR_SIZE_CFG_SHIFT)
65 65
66#define MAX_NUM_OB_WINDOWS 2 66#define MAX_NUM_OB_WINDOWS 2
67#define MAX_NUM_PAXC_PF 4
68 67
69#define IPROC_PCIE_REG_INVALID 0xffff 68#define IPROC_PCIE_REG_INVALID 0xffff
70 69
@@ -170,20 +169,6 @@ static inline void iproc_pcie_ob_write(struct iproc_pcie *pcie,
170 writel(val, pcie->base + offset + (window * 8)); 169 writel(val, pcie->base + offset + (window * 8));
171} 170}
172 171
173static inline bool iproc_pcie_device_is_valid(struct iproc_pcie *pcie,
174 unsigned int slot,
175 unsigned int fn)
176{
177 if (slot > 0)
178 return false;
179
180 /* PAXC can only support limited number of functions */
181 if (pcie->type == IPROC_PCIE_PAXC && fn >= MAX_NUM_PAXC_PF)
182 return false;
183
184 return true;
185}
186
187/** 172/**
188 * Note access to the configuration registers are protected at the higher layer 173 * Note access to the configuration registers are protected at the higher layer
189 * by 'pci_lock' in drivers/pci/access.c 174 * by 'pci_lock' in drivers/pci/access.c
@@ -199,11 +184,11 @@ static void __iomem *iproc_pcie_map_cfg_bus(struct pci_bus *bus,
199 u32 val; 184 u32 val;
200 u16 offset; 185 u16 offset;
201 186
202 if (!iproc_pcie_device_is_valid(pcie, slot, fn))
203 return NULL;
204
205 /* root complex access */ 187 /* root complex access */
206 if (busno == 0) { 188 if (busno == 0) {
189 if (slot > 0 || fn > 0)
190 return NULL;
191
207 iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_IND_ADDR, 192 iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_IND_ADDR,
208 where & CFG_IND_ADDR_MASK); 193 where & CFG_IND_ADDR_MASK);
209 offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_IND_DATA); 194 offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_IND_DATA);
@@ -213,6 +198,14 @@ static void __iomem *iproc_pcie_map_cfg_bus(struct pci_bus *bus,
213 return (pcie->base + offset); 198 return (pcie->base + offset);
214 } 199 }
215 200
201 /*
202 * PAXC is connected to an internally emulated EP within the SoC. It
203 * allows only one device.
204 */
205 if (pcie->type == IPROC_PCIE_PAXC)
206 if (slot > 0)
207 return NULL;
208
216 /* EP device access */ 209 /* EP device access */
217 val = (busno << CFG_ADDR_BUS_NUM_SHIFT) | 210 val = (busno << CFG_ADDR_BUS_NUM_SHIFT) |
218 (slot << CFG_ADDR_DEV_NUM_SHIFT) | 211 (slot << CFG_ADDR_DEV_NUM_SHIFT) |
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 5f2fda12e006..fa49f9143b80 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -953,8 +953,10 @@ int acpiphp_enable_slot(struct acpiphp_slot *slot)
953{ 953{
954 pci_lock_rescan_remove(); 954 pci_lock_rescan_remove();
955 955
956 if (slot->flags & SLOT_IS_GOING_AWAY) 956 if (slot->flags & SLOT_IS_GOING_AWAY) {
957 pci_unlock_rescan_remove();
957 return -ENODEV; 958 return -ENODEV;
959 }
958 960
959 /* configure all functions */ 961 /* configure all functions */
960 if (!(slot->flags & SLOT_ENABLED)) 962 if (!(slot->flags & SLOT_ENABLED))
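
The acpiphp fix above adds the unlock that the early -ENODEV return was missing. The general rule, shown as a small pthread-based stand-in (the mutex and flag are not kernel objects): every exit path taken after acquiring a lock must release it.

#include <pthread.h>
#include <errno.h>
#include <stdio.h>

/* User-space illustration of the lock-balance fix: the early-return
 * path now releases the lock it took, so later callers do not block
 * forever.
 */
static pthread_mutex_t rescan_lock = PTHREAD_MUTEX_INITIALIZER;

static int enable_slot(int going_away)
{
	pthread_mutex_lock(&rescan_lock);

	if (going_away) {
		pthread_mutex_unlock(&rescan_lock);	/* the added unlock */
		return -ENODEV;
	}

	/* ... configure all functions ... */
	pthread_mutex_unlock(&rescan_lock);
	return 0;
}

int main(void)
{
	printf("%d %d\n", enable_slot(1), enable_slot(0));
	return 0;
}
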
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 602eb4223510..f89db3af0607 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -4772,8 +4772,10 @@ int pci_get_new_domain_nr(void)
4772void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent) 4772void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent)
4773{ 4773{
4774 static int use_dt_domains = -1; 4774 static int use_dt_domains = -1;
4775 int domain = of_get_pci_domain_nr(parent->of_node); 4775 int domain = -1;
4776 4776
4777 if (parent)
4778 domain = of_get_pci_domain_nr(parent->of_node);
4777 /* 4779 /*
4778 * Check DT domain and use_dt_domains values. 4780 * Check DT domain and use_dt_domains values.
4779 * 4781 *
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 0bf82a20a0fb..48d21e0edd56 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -262,7 +262,6 @@ static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev)
262 rpc->rpd = dev; 262 rpc->rpd = dev;
263 INIT_WORK(&rpc->dpc_handler, aer_isr); 263 INIT_WORK(&rpc->dpc_handler, aer_isr);
264 mutex_init(&rpc->rpc_mutex); 264 mutex_init(&rpc->rpc_mutex);
265 init_waitqueue_head(&rpc->wait_release);
266 265
267 /* Use PCIe bus function to store rpc into PCIe device */ 266 /* Use PCIe bus function to store rpc into PCIe device */
268 set_service_data(dev, rpc); 267 set_service_data(dev, rpc);
@@ -285,8 +284,7 @@ static void aer_remove(struct pcie_device *dev)
285 if (rpc->isr) 284 if (rpc->isr)
286 free_irq(dev->irq, dev); 285 free_irq(dev->irq, dev);
287 286
288 wait_event(rpc->wait_release, rpc->prod_idx == rpc->cons_idx); 287 flush_work(&rpc->dpc_handler);
289
290 aer_disable_rootport(rpc); 288 aer_disable_rootport(rpc);
291 kfree(rpc); 289 kfree(rpc);
292 set_service_data(dev, NULL); 290 set_service_data(dev, NULL);
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index 84420b7c9456..945c939a86c5 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -72,7 +72,6 @@ struct aer_rpc {
72 * recovery on the same 72 * recovery on the same
73 * root port hierarchy 73 * root port hierarchy
74 */ 74 */
75 wait_queue_head_t wait_release;
76}; 75};
77 76
78struct aer_broadcast_data { 77struct aer_broadcast_data {
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 712392504ed9..521e39c1b66d 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -811,8 +811,6 @@ void aer_isr(struct work_struct *work)
811 while (get_e_source(rpc, &e_src)) 811 while (get_e_source(rpc, &e_src))
812 aer_isr_one_error(p_device, &e_src); 812 aer_isr_one_error(p_device, &e_src);
813 mutex_unlock(&rpc->rpc_mutex); 813 mutex_unlock(&rpc->rpc_mutex);
814
815 wake_up(&rpc->wait_release);
816} 814}
817 815
818/** 816/**
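
The AER hunks above drop the hand-rolled wait_release queue and simply flush the work item before freeing the rpc. The idea, reduced to a runnable user-space analogue where pthread_join() plays the role of flush_work(); the rpc structure is a stand-in.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Wait for the asynchronous handler to finish before freeing its state,
 * instead of tracking producer/consumer indices on a waitqueue.
 */
struct rpc { int pending; };

static void *isr_work(void *arg)
{
	struct rpc *rpc = arg;

	rpc->pending = 0;		/* drain queued events */
	return NULL;
}

int main(void)
{
	struct rpc *rpc = malloc(sizeof(*rpc));
	pthread_t worker;

	rpc->pending = 1;
	pthread_create(&worker, NULL, isr_work, rpc);
	pthread_join(&worker, NULL);	/* the flush_work() step */
	free(rpc);			/* safe: the handler can no longer run */
	return 0;
}
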
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index c777b97207d5..5f70fee59a94 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -53,7 +53,7 @@ struct pcifront_device {
53}; 53};
54 54
55struct pcifront_sd { 55struct pcifront_sd {
56 int domain; 56 struct pci_sysdata sd;
57 struct pcifront_device *pdev; 57 struct pcifront_device *pdev;
58}; 58};
59 59
@@ -67,7 +67,9 @@ static inline void pcifront_init_sd(struct pcifront_sd *sd,
67 unsigned int domain, unsigned int bus, 67 unsigned int domain, unsigned int bus,
68 struct pcifront_device *pdev) 68 struct pcifront_device *pdev)
69{ 69{
70 sd->domain = domain; 70 /* Because we do not expose that information via XenBus. */
71 sd->sd.node = first_online_node;
72 sd->sd.domain = domain;
71 sd->pdev = pdev; 73 sd->pdev = pdev;
72} 74}
73 75
@@ -468,8 +470,8 @@ static int pcifront_scan_root(struct pcifront_device *pdev,
468 dev_info(&pdev->xdev->dev, "Creating PCI Frontend Bus %04x:%02x\n", 470 dev_info(&pdev->xdev->dev, "Creating PCI Frontend Bus %04x:%02x\n",
469 domain, bus); 471 domain, bus);
470 472
471 bus_entry = kmalloc(sizeof(*bus_entry), GFP_KERNEL); 473 bus_entry = kzalloc(sizeof(*bus_entry), GFP_KERNEL);
472 sd = kmalloc(sizeof(*sd), GFP_KERNEL); 474 sd = kzalloc(sizeof(*sd), GFP_KERNEL);
473 if (!bus_entry || !sd) { 475 if (!bus_entry || !sd) {
474 err = -ENOMEM; 476 err = -ENOMEM;
475 goto err_out; 477 goto err_out;
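
xen-pcifront now embeds a struct pci_sysdata at the start of pcifront_sd, so code that only understands the generic sysdata type keeps working. The embedding trick in isolation, with simplified stand-in types:

#include <stdio.h>

/* Placing the generic descriptor first lets generic code treat a
 * pointer to the larger structure as a pointer to the smaller one.
 */
struct sysdata  { int domain; int node; };
struct front_sd { struct sysdata sd; void *pdev; };	/* sd must stay first */

static int generic_domain(void *sysdata)
{
	return ((struct sysdata *)sysdata)->domain;
}

int main(void)
{
	struct front_sd fsd = { .sd = { .domain = 4, .node = 0 }, .pdev = NULL };

	printf("domain %d\n", generic_domain(&fsd));
	return 0;
}
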
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index e7e117d5dbbe..0124d17bd9fe 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -224,6 +224,7 @@ config PHY_MT65XX_USB3
224 224
225config PHY_HI6220_USB 225config PHY_HI6220_USB
226 tristate "hi6220 USB PHY support" 226 tristate "hi6220 USB PHY support"
227 depends on (ARCH_HISI && ARM64) || COMPILE_TEST
227 select GENERIC_PHY 228 select GENERIC_PHY
228 select MFD_SYSCON 229 select MFD_SYSCON
229 help 230 help
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index 8c7f27db6ad3..e7e574dc667a 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -275,20 +275,21 @@ EXPORT_SYMBOL_GPL(phy_exit);
275 275
276int phy_power_on(struct phy *phy) 276int phy_power_on(struct phy *phy)
277{ 277{
278 int ret; 278 int ret = 0;
279 279
280 if (!phy) 280 if (!phy)
281 return 0; 281 goto out;
282 282
283 if (phy->pwr) { 283 if (phy->pwr) {
284 ret = regulator_enable(phy->pwr); 284 ret = regulator_enable(phy->pwr);
285 if (ret) 285 if (ret)
286 return ret; 286 goto out;
287 } 287 }
288 288
289 ret = phy_pm_runtime_get_sync(phy); 289 ret = phy_pm_runtime_get_sync(phy);
290 if (ret < 0 && ret != -ENOTSUPP) 290 if (ret < 0 && ret != -ENOTSUPP)
291 return ret; 291 goto err_pm_sync;
292
292 ret = 0; /* Override possible ret == -ENOTSUPP */ 293 ret = 0; /* Override possible ret == -ENOTSUPP */
293 294
294 mutex_lock(&phy->mutex); 295 mutex_lock(&phy->mutex);
@@ -296,19 +297,20 @@ int phy_power_on(struct phy *phy)
296 ret = phy->ops->power_on(phy); 297 ret = phy->ops->power_on(phy);
297 if (ret < 0) { 298 if (ret < 0) {
298 dev_err(&phy->dev, "phy poweron failed --> %d\n", ret); 299 dev_err(&phy->dev, "phy poweron failed --> %d\n", ret);
299 goto out; 300 goto err_pwr_on;
300 } 301 }
301 } 302 }
302 ++phy->power_count; 303 ++phy->power_count;
303 mutex_unlock(&phy->mutex); 304 mutex_unlock(&phy->mutex);
304 return 0; 305 return 0;
305 306
306out: 307err_pwr_on:
307 mutex_unlock(&phy->mutex); 308 mutex_unlock(&phy->mutex);
308 phy_pm_runtime_put_sync(phy); 309 phy_pm_runtime_put_sync(phy);
310err_pm_sync:
309 if (phy->pwr) 311 if (phy->pwr)
310 regulator_disable(phy->pwr); 312 regulator_disable(phy->pwr);
311 313out:
312 return ret; 314 return ret;
313} 315}
314EXPORT_SYMBOL_GPL(phy_power_on); 316EXPORT_SYMBOL_GPL(phy_power_on);
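
phy_power_on() above is reworked into the usual goto-unwind shape: each failure point jumps to a label that undoes only what was already acquired, in reverse order. A stripped-down, compilable skeleton of that shape; the resource functions are placeholders, not the PHY API.

#include <stdio.h>

/* Two-step acquire with goto-based unwinding; grab_b() is made to fail
 * so the error path is exercised.
 */
static int grab_a(void) { return 0; }
static void drop_a(void) { puts("drop a"); }
static int grab_b(void) { return -1; }		/* simulated failure */

static int power_on(void)
{
	int ret;

	ret = grab_a();
	if (ret)
		goto out;

	ret = grab_b();
	if (ret)
		goto err_b;

	return 0;

err_b:
	drop_a();	/* release only what was already acquired */
out:
	return ret;
}

int main(void)
{
	printf("power_on() = %d\n", power_on());
	return 0;
}
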
diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c
index 4a3fc6e59f8e..840f3eae428b 100644
--- a/drivers/phy/phy-twl4030-usb.c
+++ b/drivers/phy/phy-twl4030-usb.c
@@ -715,6 +715,7 @@ static int twl4030_usb_probe(struct platform_device *pdev)
715 pm_runtime_use_autosuspend(&pdev->dev); 715 pm_runtime_use_autosuspend(&pdev->dev);
716 pm_runtime_set_autosuspend_delay(&pdev->dev, 2000); 716 pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
717 pm_runtime_enable(&pdev->dev); 717 pm_runtime_enable(&pdev->dev);
718 pm_runtime_get_sync(&pdev->dev);
718 719
719 /* Our job is to use irqs and status from the power module 720 /* Our job is to use irqs and status from the power module
720 * to keep the transceiver disabled when nothing's connected. 721 * to keep the transceiver disabled when nothing's connected.
@@ -750,6 +751,7 @@ static int twl4030_usb_remove(struct platform_device *pdev)
750 struct twl4030_usb *twl = platform_get_drvdata(pdev); 751 struct twl4030_usb *twl = platform_get_drvdata(pdev);
751 int val; 752 int val;
752 753
754 usb_remove_phy(&twl->phy);
753 pm_runtime_get_sync(twl->dev); 755 pm_runtime_get_sync(twl->dev);
754 cancel_delayed_work(&twl->id_workaround_work); 756 cancel_delayed_work(&twl->id_workaround_work);
755 device_remove_file(twl->dev, &dev_attr_vbus); 757 device_remove_file(twl->dev, &dev_attr_vbus);
@@ -757,6 +759,13 @@ static int twl4030_usb_remove(struct platform_device *pdev)
757 /* set transceiver mode to power on defaults */ 759 /* set transceiver mode to power on defaults */
758 twl4030_usb_set_mode(twl, -1); 760 twl4030_usb_set_mode(twl, -1);
759 761
762 /* idle ulpi before powering off */
763 if (cable_present(twl->linkstat))
764 pm_runtime_put_noidle(twl->dev);
765 pm_runtime_mark_last_busy(twl->dev);
766 pm_runtime_put_sync_suspend(twl->dev);
767 pm_runtime_disable(twl->dev);
768
760 /* autogate 60MHz ULPI clock, 769 /* autogate 60MHz ULPI clock,
761 * clear dpll clock request for i2c access, 770 * clear dpll clock request for i2c access,
762 * disable 32KHz 771 * disable 32KHz
@@ -771,11 +780,6 @@ static int twl4030_usb_remove(struct platform_device *pdev)
771 /* disable complete OTG block */ 780 /* disable complete OTG block */
772 twl4030_usb_clear_bits(twl, POWER_CTRL, POWER_CTRL_OTG_ENAB); 781 twl4030_usb_clear_bits(twl, POWER_CTRL, POWER_CTRL_OTG_ENAB);
773 782
774 if (cable_present(twl->linkstat))
775 pm_runtime_put_noidle(twl->dev);
776 pm_runtime_mark_last_busy(twl->dev);
777 pm_runtime_put(twl->dev);
778
779 return 0; 783 return 0;
780} 784}
781 785
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index 16d48a4ed225..e96e86d2e745 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -347,6 +347,7 @@ static int mtk_pconf_parse_conf(struct pinctrl_dev *pctldev,
347 ret = mtk_pconf_set_pull_select(pctl, pin, true, false, arg); 347 ret = mtk_pconf_set_pull_select(pctl, pin, true, false, arg);
348 break; 348 break;
349 case PIN_CONFIG_INPUT_ENABLE: 349 case PIN_CONFIG_INPUT_ENABLE:
350 mtk_pmx_gpio_set_direction(pctldev, NULL, pin, true);
350 ret = mtk_pconf_set_ies_smt(pctl, pin, arg, param); 351 ret = mtk_pconf_set_ies_smt(pctl, pin, arg, param);
351 break; 352 break;
352 case PIN_CONFIG_OUTPUT: 353 case PIN_CONFIG_OUTPUT:
@@ -354,6 +355,7 @@ static int mtk_pconf_parse_conf(struct pinctrl_dev *pctldev,
354 ret = mtk_pmx_gpio_set_direction(pctldev, NULL, pin, false); 355 ret = mtk_pmx_gpio_set_direction(pctldev, NULL, pin, false);
355 break; 356 break;
356 case PIN_CONFIG_INPUT_SCHMITT_ENABLE: 357 case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
358 mtk_pmx_gpio_set_direction(pctldev, NULL, pin, true);
357 ret = mtk_pconf_set_ies_smt(pctl, pin, arg, param); 359 ret = mtk_pconf_set_ies_smt(pctl, pin, arg, param);
358 break; 360 break;
359 case PIN_CONFIG_DRIVE_STRENGTH: 361 case PIN_CONFIG_DRIVE_STRENGTH:
diff --git a/drivers/pinctrl/mvebu/pinctrl-mvebu.c b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
index e4d473811bb3..3ef798fac81b 100644
--- a/drivers/pinctrl/mvebu/pinctrl-mvebu.c
+++ b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
@@ -666,16 +666,19 @@ int mvebu_pinctrl_probe(struct platform_device *pdev)
666 struct mvebu_mpp_ctrl_setting *set = &mode->settings[0]; 666 struct mvebu_mpp_ctrl_setting *set = &mode->settings[0];
667 struct mvebu_pinctrl_group *grp; 667 struct mvebu_pinctrl_group *grp;
668 unsigned num_settings; 668 unsigned num_settings;
669 unsigned supp_settings;
669 670
670 for (num_settings = 0; ; set++) { 671 for (num_settings = 0, supp_settings = 0; ; set++) {
671 if (!set->name) 672 if (!set->name)
672 break; 673 break;
673 674
675 num_settings++;
676
674 /* skip unsupported settings for this variant */ 677 /* skip unsupported settings for this variant */
675 if (pctl->variant && !(pctl->variant & set->variant)) 678 if (pctl->variant && !(pctl->variant & set->variant))
676 continue; 679 continue;
677 680
678 num_settings++; 681 supp_settings++;
679 682
680 /* find gpio/gpo/gpi settings */ 683 /* find gpio/gpo/gpi settings */
681 if (strcmp(set->name, "gpio") == 0) 684 if (strcmp(set->name, "gpio") == 0)
@@ -688,7 +691,7 @@ int mvebu_pinctrl_probe(struct platform_device *pdev)
688 } 691 }
689 692
690 /* skip modes with no settings for this variant */ 693 /* skip modes with no settings for this variant */
691 if (!num_settings) 694 if (!supp_settings)
692 continue; 695 continue;
693 696
694 grp = mvebu_pinctrl_find_group_by_pid(pctl, mode->pid); 697 grp = mvebu_pinctrl_find_group_by_pid(pctl, mode->pid);
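
The mvebu change above separates "how many settings the table has" from "how many this variant supports", so skipping an unsupported setting no longer shortens the count used elsewhere. The counting pattern on its own, with a table invented for the example:

#include <stdio.h>

/* Walk every table entry to learn its true length while counting the
 * variant-supported entries separately; only the latter decides whether
 * the mode is skipped.
 */
struct setting { const char *name; unsigned int variant; };

int main(void)
{
	const struct setting settings[] = {
		{ "gpio", 0x1 }, { "uart", 0x2 }, { NULL, 0 },
	};
	unsigned int this_variant = 0x1;
	unsigned int num_settings = 0, supp_settings = 0;

	for (const struct setting *s = settings; s->name; s++) {
		num_settings++;
		if (!(this_variant & s->variant))
			continue;		/* unsupported on this variant */
		supp_settings++;
	}
	printf("total %u, supported %u\n", num_settings, supp_settings);
	return 0;
}
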
diff --git a/drivers/pinctrl/nomadik/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c
index 085e60106ec2..1f7469c9857d 100644
--- a/drivers/pinctrl/nomadik/pinctrl-abx500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c
@@ -191,6 +191,7 @@ static void abx500_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
191 dev_err(pct->dev, "%s write failed (%d)\n", __func__, ret); 191 dev_err(pct->dev, "%s write failed (%d)\n", __func__, ret);
192} 192}
193 193
194#ifdef CONFIG_DEBUG_FS
194static int abx500_get_pull_updown(struct abx500_pinctrl *pct, int offset, 195static int abx500_get_pull_updown(struct abx500_pinctrl *pct, int offset,
195 enum abx500_gpio_pull_updown *pull_updown) 196 enum abx500_gpio_pull_updown *pull_updown)
196{ 197{
@@ -226,6 +227,7 @@ out:
226 227
227 return ret; 228 return ret;
228} 229}
230#endif
229 231
230static int abx500_set_pull_updown(struct abx500_pinctrl *pct, 232static int abx500_set_pull_updown(struct abx500_pinctrl *pct,
231 int offset, enum abx500_gpio_pull_updown val) 233 int offset, enum abx500_gpio_pull_updown val)
@@ -468,6 +470,7 @@ out:
468 return ret; 470 return ret;
469} 471}
470 472
473#ifdef CONFIG_DEBUG_FS
471static int abx500_get_mode(struct pinctrl_dev *pctldev, struct gpio_chip *chip, 474static int abx500_get_mode(struct pinctrl_dev *pctldev, struct gpio_chip *chip,
472 unsigned gpio) 475 unsigned gpio)
473{ 476{
@@ -553,8 +556,6 @@ out:
553 return ret; 556 return ret;
554} 557}
555 558
556#ifdef CONFIG_DEBUG_FS
557
558#include <linux/seq_file.h> 559#include <linux/seq_file.h>
559 560
560static void abx500_gpio_dbg_show_one(struct seq_file *s, 561static void abx500_gpio_dbg_show_one(struct seq_file *s,
diff --git a/drivers/pinctrl/pxa/pinctrl-pxa2xx.c b/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
index d90e205cf809..216f227c6009 100644
--- a/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
+++ b/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
@@ -426,6 +426,7 @@ int pxa2xx_pinctrl_init(struct platform_device *pdev,
426 426
427 return 0; 427 return 0;
428} 428}
429EXPORT_SYMBOL(pxa2xx_pinctrl_init);
429 430
430int pxa2xx_pinctrl_exit(struct platform_device *pdev) 431int pxa2xx_pinctrl_exit(struct platform_device *pdev)
431{ 432{
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index f67b1e958589..5cc97f85db02 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -514,25 +514,35 @@ static const struct pinconf_ops samsung_pinconf_ops = {
514 .pin_config_group_set = samsung_pinconf_group_set, 514 .pin_config_group_set = samsung_pinconf_group_set,
515}; 515};
516 516
517/* gpiolib gpio_set callback function */ 517/*
518static void samsung_gpio_set(struct gpio_chip *gc, unsigned offset, int value) 518 * The samsung_gpio_set_value() should be called with "bank->slock" held
519 * to avoid a race condition.
520 */
521static void samsung_gpio_set_value(struct gpio_chip *gc,
522 unsigned offset, int value)
519{ 523{
520 struct samsung_pin_bank *bank = gpiochip_get_data(gc); 524 struct samsung_pin_bank *bank = gpiochip_get_data(gc);
521 const struct samsung_pin_bank_type *type = bank->type; 525 const struct samsung_pin_bank_type *type = bank->type;
522 unsigned long flags;
523 void __iomem *reg; 526 void __iomem *reg;
524 u32 data; 527 u32 data;
525 528
526 reg = bank->drvdata->virt_base + bank->pctl_offset; 529 reg = bank->drvdata->virt_base + bank->pctl_offset;
527 530
528 spin_lock_irqsave(&bank->slock, flags);
529
530 data = readl(reg + type->reg_offset[PINCFG_TYPE_DAT]); 531 data = readl(reg + type->reg_offset[PINCFG_TYPE_DAT]);
531 data &= ~(1 << offset); 532 data &= ~(1 << offset);
532 if (value) 533 if (value)
533 data |= 1 << offset; 534 data |= 1 << offset;
534 writel(data, reg + type->reg_offset[PINCFG_TYPE_DAT]); 535 writel(data, reg + type->reg_offset[PINCFG_TYPE_DAT]);
536}
537
538/* gpiolib gpio_set callback function */
539static void samsung_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
540{
541 struct samsung_pin_bank *bank = gpiochip_get_data(gc);
542 unsigned long flags;
535 543
544 spin_lock_irqsave(&bank->slock, flags);
545 samsung_gpio_set_value(gc, offset, value);
536 spin_unlock_irqrestore(&bank->slock, flags); 546 spin_unlock_irqrestore(&bank->slock, flags);
537} 547}
538 548
@@ -553,6 +563,8 @@ static int samsung_gpio_get(struct gpio_chip *gc, unsigned offset)
553} 563}
554 564
555/* 565/*
566 * The samsung_gpio_set_direction() should be called with "bank->slock" held
567 * to avoid a race condition.
556 * The calls to gpio_direction_output() and gpio_direction_input() 568 * The calls to gpio_direction_output() and gpio_direction_input()
557 * leads to this function call. 569 * leads to this function call.
558 */ 570 */
@@ -564,7 +576,6 @@ static int samsung_gpio_set_direction(struct gpio_chip *gc,
564 struct samsung_pinctrl_drv_data *drvdata; 576 struct samsung_pinctrl_drv_data *drvdata;
565 void __iomem *reg; 577 void __iomem *reg;
566 u32 data, mask, shift; 578 u32 data, mask, shift;
567 unsigned long flags;
568 579
569 bank = gpiochip_get_data(gc); 580 bank = gpiochip_get_data(gc);
570 type = bank->type; 581 type = bank->type;
@@ -581,31 +592,42 @@ static int samsung_gpio_set_direction(struct gpio_chip *gc,
581 reg += 4; 592 reg += 4;
582 } 593 }
583 594
584 spin_lock_irqsave(&bank->slock, flags);
585
586 data = readl(reg); 595 data = readl(reg);
587 data &= ~(mask << shift); 596 data &= ~(mask << shift);
588 if (!input) 597 if (!input)
589 data |= FUNC_OUTPUT << shift; 598 data |= FUNC_OUTPUT << shift;
590 writel(data, reg); 599 writel(data, reg);
591 600
592 spin_unlock_irqrestore(&bank->slock, flags);
593
594 return 0; 601 return 0;
595} 602}
596 603
597/* gpiolib gpio_direction_input callback function. */ 604/* gpiolib gpio_direction_input callback function. */
598static int samsung_gpio_direction_input(struct gpio_chip *gc, unsigned offset) 605static int samsung_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
599{ 606{
600 return samsung_gpio_set_direction(gc, offset, true); 607 struct samsung_pin_bank *bank = gpiochip_get_data(gc);
608 unsigned long flags;
609 int ret;
610
611 spin_lock_irqsave(&bank->slock, flags);
612 ret = samsung_gpio_set_direction(gc, offset, true);
613 spin_unlock_irqrestore(&bank->slock, flags);
614 return ret;
601} 615}
602 616
603/* gpiolib gpio_direction_output callback function. */ 617/* gpiolib gpio_direction_output callback function. */
604static int samsung_gpio_direction_output(struct gpio_chip *gc, unsigned offset, 618static int samsung_gpio_direction_output(struct gpio_chip *gc, unsigned offset,
605 int value) 619 int value)
606{ 620{
607 samsung_gpio_set(gc, offset, value); 621 struct samsung_pin_bank *bank = gpiochip_get_data(gc);
608 return samsung_gpio_set_direction(gc, offset, false); 622 unsigned long flags;
623 int ret;
624
625 spin_lock_irqsave(&bank->slock, flags);
626 samsung_gpio_set_value(gc, offset, value);
627 ret = samsung_gpio_set_direction(gc, offset, false);
628 spin_unlock_irqrestore(&bank->slock, flags);
629
630 return ret;
609} 631}
610 632
611/* 633/*
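
The samsung pinctrl rework splits the register writes into helpers that expect bank->slock to be held, so gpio_direction_output() can set the value and the direction inside one critical section. A small pthread sketch of that locked-helper pattern; the bank layout is invented for the example.

#include <pthread.h>
#include <stdio.h>

/* Raw helpers assume the caller holds the lock; the combined operation
 * takes it once for both steps instead of locking twice.
 */
struct bank {
	pthread_mutex_t lock;
	unsigned int dat, dir;
};

/* must be called with bank->lock held */
static void set_value_locked(struct bank *b, unsigned int off, int val)
{
	if (val)
		b->dat |= 1u << off;
	else
		b->dat &= ~(1u << off);
}

/* must be called with bank->lock held */
static void set_output_locked(struct bank *b, unsigned int off)
{
	b->dir |= 1u << off;
}

static void direction_output(struct bank *b, unsigned int off, int val)
{
	pthread_mutex_lock(&b->lock);
	set_value_locked(b, off, val);		/* one lock, two steps */
	set_output_locked(b, off);
	pthread_mutex_unlock(&b->lock);
}

int main(void)
{
	struct bank b = { .lock = PTHREAD_MUTEX_INITIALIZER, .dat = 0, .dir = 0 };

	direction_output(&b, 3, 1);
	printf("dat=0x%x dir=0x%x\n", b.dat, b.dir);
	return 0;
}
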
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c
index 77d4cf047cee..11760bbe9d51 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c
@@ -492,6 +492,7 @@ static const struct sunxi_pinctrl_desc sun8i_h3_pinctrl_data = {
492 .pins = sun8i_h3_pins, 492 .pins = sun8i_h3_pins,
493 .npins = ARRAY_SIZE(sun8i_h3_pins), 493 .npins = ARRAY_SIZE(sun8i_h3_pins),
494 .irq_banks = 2, 494 .irq_banks = 2,
495 .irq_read_needs_mux = true
495}; 496};
496 497
497static int sun8i_h3_pinctrl_probe(struct platform_device *pdev) 498static int sun8i_h3_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
index 20f0ad9bb9f3..e20f23e04c24 100644
--- a/drivers/platform/x86/intel-hid.c
+++ b/drivers/platform/x86/intel-hid.c
@@ -41,8 +41,7 @@ static const struct key_entry intel_hid_keymap[] = {
41 { KE_KEY, 4, { KEY_HOME } }, 41 { KE_KEY, 4, { KEY_HOME } },
42 { KE_KEY, 5, { KEY_END } }, 42 { KE_KEY, 5, { KEY_END } },
43 { KE_KEY, 6, { KEY_PAGEUP } }, 43 { KE_KEY, 6, { KEY_PAGEUP } },
44 { KE_KEY, 4, { KEY_PAGEDOWN } }, 44 { KE_KEY, 7, { KEY_PAGEDOWN } },
45 { KE_KEY, 4, { KEY_HOME } },
46 { KE_KEY, 8, { KEY_RFKILL } }, 45 { KE_KEY, 8, { KEY_RFKILL } },
47 { KE_KEY, 9, { KEY_POWER } }, 46 { KE_KEY, 9, { KEY_POWER } },
48 { KE_KEY, 11, { KEY_SLEEP } }, 47 { KE_KEY, 11, { KEY_SLEEP } },
diff --git a/drivers/platform/x86/intel_scu_ipcutil.c b/drivers/platform/x86/intel_scu_ipcutil.c
index 02bc5a6343c3..aa454241489c 100644
--- a/drivers/platform/x86/intel_scu_ipcutil.c
+++ b/drivers/platform/x86/intel_scu_ipcutil.c
@@ -49,7 +49,7 @@ struct scu_ipc_data {
49 49
50static int scu_reg_access(u32 cmd, struct scu_ipc_data *data) 50static int scu_reg_access(u32 cmd, struct scu_ipc_data *data)
51{ 51{
52 int count = data->count; 52 unsigned int count = data->count;
53 53
54 if (count == 0 || count == 3 || count > 4) 54 if (count == 0 || count == 3 || count > 4)
55 return -EINVAL; 55 return -EINVAL;
diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c
index f700723ca5d6..d28e3ab9479c 100644
--- a/drivers/pnp/quirks.c
+++ b/drivers/pnp/quirks.c
@@ -342,6 +342,7 @@ static void quirk_amd_mmconfig_area(struct pnp_dev *dev)
342/* Device IDs of parts that have 32KB MCH space */ 342/* Device IDs of parts that have 32KB MCH space */
343static const unsigned int mch_quirk_devices[] = { 343static const unsigned int mch_quirk_devices[] = {
344 0x0154, /* Ivy Bridge */ 344 0x0154, /* Ivy Bridge */
345 0x0a04, /* Haswell-ULT */
345 0x0c00, /* Haswell */ 346 0x0c00, /* Haswell */
346 0x1604, /* Broadwell */ 347 0x1604, /* Broadwell */
347}; 348};
diff --git a/drivers/power/bq27xxx_battery_i2c.c b/drivers/power/bq27xxx_battery_i2c.c
index 9429e66be096..8eafc6f0df88 100644
--- a/drivers/power/bq27xxx_battery_i2c.c
+++ b/drivers/power/bq27xxx_battery_i2c.c
@@ -21,6 +21,9 @@
21 21
22#include <linux/power/bq27xxx_battery.h> 22#include <linux/power/bq27xxx_battery.h>
23 23
24static DEFINE_IDR(battery_id);
25static DEFINE_MUTEX(battery_mutex);
26
24static irqreturn_t bq27xxx_battery_irq_handler_thread(int irq, void *data) 27static irqreturn_t bq27xxx_battery_irq_handler_thread(int irq, void *data)
25{ 28{
26 struct bq27xxx_device_info *di = data; 29 struct bq27xxx_device_info *di = data;
@@ -70,19 +73,33 @@ static int bq27xxx_battery_i2c_probe(struct i2c_client *client,
70{ 73{
71 struct bq27xxx_device_info *di; 74 struct bq27xxx_device_info *di;
72 int ret; 75 int ret;
76 char *name;
77 int num;
78
79 /* Get new ID for the new battery device */
80 mutex_lock(&battery_mutex);
81 num = idr_alloc(&battery_id, client, 0, 0, GFP_KERNEL);
82 mutex_unlock(&battery_mutex);
83 if (num < 0)
84 return num;
85
86 name = devm_kasprintf(&client->dev, GFP_KERNEL, "%s-%d", id->name, num);
87 if (!name)
88 goto err_mem;
73 89
74 di = devm_kzalloc(&client->dev, sizeof(*di), GFP_KERNEL); 90 di = devm_kzalloc(&client->dev, sizeof(*di), GFP_KERNEL);
75 if (!di) 91 if (!di)
76 return -ENOMEM; 92 goto err_mem;
77 93
94 di->id = num;
78 di->dev = &client->dev; 95 di->dev = &client->dev;
79 di->chip = id->driver_data; 96 di->chip = id->driver_data;
80 di->name = id->name; 97 di->name = name;
81 di->bus.read = bq27xxx_battery_i2c_read; 98 di->bus.read = bq27xxx_battery_i2c_read;
82 99
83 ret = bq27xxx_battery_setup(di); 100 ret = bq27xxx_battery_setup(di);
84 if (ret) 101 if (ret)
85 return ret; 102 goto err_failed;
86 103
87 /* Schedule a polling after about 1 min */ 104 /* Schedule a polling after about 1 min */
88 schedule_delayed_work(&di->work, 60 * HZ); 105 schedule_delayed_work(&di->work, 60 * HZ);
@@ -103,6 +120,16 @@ static int bq27xxx_battery_i2c_probe(struct i2c_client *client,
103 } 120 }
104 121
105 return 0; 122 return 0;
123
124err_mem:
125 ret = -ENOMEM;
126
127err_failed:
128 mutex_lock(&battery_mutex);
129 idr_remove(&battery_id, num);
130 mutex_unlock(&battery_mutex);
131
132 return ret;
106} 133}
107 134
108static int bq27xxx_battery_i2c_remove(struct i2c_client *client) 135static int bq27xxx_battery_i2c_remove(struct i2c_client *client)
@@ -111,6 +138,10 @@ static int bq27xxx_battery_i2c_remove(struct i2c_client *client)
111 138
112 bq27xxx_battery_teardown(di); 139 bq27xxx_battery_teardown(di);
113 140
141 mutex_lock(&battery_mutex);
142 idr_remove(&battery_id, di->id);
143 mutex_unlock(&battery_mutex);
144
114 return 0; 145 return 0;
115} 146}
116 147
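
The bq27xxx probe above allocates a per-battery id from an IDR under a mutex, uses it in the device name, and releases it again on every failure path and on remove. A minimal user-space stand-in for that allocate/name/free flow; the bitmap allocator below merely imitates what the kernel's IDR provides.

#include <pthread.h>
#include <stdio.h>

/* Hand out the lowest free id under a lock; give it back on teardown or
 * on a failed probe so ids are never leaked.
 */
static pthread_mutex_t id_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long long id_map;		/* bit n set => id n in use */

static int id_alloc(void)
{
	int id = -1;

	pthread_mutex_lock(&id_lock);
	for (int i = 0; i < 64; i++) {
		if (!(id_map & (1ull << i))) {
			id_map |= 1ull << i;
			id = i;
			break;
		}
	}
	pthread_mutex_unlock(&id_lock);
	return id;				/* -1 when exhausted */
}

static void id_free(int id)
{
	pthread_mutex_lock(&id_lock);
	id_map &= ~(1ull << id);
	pthread_mutex_unlock(&id_lock);
}

int main(void)
{
	int a = id_alloc(), b = id_alloc();

	printf("bq27xxx-%d bq27xxx-%d\n", a, b);	/* name suffixes */
	id_free(b);
	id_free(a);
	return 0;
}
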
diff --git a/drivers/ptp/ptp_ixp46x.c b/drivers/ptp/ptp_ixp46x.c
index 934c139916c6..ee4f183ef9ee 100644
--- a/drivers/ptp/ptp_ixp46x.c
+++ b/drivers/ptp/ptp_ixp46x.c
@@ -178,7 +178,6 @@ static int ptp_ixp_adjtime(struct ptp_clock_info *ptp, s64 delta)
178static int ptp_ixp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) 178static int ptp_ixp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
179{ 179{
180 u64 ns; 180 u64 ns;
181 u32 remainder;
182 unsigned long flags; 181 unsigned long flags;
183 struct ixp_clock *ixp_clock = container_of(ptp, struct ixp_clock, caps); 182 struct ixp_clock *ixp_clock = container_of(ptp, struct ixp_clock, caps);
184 struct ixp46x_ts_regs *regs = ixp_clock->regs; 183 struct ixp46x_ts_regs *regs = ixp_clock->regs;
@@ -189,8 +188,7 @@ static int ptp_ixp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
189 188
190 spin_unlock_irqrestore(&register_lock, flags); 189 spin_unlock_irqrestore(&register_lock, flags);
191 190
192 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder); 191 *ts = ns_to_timespec64(ns);
193 ts->tv_nsec = remainder;
194 return 0; 192 return 0;
195} 193}
196 194
@@ -202,8 +200,7 @@ static int ptp_ixp_settime(struct ptp_clock_info *ptp,
202 struct ixp_clock *ixp_clock = container_of(ptp, struct ixp_clock, caps); 200 struct ixp_clock *ixp_clock = container_of(ptp, struct ixp_clock, caps);
203 struct ixp46x_ts_regs *regs = ixp_clock->regs; 201 struct ixp46x_ts_regs *regs = ixp_clock->regs;
204 202
205 ns = ts->tv_sec * 1000000000ULL; 203 ns = timespec64_to_ns(ts);
206 ns += ts->tv_nsec;
207 204
208 spin_lock_irqsave(&register_lock, flags); 205 spin_lock_irqsave(&register_lock, flags);
209 206
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 41605dac8309..c78db05e75b1 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -3035,6 +3035,7 @@ static void dasd_setup_queue(struct dasd_block *block)
3035 max = block->base->discipline->max_blocks << block->s2b_shift; 3035 max = block->base->discipline->max_blocks << block->s2b_shift;
3036 } 3036 }
3037 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, block->request_queue); 3037 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, block->request_queue);
3038 block->request_queue->limits.max_dev_sectors = max;
3038 blk_queue_logical_block_size(block->request_queue, 3039 blk_queue_logical_block_size(block->request_queue,
3039 block->bp_block); 3040 block->bp_block);
3040 blk_queue_max_hw_sectors(block->request_queue, max); 3041 blk_queue_max_hw_sectors(block->request_queue, max);
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 184b1dbeb554..286782c60da4 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -264,8 +264,10 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
264 spin_unlock_irqrestore(&lcu->lock, flags); 264 spin_unlock_irqrestore(&lcu->lock, flags);
265 cancel_work_sync(&lcu->suc_data.worker); 265 cancel_work_sync(&lcu->suc_data.worker);
266 spin_lock_irqsave(&lcu->lock, flags); 266 spin_lock_irqsave(&lcu->lock, flags);
267 if (device == lcu->suc_data.device) 267 if (device == lcu->suc_data.device) {
268 dasd_put_device(device);
268 lcu->suc_data.device = NULL; 269 lcu->suc_data.device = NULL;
270 }
269 } 271 }
270 was_pending = 0; 272 was_pending = 0;
271 if (device == lcu->ruac_data.device) { 273 if (device == lcu->ruac_data.device) {
@@ -273,8 +275,10 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
273 was_pending = 1; 275 was_pending = 1;
274 cancel_delayed_work_sync(&lcu->ruac_data.dwork); 276 cancel_delayed_work_sync(&lcu->ruac_data.dwork);
275 spin_lock_irqsave(&lcu->lock, flags); 277 spin_lock_irqsave(&lcu->lock, flags);
276 if (device == lcu->ruac_data.device) 278 if (device == lcu->ruac_data.device) {
279 dasd_put_device(device);
277 lcu->ruac_data.device = NULL; 280 lcu->ruac_data.device = NULL;
281 }
278 } 282 }
279 private->lcu = NULL; 283 private->lcu = NULL;
280 spin_unlock_irqrestore(&lcu->lock, flags); 284 spin_unlock_irqrestore(&lcu->lock, flags);
@@ -549,8 +553,10 @@ static void lcu_update_work(struct work_struct *work)
549 if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) { 553 if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) {
550 DBF_DEV_EVENT(DBF_WARNING, device, "could not update" 554 DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
551 " alias data in lcu (rc = %d), retry later", rc); 555 " alias data in lcu (rc = %d), retry later", rc);
552 schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ); 556 if (!schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ))
557 dasd_put_device(device);
553 } else { 558 } else {
559 dasd_put_device(device);
554 lcu->ruac_data.device = NULL; 560 lcu->ruac_data.device = NULL;
555 lcu->flags &= ~UPDATE_PENDING; 561 lcu->flags &= ~UPDATE_PENDING;
556 } 562 }
@@ -593,8 +599,10 @@ static int _schedule_lcu_update(struct alias_lcu *lcu,
593 */ 599 */
594 if (!usedev) 600 if (!usedev)
595 return -EINVAL; 601 return -EINVAL;
602 dasd_get_device(usedev);
596 lcu->ruac_data.device = usedev; 603 lcu->ruac_data.device = usedev;
597 schedule_delayed_work(&lcu->ruac_data.dwork, 0); 604 if (!schedule_delayed_work(&lcu->ruac_data.dwork, 0))
605 dasd_put_device(usedev);
598 return 0; 606 return 0;
599} 607}
600 608
@@ -723,7 +731,7 @@ static int reset_summary_unit_check(struct alias_lcu *lcu,
723 ASCEBC((char *) &cqr->magic, 4); 731 ASCEBC((char *) &cqr->magic, 4);
724 ccw = cqr->cpaddr; 732 ccw = cqr->cpaddr;
725 ccw->cmd_code = DASD_ECKD_CCW_RSCK; 733 ccw->cmd_code = DASD_ECKD_CCW_RSCK;
726 ccw->flags = 0 ; 734 ccw->flags = CCW_FLAG_SLI;
727 ccw->count = 16; 735 ccw->count = 16;
728 ccw->cda = (__u32)(addr_t) cqr->data; 736 ccw->cda = (__u32)(addr_t) cqr->data;
729 ((char *)cqr->data)[0] = reason; 737 ((char *)cqr->data)[0] = reason;
@@ -930,6 +938,7 @@ static void summary_unit_check_handling_work(struct work_struct *work)
930 /* 3. read new alias configuration */ 938 /* 3. read new alias configuration */
931 _schedule_lcu_update(lcu, device); 939 _schedule_lcu_update(lcu, device);
932 lcu->suc_data.device = NULL; 940 lcu->suc_data.device = NULL;
941 dasd_put_device(device);
933 spin_unlock_irqrestore(&lcu->lock, flags); 942 spin_unlock_irqrestore(&lcu->lock, flags);
934} 943}
935 944
@@ -989,6 +998,8 @@ void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
989 } 998 }
990 lcu->suc_data.reason = reason; 999 lcu->suc_data.reason = reason;
991 lcu->suc_data.device = device; 1000 lcu->suc_data.device = device;
1001 dasd_get_device(device);
992 spin_unlock(&lcu->lock); 1002 spin_unlock(&lcu->lock);
993 schedule_work(&lcu->suc_data.worker); 1003 if (!schedule_work(&lcu->suc_data.worker))
1004 dasd_put_device(device);
994}; 1005};
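
The dasd_alias changes pin the device with dasd_get_device() before handing it to a work item and drop the reference again whenever schedule_work()/schedule_delayed_work() reports the item was already queued, with the worker releasing it otherwise. The same hand-off rule in plain C, with a toy refcount and scheduler standing in for the kernel primitives:

#include <stdio.h>
#include <stdbool.h>

/* Take a reference before scheduling asynchronous work; return it at
 * once if the work could not be queued, otherwise the worker owns it.
 */
struct device { int refcount; };

static void get_device(struct device *d) { d->refcount++; }
static void put_device(struct device *d) { d->refcount--; }

/* returns false when the work item was already pending */
static bool schedule_update(bool already_queued) { return !already_queued; }

static void schedule_lcu_update(struct device *dev, bool already_queued)
{
	get_device(dev);
	if (!schedule_update(already_queued))
		put_device(dev);	/* nobody will consume the reference */
}

int main(void)
{
	struct device dev = { .refcount = 1 };

	schedule_lcu_update(&dev, false);	/* worker now holds one ref */
	schedule_lcu_update(&dev, true);	/* not queued: ref handed back */
	printf("refcount = %d\n", dev.refcount);	/* 2: base + worker */
	return 0;
}
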
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index cb61f300f8b5..277b5c8c825c 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -67,7 +67,7 @@ static const u8 DASD_DIAG_CMS1[] = { 0xc3, 0xd4, 0xe2, 0xf1 };/* EBCDIC CMS1 */
67 * and function code cmd. 67 * and function code cmd.
68 * In case of an exception return 3. Otherwise return result of bitwise OR of 68 * In case of an exception return 3. Otherwise return result of bitwise OR of
69 * resulting condition code and DIAG return code. */ 69 * resulting condition code and DIAG return code. */
70static inline int dia250(void *iob, int cmd) 70static inline int __dia250(void *iob, int cmd)
71{ 71{
72 register unsigned long reg2 asm ("2") = (unsigned long) iob; 72 register unsigned long reg2 asm ("2") = (unsigned long) iob;
73 typedef union { 73 typedef union {
@@ -77,7 +77,6 @@ static inline int dia250(void *iob, int cmd)
77 int rc; 77 int rc;
78 78
79 rc = 3; 79 rc = 3;
80 diag_stat_inc(DIAG_STAT_X250);
81 asm volatile( 80 asm volatile(
82 " diag 2,%2,0x250\n" 81 " diag 2,%2,0x250\n"
83 "0: ipm %0\n" 82 "0: ipm %0\n"
@@ -91,6 +90,12 @@ static inline int dia250(void *iob, int cmd)
91 return rc; 90 return rc;
92} 91}
93 92
93static inline int dia250(void *iob, int cmd)
94{
95 diag_stat_inc(DIAG_STAT_X250);
96 return __dia250(iob, cmd);
97}
98
94/* Initialize block I/O to DIAG device using the specified blocksize and 99/* Initialize block I/O to DIAG device using the specified blocksize and
95 * block offset. On success, return zero and set end_block to contain the 100 * block offset. On success, return zero and set end_block to contain the
96 * number of blocks on the device minus the specified offset. Return non-zero 101 * number of blocks on the device minus the specified offset. Return non-zero
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index c692dfebd0ba..50597f9522fe 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -139,11 +139,11 @@ static ssize_t chp_measurement_chars_read(struct file *filp,
139 139
140 device = container_of(kobj, struct device, kobj); 140 device = container_of(kobj, struct device, kobj);
141 chp = to_channelpath(device); 141 chp = to_channelpath(device);
142 if (!chp->cmg_chars) 142 if (chp->cmg == -1)
143 return 0; 143 return 0;
144 144
145 return memory_read_from_buffer(buf, count, &off, 145 return memory_read_from_buffer(buf, count, &off, &chp->cmg_chars,
146 chp->cmg_chars, sizeof(struct cmg_chars)); 146 sizeof(chp->cmg_chars));
147} 147}
148 148
149static struct bin_attribute chp_measurement_chars_attr = { 149static struct bin_attribute chp_measurement_chars_attr = {
@@ -416,7 +416,8 @@ static void chp_release(struct device *dev)
416 * chp_update_desc - update channel-path description 416 * chp_update_desc - update channel-path description
417 * @chp - channel-path 417 * @chp - channel-path
418 * 418 *
419 * Update the channel-path description of the specified channel-path. 419 * Update the channel-path description of the specified channel-path
420 * including channel measurement related information.
420 * Return zero on success, non-zero otherwise. 421 * Return zero on success, non-zero otherwise.
421 */ 422 */
422int chp_update_desc(struct channel_path *chp) 423int chp_update_desc(struct channel_path *chp)
@@ -428,8 +429,10 @@ int chp_update_desc(struct channel_path *chp)
428 return rc; 429 return rc;
429 430
430 rc = chsc_determine_fmt1_channel_path_desc(chp->chpid, &chp->desc_fmt1); 431 rc = chsc_determine_fmt1_channel_path_desc(chp->chpid, &chp->desc_fmt1);
432 if (rc)
433 return rc;
431 434
432 return rc; 435 return chsc_get_channel_measurement_chars(chp);
433} 436}
434 437
435/** 438/**
@@ -466,14 +469,6 @@ int chp_new(struct chp_id chpid)
466 ret = -ENODEV; 469 ret = -ENODEV;
467 goto out_free; 470 goto out_free;
468 } 471 }
469 /* Get channel-measurement characteristics. */
470 if (css_chsc_characteristics.scmc && css_chsc_characteristics.secm) {
471 ret = chsc_get_channel_measurement_chars(chp);
472 if (ret)
473 goto out_free;
474 } else {
475 chp->cmg = -1;
476 }
477 dev_set_name(&chp->dev, "chp%x.%02x", chpid.cssid, chpid.id); 472 dev_set_name(&chp->dev, "chp%x.%02x", chpid.cssid, chpid.id);
478 473
479 /* make it known to the system */ 474 /* make it known to the system */
diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h
index 4efd5b867cc3..af0232290dc4 100644
--- a/drivers/s390/cio/chp.h
+++ b/drivers/s390/cio/chp.h
@@ -48,7 +48,7 @@ struct channel_path {
48 /* Channel-measurement related stuff: */ 48 /* Channel-measurement related stuff: */
49 int cmg; 49 int cmg;
50 int shared; 50 int shared;
51 void *cmg_chars; 51 struct cmg_chars cmg_chars;
52}; 52};
53 53
54/* Return channel_path struct for given chpid. */ 54/* Return channel_path struct for given chpid. */
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index a831d18596a5..c424c0c7367e 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -14,6 +14,7 @@
14#include <linux/slab.h> 14#include <linux/slab.h>
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/device.h> 16#include <linux/device.h>
17#include <linux/mutex.h>
17#include <linux/pci.h> 18#include <linux/pci.h>
18 19
19#include <asm/cio.h> 20#include <asm/cio.h>
@@ -224,8 +225,9 @@ out_unreg:
224 225
225void chsc_chp_offline(struct chp_id chpid) 226void chsc_chp_offline(struct chp_id chpid)
226{ 227{
227 char dbf_txt[15]; 228 struct channel_path *chp = chpid_to_chp(chpid);
228 struct chp_link link; 229 struct chp_link link;
230 char dbf_txt[15];
229 231
230 sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id); 232 sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
231 CIO_TRACE_EVENT(2, dbf_txt); 233 CIO_TRACE_EVENT(2, dbf_txt);
@@ -236,6 +238,11 @@ void chsc_chp_offline(struct chp_id chpid)
236 link.chpid = chpid; 238 link.chpid = chpid;
237 /* Wait until previous actions have settled. */ 239 /* Wait until previous actions have settled. */
238 css_wait_for_slow_path(); 240 css_wait_for_slow_path();
241
242 mutex_lock(&chp->lock);
243 chp_update_desc(chp);
244 mutex_unlock(&chp->lock);
245
239 for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link); 246 for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
240} 247}
241 248
@@ -690,8 +697,9 @@ static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
690 697
691void chsc_chp_online(struct chp_id chpid) 698void chsc_chp_online(struct chp_id chpid)
692{ 699{
693 char dbf_txt[15]; 700 struct channel_path *chp = chpid_to_chp(chpid);
694 struct chp_link link; 701 struct chp_link link;
702 char dbf_txt[15];
695 703
696 sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id); 704 sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
697 CIO_TRACE_EVENT(2, dbf_txt); 705 CIO_TRACE_EVENT(2, dbf_txt);
@@ -701,6 +709,11 @@ void chsc_chp_online(struct chp_id chpid)
701 link.chpid = chpid; 709 link.chpid = chpid;
702 /* Wait until previous actions have settled. */ 710 /* Wait until previous actions have settled. */
703 css_wait_for_slow_path(); 711 css_wait_for_slow_path();
712
713 mutex_lock(&chp->lock);
714 chp_update_desc(chp);
715 mutex_unlock(&chp->lock);
716
704 for_each_subchannel_staged(__s390_process_res_acc, NULL, 717 for_each_subchannel_staged(__s390_process_res_acc, NULL,
705 &link); 718 &link);
706 css_schedule_reprobe(); 719 css_schedule_reprobe();
@@ -967,22 +980,19 @@ static void
967chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv, 980chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
968 struct cmg_chars *chars) 981 struct cmg_chars *chars)
969{ 982{
970 struct cmg_chars *cmg_chars;
971 int i, mask; 983 int i, mask;
972 984
973 cmg_chars = chp->cmg_chars;
974 for (i = 0; i < NR_MEASUREMENT_CHARS; i++) { 985 for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
975 mask = 0x80 >> (i + 3); 986 mask = 0x80 >> (i + 3);
976 if (cmcv & mask) 987 if (cmcv & mask)
977 cmg_chars->values[i] = chars->values[i]; 988 chp->cmg_chars.values[i] = chars->values[i];
978 else 989 else
979 cmg_chars->values[i] = 0; 990 chp->cmg_chars.values[i] = 0;
980 } 991 }
981} 992}
982 993
983int chsc_get_channel_measurement_chars(struct channel_path *chp) 994int chsc_get_channel_measurement_chars(struct channel_path *chp)
984{ 995{
985 struct cmg_chars *cmg_chars;
986 int ccode, ret; 996 int ccode, ret;
987 997
988 struct { 998 struct {
@@ -1006,10 +1016,11 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
1006 u32 data[NR_MEASUREMENT_CHARS]; 1016 u32 data[NR_MEASUREMENT_CHARS];
1007 } __attribute__ ((packed)) *scmc_area; 1017 } __attribute__ ((packed)) *scmc_area;
1008 1018
1009 chp->cmg_chars = NULL; 1019 chp->shared = -1;
1010 cmg_chars = kmalloc(sizeof(*cmg_chars), GFP_KERNEL); 1020 chp->cmg = -1;
1011 if (!cmg_chars) 1021
1012 return -ENOMEM; 1022 if (!css_chsc_characteristics.scmc || !css_chsc_characteristics.secm)
1023 return 0;
1013 1024
1014 spin_lock_irq(&chsc_page_lock); 1025 spin_lock_irq(&chsc_page_lock);
1015 memset(chsc_page, 0, PAGE_SIZE); 1026 memset(chsc_page, 0, PAGE_SIZE);
@@ -1031,25 +1042,19 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
1031 scmc_area->response.code); 1042 scmc_area->response.code);
1032 goto out; 1043 goto out;
1033 } 1044 }
1034 if (scmc_area->not_valid) { 1045 if (scmc_area->not_valid)
1035 chp->cmg = -1;
1036 chp->shared = -1;
1037 goto out; 1046 goto out;
1038 } 1047
1039 chp->cmg = scmc_area->cmg; 1048 chp->cmg = scmc_area->cmg;
1040 chp->shared = scmc_area->shared; 1049 chp->shared = scmc_area->shared;
1041 if (chp->cmg != 2 && chp->cmg != 3) { 1050 if (chp->cmg != 2 && chp->cmg != 3) {
1042 /* No cmg-dependent data. */ 1051 /* No cmg-dependent data. */
1043 goto out; 1052 goto out;
1044 } 1053 }
1045 chp->cmg_chars = cmg_chars;
1046 chsc_initialize_cmg_chars(chp, scmc_area->cmcv, 1054 chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
1047 (struct cmg_chars *) &scmc_area->data); 1055 (struct cmg_chars *) &scmc_area->data);
1048out: 1056out:
1049 spin_unlock_irq(&chsc_page_lock); 1057 spin_unlock_irq(&chsc_page_lock);
1050 if (!chp->cmg_chars)
1051 kfree(cmg_chars);
1052
1053 return ret; 1058 return ret;
1054} 1059}
1055 1060
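
The chp/chsc changes drop the separately allocated cmg_chars buffer: the characteristics are embedded in struct channel_path and "no data" is signalled by cmg == -1 rather than a NULL pointer. A trimmed, compilable sketch of that shape; the structures are reduced stand-ins, not the s390 definitions.

#include <stdio.h>
#include <string.h>

/* Embedded measurement characteristics with a sentinel instead of a
 * kmalloc'd pointer.
 */
#define NR_CHARS 5

struct cmg_chars    { unsigned int values[NR_CHARS]; };
struct channel_path {
	int cmg;			/* -1: no measurement data */
	int shared;
	struct cmg_chars cmg_chars;	/* embedded, nothing to kfree */
};

static size_t read_chars(char *buf, size_t len, const struct channel_path *chp)
{
	if (chp->cmg == -1)
		return 0;			/* nothing to expose */
	if (len > sizeof(chp->cmg_chars))
		len = sizeof(chp->cmg_chars);
	memcpy(buf, &chp->cmg_chars, len);
	return len;
}

int main(void)
{
	struct channel_path chp = { .cmg = -1, .shared = -1 };
	char buf[sizeof(struct cmg_chars)];

	printf("read %zu bytes\n", read_chars(buf, sizeof(buf), &chp));
	chp.cmg = 2;
	printf("read %zu bytes\n", read_chars(buf, sizeof(buf), &chp));
	return 0;
}
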
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
index 7b23f43c7b08..de1b6c1d172c 100644
--- a/drivers/s390/crypto/zcrypt_error.h
+++ b/drivers/s390/crypto/zcrypt_error.h
@@ -112,9 +112,10 @@ static inline int convert_error(struct zcrypt_device *zdev,
112 atomic_set(&zcrypt_rescan_req, 1); 112 atomic_set(&zcrypt_rescan_req, 1);
113 zdev->online = 0; 113 zdev->online = 0;
114 pr_err("Cryptographic device %x failed and was set offline\n", 114 pr_err("Cryptographic device %x failed and was set offline\n",
115 zdev->ap_dev->qid); 115 AP_QID_DEVICE(zdev->ap_dev->qid));
116 ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", 116 ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
117 zdev->ap_dev->qid, zdev->online, ehdr->reply_code); 117 AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online,
118 ehdr->reply_code);
118 return -EAGAIN; 119 return -EAGAIN;
119 case REP82_ERROR_TRANSPORT_FAIL: 120 case REP82_ERROR_TRANSPORT_FAIL:
120 case REP82_ERROR_MACHINE_FAILURE: 121 case REP82_ERROR_MACHINE_FAILURE:
@@ -123,16 +124,18 @@ static inline int convert_error(struct zcrypt_device *zdev,
123 atomic_set(&zcrypt_rescan_req, 1); 124 atomic_set(&zcrypt_rescan_req, 1);
124 zdev->online = 0; 125 zdev->online = 0;
125 pr_err("Cryptographic device %x failed and was set offline\n", 126 pr_err("Cryptographic device %x failed and was set offline\n",
126 zdev->ap_dev->qid); 127 AP_QID_DEVICE(zdev->ap_dev->qid));
127 ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", 128 ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
128 zdev->ap_dev->qid, zdev->online, ehdr->reply_code); 129 AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online,
130 ehdr->reply_code);
129 return -EAGAIN; 131 return -EAGAIN;
130 default: 132 default:
131 zdev->online = 0; 133 zdev->online = 0;
132 pr_err("Cryptographic device %x failed and was set offline\n", 134 pr_err("Cryptographic device %x failed and was set offline\n",
133 zdev->ap_dev->qid); 135 AP_QID_DEVICE(zdev->ap_dev->qid));
134 ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", 136 ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
135 zdev->ap_dev->qid, zdev->online, ehdr->reply_code); 137 AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online,
138 ehdr->reply_code);
136 return -EAGAIN; /* repeat the request on a different device. */ 139 return -EAGAIN; /* repeat the request on a different device. */
137 } 140 }
138} 141}
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c
index 74edf2934e7c..eedfaa2cf715 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.c
+++ b/drivers/s390/crypto/zcrypt_msgtype50.c
@@ -336,9 +336,10 @@ static int convert_type80(struct zcrypt_device *zdev,
336 /* The result is too short, the CEX2A card may not do that.. */ 336 /* The result is too short, the CEX2A card may not do that.. */
337 zdev->online = 0; 337 zdev->online = 0;
338 pr_err("Cryptographic device %x failed and was set offline\n", 338 pr_err("Cryptographic device %x failed and was set offline\n",
339 zdev->ap_dev->qid); 339 AP_QID_DEVICE(zdev->ap_dev->qid));
340 ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", 340 ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
341 zdev->ap_dev->qid, zdev->online, t80h->code); 341 AP_QID_DEVICE(zdev->ap_dev->qid),
342 zdev->online, t80h->code);
342 343
343 return -EAGAIN; /* repeat the request on a different device. */ 344 return -EAGAIN; /* repeat the request on a different device. */
344 } 345 }
@@ -368,9 +369,9 @@ static int convert_response(struct zcrypt_device *zdev,
368 default: /* Unknown response type, this should NEVER EVER happen */ 369 default: /* Unknown response type, this should NEVER EVER happen */
369 zdev->online = 0; 370 zdev->online = 0;
370 pr_err("Cryptographic device %x failed and was set offline\n", 371 pr_err("Cryptographic device %x failed and was set offline\n",
371 zdev->ap_dev->qid); 372 AP_QID_DEVICE(zdev->ap_dev->qid));
372 ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", 373 ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
373 zdev->ap_dev->qid, zdev->online); 374 AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online);
374 return -EAGAIN; /* repeat the request on a different device. */ 375 return -EAGAIN; /* repeat the request on a different device. */
375 } 376 }
376} 377}
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index 9a2dd472c1cc..21959719daef 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -572,9 +572,9 @@ static int convert_type86_ica(struct zcrypt_device *zdev,
572 return -EINVAL; 572 return -EINVAL;
573 zdev->online = 0; 573 zdev->online = 0;
574 pr_err("Cryptographic device %x failed and was set offline\n", 574 pr_err("Cryptographic device %x failed and was set offline\n",
575 zdev->ap_dev->qid); 575 AP_QID_DEVICE(zdev->ap_dev->qid));
576 ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", 576 ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
577 zdev->ap_dev->qid, zdev->online, 577 AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online,
578 msg->hdr.reply_code); 578 msg->hdr.reply_code);
579 return -EAGAIN; /* repeat the request on a different device. */ 579 return -EAGAIN; /* repeat the request on a different device. */
580 } 580 }
@@ -715,9 +715,9 @@ static int convert_response_ica(struct zcrypt_device *zdev,
715 default: /* Unknown response type, this should NEVER EVER happen */ 715 default: /* Unknown response type, this should NEVER EVER happen */
716 zdev->online = 0; 716 zdev->online = 0;
717 pr_err("Cryptographic device %x failed and was set offline\n", 717 pr_err("Cryptographic device %x failed and was set offline\n",
718 zdev->ap_dev->qid); 718 AP_QID_DEVICE(zdev->ap_dev->qid));
719 ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", 719 ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
720 zdev->ap_dev->qid, zdev->online); 720 AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online);
721 return -EAGAIN; /* repeat the request on a different device. */ 721 return -EAGAIN; /* repeat the request on a different device. */
722 } 722 }
723} 723}
@@ -747,9 +747,9 @@ static int convert_response_xcrb(struct zcrypt_device *zdev,
747 xcRB->status = 0x0008044DL; /* HDD_InvalidParm */ 747 xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
748 zdev->online = 0; 748 zdev->online = 0;
749 pr_err("Cryptographic device %x failed and was set offline\n", 749 pr_err("Cryptographic device %x failed and was set offline\n",
750 zdev->ap_dev->qid); 750 AP_QID_DEVICE(zdev->ap_dev->qid));
751 ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", 751 ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
752 zdev->ap_dev->qid, zdev->online); 752 AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online);
753 return -EAGAIN; /* repeat the request on a different device. */ 753 return -EAGAIN; /* repeat the request on a different device. */
754 } 754 }
755} 755}
@@ -773,9 +773,9 @@ static int convert_response_ep11_xcrb(struct zcrypt_device *zdev,
773 default: /* Unknown response type, this should NEVER EVER happen */ 773 default: /* Unknown response type, this should NEVER EVER happen */
774 zdev->online = 0; 774 zdev->online = 0;
775 pr_err("Cryptographic device %x failed and was set offline\n", 775 pr_err("Cryptographic device %x failed and was set offline\n",
776 zdev->ap_dev->qid); 776 AP_QID_DEVICE(zdev->ap_dev->qid));
777 ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", 777 ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
778 zdev->ap_dev->qid, zdev->online); 778 AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online);
779 return -EAGAIN; /* repeat the request on a different device. */ 779 return -EAGAIN; /* repeat the request on a different device. */
780 } 780 }
781} 781}
@@ -800,9 +800,9 @@ static int convert_response_rng(struct zcrypt_device *zdev,
800 default: /* Unknown response type, this should NEVER EVER happen */ 800 default: /* Unknown response type, this should NEVER EVER happen */
801 zdev->online = 0; 801 zdev->online = 0;
802 pr_err("Cryptographic device %x failed and was set offline\n", 802 pr_err("Cryptographic device %x failed and was set offline\n",
803 zdev->ap_dev->qid); 803 AP_QID_DEVICE(zdev->ap_dev->qid));
804 ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", 804 ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
805 zdev->ap_dev->qid, zdev->online); 805 AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online);
806 return -EAGAIN; /* repeat the request on a different device. */ 806 return -EAGAIN; /* repeat the request on a different device. */
807 } 807 }
808} 808}
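The zcrypt hunks above all make the same change: instead of printing the raw AP queue identifier, the error and debug messages now pass it through AP_QID_DEVICE() so only the card (device) index is reported. A minimal userspace sketch of the idea follows; the 16-bit packing and the QID_* macro names are assumptions for illustration only -- the real layout and AP_QID_DEVICE() live in the s390 AP bus headers.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical packing: card index in bits 15..8, domain index in bits 7..0. */
#define QID(card, dom)   ((uint16_t)(((card) << 8) | ((dom) & 0xff)))
#define QID_DEVICE(qid)  (((qid) >> 8) & 0xff)   /* analogue of AP_QID_DEVICE() */
#define QID_QUEUE(qid)   ((qid) & 0xff)

int main(void)
{
	uint16_t qid = QID(0x03, 0x2a);

	/* Printing the raw qid looks nothing like "card 3". */
	printf("raw qid: %04x\n", qid);
	/* Extracting the device part gives just the card number. */
	printf("device:  %02x, domain: %02x\n", QID_DEVICE(qid), QID_QUEUE(qid));
	return 0;
}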
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 361358134315..93880ed6291c 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -562,7 +562,7 @@ static int mode_select_handle_sense(struct scsi_device *sdev,
562 /* 562 /*
563 * Command Lock contention 563 * Command Lock contention
564 */ 564 */
565 err = SCSI_DH_RETRY; 565 err = SCSI_DH_IMM_RETRY;
566 break; 566 break;
567 default: 567 default:
568 break; 568 break;
@@ -612,6 +612,8 @@ retry:
612 err = mode_select_handle_sense(sdev, h->sense); 612 err = mode_select_handle_sense(sdev, h->sense);
613 if (err == SCSI_DH_RETRY && retry_cnt--) 613 if (err == SCSI_DH_RETRY && retry_cnt--)
614 goto retry; 614 goto retry;
615 if (err == SCSI_DH_IMM_RETRY)
616 goto retry;
615 } 617 }
616 if (err == SCSI_DH_OK) { 618 if (err == SCSI_DH_OK) {
617 h->state = RDAC_STATE_ACTIVE; 619 h->state = RDAC_STATE_ACTIVE;
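The rdac change introduces SCSI_DH_IMM_RETRY for the Command Lock contention sense case: an ordinary SCSI_DH_RETRY still consumes the bounded retry budget, while the immediate-retry status loops again without burning a retry. A rough standalone sketch of that control flow, with invented names (do_mode_select(), the DH_* values) standing in for the driver internals:

#include <stdio.h>

enum dh_status { DH_OK, DH_RETRY, DH_IMM_RETRY };

/* Pretend the first two attempts hit lock contention, the third a
 * transient error, and the fourth succeeds. */
static enum dh_status do_mode_select(int attempt)
{
	if (attempt < 2)  return DH_IMM_RETRY;  /* contention: retry at once */
	if (attempt == 2) return DH_RETRY;      /* transient: counted retry  */
	return DH_OK;
}

int main(void)
{
	int retry_cnt = 5, attempt = 0;
	enum dh_status err;

retry:
	err = do_mode_select(attempt++);
	if (err == DH_RETRY && retry_cnt--)
		goto retry;             /* bounded, like the existing path  */
	if (err == DH_IMM_RETRY)
		goto retry;             /* contention never eats the budget */

	printf("finished after %d attempts, budget left %d\n", attempt, retry_cnt);
	return err == DH_OK ? 0 : 1;
}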
diff --git a/drivers/scsi/hisi_sas/Kconfig b/drivers/scsi/hisi_sas/Kconfig
index 37a0c7156087..d1dd1616f983 100644
--- a/drivers/scsi/hisi_sas/Kconfig
+++ b/drivers/scsi/hisi_sas/Kconfig
@@ -1,5 +1,7 @@
1config SCSI_HISI_SAS 1config SCSI_HISI_SAS
2 tristate "HiSilicon SAS" 2 tristate "HiSilicon SAS"
3 depends on HAS_DMA && HAS_IOMEM
4 depends on ARM64 || COMPILE_TEST
3 select SCSI_SAS_LIBSAS 5 select SCSI_SAS_LIBSAS
4 select BLK_DEV_INTEGRITY 6 select BLK_DEV_INTEGRITY
5 help 7 help
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index 057fdeb720ac..eea24d7531cf 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -1289,13 +1289,10 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
1289 goto out; 1289 goto out;
1290 } 1290 }
1291 1291
1292 if (cmplt_hdr_data & CMPLT_HDR_ERR_RCRD_XFRD_MSK) { 1292 if (cmplt_hdr_data & CMPLT_HDR_ERR_RCRD_XFRD_MSK &&
1293 if (!(cmplt_hdr_data & CMPLT_HDR_CMD_CMPLT_MSK) || 1293 !(cmplt_hdr_data & CMPLT_HDR_RSPNS_XFRD_MSK)) {
1294 !(cmplt_hdr_data & CMPLT_HDR_RSPNS_XFRD_MSK))
1295 ts->stat = SAS_DATA_OVERRUN;
1296 else
1297 slot_err_v1_hw(hisi_hba, task, slot);
1298 1294
1295 slot_err_v1_hw(hisi_hba, task, slot);
1299 goto out; 1296 goto out;
1300 } 1297 }
1301 1298
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 3b3e0998fa6e..d6a691e27d33 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -4002,6 +4002,7 @@ static ssize_t ipr_store_update_fw(struct device *dev,
4002 struct ipr_sglist *sglist; 4002 struct ipr_sglist *sglist;
4003 char fname[100]; 4003 char fname[100];
4004 char *src; 4004 char *src;
4005 char *endline;
4005 int result, dnld_size; 4006 int result, dnld_size;
4006 4007
4007 if (!capable(CAP_SYS_ADMIN)) 4008 if (!capable(CAP_SYS_ADMIN))
@@ -4009,6 +4010,10 @@ static ssize_t ipr_store_update_fw(struct device *dev,
4009 4010
4010 snprintf(fname, sizeof(fname), "%s", buf); 4011 snprintf(fname, sizeof(fname), "%s", buf);
4011 4012
4013 endline = strchr(fname, '\n');
4014 if (endline)
4015 *endline = '\0';
4016
4012 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) { 4017 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
4013 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname); 4018 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
4014 return -EIO; 4019 return -EIO;
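The ipr hunk trims a trailing newline from the firmware file name before handing it to request_firmware(); values written through sysfs with echo normally arrive with a '\n' appended, which would otherwise become part of the file name. A self-contained sketch of the same trick (the file name is made up):

#include <stdio.h>
#include <string.h>

/* Cut the string at the first newline, if any -- the same strchr()/'\0'
 * dance added to ipr_store_update_fw(). */
static void chop_newline(char *s)
{
	char *nl = strchr(s, '\n');

	if (nl)
		*nl = '\0';
}

int main(void)
{
	char fname[100];

	snprintf(fname, sizeof(fname), "%s", "example-fw-image.bin\n");
	chop_newline(fname);
	printf("firmware file: '%s'\n", fname);   /* no stray newline */
	return 0;
}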
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 52a87657c7dd..692a7570b5e1 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -2204,7 +2204,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
2204 /* Clear outstanding commands array. */ 2204 /* Clear outstanding commands array. */
2205 for (que = 0; que < ha->max_req_queues; que++) { 2205 for (que = 0; que < ha->max_req_queues; que++) {
2206 req = ha->req_q_map[que]; 2206 req = ha->req_q_map[que];
2207 if (!req) 2207 if (!req || !test_bit(que, ha->req_qid_map))
2208 continue; 2208 continue;
2209 req->out_ptr = (void *)(req->ring + req->length); 2209 req->out_ptr = (void *)(req->ring + req->length);
2210 *req->out_ptr = 0; 2210 *req->out_ptr = 0;
@@ -2221,7 +2221,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
2221 2221
2222 for (que = 0; que < ha->max_rsp_queues; que++) { 2222 for (que = 0; que < ha->max_rsp_queues; que++) {
2223 rsp = ha->rsp_q_map[que]; 2223 rsp = ha->rsp_q_map[que];
2224 if (!rsp) 2224 if (!rsp || !test_bit(que, ha->rsp_qid_map))
2225 continue; 2225 continue;
2226 rsp->in_ptr = (void *)(rsp->ring + rsp->length); 2226 rsp->in_ptr = (void *)(rsp->ring + rsp->length);
2227 *rsp->in_ptr = 0; 2227 *rsp->in_ptr = 0;
@@ -4981,7 +4981,7 @@ qla25xx_init_queues(struct qla_hw_data *ha)
4981 4981
4982 for (i = 1; i < ha->max_rsp_queues; i++) { 4982 for (i = 1; i < ha->max_rsp_queues; i++) {
4983 rsp = ha->rsp_q_map[i]; 4983 rsp = ha->rsp_q_map[i];
4984 if (rsp) { 4984 if (rsp && test_bit(i, ha->rsp_qid_map)) {
4985 rsp->options &= ~BIT_0; 4985 rsp->options &= ~BIT_0;
4986 ret = qla25xx_init_rsp_que(base_vha, rsp); 4986 ret = qla25xx_init_rsp_que(base_vha, rsp);
4987 if (ret != QLA_SUCCESS) 4987 if (ret != QLA_SUCCESS)
@@ -4996,8 +4996,8 @@ qla25xx_init_queues(struct qla_hw_data *ha)
4996 } 4996 }
4997 for (i = 1; i < ha->max_req_queues; i++) { 4997 for (i = 1; i < ha->max_req_queues; i++) {
4998 req = ha->req_q_map[i]; 4998 req = ha->req_q_map[i];
4999 if (req) { 4999 if (req && test_bit(i, ha->req_qid_map)) {
5000 /* Clear outstanding commands array. */ 5000 /* Clear outstanding commands array. */
5001 req->options &= ~BIT_0; 5001 req->options &= ~BIT_0;
5002 ret = qla25xx_init_req_que(base_vha, req); 5002 ret = qla25xx_init_req_que(base_vha, req);
5003 if (ret != QLA_SUCCESS) 5003 if (ret != QLA_SUCCESS)
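The qla2xxx changes in this and the following hunks all apply the same guard: the req_q_map/rsp_q_map arrays can contain slots that were never (or are no longer) valid, so every loop over them now also checks the matching bit in req_qid_map/rsp_qid_map before touching the entry. A small userspace sketch of iterating a sparse table under a validity bitmap; plain bit operations stand in for the kernel's test_bit():

#include <stdio.h>
#include <stdint.h>

#define MAX_QUEUES 8

struct queue { int id; };

static uint32_t qid_map;                 /* bit i set => slot i is valid */
static struct queue *q_map[MAX_QUEUES];

static int qid_test(unsigned int i) { return (qid_map >> i) & 1; }

int main(void)
{
	static struct queue q1 = { 1 }, q3 = { 3 };

	q_map[1] = &q1;  qid_map |= 1u << 1;
	q_map[3] = &q3;                      /* stale pointer: bit NOT set */

	for (unsigned int i = 0; i < MAX_QUEUES; i++) {
		if (!q_map[i] || !qid_test(i))   /* both checks, as in the patch */
			continue;
		printf("initialising queue %d\n", q_map[i]->id);
	}
	return 0;
}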
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index d4d65eb0e9b4..4af95479a9db 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -3063,9 +3063,9 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
3063 "MSI-X: Failed to enable support " 3063 "MSI-X: Failed to enable support "
3064 "-- %d/%d\n Retry with %d vectors.\n", 3064 "-- %d/%d\n Retry with %d vectors.\n",
3065 ha->msix_count, ret, ret); 3065 ha->msix_count, ret, ret);
3066 ha->msix_count = ret;
3067 ha->max_rsp_queues = ha->msix_count - 1;
3066 } 3068 }
3067 ha->msix_count = ret;
3068 ha->max_rsp_queues = ha->msix_count - 1;
3069 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) * 3069 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
3070 ha->msix_count, GFP_KERNEL); 3070 ha->msix_count, GFP_KERNEL);
3071 if (!ha->msix_entries) { 3071 if (!ha->msix_entries) {
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index c5dd594f6c31..cf7ba52bae66 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -600,7 +600,7 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
600 /* Delete request queues */ 600 /* Delete request queues */
601 for (cnt = 1; cnt < ha->max_req_queues; cnt++) { 601 for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
602 req = ha->req_q_map[cnt]; 602 req = ha->req_q_map[cnt];
603 if (req) { 603 if (req && test_bit(cnt, ha->req_qid_map)) {
604 ret = qla25xx_delete_req_que(vha, req); 604 ret = qla25xx_delete_req_que(vha, req);
605 if (ret != QLA_SUCCESS) { 605 if (ret != QLA_SUCCESS) {
606 ql_log(ql_log_warn, vha, 0x00ea, 606 ql_log(ql_log_warn, vha, 0x00ea,
@@ -614,7 +614,7 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
614 /* Delete response queues */ 614 /* Delete response queues */
615 for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) { 615 for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
616 rsp = ha->rsp_q_map[cnt]; 616 rsp = ha->rsp_q_map[cnt];
617 if (rsp) { 617 if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
618 ret = qla25xx_delete_rsp_que(vha, rsp); 618 ret = qla25xx_delete_rsp_que(vha, rsp);
619 if (ret != QLA_SUCCESS) { 619 if (ret != QLA_SUCCESS) {
620 ql_log(ql_log_warn, vha, 0x00eb, 620 ql_log(ql_log_warn, vha, 0x00eb,
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index f1788db43195..f6c7ce35b542 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -409,6 +409,9 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
409 int cnt; 409 int cnt;
410 410
411 for (cnt = 0; cnt < ha->max_req_queues; cnt++) { 411 for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
412 if (!test_bit(cnt, ha->req_qid_map))
413 continue;
414
412 req = ha->req_q_map[cnt]; 415 req = ha->req_q_map[cnt];
413 qla2x00_free_req_que(ha, req); 416 qla2x00_free_req_que(ha, req);
414 } 417 }
@@ -416,6 +419,9 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
416 ha->req_q_map = NULL; 419 ha->req_q_map = NULL;
417 420
418 for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) { 421 for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
422 if (!test_bit(cnt, ha->rsp_qid_map))
423 continue;
424
419 rsp = ha->rsp_q_map[cnt]; 425 rsp = ha->rsp_q_map[cnt];
420 qla2x00_free_rsp_que(ha, rsp); 426 qla2x00_free_rsp_que(ha, rsp);
421 } 427 }
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 8075a4cdb45c..ee967becd257 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -105,7 +105,7 @@ static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
105static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun, 105static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
106 int fn, void *iocb, int flags); 106 int fn, void *iocb, int flags);
107static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd 107static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
108 *cmd, struct atio_from_isp *atio, int ha_locked); 108 *cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort);
109static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha, 109static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
110 struct qla_tgt_srr_imm *imm, int ha_lock); 110 struct qla_tgt_srr_imm *imm, int ha_lock);
111static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, 111static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha,
@@ -1756,7 +1756,7 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
1756 qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy, 1756 qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
1757 0, 0, 0, 0, 0, 0); 1757 0, 0, 0, 0, 0, 0);
1758 else { 1758 else {
1759 if (mcmd->se_cmd.se_tmr_req->function == TMR_ABORT_TASK) 1759 if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX)
1760 qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts, 1760 qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
1761 mcmd->fc_tm_rsp, false); 1761 mcmd->fc_tm_rsp, false);
1762 else 1762 else
@@ -2665,7 +2665,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
2665 /* no need to terminate. FW already freed exchange. */ 2665 /* no need to terminate. FW already freed exchange. */
2666 qlt_abort_cmd_on_host_reset(cmd->vha, cmd); 2666 qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
2667 else 2667 else
2668 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1); 2668 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0);
2669 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2669 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2670 return 0; 2670 return 0;
2671 } 2671 }
@@ -3173,7 +3173,8 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
3173} 3173}
3174 3174
3175static void qlt_send_term_exchange(struct scsi_qla_host *vha, 3175static void qlt_send_term_exchange(struct scsi_qla_host *vha,
3176 struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked) 3176 struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked,
3177 int ul_abort)
3177{ 3178{
3178 unsigned long flags = 0; 3179 unsigned long flags = 0;
3179 int rc; 3180 int rc;
@@ -3193,8 +3194,7 @@ static void qlt_send_term_exchange(struct scsi_qla_host *vha,
3193 qlt_alloc_qfull_cmd(vha, atio, 0, 0); 3194 qlt_alloc_qfull_cmd(vha, atio, 0, 0);
3194 3195
3195done: 3196done:
3196 if (cmd && (!cmd->aborted || 3197 if (cmd && !ul_abort && !cmd->aborted) {
3197 !cmd->cmd_sent_to_fw)) {
3198 if (cmd->sg_mapped) 3198 if (cmd->sg_mapped)
3199 qlt_unmap_sg(vha, cmd); 3199 qlt_unmap_sg(vha, cmd);
3200 vha->hw->tgt.tgt_ops->free_cmd(cmd); 3200 vha->hw->tgt.tgt_ops->free_cmd(cmd);
@@ -3253,21 +3253,38 @@ static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
3253 3253
3254} 3254}
3255 3255
3256void qlt_abort_cmd(struct qla_tgt_cmd *cmd) 3256int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
3257{ 3257{
3258 struct qla_tgt *tgt = cmd->tgt; 3258 struct qla_tgt *tgt = cmd->tgt;
3259 struct scsi_qla_host *vha = tgt->vha; 3259 struct scsi_qla_host *vha = tgt->vha;
3260 struct se_cmd *se_cmd = &cmd->se_cmd; 3260 struct se_cmd *se_cmd = &cmd->se_cmd;
3261 unsigned long flags;
3261 3262
3262 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014, 3263 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
3263 "qla_target(%d): terminating exchange for aborted cmd=%p " 3264 "qla_target(%d): terminating exchange for aborted cmd=%p "
3264 "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd, 3265 "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
3265 se_cmd->tag); 3266 se_cmd->tag);
3266 3267
3268 spin_lock_irqsave(&cmd->cmd_lock, flags);
3269 if (cmd->aborted) {
3270 spin_unlock_irqrestore(&cmd->cmd_lock, flags);
3271 /*
3272 * It's normal to see 2 calls in this path:
3273 * 1) XFER Rdy completion + CMD_T_ABORT
3274 * 2) TCM TMR - drain_state_list
3275 */
3276 ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
3277 "multiple abort. %p transport_state %x, t_state %x,"
3278 " se_cmd_flags %x \n", cmd, cmd->se_cmd.transport_state,
3279 cmd->se_cmd.t_state,cmd->se_cmd.se_cmd_flags);
3280 return EIO;
3281 }
3267 cmd->aborted = 1; 3282 cmd->aborted = 1;
3268 cmd->cmd_flags |= BIT_6; 3283 cmd->cmd_flags |= BIT_6;
3284 spin_unlock_irqrestore(&cmd->cmd_lock, flags);
3269 3285
3270 qlt_send_term_exchange(vha, cmd, &cmd->atio, 0); 3286 qlt_send_term_exchange(vha, cmd, &cmd->atio, 0, 1);
3287 return 0;
3271} 3288}
3272EXPORT_SYMBOL(qlt_abort_cmd); 3289EXPORT_SYMBOL(qlt_abort_cmd);
3273 3290
@@ -3282,6 +3299,9 @@ void qlt_free_cmd(struct qla_tgt_cmd *cmd)
3282 3299
3283 BUG_ON(cmd->cmd_in_wq); 3300 BUG_ON(cmd->cmd_in_wq);
3284 3301
3302 if (cmd->sg_mapped)
3303 qlt_unmap_sg(cmd->vha, cmd);
3304
3285 if (!cmd->q_full) 3305 if (!cmd->q_full)
3286 qlt_decr_num_pend_cmds(cmd->vha); 3306 qlt_decr_num_pend_cmds(cmd->vha);
3287 3307
@@ -3399,7 +3419,7 @@ static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio,
3399 term = 1; 3419 term = 1;
3400 3420
3401 if (term) 3421 if (term)
3402 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1); 3422 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0);
3403 3423
3404 return term; 3424 return term;
3405} 3425}
@@ -3580,12 +3600,13 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
3580 case CTIO_PORT_LOGGED_OUT: 3600 case CTIO_PORT_LOGGED_OUT:
3581 case CTIO_PORT_UNAVAILABLE: 3601 case CTIO_PORT_UNAVAILABLE:
3582 { 3602 {
3583 int logged_out = (status & 0xFFFF); 3603 int logged_out =
3604 (status & 0xFFFF) == CTIO_PORT_LOGGED_OUT;
3605
3584 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059, 3606 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
3585 "qla_target(%d): CTIO with %s status %x " 3607 "qla_target(%d): CTIO with %s status %x "
3586 "received (state %x, se_cmd %p)\n", vha->vp_idx, 3608 "received (state %x, se_cmd %p)\n", vha->vp_idx,
3587 (logged_out == CTIO_PORT_LOGGED_OUT) ? 3609 logged_out ? "PORT LOGGED OUT" : "PORT UNAVAILABLE",
3588 "PORT LOGGED OUT" : "PORT UNAVAILABLE",
3589 status, cmd->state, se_cmd); 3610 status, cmd->state, se_cmd);
3590 3611
3591 if (logged_out && cmd->sess) { 3612 if (logged_out && cmd->sess) {
@@ -3754,6 +3775,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
3754 goto out_term; 3775 goto out_term;
3755 } 3776 }
3756 3777
3778 spin_lock_init(&cmd->cmd_lock);
3757 cdb = &atio->u.isp24.fcp_cmnd.cdb[0]; 3779 cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
3758 cmd->se_cmd.tag = atio->u.isp24.exchange_addr; 3780 cmd->se_cmd.tag = atio->u.isp24.exchange_addr;
3759 cmd->unpacked_lun = scsilun_to_int( 3781 cmd->unpacked_lun = scsilun_to_int(
@@ -3796,7 +3818,7 @@ out_term:
3796 */ 3818 */
3797 cmd->cmd_flags |= BIT_2; 3819 cmd->cmd_flags |= BIT_2;
3798 spin_lock_irqsave(&ha->hardware_lock, flags); 3820 spin_lock_irqsave(&ha->hardware_lock, flags);
3799 qlt_send_term_exchange(vha, NULL, &cmd->atio, 1); 3821 qlt_send_term_exchange(vha, NULL, &cmd->atio, 1, 0);
3800 3822
3801 qlt_decr_num_pend_cmds(vha); 3823 qlt_decr_num_pend_cmds(vha);
3802 percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag); 3824 percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
@@ -3918,7 +3940,7 @@ static void qlt_create_sess_from_atio(struct work_struct *work)
3918 3940
3919out_term: 3941out_term:
3920 spin_lock_irqsave(&ha->hardware_lock, flags); 3942 spin_lock_irqsave(&ha->hardware_lock, flags);
3921 qlt_send_term_exchange(vha, NULL, &op->atio, 1); 3943 qlt_send_term_exchange(vha, NULL, &op->atio, 1, 0);
3922 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3944 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3923 kfree(op); 3945 kfree(op);
3924 3946
@@ -3982,7 +4004,8 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
3982 4004
3983 cmd->cmd_in_wq = 1; 4005 cmd->cmd_in_wq = 1;
3984 cmd->cmd_flags |= BIT_0; 4006 cmd->cmd_flags |= BIT_0;
3985 cmd->se_cmd.cpuid = -1; 4007 cmd->se_cmd.cpuid = ha->msix_count ?
4008 ha->tgt.rspq_vector_cpuid : WORK_CPU_UNBOUND;
3986 4009
3987 spin_lock(&vha->cmd_list_lock); 4010 spin_lock(&vha->cmd_list_lock);
3988 list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list); 4011 list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
@@ -3990,7 +4013,6 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
3990 4013
3991 INIT_WORK(&cmd->work, qlt_do_work); 4014 INIT_WORK(&cmd->work, qlt_do_work);
3992 if (ha->msix_count) { 4015 if (ha->msix_count) {
3993 cmd->se_cmd.cpuid = ha->tgt.rspq_vector_cpuid;
3994 if (cmd->atio.u.isp24.fcp_cmnd.rddata) 4016 if (cmd->atio.u.isp24.fcp_cmnd.rddata)
3995 queue_work_on(smp_processor_id(), qla_tgt_wq, 4017 queue_work_on(smp_processor_id(), qla_tgt_wq,
3996 &cmd->work); 4018 &cmd->work);
@@ -4771,7 +4793,7 @@ out_reject:
4771 dump_stack(); 4793 dump_stack();
4772 } else { 4794 } else {
4773 cmd->cmd_flags |= BIT_9; 4795 cmd->cmd_flags |= BIT_9;
4774 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1); 4796 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0);
4775 } 4797 }
4776 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4798 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4777} 4799}
@@ -4950,7 +4972,7 @@ static void qlt_prepare_srr_imm(struct scsi_qla_host *vha,
4950 sctio, sctio->srr_id); 4972 sctio, sctio->srr_id);
4951 list_del(&sctio->srr_list_entry); 4973 list_del(&sctio->srr_list_entry);
4952 qlt_send_term_exchange(vha, sctio->cmd, 4974 qlt_send_term_exchange(vha, sctio->cmd,
4953 &sctio->cmd->atio, 1); 4975 &sctio->cmd->atio, 1, 0);
4954 kfree(sctio); 4976 kfree(sctio);
4955 } 4977 }
4956 } 4978 }
@@ -5123,7 +5145,7 @@ static int __qlt_send_busy(struct scsi_qla_host *vha,
5123 atio->u.isp24.fcp_hdr.s_id); 5145 atio->u.isp24.fcp_hdr.s_id);
5124 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 5146 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
5125 if (!sess) { 5147 if (!sess) {
5126 qlt_send_term_exchange(vha, NULL, atio, 1); 5148 qlt_send_term_exchange(vha, NULL, atio, 1, 0);
5127 return 0; 5149 return 0;
5128 } 5150 }
5129 /* Sending marker isn't necessary, since we called from ISR */ 5151 /* Sending marker isn't necessary, since we called from ISR */
@@ -5406,7 +5428,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
5406#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */ 5428#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
5407 qlt_send_busy(vha, atio, SAM_STAT_BUSY); 5429 qlt_send_busy(vha, atio, SAM_STAT_BUSY);
5408#else 5430#else
5409 qlt_send_term_exchange(vha, NULL, atio, 1); 5431 qlt_send_term_exchange(vha, NULL, atio, 1, 0);
5410#endif 5432#endif
5411 5433
5412 if (!ha_locked) 5434 if (!ha_locked)
@@ -5523,7 +5545,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
5523#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */ 5545#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
5524 qlt_send_busy(vha, atio, 0); 5546 qlt_send_busy(vha, atio, 0);
5525#else 5547#else
5526 qlt_send_term_exchange(vha, NULL, atio, 1); 5548 qlt_send_term_exchange(vha, NULL, atio, 1, 0);
5527#endif 5549#endif
5528 } else { 5550 } else {
5529 if (tgt->tgt_stop) { 5551 if (tgt->tgt_stop) {
@@ -5532,7 +5554,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
5532 "command to target, sending TERM " 5554 "command to target, sending TERM "
5533 "EXCHANGE for rsp\n"); 5555 "EXCHANGE for rsp\n");
5534 qlt_send_term_exchange(vha, NULL, 5556 qlt_send_term_exchange(vha, NULL,
5535 atio, 1); 5557 atio, 1, 0);
5536 } else { 5558 } else {
5537 ql_dbg(ql_dbg_tgt, vha, 0xe060, 5559 ql_dbg(ql_dbg_tgt, vha, 0xe060,
5538 "qla_target(%d): Unable to send " 5560 "qla_target(%d): Unable to send "
@@ -5960,7 +5982,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
5960 return; 5982 return;
5961 5983
5962out_term: 5984out_term:
5963 qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 0); 5985 qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1, 0);
5964 if (sess) 5986 if (sess)
5965 ha->tgt.tgt_ops->put_sess(sess); 5987 ha->tgt.tgt_ops->put_sess(sess);
5966 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 5988 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
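The core of the qla_target.c change is making qlt_abort_cmd() idempotent: a new per-command spinlock protects the aborted flag, the first caller terminates the exchange, and later callers (a command can be aborted both from the XFER-ready completion path and from the TCM TMR path) just return an error. A compact userspace analogue, using a pthread mutex in place of the spinlock; the struct and function names are invented for the sketch:

#include <stdio.h>
#include <errno.h>
#include <pthread.h>

struct cmd {
	pthread_mutex_t lock;
	int aborted;
};

/* First call wins and does the real work; repeats are harmless no-ops. */
static int abort_cmd(struct cmd *c)
{
	pthread_mutex_lock(&c->lock);
	if (c->aborted) {
		pthread_mutex_unlock(&c->lock);
		return EIO;              /* already being torn down */
	}
	c->aborted = 1;
	pthread_mutex_unlock(&c->lock);

	printf("terminating exchange for cmd %p\n", (void *)c);
	return 0;
}

int main(void)
{
	struct cmd c = { PTHREAD_MUTEX_INITIALIZER, 0 };

	printf("first abort:  %d\n", abort_cmd(&c));   /* 0   */
	printf("second abort: %d\n", abort_cmd(&c));   /* EIO */
	return 0;
}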
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 71b2865ba3c8..22a6a767fe07 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -943,6 +943,36 @@ struct qla_tgt_sess {
943 qlt_plogi_ack_t *plogi_link[QLT_PLOGI_LINK_MAX]; 943 qlt_plogi_ack_t *plogi_link[QLT_PLOGI_LINK_MAX];
944}; 944};
945 945
946typedef enum {
947 /*
948 * BIT_0 - Atio Arrival / schedule to work
949 * BIT_1 - qlt_do_work
950 * BIT_2 - qlt_do work failed
951 * BIT_3 - xfer rdy/tcm_qla2xxx_write_pending
952 * BIT_4 - read respond/tcm_qla2xx_queue_data_in
953 * BIT_5 - status respond / tcm_qla2xx_queue_status
954 * BIT_6 - tcm request to abort/Term exchange.
955 * pre_xmit_response->qlt_send_term_exchange
956 * BIT_7 - SRR received (qlt_handle_srr->qlt_xmit_response)
957 * BIT_8 - SRR received (qlt_handle_srr->qlt_rdy_to_xfer)
958 * BIT_9 - SRR received (qla_handle_srr->qlt_send_term_exchange)
959 * BIT_10 - Data in - hanlde_data->tcm_qla2xxx_handle_data
960
961 * BIT_12 - good completion - qlt_ctio_do_completion -->free_cmd
962 * BIT_13 - Bad completion -
963 * qlt_ctio_do_completion --> qlt_term_ctio_exchange
964 * BIT_14 - Back end data received/sent.
965 * BIT_15 - SRR prepare ctio
966 * BIT_16 - complete free
967 * BIT_17 - flush - qlt_abort_cmd_on_host_reset
968 * BIT_18 - completion w/abort status
969 * BIT_19 - completion w/unknown status
970 * BIT_20 - tcm_qla2xxx_free_cmd
971 */
972 CMD_FLAG_DATA_WORK = BIT_11,
973 CMD_FLAG_DATA_WORK_FREE = BIT_21,
974} cmd_flags_t;
975
946struct qla_tgt_cmd { 976struct qla_tgt_cmd {
947 struct se_cmd se_cmd; 977 struct se_cmd se_cmd;
948 struct qla_tgt_sess *sess; 978 struct qla_tgt_sess *sess;
@@ -952,6 +982,7 @@ struct qla_tgt_cmd {
952 /* Sense buffer that will be mapped into outgoing status */ 982 /* Sense buffer that will be mapped into outgoing status */
953 unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER]; 983 unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
954 984
985 spinlock_t cmd_lock;
955 /* to save extra sess dereferences */ 986 /* to save extra sess dereferences */
956 unsigned int conf_compl_supported:1; 987 unsigned int conf_compl_supported:1;
957 unsigned int sg_mapped:1; 988 unsigned int sg_mapped:1;
@@ -986,30 +1017,8 @@ struct qla_tgt_cmd {
986 1017
987 uint64_t jiffies_at_alloc; 1018 uint64_t jiffies_at_alloc;
988 uint64_t jiffies_at_free; 1019 uint64_t jiffies_at_free;
989 /* BIT_0 - Atio Arrival / schedule to work 1020
990 * BIT_1 - qlt_do_work 1021 cmd_flags_t cmd_flags;
991 * BIT_2 - qlt_do work failed
992 * BIT_3 - xfer rdy/tcm_qla2xxx_write_pending
993 * BIT_4 - read respond/tcm_qla2xx_queue_data_in
994 * BIT_5 - status respond / tcm_qla2xx_queue_status
995 * BIT_6 - tcm request to abort/Term exchange.
996 * pre_xmit_response->qlt_send_term_exchange
997 * BIT_7 - SRR received (qlt_handle_srr->qlt_xmit_response)
998 * BIT_8 - SRR received (qlt_handle_srr->qlt_rdy_to_xfer)
999 * BIT_9 - SRR received (qla_handle_srr->qlt_send_term_exchange)
1000 * BIT_10 - Data in - hanlde_data->tcm_qla2xxx_handle_data
1001 * BIT_11 - Data actually going to TCM : tcm_qla2xx_handle_data_work
1002 * BIT_12 - good completion - qlt_ctio_do_completion -->free_cmd
1003 * BIT_13 - Bad completion -
1004 * qlt_ctio_do_completion --> qlt_term_ctio_exchange
1005 * BIT_14 - Back end data received/sent.
1006 * BIT_15 - SRR prepare ctio
1007 * BIT_16 - complete free
1008 * BIT_17 - flush - qlt_abort_cmd_on_host_reset
1009 * BIT_18 - completion w/abort status
1010 * BIT_19 - completion w/unknown status
1011 */
1012 uint32_t cmd_flags;
1013}; 1022};
1014 1023
1015struct qla_tgt_sess_work_param { 1024struct qla_tgt_sess_work_param {
@@ -1148,7 +1157,7 @@ static inline void sid_to_portid(const uint8_t *s_id, port_id_t *p)
1148extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *); 1157extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *);
1149extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *); 1158extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *);
1150extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t); 1159extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t);
1151extern void qlt_abort_cmd(struct qla_tgt_cmd *); 1160extern int qlt_abort_cmd(struct qla_tgt_cmd *);
1152extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *); 1161extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *);
1153extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *); 1162extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *);
1154extern void qlt_free_cmd(struct qla_tgt_cmd *cmd); 1163extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
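qla_target.h turns the previously comment-only cmd_flags bits into a typed enum and names the two bits the abort path cares about: CMD_FLAG_DATA_WORK (the data work item ran) and CMD_FLAG_DATA_WORK_FREE (that work item also freed the command). The companion DATA_WORK_NOT_FREE() test in tcm_qla2xxx.c then reads as "the work ran but did not free the command, so the abort path must". A minimal sketch of that flag pattern, with local FLAG_* names:

#include <stdio.h>
#include <stdint.h>

enum cmd_flag {
	FLAG_DATA_WORK      = 1u << 11,   /* data work item executed         */
	FLAG_DATA_WORK_FREE = 1u << 21,   /* ...and it already freed the cmd */
};

/* "Work ran, free still pending" -- mirrors DATA_WORK_NOT_FREE(). */
#define DATA_WORK_NOT_FREE(f) \
	(((f) & (FLAG_DATA_WORK | FLAG_DATA_WORK_FREE)) == FLAG_DATA_WORK)

int main(void)
{
	uint32_t a = FLAG_DATA_WORK;                        /* abort must free */
	uint32_t b = FLAG_DATA_WORK | FLAG_DATA_WORK_FREE;  /* nothing to do   */

	printf("a needs free: %d\n", DATA_WORK_NOT_FREE(a));  /* 1 */
	printf("b needs free: %d\n", DATA_WORK_NOT_FREE(b));  /* 0 */
	return 0;
}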
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index ddbe2e7ac14d..c3e622524604 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -395,6 +395,10 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
395 if (ent->t263.queue_type == T263_QUEUE_TYPE_REQ) { 395 if (ent->t263.queue_type == T263_QUEUE_TYPE_REQ) {
396 for (i = 0; i < vha->hw->max_req_queues; i++) { 396 for (i = 0; i < vha->hw->max_req_queues; i++) {
397 struct req_que *req = vha->hw->req_q_map[i]; 397 struct req_que *req = vha->hw->req_q_map[i];
398
399 if (!test_bit(i, vha->hw->req_qid_map))
400 continue;
401
398 if (req || !buf) { 402 if (req || !buf) {
399 length = req ? 403 length = req ?
400 req->length : REQUEST_ENTRY_CNT_24XX; 404 req->length : REQUEST_ENTRY_CNT_24XX;
@@ -408,6 +412,10 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
408 } else if (ent->t263.queue_type == T263_QUEUE_TYPE_RSP) { 412 } else if (ent->t263.queue_type == T263_QUEUE_TYPE_RSP) {
409 for (i = 0; i < vha->hw->max_rsp_queues; i++) { 413 for (i = 0; i < vha->hw->max_rsp_queues; i++) {
410 struct rsp_que *rsp = vha->hw->rsp_q_map[i]; 414 struct rsp_que *rsp = vha->hw->rsp_q_map[i];
415
416 if (!test_bit(i, vha->hw->rsp_qid_map))
417 continue;
418
411 if (rsp || !buf) { 419 if (rsp || !buf) {
412 length = rsp ? 420 length = rsp ?
413 rsp->length : RESPONSE_ENTRY_CNT_MQ; 421 rsp->length : RESPONSE_ENTRY_CNT_MQ;
@@ -634,6 +642,10 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
634 if (ent->t274.queue_type == T274_QUEUE_TYPE_REQ_SHAD) { 642 if (ent->t274.queue_type == T274_QUEUE_TYPE_REQ_SHAD) {
635 for (i = 0; i < vha->hw->max_req_queues; i++) { 643 for (i = 0; i < vha->hw->max_req_queues; i++) {
636 struct req_que *req = vha->hw->req_q_map[i]; 644 struct req_que *req = vha->hw->req_q_map[i];
645
646 if (!test_bit(i, vha->hw->req_qid_map))
647 continue;
648
637 if (req || !buf) { 649 if (req || !buf) {
638 qla27xx_insert16(i, buf, len); 650 qla27xx_insert16(i, buf, len);
639 qla27xx_insert16(1, buf, len); 651 qla27xx_insert16(1, buf, len);
@@ -645,6 +657,10 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
645 } else if (ent->t274.queue_type == T274_QUEUE_TYPE_RSP_SHAD) { 657 } else if (ent->t274.queue_type == T274_QUEUE_TYPE_RSP_SHAD) {
646 for (i = 0; i < vha->hw->max_rsp_queues; i++) { 658 for (i = 0; i < vha->hw->max_rsp_queues; i++) {
647 struct rsp_que *rsp = vha->hw->rsp_q_map[i]; 659 struct rsp_que *rsp = vha->hw->rsp_q_map[i];
660
661 if (!test_bit(i, vha->hw->rsp_qid_map))
662 continue;
663
648 if (rsp || !buf) { 664 if (rsp || !buf) {
649 qla27xx_insert16(i, buf, len); 665 qla27xx_insert16(i, buf, len);
650 qla27xx_insert16(1, buf, len); 666 qla27xx_insert16(1, buf, len);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index faf0a126627f..1808a01cfb7e 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -298,6 +298,10 @@ static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
298{ 298{
299 cmd->vha->tgt_counters.core_qla_free_cmd++; 299 cmd->vha->tgt_counters.core_qla_free_cmd++;
300 cmd->cmd_in_wq = 1; 300 cmd->cmd_in_wq = 1;
301
302 BUG_ON(cmd->cmd_flags & BIT_20);
303 cmd->cmd_flags |= BIT_20;
304
301 INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free); 305 INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free);
302 queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work); 306 queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work);
303} 307}
@@ -374,6 +378,20 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
374{ 378{
375 struct qla_tgt_cmd *cmd = container_of(se_cmd, 379 struct qla_tgt_cmd *cmd = container_of(se_cmd,
376 struct qla_tgt_cmd, se_cmd); 380 struct qla_tgt_cmd, se_cmd);
381
382 if (cmd->aborted) {
383 /* Cmd can loop during Q-full. tcm_qla2xxx_aborted_task
384 * can get ahead of this cmd. tcm_qla2xxx_aborted_task
385 * already kick start the free.
386 */
387 pr_debug("write_pending aborted cmd[%p] refcount %d "
388 "transport_state %x, t_state %x, se_cmd_flags %x\n",
389 cmd,cmd->se_cmd.cmd_kref.refcount.counter,
390 cmd->se_cmd.transport_state,
391 cmd->se_cmd.t_state,
392 cmd->se_cmd.se_cmd_flags);
393 return 0;
394 }
377 cmd->cmd_flags |= BIT_3; 395 cmd->cmd_flags |= BIT_3;
378 cmd->bufflen = se_cmd->data_length; 396 cmd->bufflen = se_cmd->data_length;
379 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); 397 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
@@ -405,7 +423,7 @@ static int tcm_qla2xxx_write_pending_status(struct se_cmd *se_cmd)
405 se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) { 423 se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) {
406 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); 424 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
407 wait_for_completion_timeout(&se_cmd->t_transport_stop_comp, 425 wait_for_completion_timeout(&se_cmd->t_transport_stop_comp,
408 3 * HZ); 426 50);
409 return 0; 427 return 0;
410 } 428 }
411 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); 429 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
@@ -444,6 +462,9 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
444 if (bidi) 462 if (bidi)
445 flags |= TARGET_SCF_BIDI_OP; 463 flags |= TARGET_SCF_BIDI_OP;
446 464
465 if (se_cmd->cpuid != WORK_CPU_UNBOUND)
466 flags |= TARGET_SCF_USE_CPUID;
467
447 sess = cmd->sess; 468 sess = cmd->sess;
448 if (!sess) { 469 if (!sess) {
449 pr_err("Unable to locate struct qla_tgt_sess from qla_tgt_cmd\n"); 470 pr_err("Unable to locate struct qla_tgt_sess from qla_tgt_cmd\n");
@@ -465,13 +486,25 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
465static void tcm_qla2xxx_handle_data_work(struct work_struct *work) 486static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
466{ 487{
467 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); 488 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
489 unsigned long flags;
468 490
469 /* 491 /*
470 * Ensure that the complete FCP WRITE payload has been received. 492 * Ensure that the complete FCP WRITE payload has been received.
471 * Otherwise return an exception via CHECK_CONDITION status. 493 * Otherwise return an exception via CHECK_CONDITION status.
472 */ 494 */
473 cmd->cmd_in_wq = 0; 495 cmd->cmd_in_wq = 0;
474 cmd->cmd_flags |= BIT_11; 496
497 spin_lock_irqsave(&cmd->cmd_lock, flags);
498 cmd->cmd_flags |= CMD_FLAG_DATA_WORK;
499 if (cmd->aborted) {
500 cmd->cmd_flags |= CMD_FLAG_DATA_WORK_FREE;
501 spin_unlock_irqrestore(&cmd->cmd_lock, flags);
502
503 tcm_qla2xxx_free_cmd(cmd);
504 return;
505 }
506 spin_unlock_irqrestore(&cmd->cmd_lock, flags);
507
475 cmd->vha->tgt_counters.qla_core_ret_ctio++; 508 cmd->vha->tgt_counters.qla_core_ret_ctio++;
476 if (!cmd->write_data_transferred) { 509 if (!cmd->write_data_transferred) {
477 /* 510 /*
@@ -546,6 +579,20 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
546 struct qla_tgt_cmd *cmd = container_of(se_cmd, 579 struct qla_tgt_cmd *cmd = container_of(se_cmd,
547 struct qla_tgt_cmd, se_cmd); 580 struct qla_tgt_cmd, se_cmd);
548 581
582 if (cmd->aborted) {
583 /* Cmd can loop during Q-full. tcm_qla2xxx_aborted_task
584 * can get ahead of this cmd. tcm_qla2xxx_aborted_task
585 * already kick start the free.
586 */
587 pr_debug("queue_data_in aborted cmd[%p] refcount %d "
588 "transport_state %x, t_state %x, se_cmd_flags %x\n",
589 cmd,cmd->se_cmd.cmd_kref.refcount.counter,
590 cmd->se_cmd.transport_state,
591 cmd->se_cmd.t_state,
592 cmd->se_cmd.se_cmd_flags);
593 return 0;
594 }
595
549 cmd->cmd_flags |= BIT_4; 596 cmd->cmd_flags |= BIT_4;
550 cmd->bufflen = se_cmd->data_length; 597 cmd->bufflen = se_cmd->data_length;
551 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); 598 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
@@ -637,11 +684,34 @@ static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
637 qlt_xmit_tm_rsp(mcmd); 684 qlt_xmit_tm_rsp(mcmd);
638} 685}
639 686
687
688#define DATA_WORK_NOT_FREE(_flags) \
689 (( _flags & (CMD_FLAG_DATA_WORK|CMD_FLAG_DATA_WORK_FREE)) == \
690 CMD_FLAG_DATA_WORK)
640static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd) 691static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
641{ 692{
642 struct qla_tgt_cmd *cmd = container_of(se_cmd, 693 struct qla_tgt_cmd *cmd = container_of(se_cmd,
643 struct qla_tgt_cmd, se_cmd); 694 struct qla_tgt_cmd, se_cmd);
644 qlt_abort_cmd(cmd); 695 unsigned long flags;
696
697 if (qlt_abort_cmd(cmd))
698 return;
699
700 spin_lock_irqsave(&cmd->cmd_lock, flags);
701 if ((cmd->state == QLA_TGT_STATE_NEW)||
702 ((cmd->state == QLA_TGT_STATE_DATA_IN) &&
703 DATA_WORK_NOT_FREE(cmd->cmd_flags)) ) {
704
705 cmd->cmd_flags |= CMD_FLAG_DATA_WORK_FREE;
706 spin_unlock_irqrestore(&cmd->cmd_lock, flags);
707 /* Cmd have not reached firmware.
708 * Use this trigger to free it. */
709 tcm_qla2xxx_free_cmd(cmd);
710 return;
711 }
712 spin_unlock_irqrestore(&cmd->cmd_lock, flags);
713 return;
714
645} 715}
646 716
647static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *, 717static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 47b9d13f97b8..bbfbfd9e5aa3 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -205,6 +205,8 @@ static struct {
205 {"Intel", "Multi-Flex", NULL, BLIST_NO_RSOC}, 205 {"Intel", "Multi-Flex", NULL, BLIST_NO_RSOC},
206 {"iRiver", "iFP Mass Driver", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36}, 206 {"iRiver", "iFP Mass Driver", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
207 {"LASOUND", "CDX7405", "3.10", BLIST_MAX5LUN | BLIST_SINGLELUN}, 207 {"LASOUND", "CDX7405", "3.10", BLIST_MAX5LUN | BLIST_SINGLELUN},
208 {"Marvell", "Console", NULL, BLIST_SKIP_VPD_PAGES},
209 {"Marvell", "91xx Config", "1.01", BLIST_SKIP_VPD_PAGES},
208 {"MATSHITA", "PD-1", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, 210 {"MATSHITA", "PD-1", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
209 {"MATSHITA", "DMC-LC5", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36}, 211 {"MATSHITA", "DMC-LC5", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
210 {"MATSHITA", "DMC-LC40", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36}, 212 {"MATSHITA", "DMC-LC40", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index fa6b2c4eb7a2..8c6e31874171 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1344,6 +1344,7 @@ scsi_prep_return(struct request_queue *q, struct request *req, int ret)
1344 1344
1345 switch (ret) { 1345 switch (ret) {
1346 case BLKPREP_KILL: 1346 case BLKPREP_KILL:
1347 case BLKPREP_INVALID:
1347 req->errors = DID_NO_CONNECT << 16; 1348 req->errors = DID_NO_CONNECT << 16;
1348 /* release the command and kill it */ 1349 /* release the command and kill it */
1349 if (req->special) { 1350 if (req->special) {
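scsi_lib.c teaches scsi_prep_return() that the new BLKPREP_INVALID status (used by the sd.c hunks further down for requests the device simply cannot execute) is torn down the same way as BLKPREP_KILL; the distinction matters elsewhere, but the midlayer cleanup is shared. A sketch of handling two statuses through a shared case label, with invented PREP_* names:

#include <stdio.h>

enum prep_ret { PREP_OK, PREP_KILL, PREP_INVALID, PREP_DEFER };

/* Both "kill" and "invalid" tear the request down; only the error code
 * reported upward may differ elsewhere. */
static const char *prep_action(enum prep_ret ret)
{
	switch (ret) {
	case PREP_KILL:
	case PREP_INVALID:          /* shares the KILL teardown path */
		return "fail request, release command";
	case PREP_DEFER:
		return "requeue and retry later";
	default:
		return "dispatch";
	}
}

int main(void)
{
	printf("KILL:    %s\n", prep_action(PREP_KILL));
	printf("INVALID: %s\n", prep_action(PREP_INVALID));
	printf("DEFER:   %s\n", prep_action(PREP_DEFER));
	return 0;
}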
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 4f18a851e2c7..00bc7218a7f8 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1272,16 +1272,18 @@ static void __scsi_remove_target(struct scsi_target *starget)
1272void scsi_remove_target(struct device *dev) 1272void scsi_remove_target(struct device *dev)
1273{ 1273{
1274 struct Scsi_Host *shost = dev_to_shost(dev->parent); 1274 struct Scsi_Host *shost = dev_to_shost(dev->parent);
1275 struct scsi_target *starget; 1275 struct scsi_target *starget, *last_target = NULL;
1276 unsigned long flags; 1276 unsigned long flags;
1277 1277
1278restart: 1278restart:
1279 spin_lock_irqsave(shost->host_lock, flags); 1279 spin_lock_irqsave(shost->host_lock, flags);
1280 list_for_each_entry(starget, &shost->__targets, siblings) { 1280 list_for_each_entry(starget, &shost->__targets, siblings) {
1281 if (starget->state == STARGET_DEL) 1281 if (starget->state == STARGET_DEL ||
1282 starget == last_target)
1282 continue; 1283 continue;
1283 if (starget->dev.parent == dev || &starget->dev == dev) { 1284 if (starget->dev.parent == dev || &starget->dev == dev) {
1284 kref_get(&starget->reap_ref); 1285 kref_get(&starget->reap_ref);
1286 last_target = starget;
1285 spin_unlock_irqrestore(shost->host_lock, flags); 1287 spin_unlock_irqrestore(shost->host_lock, flags);
1286 __scsi_remove_target(starget); 1288 __scsi_remove_target(starget);
1287 scsi_target_reap(starget); 1289 scsi_target_reap(starget);
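scsi_remove_target() has to drop the host lock while it removes each target and then rescan the list from the top; without a marker it can keep picking the same not-yet-deleted target. The fix remembers the last target handed to __scsi_remove_target() and skips it on the next pass. A toy restart-scan loop with the same "remember the last processed element" guard; the array stands in for the host's target list, and deletion is synchronous here where in the driver it may still be in flight:

#include <stdio.h>
#include <stddef.h>

struct target { int id; int deleted; };

int main(void)
{
	struct target targets[] = { {1, 0}, {2, 0}, {3, 0} };
	const size_t n = sizeof(targets) / sizeof(targets[0]);
	struct target *last = NULL;

restart:
	for (size_t i = 0; i < n; i++) {
		struct target *t = &targets[i];

		/* Skip finished targets and the one we just handed off. */
		if (t->deleted || t == last)
			continue;

		last = t;
		printf("removing target %d\n", t->id);  /* done without the lock */
		t->deleted = 1;                         /* synchronous in this toy */
		goto restart;                           /* rescan from the top    */
	}
	return 0;
}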
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 4e08d1cd704d..d749da765df1 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -761,7 +761,7 @@ static int sd_setup_discard_cmnd(struct scsi_cmnd *cmd)
761 break; 761 break;
762 762
763 default: 763 default:
764 ret = BLKPREP_KILL; 764 ret = BLKPREP_INVALID;
765 goto out; 765 goto out;
766 } 766 }
767 767
@@ -839,7 +839,7 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
839 int ret; 839 int ret;
840 840
841 if (sdkp->device->no_write_same) 841 if (sdkp->device->no_write_same)
842 return BLKPREP_KILL; 842 return BLKPREP_INVALID;
843 843
844 BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size); 844 BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size);
845 845
@@ -2893,7 +2893,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
2893 sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS && 2893 sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS &&
2894 sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_CACHE_SIZE) 2894 sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_CACHE_SIZE)
2895 rw_max = q->limits.io_opt = 2895 rw_max = q->limits.io_opt =
2896 logical_to_sectors(sdp, sdkp->opt_xfer_blocks); 2896 sdkp->opt_xfer_blocks * sdp->sector_size;
2897 else 2897 else
2898 rw_max = BLK_DEF_MAX_SECTORS; 2898 rw_max = BLK_DEF_MAX_SECTORS;
2899 2899
@@ -3268,8 +3268,8 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
3268 struct scsi_disk *sdkp = dev_get_drvdata(dev); 3268 struct scsi_disk *sdkp = dev_get_drvdata(dev);
3269 int ret = 0; 3269 int ret = 0;
3270 3270
3271 if (!sdkp) 3271 if (!sdkp) /* E.g.: runtime suspend following sd_remove() */
3272 return 0; /* this can happen */ 3272 return 0;
3273 3273
3274 if (sdkp->WCE && sdkp->media_present) { 3274 if (sdkp->WCE && sdkp->media_present) {
3275 sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n"); 3275 sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
@@ -3308,6 +3308,9 @@ static int sd_resume(struct device *dev)
3308{ 3308{
3309 struct scsi_disk *sdkp = dev_get_drvdata(dev); 3309 struct scsi_disk *sdkp = dev_get_drvdata(dev);
3310 3310
3311 if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */
3312 return 0;
3313
3311 if (!sdkp->device->manage_start_stop) 3314 if (!sdkp->device->manage_start_stop)
3312 return 0; 3315 return 0;
3313 3316
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 503ab8b46c0b..5e820674432c 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1261,7 +1261,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
1261 } 1261 }
1262 1262
1263 sfp->mmap_called = 1; 1263 sfp->mmap_called = 1;
1264 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 1264 vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
1265 vma->vm_private_data = sfp; 1265 vma->vm_private_data = sfp;
1266 vma->vm_ops = &sg_mmap_vm_ops; 1266 vma->vm_ops = &sg_mmap_vm_ops;
1267 return 0; 1267 return 0;
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 8bd54a64efd6..64c867405ad4 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -144,6 +144,9 @@ static int sr_runtime_suspend(struct device *dev)
144{ 144{
145 struct scsi_cd *cd = dev_get_drvdata(dev); 145 struct scsi_cd *cd = dev_get_drvdata(dev);
146 146
147 if (!cd) /* E.g.: runtime suspend following sr_remove() */
148 return 0;
149
147 if (cd->media_present) 150 if (cd->media_present)
148 return -EBUSY; 151 return -EBUSY;
149 else 152 else
@@ -985,6 +988,7 @@ static int sr_remove(struct device *dev)
985 scsi_autopm_get_device(cd->device); 988 scsi_autopm_get_device(cd->device);
986 989
987 del_gendisk(cd->disk); 990 del_gendisk(cd->disk);
991 dev_set_drvdata(dev, NULL);
988 992
989 mutex_lock(&sr_ref_mutex); 993 mutex_lock(&sr_ref_mutex);
990 kref_put(&cd->kref, sr_kref_release); 994 kref_put(&cd->kref, sr_kref_release);
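The sd.c and sr.c hunks are two halves of the same guard: sr_remove() now clears the driver data pointer, and the runtime-PM callbacks in both drivers return early when that pointer is NULL (suspend racing with remove, or resume running before probe has set it). A stripped-down model of the pattern, with a fake device struct standing in for struct device:

#include <stdio.h>
#include <stdlib.h>

struct fake_dev   { void *drvdata; };
struct disk_state { int media_present; };

static int runtime_suspend(struct fake_dev *dev)
{
	struct disk_state *st = dev->drvdata;

	if (!st)                 /* e.g. runtime suspend following remove() */
		return 0;
	return st->media_present ? -1 /* -EBUSY */ : 0;
}

static void device_remove(struct fake_dev *dev)
{
	free(dev->drvdata);
	dev->drvdata = NULL;     /* what the sr_remove() hunk adds */
}

int main(void)
{
	struct fake_dev dev = { .drvdata = calloc(1, sizeof(struct disk_state)) };

	printf("suspend before remove: %d\n", runtime_suspend(&dev));
	device_remove(&dev);
	printf("suspend after remove:  %d\n", runtime_suspend(&dev));  /* safe */
	return 0;
}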
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 55627d097873..292c04eec9ad 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -42,6 +42,7 @@
42#include <scsi/scsi_devinfo.h> 42#include <scsi/scsi_devinfo.h>
43#include <scsi/scsi_dbg.h> 43#include <scsi/scsi_dbg.h>
44#include <scsi/scsi_transport_fc.h> 44#include <scsi/scsi_transport_fc.h>
45#include <scsi/scsi_transport.h>
45 46
46/* 47/*
47 * All wire protocol details (storage protocol between the guest and the host) 48 * All wire protocol details (storage protocol between the guest and the host)
@@ -477,19 +478,18 @@ struct hv_host_device {
477struct storvsc_scan_work { 478struct storvsc_scan_work {
478 struct work_struct work; 479 struct work_struct work;
479 struct Scsi_Host *host; 480 struct Scsi_Host *host;
480 uint lun; 481 u8 lun;
482 u8 tgt_id;
481}; 483};
482 484
483static void storvsc_device_scan(struct work_struct *work) 485static void storvsc_device_scan(struct work_struct *work)
484{ 486{
485 struct storvsc_scan_work *wrk; 487 struct storvsc_scan_work *wrk;
486 uint lun;
487 struct scsi_device *sdev; 488 struct scsi_device *sdev;
488 489
489 wrk = container_of(work, struct storvsc_scan_work, work); 490 wrk = container_of(work, struct storvsc_scan_work, work);
490 lun = wrk->lun;
491 491
492 sdev = scsi_device_lookup(wrk->host, 0, 0, lun); 492 sdev = scsi_device_lookup(wrk->host, 0, wrk->tgt_id, wrk->lun);
493 if (!sdev) 493 if (!sdev)
494 goto done; 494 goto done;
495 scsi_rescan_device(&sdev->sdev_gendev); 495 scsi_rescan_device(&sdev->sdev_gendev);
@@ -540,7 +540,7 @@ static void storvsc_remove_lun(struct work_struct *work)
540 if (!scsi_host_get(wrk->host)) 540 if (!scsi_host_get(wrk->host))
541 goto done; 541 goto done;
542 542
543 sdev = scsi_device_lookup(wrk->host, 0, 0, wrk->lun); 543 sdev = scsi_device_lookup(wrk->host, 0, wrk->tgt_id, wrk->lun);
544 544
545 if (sdev) { 545 if (sdev) {
546 scsi_remove_device(sdev); 546 scsi_remove_device(sdev);
@@ -940,6 +940,7 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
940 940
941 wrk->host = host; 941 wrk->host = host;
942 wrk->lun = vm_srb->lun; 942 wrk->lun = vm_srb->lun;
943 wrk->tgt_id = vm_srb->target_id;
943 INIT_WORK(&wrk->work, process_err_fn); 944 INIT_WORK(&wrk->work, process_err_fn);
944 schedule_work(&wrk->work); 945 schedule_work(&wrk->work);
945} 946}
@@ -1770,6 +1771,11 @@ static int __init storvsc_drv_init(void)
1770 fc_transport_template = fc_attach_transport(&fc_transport_functions); 1771 fc_transport_template = fc_attach_transport(&fc_transport_functions);
1771 if (!fc_transport_template) 1772 if (!fc_transport_template)
1772 return -ENODEV; 1773 return -ENODEV;
1774
1775 /*
1776 * Install Hyper-V specific timeout handler.
1777 */
1778 fc_transport_template->eh_timed_out = storvsc_eh_timed_out;
1773#endif 1779#endif
1774 1780
1775 ret = vmbus_driver_register(&storvsc_drv); 1781 ret = vmbus_driver_register(&storvsc_drv);
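The storvsc change widens the deferred scan/remove work to carry the target id as well as the LUN, so the scsi_device_lookup() calls are no longer hard-wired to target 0; it also narrows lun to u8 and installs a driver-specific eh_timed_out handler in the FC transport template. A short sketch of the first part: the work item must capture every coordinate needed to find the device later, because nothing else identifies it by the time the work runs. The struct and lookup function below are invented for the example:

#include <stdio.h>
#include <stdint.h>

struct scan_work {
	unsigned int channel;
	uint8_t      tgt_id;   /* previously missing: lookups always used target 0 */
	uint8_t      lun;
};

/* Stand-in for scsi_device_lookup(host, channel, id, lun). */
static void lookup_and_rescan(const struct scan_work *w)
{
	printf("rescan %u:%u:%u\n", w->channel,
	       (unsigned)w->tgt_id, (unsigned)w->lun);
}

int main(void)
{
	/* Filled in from the error response, then handed to a workqueue. */
	struct scan_work wrk = { .channel = 0, .tgt_id = 2, .lun = 1 };

	lookup_and_rescan(&wrk);   /* runs later, with only wrk to go on */
	return 0;
}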
diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c
index 91a003011acf..a9bac3bf20de 100644
--- a/drivers/sh/pm_runtime.c
+++ b/drivers/sh/pm_runtime.c
@@ -34,7 +34,7 @@ static struct pm_clk_notifier_block platform_bus_notifier = {
34 34
35static int __init sh_pm_runtime_init(void) 35static int __init sh_pm_runtime_init(void)
36{ 36{
37 if (IS_ENABLED(CONFIG_ARCH_SHMOBILE)) { 37 if (IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_ARCH_SHMOBILE)) {
38 if (!of_find_compatible_node(NULL, NULL, 38 if (!of_find_compatible_node(NULL, NULL,
39 "renesas,cpg-mstp-clocks")) 39 "renesas,cpg-mstp-clocks"))
40 return 0; 40 return 0;
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index aebad36391c9..8feac599e9ab 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -1571,6 +1571,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
1571 1571
1572 as->use_cs_gpios = true; 1572 as->use_cs_gpios = true;
1573 if (atmel_spi_is_v2(as) && 1573 if (atmel_spi_is_v2(as) &&
1574 pdev->dev.of_node &&
1574 !of_get_property(pdev->dev.of_node, "cs-gpios", NULL)) { 1575 !of_get_property(pdev->dev.of_node, "cs-gpios", NULL)) {
1575 as->use_cs_gpios = false; 1576 as->use_cs_gpios = false;
1576 master->num_chipselect = 4; 1577 master->num_chipselect = 4;
diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
index 7de6f8472a81..ecc73c0a97cf 100644
--- a/drivers/spi/spi-bcm2835aux.c
+++ b/drivers/spi/spi-bcm2835aux.c
@@ -73,8 +73,8 @@
73 73
74/* Bitfields in CNTL1 */ 74/* Bitfields in CNTL1 */
75#define BCM2835_AUX_SPI_CNTL1_CSHIGH 0x00000700 75#define BCM2835_AUX_SPI_CNTL1_CSHIGH 0x00000700
76#define BCM2835_AUX_SPI_CNTL1_IDLE 0x00000080 76#define BCM2835_AUX_SPI_CNTL1_TXEMPTY 0x00000080
77#define BCM2835_AUX_SPI_CNTL1_TXEMPTY 0x00000040 77#define BCM2835_AUX_SPI_CNTL1_IDLE 0x00000040
78#define BCM2835_AUX_SPI_CNTL1_MSBF_IN 0x00000002 78#define BCM2835_AUX_SPI_CNTL1_MSBF_IN 0x00000002
79#define BCM2835_AUX_SPI_CNTL1_KEEP_IN 0x00000001 79#define BCM2835_AUX_SPI_CNTL1_KEEP_IN 0x00000001
80 80
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index 7fd6a4c009d2..7cb0c1921495 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -84,7 +84,7 @@ struct fsl_espi_transfer {
84/* SPCOM register values */ 84/* SPCOM register values */
85#define SPCOM_CS(x) ((x) << 30) 85#define SPCOM_CS(x) ((x) << 30)
86#define SPCOM_TRANLEN(x) ((x) << 0) 86#define SPCOM_TRANLEN(x) ((x) << 0)
87#define SPCOM_TRANLEN_MAX 0xFFFF /* Max transaction length */ 87#define SPCOM_TRANLEN_MAX 0x10000 /* Max transaction length */
88 88
89#define AUTOSUSPEND_TIMEOUT 2000 89#define AUTOSUSPEND_TIMEOUT 2000
90 90
@@ -233,7 +233,7 @@ static int fsl_espi_bufs(struct spi_device *spi, struct spi_transfer *t)
233 reinit_completion(&mpc8xxx_spi->done); 233 reinit_completion(&mpc8xxx_spi->done);
234 234
235 /* Set SPCOM[CS] and SPCOM[TRANLEN] field */ 235 /* Set SPCOM[CS] and SPCOM[TRANLEN] field */
236 if ((t->len - 1) > SPCOM_TRANLEN_MAX) { 236 if (t->len > SPCOM_TRANLEN_MAX) {
237 dev_err(mpc8xxx_spi->dev, "Transaction length (%d)" 237 dev_err(mpc8xxx_spi->dev, "Transaction length (%d)"
238 " beyond the SPCOM[TRANLEN] field\n", t->len); 238 " beyond the SPCOM[TRANLEN] field\n", t->len);
239 return -EINVAL; 239 return -EINVAL;
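As shown in these two hunks, the eSPI limit check is re-expressed: the constant now names the true maximum transaction length (0x10000 bytes) and the code compares t->len against it directly, instead of the equivalent but harder-to-read (t->len - 1) > 0xFFFF. For any length of at least one byte the two forms reject exactly the same transfers, which the tiny check below confirms:

#include <assert.h>
#include <stdio.h>

#define TRANLEN_MAX_OLD 0xFFFFu    /* old constant, compared against len - 1 */
#define TRANLEN_MAX_NEW 0x10000u   /* new constant, compared against len     */

int main(void)
{
	/* Both forms reject the same transfer lengths for any len >= 1. */
	for (unsigned int len = 1; len <= 0x20000u; len++) {
		int old_reject = (len - 1) > TRANLEN_MAX_OLD;
		int new_reject = len > TRANLEN_MAX_NEW;

		assert(old_reject == new_reject);
	}
	printf("checks agree; limit is %u bytes\n", TRANLEN_MAX_NEW);
	return 0;
}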
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index d98c33cb64f9..c688efa95e29 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -204,8 +204,8 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
204{ 204{
205 struct spi_imx_data *spi_imx = spi_master_get_devdata(master); 205 struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
206 206
207 if (spi_imx->dma_is_inited && 207 if (spi_imx->dma_is_inited && transfer->len >= spi_imx->wml &&
208 transfer->len > spi_imx->wml * sizeof(u32)) 208 (transfer->len % spi_imx->wml) == 0)
209 return true; 209 return true;
210 return false; 210 return false;
211} 211}
@@ -919,8 +919,6 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
919 struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL; 919 struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
920 int ret; 920 int ret;
921 unsigned long timeout; 921 unsigned long timeout;
922 u32 dma;
923 int left;
924 struct spi_master *master = spi_imx->bitbang.master; 922 struct spi_master *master = spi_imx->bitbang.master;
925 struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg; 923 struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;
926 924
@@ -929,7 +927,7 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
929 tx->sgl, tx->nents, DMA_MEM_TO_DEV, 927 tx->sgl, tx->nents, DMA_MEM_TO_DEV,
930 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 928 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
931 if (!desc_tx) 929 if (!desc_tx)
932 goto no_dma; 930 goto tx_nodma;
933 931
934 desc_tx->callback = spi_imx_dma_tx_callback; 932 desc_tx->callback = spi_imx_dma_tx_callback;
935 desc_tx->callback_param = (void *)spi_imx; 933 desc_tx->callback_param = (void *)spi_imx;
@@ -941,7 +939,7 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
941 rx->sgl, rx->nents, DMA_DEV_TO_MEM, 939 rx->sgl, rx->nents, DMA_DEV_TO_MEM,
942 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 940 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
943 if (!desc_rx) 941 if (!desc_rx)
944 goto no_dma; 942 goto rx_nodma;
945 943
946 desc_rx->callback = spi_imx_dma_rx_callback; 944 desc_rx->callback = spi_imx_dma_rx_callback;
947 desc_rx->callback_param = (void *)spi_imx; 945 desc_rx->callback_param = (void *)spi_imx;
@@ -954,13 +952,6 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
954 /* Trigger the cspi module. */ 952 /* Trigger the cspi module. */
955 spi_imx->dma_finished = 0; 953 spi_imx->dma_finished = 0;
956 954
957 dma = readl(spi_imx->base + MX51_ECSPI_DMA);
958 dma = dma & (~MX51_ECSPI_DMA_RXT_WML_MASK);
959 /* Change RX_DMA_LENGTH trigger dma fetch tail data */
960 left = transfer->len % spi_imx->wml;
961 if (left)
962 writel(dma | (left << MX51_ECSPI_DMA_RXT_WML_OFFSET),
963 spi_imx->base + MX51_ECSPI_DMA);
964 /* 955 /*
965 * Set these order to avoid potential RX overflow. The overflow may 956 * Set these order to avoid potential RX overflow. The overflow may
966 * happen if we enable SPI HW before starting RX DMA due to rescheduling 957 * happen if we enable SPI HW before starting RX DMA due to rescheduling
@@ -992,10 +983,6 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
992 spi_imx->devtype_data->reset(spi_imx); 983 spi_imx->devtype_data->reset(spi_imx);
993 dmaengine_terminate_all(master->dma_rx); 984 dmaengine_terminate_all(master->dma_rx);
994 } 985 }
995 dma &= ~MX51_ECSPI_DMA_RXT_WML_MASK;
996 writel(dma |
997 spi_imx->wml << MX51_ECSPI_DMA_RXT_WML_OFFSET,
998 spi_imx->base + MX51_ECSPI_DMA);
999 } 986 }
1000 987
1001 spi_imx->dma_finished = 1; 988 spi_imx->dma_finished = 1;
@@ -1008,7 +995,9 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
1008 995
1009 return ret; 996 return ret;
1010 997
1011no_dma: 998rx_nodma:
999 dmaengine_terminate_all(master->dma_tx);
1000tx_nodma:
1012 pr_warn_once("%s %s: DMA not available, falling back to PIO\n", 1001 pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
1013 dev_driver_string(&master->dev), 1002 dev_driver_string(&master->dev),
1014 dev_name(&master->dev)); 1003 dev_name(&master->dev));
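The first spi-imx hunk above makes the DMA path opt in only when the transfer length is at least one watermark (wml) and an exact multiple of it; the later hunks can then drop the on-the-fly RXT_WML reprogramming for tail bytes, and the old no_dma label is split so a failed RX prep also terminates the already-prepared TX descriptor. A minimal standalone sketch of just the eligibility test, with names and types assumed:

#include <stdbool.h>
#include <stddef.h>

struct dma_cfg {
	bool dma_is_inited;	/* DMA channels were set up at probe time */
	size_t wml;		/* watermark level, in transfer units (assumed) */
};

/* Mirrors the new spi_imx_can_dma() condition from the hunk above. */
static bool can_use_dma(const struct dma_cfg *cfg, size_t len)
{
	return cfg->dma_is_inited && len >= cfg->wml && (len % cfg->wml) == 0;
}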
diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c
index 894616f687b0..cf4bb36bee25 100644
--- a/drivers/spi/spi-loopback-test.c
+++ b/drivers/spi/spi-loopback-test.c
@@ -761,6 +761,7 @@ static int spi_test_run_iter(struct spi_device *spi,
761 test.iterate_transfer_mask = 1; 761 test.iterate_transfer_mask = 1;
762 762
763 /* count number of transfers with tx/rx_buf != NULL */ 763 /* count number of transfers with tx/rx_buf != NULL */
764 rx_count = tx_count = 0;
764 for (i = 0; i < test.transfer_count; i++) { 765 for (i = 0; i < test.transfer_count; i++) {
765 if (test.transfers[i].tx_buf) 766 if (test.transfers[i].tx_buf)
766 tx_count++; 767 tx_count++;
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 7273820275e9..0caa3c8bef46 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -1490,6 +1490,8 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
1490 return status; 1490 return status;
1491 1491
1492disable_pm: 1492disable_pm:
1493 pm_runtime_dont_use_autosuspend(&pdev->dev);
1494 pm_runtime_put_sync(&pdev->dev);
1493 pm_runtime_disable(&pdev->dev); 1495 pm_runtime_disable(&pdev->dev);
1494free_master: 1496free_master:
1495 spi_master_put(master); 1497 spi_master_put(master);
@@ -1501,6 +1503,7 @@ static int omap2_mcspi_remove(struct platform_device *pdev)
1501 struct spi_master *master = platform_get_drvdata(pdev); 1503 struct spi_master *master = platform_get_drvdata(pdev);
1502 struct omap2_mcspi *mcspi = spi_master_get_devdata(master); 1504 struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
1503 1505
1506 pm_runtime_dont_use_autosuspend(mcspi->dev);
1504 pm_runtime_put_sync(mcspi->dev); 1507 pm_runtime_put_sync(mcspi->dev);
1505 pm_runtime_disable(&pdev->dev); 1508 pm_runtime_disable(&pdev->dev);
1506 1509
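Both omap2-mcspi hunks above bring the teardown paths into line with what probe set up: runtime PM autosuspend is turned off and the usage count dropped before runtime PM itself is disabled, on the probe error path as well as in remove. A condensed sketch of that ordering (the helper name is illustrative, the calls are the standard runtime PM API):

#include <linux/pm_runtime.h>

/* Undo pm_runtime_use_autosuspend() and the held usage count in reverse
 * order, then disable runtime PM for the device. */
static void example_pm_runtime_teardown(struct device *dev)
{
	pm_runtime_dont_use_autosuspend(dev);
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
}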
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 79a8bc4f6cec..7cb1b2d710c1 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -749,6 +749,7 @@ static int rockchip_spi_probe(struct platform_device *pdev)
749 return 0; 749 return 0;
750 750
751err_register_master: 751err_register_master:
752 pm_runtime_disable(&pdev->dev);
752 if (rs->dma_tx.ch) 753 if (rs->dma_tx.ch)
753 dma_release_channel(rs->dma_tx.ch); 754 dma_release_channel(rs->dma_tx.ch);
754 if (rs->dma_rx.ch) 755 if (rs->dma_rx.ch)
@@ -778,6 +779,8 @@ static int rockchip_spi_remove(struct platform_device *pdev)
778 if (rs->dma_rx.ch) 779 if (rs->dma_rx.ch)
779 dma_release_channel(rs->dma_rx.ch); 780 dma_release_channel(rs->dma_rx.ch);
780 781
782 spi_master_put(master);
783
781 return 0; 784 return 0;
782} 785}
783 786
diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig
index 0c675861623f..d8e4219c2324 100644
--- a/drivers/ssb/Kconfig
+++ b/drivers/ssb/Kconfig
@@ -83,6 +83,7 @@ config SSB_SDIOHOST
83config SSB_HOST_SOC 83config SSB_HOST_SOC
84 bool "Support for SSB bus on SoC" 84 bool "Support for SSB bus on SoC"
85 depends on SSB && BCM47XX_NVRAM 85 depends on SSB && BCM47XX_NVRAM
86 select SSB_SPROM
86 help 87 help
87 Host interface for a SSB directly mapped into memory. This is 88 Host interface for a SSB directly mapped into memory. This is
88 for some Broadcom SoCs from the BCM47xx and BCM53xx lines. 89 for some Broadcom SoCs from the BCM47xx and BCM53xx lines.
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index cde5ff7529eb..d1a750760cf3 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -613,9 +613,10 @@ out:
613 return err; 613 return err;
614} 614}
615 615
616static int ssb_bus_register(struct ssb_bus *bus, 616static int __maybe_unused
617 ssb_invariants_func_t get_invariants, 617ssb_bus_register(struct ssb_bus *bus,
618 unsigned long baseaddr) 618 ssb_invariants_func_t get_invariants,
619 unsigned long baseaddr)
619{ 620{
620 int err; 621 int err;
621 622
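The ssb hunk above marks ssb_bus_register() as __maybe_unused rather than wrapping it in #ifdefs, presumably because some host-bus configurations compile out every caller and would otherwise trigger an unused-function warning. A minimal illustration of the attribute (the example function itself is made up):

#include <linux/compiler.h>

/* __maybe_unused tells the compiler not to warn if every caller of this
 * function happens to be compiled out by the current configuration. */
static int __maybe_unused example_helper(int x)
{
	return x * 2;
}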
diff --git a/drivers/staging/iio/adc/Kconfig b/drivers/staging/iio/adc/Kconfig
index 58d4517e1836..b9519be90fda 100644
--- a/drivers/staging/iio/adc/Kconfig
+++ b/drivers/staging/iio/adc/Kconfig
@@ -6,6 +6,7 @@ menu "Analog to digital converters"
6config AD7606 6config AD7606
7 tristate "Analog Devices AD7606 ADC driver" 7 tristate "Analog Devices AD7606 ADC driver"
8 depends on GPIOLIB || COMPILE_TEST 8 depends on GPIOLIB || COMPILE_TEST
9 depends on HAS_IOMEM
9 select IIO_BUFFER 10 select IIO_BUFFER
10 select IIO_TRIGGERED_BUFFER 11 select IIO_TRIGGERED_BUFFER
11 help 12 help
diff --git a/drivers/staging/iio/meter/ade7753.c b/drivers/staging/iio/meter/ade7753.c
index f129039bece3..69287108f793 100644
--- a/drivers/staging/iio/meter/ade7753.c
+++ b/drivers/staging/iio/meter/ade7753.c
@@ -217,8 +217,12 @@ error_ret:
217static int ade7753_reset(struct device *dev) 217static int ade7753_reset(struct device *dev)
218{ 218{
219 u16 val; 219 u16 val;
220 int ret;
221
222 ret = ade7753_spi_read_reg_16(dev, ADE7753_MODE, &val);
223 if (ret)
224 return ret;
220 225
221 ade7753_spi_read_reg_16(dev, ADE7753_MODE, &val);
222 val |= BIT(6); /* Software Chip Reset */ 226 val |= BIT(6); /* Software Chip Reset */
223 227
224 return ade7753_spi_write_reg_16(dev, ADE7753_MODE, val); 228 return ade7753_spi_write_reg_16(dev, ADE7753_MODE, val);
@@ -343,8 +347,12 @@ error_ret:
343static int ade7753_stop_device(struct device *dev) 347static int ade7753_stop_device(struct device *dev)
344{ 348{
345 u16 val; 349 u16 val;
350 int ret;
351
352 ret = ade7753_spi_read_reg_16(dev, ADE7753_MODE, &val);
353 if (ret)
354 return ret;
346 355
347 ade7753_spi_read_reg_16(dev, ADE7753_MODE, &val);
348 val |= BIT(4); /* AD converters can be turned off */ 356 val |= BIT(4); /* AD converters can be turned off */
349 357
350 return ade7753_spi_write_reg_16(dev, ADE7753_MODE, val); 358 return ade7753_spi_write_reg_16(dev, ADE7753_MODE, val);
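Both ade7753 hunks above stop ignoring the return value of the 16-bit register read: if the read fails, val is never initialized and OR-ing a bit into it would write garbage back to the device. A self-contained sketch of read-modify-write with error propagation (the accessors below are stand-in stubs, not the driver's):

#include <stdint.h>

/* Stand-in stubs for the driver's SPI register accessors. */
static int stub_read_reg16(uint8_t reg, uint16_t *val)
{
	(void)reg;
	*val = 0x0004;		/* pretend the device returned something */
	return 0;
}

static int stub_write_reg16(uint8_t reg, uint16_t val)
{
	(void)reg;
	(void)val;
	return 0;
}

static int set_mode_bit(uint8_t reg, uint16_t bit)
{
	uint16_t val;
	int ret = stub_read_reg16(reg, &val);

	if (ret)		/* propagate the failure instead of using garbage */
		return ret;
	return stub_write_reg16(reg, val | bit);
}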
diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
index 79ac19246548..70b8f4fabfad 100644
--- a/drivers/staging/panel/panel.c
+++ b/drivers/staging/panel/panel.c
@@ -825,8 +825,7 @@ static void lcd_write_cmd_s(int cmd)
825 lcd_send_serial(0x1F); /* R/W=W, RS=0 */ 825 lcd_send_serial(0x1F); /* R/W=W, RS=0 */
826 lcd_send_serial(cmd & 0x0F); 826 lcd_send_serial(cmd & 0x0F);
827 lcd_send_serial((cmd >> 4) & 0x0F); 827 lcd_send_serial((cmd >> 4) & 0x0F);
828 /* the shortest command takes at least 40 us */ 828 udelay(40); /* the shortest command takes at least 40 us */
829 usleep_range(40, 100);
830 spin_unlock_irq(&pprt_lock); 829 spin_unlock_irq(&pprt_lock);
831} 830}
832 831
@@ -837,8 +836,7 @@ static void lcd_write_data_s(int data)
837 lcd_send_serial(0x5F); /* R/W=W, RS=1 */ 836 lcd_send_serial(0x5F); /* R/W=W, RS=1 */
838 lcd_send_serial(data & 0x0F); 837 lcd_send_serial(data & 0x0F);
839 lcd_send_serial((data >> 4) & 0x0F); 838 lcd_send_serial((data >> 4) & 0x0F);
840 /* the shortest data takes at least 40 us */ 839 udelay(40); /* the shortest data takes at least 40 us */
841 usleep_range(40, 100);
842 spin_unlock_irq(&pprt_lock); 840 spin_unlock_irq(&pprt_lock);
843} 841}
844 842
@@ -848,20 +846,19 @@ static void lcd_write_cmd_p8(int cmd)
848 spin_lock_irq(&pprt_lock); 846 spin_lock_irq(&pprt_lock);
849 /* present the data to the data port */ 847 /* present the data to the data port */
850 w_dtr(pprt, cmd); 848 w_dtr(pprt, cmd);
851 /* maintain the data during 20 us before the strobe */ 849 udelay(20); /* maintain the data during 20 us before the strobe */
852 usleep_range(20, 100);
853 850
854 bits.e = BIT_SET; 851 bits.e = BIT_SET;
855 bits.rs = BIT_CLR; 852 bits.rs = BIT_CLR;
856 bits.rw = BIT_CLR; 853 bits.rw = BIT_CLR;
857 set_ctrl_bits(); 854 set_ctrl_bits();
858 855
859 usleep_range(40, 100); /* maintain the strobe during 40 us */ 856 udelay(40); /* maintain the strobe during 40 us */
860 857
861 bits.e = BIT_CLR; 858 bits.e = BIT_CLR;
862 set_ctrl_bits(); 859 set_ctrl_bits();
863 860
864 usleep_range(120, 500); /* the shortest command takes at least 120 us */ 861 udelay(120); /* the shortest command takes at least 120 us */
865 spin_unlock_irq(&pprt_lock); 862 spin_unlock_irq(&pprt_lock);
866} 863}
867 864
@@ -871,20 +868,19 @@ static void lcd_write_data_p8(int data)
871 spin_lock_irq(&pprt_lock); 868 spin_lock_irq(&pprt_lock);
872 /* present the data to the data port */ 869 /* present the data to the data port */
873 w_dtr(pprt, data); 870 w_dtr(pprt, data);
874 /* maintain the data during 20 us before the strobe */ 871 udelay(20); /* maintain the data during 20 us before the strobe */
875 usleep_range(20, 100);
876 872
877 bits.e = BIT_SET; 873 bits.e = BIT_SET;
878 bits.rs = BIT_SET; 874 bits.rs = BIT_SET;
879 bits.rw = BIT_CLR; 875 bits.rw = BIT_CLR;
880 set_ctrl_bits(); 876 set_ctrl_bits();
881 877
882 usleep_range(40, 100); /* maintain the strobe during 40 us */ 878 udelay(40); /* maintain the strobe during 40 us */
883 879
884 bits.e = BIT_CLR; 880 bits.e = BIT_CLR;
885 set_ctrl_bits(); 881 set_ctrl_bits();
886 882
887 usleep_range(45, 100); /* the shortest data takes at least 45 us */ 883 udelay(45); /* the shortest data takes at least 45 us */
888 spin_unlock_irq(&pprt_lock); 884 spin_unlock_irq(&pprt_lock);
889} 885}
890 886
@@ -894,7 +890,7 @@ static void lcd_write_cmd_tilcd(int cmd)
894 spin_lock_irq(&pprt_lock); 890 spin_lock_irq(&pprt_lock);
895 /* present the data to the control port */ 891 /* present the data to the control port */
896 w_ctr(pprt, cmd); 892 w_ctr(pprt, cmd);
897 usleep_range(60, 120); 893 udelay(60);
898 spin_unlock_irq(&pprt_lock); 894 spin_unlock_irq(&pprt_lock);
899} 895}
900 896
@@ -904,7 +900,7 @@ static void lcd_write_data_tilcd(int data)
904 spin_lock_irq(&pprt_lock); 900 spin_lock_irq(&pprt_lock);
905 /* present the data to the data port */ 901 /* present the data to the data port */
906 w_dtr(pprt, data); 902 w_dtr(pprt, data);
907 usleep_range(60, 120); 903 udelay(60);
908 spin_unlock_irq(&pprt_lock); 904 spin_unlock_irq(&pprt_lock);
909} 905}
910 906
@@ -947,7 +943,7 @@ static void lcd_clear_fast_s(void)
947 lcd_send_serial(0x5F); /* R/W=W, RS=1 */ 943 lcd_send_serial(0x5F); /* R/W=W, RS=1 */
948 lcd_send_serial(' ' & 0x0F); 944 lcd_send_serial(' ' & 0x0F);
949 lcd_send_serial((' ' >> 4) & 0x0F); 945 lcd_send_serial((' ' >> 4) & 0x0F);
950 usleep_range(40, 100); /* the shortest data takes at least 40 us */ 946 udelay(40); /* the shortest data takes at least 40 us */
951 } 947 }
952 spin_unlock_irq(&pprt_lock); 948 spin_unlock_irq(&pprt_lock);
953 949
@@ -971,7 +967,7 @@ static void lcd_clear_fast_p8(void)
971 w_dtr(pprt, ' '); 967 w_dtr(pprt, ' ');
972 968
973 /* maintain the data during 20 us before the strobe */ 969 /* maintain the data during 20 us before the strobe */
974 usleep_range(20, 100); 970 udelay(20);
975 971
976 bits.e = BIT_SET; 972 bits.e = BIT_SET;
977 bits.rs = BIT_SET; 973 bits.rs = BIT_SET;
@@ -979,13 +975,13 @@ static void lcd_clear_fast_p8(void)
979 set_ctrl_bits(); 975 set_ctrl_bits();
980 976
981 /* maintain the strobe during 40 us */ 977 /* maintain the strobe during 40 us */
982 usleep_range(40, 100); 978 udelay(40);
983 979
984 bits.e = BIT_CLR; 980 bits.e = BIT_CLR;
985 set_ctrl_bits(); 981 set_ctrl_bits();
986 982
987 /* the shortest data takes at least 45 us */ 983 /* the shortest data takes at least 45 us */
988 usleep_range(45, 100); 984 udelay(45);
989 } 985 }
990 spin_unlock_irq(&pprt_lock); 986 spin_unlock_irq(&pprt_lock);
991 987
@@ -1007,7 +1003,7 @@ static void lcd_clear_fast_tilcd(void)
1007 for (pos = 0; pos < lcd.height * lcd.hwidth; pos++) { 1003 for (pos = 0; pos < lcd.height * lcd.hwidth; pos++) {
1008 /* present the data to the data port */ 1004 /* present the data to the data port */
1009 w_dtr(pprt, ' '); 1005 w_dtr(pprt, ' ');
1010 usleep_range(60, 120); 1006 udelay(60);
1011 } 1007 }
1012 1008
1013 spin_unlock_irq(&pprt_lock); 1009 spin_unlock_irq(&pprt_lock);
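All of the panel.c hunks above revert usleep_range() back to udelay(). The waits sit inside spin_lock_irq(&pprt_lock) critical sections, and usleep_range() sleeps, which is not allowed while holding a spinlock with interrupts disabled; udelay() busy-waits and is safe in atomic context. A skeleton of the pattern (the lock, the timing value and the comment are placeholders):

#include <linux/delay.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

static void example_strobe(void)
{
	spin_lock_irq(&example_lock);
	/* ... drive the data/control lines here ... */
	udelay(40);	/* busy-wait: permitted while the lock is held */
	spin_unlock_irq(&example_lock);
}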
diff --git a/drivers/staging/rdma/Kconfig b/drivers/staging/rdma/Kconfig
index ba8765063174..f1f3ecadf0fb 100644
--- a/drivers/staging/rdma/Kconfig
+++ b/drivers/staging/rdma/Kconfig
@@ -22,12 +22,6 @@ menuconfig STAGING_RDMA
22# Please keep entries in alphabetic order 22# Please keep entries in alphabetic order
23if STAGING_RDMA 23if STAGING_RDMA
24 24
25source "drivers/staging/rdma/amso1100/Kconfig"
26
27source "drivers/staging/rdma/ehca/Kconfig"
28
29source "drivers/staging/rdma/hfi1/Kconfig" 25source "drivers/staging/rdma/hfi1/Kconfig"
30 26
31source "drivers/staging/rdma/ipath/Kconfig"
32
33endif 27endif
diff --git a/drivers/staging/rdma/Makefile b/drivers/staging/rdma/Makefile
index 139d78ef2c24..8c7fc1de48a7 100644
--- a/drivers/staging/rdma/Makefile
+++ b/drivers/staging/rdma/Makefile
@@ -1,5 +1,2 @@
1# Entries for RDMA_STAGING tree 1# Entries for RDMA_STAGING tree
2obj-$(CONFIG_INFINIBAND_AMSO1100) += amso1100/
3obj-$(CONFIG_INFINIBAND_EHCA) += ehca/
4obj-$(CONFIG_INFINIBAND_HFI1) += hfi1/ 2obj-$(CONFIG_INFINIBAND_HFI1) += hfi1/
5obj-$(CONFIG_INFINIBAND_IPATH) += ipath/
diff --git a/drivers/staging/rdma/amso1100/Kbuild b/drivers/staging/rdma/amso1100/Kbuild
deleted file mode 100644
index 950dfabcd89d..000000000000
--- a/drivers/staging/rdma/amso1100/Kbuild
+++ /dev/null
@@ -1,6 +0,0 @@
1ccflags-$(CONFIG_INFINIBAND_AMSO1100_DEBUG) := -DDEBUG
2
3obj-$(CONFIG_INFINIBAND_AMSO1100) += iw_c2.o
4
5iw_c2-y := c2.o c2_provider.o c2_rnic.o c2_alloc.o c2_mq.o c2_ae.o c2_vq.o \
6 c2_intr.o c2_cq.o c2_qp.o c2_cm.o c2_mm.o c2_pd.o
diff --git a/drivers/staging/rdma/amso1100/Kconfig b/drivers/staging/rdma/amso1100/Kconfig
deleted file mode 100644
index e6ce5f209e47..000000000000
--- a/drivers/staging/rdma/amso1100/Kconfig
+++ /dev/null
@@ -1,15 +0,0 @@
1config INFINIBAND_AMSO1100
2 tristate "Ammasso 1100 HCA support"
3 depends on PCI && INET
4 ---help---
5 This is a low-level driver for the Ammasso 1100 host
6 channel adapter (HCA).
7
8config INFINIBAND_AMSO1100_DEBUG
9 bool "Verbose debugging output"
10 depends on INFINIBAND_AMSO1100
11 default n
12 ---help---
13 This option causes the amso1100 driver to produce a bunch of
14 debug messages. Select this if you are developing the driver
15 or trying to diagnose a problem.
diff --git a/drivers/staging/rdma/amso1100/TODO b/drivers/staging/rdma/amso1100/TODO
deleted file mode 100644
index 18b00a5cb549..000000000000
--- a/drivers/staging/rdma/amso1100/TODO
+++ /dev/null
@@ -1,4 +0,0 @@
17/2015
2
3The amso1100 driver has been deprecated and moved to drivers/staging.
4It will be removed in the 4.6 merge window.
diff --git a/drivers/staging/rdma/amso1100/c2.c b/drivers/staging/rdma/amso1100/c2.c
deleted file mode 100644
index b46ebd1ae15a..000000000000
--- a/drivers/staging/rdma/amso1100/c2.c
+++ /dev/null
@@ -1,1240 +0,0 @@
1/*
2 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33#include <linux/module.h>
34#include <linux/moduleparam.h>
35#include <linux/pci.h>
36#include <linux/netdevice.h>
37#include <linux/etherdevice.h>
38#include <linux/inetdevice.h>
39#include <linux/interrupt.h>
40#include <linux/delay.h>
41#include <linux/ethtool.h>
42#include <linux/mii.h>
43#include <linux/if_vlan.h>
44#include <linux/crc32.h>
45#include <linux/in.h>
46#include <linux/ip.h>
47#include <linux/tcp.h>
48#include <linux/init.h>
49#include <linux/dma-mapping.h>
50#include <linux/slab.h>
51#include <linux/prefetch.h>
52
53#include <asm/io.h>
54#include <asm/irq.h>
55#include <asm/byteorder.h>
56
57#include <rdma/ib_smi.h>
58#include "c2.h"
59#include "c2_provider.h"
60
61MODULE_AUTHOR("Tom Tucker <tom@opengridcomputing.com>");
62MODULE_DESCRIPTION("Ammasso AMSO1100 Low-level iWARP Driver");
63MODULE_LICENSE("Dual BSD/GPL");
64MODULE_VERSION(DRV_VERSION);
65
66static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
67 | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
68
69static int debug = -1; /* defaults above */
70module_param(debug, int, 0);
71MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
72
73static int c2_up(struct net_device *netdev);
74static int c2_down(struct net_device *netdev);
75static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
76static void c2_tx_interrupt(struct net_device *netdev);
77static void c2_rx_interrupt(struct net_device *netdev);
78static irqreturn_t c2_interrupt(int irq, void *dev_id);
79static void c2_tx_timeout(struct net_device *netdev);
80static int c2_change_mtu(struct net_device *netdev, int new_mtu);
81static void c2_reset(struct c2_port *c2_port);
82
83static struct pci_device_id c2_pci_table[] = {
84 { PCI_DEVICE(0x18b8, 0xb001) },
85 { 0 }
86};
87
88MODULE_DEVICE_TABLE(pci, c2_pci_table);
89
90static void c2_set_rxbufsize(struct c2_port *c2_port)
91{
92 struct net_device *netdev = c2_port->netdev;
93
94 if (netdev->mtu > RX_BUF_SIZE)
95 c2_port->rx_buf_size =
96 netdev->mtu + ETH_HLEN + sizeof(struct c2_rxp_hdr) +
97 NET_IP_ALIGN;
98 else
99 c2_port->rx_buf_size = sizeof(struct c2_rxp_hdr) + RX_BUF_SIZE;
100}
101
102/*
103 * Allocate TX ring elements and chain them together.
104 * One-to-one association of adapter descriptors with ring elements.
105 */
106static int c2_tx_ring_alloc(struct c2_ring *tx_ring, void *vaddr,
107 dma_addr_t base, void __iomem * mmio_txp_ring)
108{
109 struct c2_tx_desc *tx_desc;
110 struct c2_txp_desc __iomem *txp_desc;
111 struct c2_element *elem;
112 int i;
113
114 tx_ring->start = kmalloc_array(tx_ring->count, sizeof(*elem),
115 GFP_KERNEL);
116 if (!tx_ring->start)
117 return -ENOMEM;
118
119 elem = tx_ring->start;
120 tx_desc = vaddr;
121 txp_desc = mmio_txp_ring;
122 for (i = 0; i < tx_ring->count; i++, elem++, tx_desc++, txp_desc++) {
123 tx_desc->len = 0;
124 tx_desc->status = 0;
125
126 /* Set TXP_HTXD_UNINIT */
127 __raw_writeq((__force u64) cpu_to_be64(0x1122334455667788ULL),
128 (void __iomem *) txp_desc + C2_TXP_ADDR);
129 __raw_writew(0, (void __iomem *) txp_desc + C2_TXP_LEN);
130 __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_UNINIT),
131 (void __iomem *) txp_desc + C2_TXP_FLAGS);
132
133 elem->skb = NULL;
134 elem->ht_desc = tx_desc;
135 elem->hw_desc = txp_desc;
136
137 if (i == tx_ring->count - 1) {
138 elem->next = tx_ring->start;
139 tx_desc->next_offset = base;
140 } else {
141 elem->next = elem + 1;
142 tx_desc->next_offset =
143 base + (i + 1) * sizeof(*tx_desc);
144 }
145 }
146
147 tx_ring->to_use = tx_ring->to_clean = tx_ring->start;
148
149 return 0;
150}
151
152/*
153 * Allocate RX ring elements and chain them together.
154 * One-to-one association of adapter descriptors with ring elements.
155 */
156static int c2_rx_ring_alloc(struct c2_ring *rx_ring, void *vaddr,
157 dma_addr_t base, void __iomem * mmio_rxp_ring)
158{
159 struct c2_rx_desc *rx_desc;
160 struct c2_rxp_desc __iomem *rxp_desc;
161 struct c2_element *elem;
162 int i;
163
164 rx_ring->start = kmalloc_array(rx_ring->count, sizeof(*elem),
165 GFP_KERNEL);
166 if (!rx_ring->start)
167 return -ENOMEM;
168
169 elem = rx_ring->start;
170 rx_desc = vaddr;
171 rxp_desc = mmio_rxp_ring;
172 for (i = 0; i < rx_ring->count; i++, elem++, rx_desc++, rxp_desc++) {
173 rx_desc->len = 0;
174 rx_desc->status = 0;
175
176 /* Set RXP_HRXD_UNINIT */
177 __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_OK),
178 (void __iomem *) rxp_desc + C2_RXP_STATUS);
179 __raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_COUNT);
180 __raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_LEN);
181 __raw_writeq((__force u64) cpu_to_be64(0x99aabbccddeeffULL),
182 (void __iomem *) rxp_desc + C2_RXP_ADDR);
183 __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_UNINIT),
184 (void __iomem *) rxp_desc + C2_RXP_FLAGS);
185
186 elem->skb = NULL;
187 elem->ht_desc = rx_desc;
188 elem->hw_desc = rxp_desc;
189
190 if (i == rx_ring->count - 1) {
191 elem->next = rx_ring->start;
192 rx_desc->next_offset = base;
193 } else {
194 elem->next = elem + 1;
195 rx_desc->next_offset =
196 base + (i + 1) * sizeof(*rx_desc);
197 }
198 }
199
200 rx_ring->to_use = rx_ring->to_clean = rx_ring->start;
201
202 return 0;
203}
204
205/* Setup buffer for receiving */
206static inline int c2_rx_alloc(struct c2_port *c2_port, struct c2_element *elem)
207{
208 struct c2_dev *c2dev = c2_port->c2dev;
209 struct c2_rx_desc *rx_desc = elem->ht_desc;
210 struct sk_buff *skb;
211 dma_addr_t mapaddr;
212 u32 maplen;
213 struct c2_rxp_hdr *rxp_hdr;
214
215 skb = dev_alloc_skb(c2_port->rx_buf_size);
216 if (unlikely(!skb)) {
217 pr_debug("%s: out of memory for receive\n",
218 c2_port->netdev->name);
219 return -ENOMEM;
220 }
221
222 /* Zero out the rxp hdr in the sk_buff */
223 memset(skb->data, 0, sizeof(*rxp_hdr));
224
225 skb->dev = c2_port->netdev;
226
227 maplen = c2_port->rx_buf_size;
228 mapaddr =
229 pci_map_single(c2dev->pcidev, skb->data, maplen,
230 PCI_DMA_FROMDEVICE);
231
232 /* Set the sk_buff RXP_header to RXP_HRXD_READY */
233 rxp_hdr = (struct c2_rxp_hdr *) skb->data;
234 rxp_hdr->flags = RXP_HRXD_READY;
235
236 __raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
237 __raw_writew((__force u16) cpu_to_be16((u16) maplen - sizeof(*rxp_hdr)),
238 elem->hw_desc + C2_RXP_LEN);
239 __raw_writeq((__force u64) cpu_to_be64(mapaddr), elem->hw_desc + C2_RXP_ADDR);
240 __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
241 elem->hw_desc + C2_RXP_FLAGS);
242
243 elem->skb = skb;
244 elem->mapaddr = mapaddr;
245 elem->maplen = maplen;
246 rx_desc->len = maplen;
247
248 return 0;
249}
250
251/*
252 * Allocate buffers for the Rx ring
253 * For receive: rx_ring.to_clean is next received frame
254 */
255static int c2_rx_fill(struct c2_port *c2_port)
256{
257 struct c2_ring *rx_ring = &c2_port->rx_ring;
258 struct c2_element *elem;
259 int ret = 0;
260
261 elem = rx_ring->start;
262 do {
263 if (c2_rx_alloc(c2_port, elem)) {
264 ret = 1;
265 break;
266 }
267 } while ((elem = elem->next) != rx_ring->start);
268
269 rx_ring->to_clean = rx_ring->start;
270 return ret;
271}
272
273/* Free all buffers in RX ring, assumes receiver stopped */
274static void c2_rx_clean(struct c2_port *c2_port)
275{
276 struct c2_dev *c2dev = c2_port->c2dev;
277 struct c2_ring *rx_ring = &c2_port->rx_ring;
278 struct c2_element *elem;
279 struct c2_rx_desc *rx_desc;
280
281 elem = rx_ring->start;
282 do {
283 rx_desc = elem->ht_desc;
284 rx_desc->len = 0;
285
286 __raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
287 __raw_writew(0, elem->hw_desc + C2_RXP_COUNT);
288 __raw_writew(0, elem->hw_desc + C2_RXP_LEN);
289 __raw_writeq((__force u64) cpu_to_be64(0x99aabbccddeeffULL),
290 elem->hw_desc + C2_RXP_ADDR);
291 __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_UNINIT),
292 elem->hw_desc + C2_RXP_FLAGS);
293
294 if (elem->skb) {
295 pci_unmap_single(c2dev->pcidev, elem->mapaddr,
296 elem->maplen, PCI_DMA_FROMDEVICE);
297 dev_kfree_skb(elem->skb);
298 elem->skb = NULL;
299 }
300 } while ((elem = elem->next) != rx_ring->start);
301}
302
303static inline int c2_tx_free(struct c2_dev *c2dev, struct c2_element *elem)
304{
305 struct c2_tx_desc *tx_desc = elem->ht_desc;
306
307 tx_desc->len = 0;
308
309 pci_unmap_single(c2dev->pcidev, elem->mapaddr, elem->maplen,
310 PCI_DMA_TODEVICE);
311
312 if (elem->skb) {
313 dev_kfree_skb_any(elem->skb);
314 elem->skb = NULL;
315 }
316
317 return 0;
318}
319
320/* Free all buffers in TX ring, assumes transmitter stopped */
321static void c2_tx_clean(struct c2_port *c2_port)
322{
323 struct c2_ring *tx_ring = &c2_port->tx_ring;
324 struct c2_element *elem;
325 struct c2_txp_desc txp_htxd;
326 int retry;
327 unsigned long flags;
328
329 spin_lock_irqsave(&c2_port->tx_lock, flags);
330
331 elem = tx_ring->start;
332
333 do {
334 retry = 0;
335 do {
336 txp_htxd.flags =
337 readw(elem->hw_desc + C2_TXP_FLAGS);
338
339 if (txp_htxd.flags == TXP_HTXD_READY) {
340 retry = 1;
341 __raw_writew(0,
342 elem->hw_desc + C2_TXP_LEN);
343 __raw_writeq(0,
344 elem->hw_desc + C2_TXP_ADDR);
345 __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_DONE),
346 elem->hw_desc + C2_TXP_FLAGS);
347 c2_port->netdev->stats.tx_dropped++;
348 break;
349 } else {
350 __raw_writew(0,
351 elem->hw_desc + C2_TXP_LEN);
352 __raw_writeq((__force u64) cpu_to_be64(0x1122334455667788ULL),
353 elem->hw_desc + C2_TXP_ADDR);
354 __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_UNINIT),
355 elem->hw_desc + C2_TXP_FLAGS);
356 }
357
358 c2_tx_free(c2_port->c2dev, elem);
359
360 } while ((elem = elem->next) != tx_ring->start);
361 } while (retry);
362
363 c2_port->tx_avail = c2_port->tx_ring.count - 1;
364 c2_port->c2dev->cur_tx = tx_ring->to_use - tx_ring->start;
365
366 if (c2_port->tx_avail > MAX_SKB_FRAGS + 1)
367 netif_wake_queue(c2_port->netdev);
368
369 spin_unlock_irqrestore(&c2_port->tx_lock, flags);
370}
371
372/*
373 * Process transmit descriptors marked 'DONE' by the firmware,
374 * freeing up their unneeded sk_buffs.
375 */
376static void c2_tx_interrupt(struct net_device *netdev)
377{
378 struct c2_port *c2_port = netdev_priv(netdev);
379 struct c2_dev *c2dev = c2_port->c2dev;
380 struct c2_ring *tx_ring = &c2_port->tx_ring;
381 struct c2_element *elem;
382 struct c2_txp_desc txp_htxd;
383
384 spin_lock(&c2_port->tx_lock);
385
386 for (elem = tx_ring->to_clean; elem != tx_ring->to_use;
387 elem = elem->next) {
388 txp_htxd.flags =
389 be16_to_cpu((__force __be16) readw(elem->hw_desc + C2_TXP_FLAGS));
390
391 if (txp_htxd.flags != TXP_HTXD_DONE)
392 break;
393
394 if (netif_msg_tx_done(c2_port)) {
395 /* PCI reads are expensive in fast path */
396 txp_htxd.len =
397 be16_to_cpu((__force __be16) readw(elem->hw_desc + C2_TXP_LEN));
398 pr_debug("%s: tx done slot %3Zu status 0x%x len "
399 "%5u bytes\n",
400 netdev->name, elem - tx_ring->start,
401 txp_htxd.flags, txp_htxd.len);
402 }
403
404 c2_tx_free(c2dev, elem);
405 ++(c2_port->tx_avail);
406 }
407
408 tx_ring->to_clean = elem;
409
410 if (netif_queue_stopped(netdev)
411 && c2_port->tx_avail > MAX_SKB_FRAGS + 1)
412 netif_wake_queue(netdev);
413
414 spin_unlock(&c2_port->tx_lock);
415}
416
417static void c2_rx_error(struct c2_port *c2_port, struct c2_element *elem)
418{
419 struct c2_rx_desc *rx_desc = elem->ht_desc;
420 struct c2_rxp_hdr *rxp_hdr = (struct c2_rxp_hdr *) elem->skb->data;
421
422 if (rxp_hdr->status != RXP_HRXD_OK ||
423 rxp_hdr->len > (rx_desc->len - sizeof(*rxp_hdr))) {
424 pr_debug("BAD RXP_HRXD\n");
425 pr_debug(" rx_desc : %p\n", rx_desc);
426 pr_debug(" index : %Zu\n",
427 elem - c2_port->rx_ring.start);
428 pr_debug(" len : %u\n", rx_desc->len);
429 pr_debug(" rxp_hdr : %p [PA %p]\n", rxp_hdr,
430 (void *) __pa((unsigned long) rxp_hdr));
431 pr_debug(" flags : 0x%x\n", rxp_hdr->flags);
432 pr_debug(" status: 0x%x\n", rxp_hdr->status);
433 pr_debug(" len : %u\n", rxp_hdr->len);
434 pr_debug(" rsvd : 0x%x\n", rxp_hdr->rsvd);
435 }
436
437 /* Setup the skb for reuse since we're dropping this pkt */
438 elem->skb->data = elem->skb->head;
439 skb_reset_tail_pointer(elem->skb);
440
441 /* Zero out the rxp hdr in the sk_buff */
442 memset(elem->skb->data, 0, sizeof(*rxp_hdr));
443
444 /* Write the descriptor to the adapter's rx ring */
445 __raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
446 __raw_writew(0, elem->hw_desc + C2_RXP_COUNT);
447 __raw_writew((__force u16) cpu_to_be16((u16) elem->maplen - sizeof(*rxp_hdr)),
448 elem->hw_desc + C2_RXP_LEN);
449 __raw_writeq((__force u64) cpu_to_be64(elem->mapaddr),
450 elem->hw_desc + C2_RXP_ADDR);
451 __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
452 elem->hw_desc + C2_RXP_FLAGS);
453
454 pr_debug("packet dropped\n");
455 c2_port->netdev->stats.rx_dropped++;
456}
457
458static void c2_rx_interrupt(struct net_device *netdev)
459{
460 struct c2_port *c2_port = netdev_priv(netdev);
461 struct c2_dev *c2dev = c2_port->c2dev;
462 struct c2_ring *rx_ring = &c2_port->rx_ring;
463 struct c2_element *elem;
464 struct c2_rx_desc *rx_desc;
465 struct c2_rxp_hdr *rxp_hdr;
466 struct sk_buff *skb;
467 dma_addr_t mapaddr;
468 u32 maplen, buflen;
469 unsigned long flags;
470
471 spin_lock_irqsave(&c2dev->lock, flags);
472
473 /* Begin where we left off */
474 rx_ring->to_clean = rx_ring->start + c2dev->cur_rx;
475
476 for (elem = rx_ring->to_clean; elem->next != rx_ring->to_clean;
477 elem = elem->next) {
478 rx_desc = elem->ht_desc;
479 mapaddr = elem->mapaddr;
480 maplen = elem->maplen;
481 skb = elem->skb;
482 rxp_hdr = (struct c2_rxp_hdr *) skb->data;
483
484 if (rxp_hdr->flags != RXP_HRXD_DONE)
485 break;
486 buflen = rxp_hdr->len;
487
488 /* Sanity check the RXP header */
489 if (rxp_hdr->status != RXP_HRXD_OK ||
490 buflen > (rx_desc->len - sizeof(*rxp_hdr))) {
491 c2_rx_error(c2_port, elem);
492 continue;
493 }
494
495 /*
496 * Allocate and map a new skb for replenishing the host
497 * RX desc
498 */
499 if (c2_rx_alloc(c2_port, elem)) {
500 c2_rx_error(c2_port, elem);
501 continue;
502 }
503
504 /* Unmap the old skb */
505 pci_unmap_single(c2dev->pcidev, mapaddr, maplen,
506 PCI_DMA_FROMDEVICE);
507
508 prefetch(skb->data);
509
510 /*
511 * Skip past the leading 8 bytes comprising of the
512 * "struct c2_rxp_hdr", prepended by the adapter
513 * to the usual Ethernet header ("struct ethhdr"),
514 * to the start of the raw Ethernet packet.
515 *
516 * Fix up the various fields in the sk_buff before
517 * passing it up to netif_rx(). The transfer size
518 * (in bytes) specified by the adapter len field of
519 * the "struct rxp_hdr_t" does NOT include the
520 * "sizeof(struct c2_rxp_hdr)".
521 */
522 skb->data += sizeof(*rxp_hdr);
523 skb_set_tail_pointer(skb, buflen);
524 skb->len = buflen;
525 skb->protocol = eth_type_trans(skb, netdev);
526
527 netif_rx(skb);
528
529 netdev->stats.rx_packets++;
530 netdev->stats.rx_bytes += buflen;
531 }
532
533 /* Save where we left off */
534 rx_ring->to_clean = elem;
535 c2dev->cur_rx = elem - rx_ring->start;
536 C2_SET_CUR_RX(c2dev, c2dev->cur_rx);
537
538 spin_unlock_irqrestore(&c2dev->lock, flags);
539}
540
541/*
542 * Handle netisr0 TX & RX interrupts.
543 */
544static irqreturn_t c2_interrupt(int irq, void *dev_id)
545{
546 unsigned int netisr0, dmaisr;
547 int handled = 0;
548 struct c2_dev *c2dev = dev_id;
549
550 /* Process CCILNET interrupts */
551 netisr0 = readl(c2dev->regs + C2_NISR0);
552 if (netisr0) {
553
554 /*
555 * There is an issue with the firmware that always
556 * provides the status of RX for both TX & RX
557 * interrupts. So process both queues here.
558 */
559 c2_rx_interrupt(c2dev->netdev);
560 c2_tx_interrupt(c2dev->netdev);
561
562 /* Clear the interrupt */
563 writel(netisr0, c2dev->regs + C2_NISR0);
564 handled++;
565 }
566
567 /* Process RNIC interrupts */
568 dmaisr = readl(c2dev->regs + C2_DISR);
569 if (dmaisr) {
570 writel(dmaisr, c2dev->regs + C2_DISR);
571 c2_rnic_interrupt(c2dev);
572 handled++;
573 }
574
575 if (handled) {
576 return IRQ_HANDLED;
577 } else {
578 return IRQ_NONE;
579 }
580}
581
582static int c2_up(struct net_device *netdev)
583{
584 struct c2_port *c2_port = netdev_priv(netdev);
585 struct c2_dev *c2dev = c2_port->c2dev;
586 struct c2_element *elem;
587 struct c2_rxp_hdr *rxp_hdr;
588 struct in_device *in_dev;
589 size_t rx_size, tx_size;
590 int ret, i;
591 unsigned int netimr0;
592
593 if (netif_msg_ifup(c2_port))
594 pr_debug("%s: enabling interface\n", netdev->name);
595
596 /* Set the Rx buffer size based on MTU */
597 c2_set_rxbufsize(c2_port);
598
599 /* Allocate DMA'able memory for Tx/Rx host descriptor rings */
600 rx_size = c2_port->rx_ring.count * sizeof(struct c2_rx_desc);
601 tx_size = c2_port->tx_ring.count * sizeof(struct c2_tx_desc);
602
603 c2_port->mem_size = tx_size + rx_size;
604 c2_port->mem = pci_zalloc_consistent(c2dev->pcidev, c2_port->mem_size,
605 &c2_port->dma);
606 if (c2_port->mem == NULL) {
607 pr_debug("Unable to allocate memory for "
608 "host descriptor rings\n");
609 return -ENOMEM;
610 }
611
612 /* Create the Rx host descriptor ring */
613 if ((ret =
614 c2_rx_ring_alloc(&c2_port->rx_ring, c2_port->mem, c2_port->dma,
615 c2dev->mmio_rxp_ring))) {
616 pr_debug("Unable to create RX ring\n");
617 goto bail0;
618 }
619
620 /* Allocate Rx buffers for the host descriptor ring */
621 if (c2_rx_fill(c2_port)) {
622 pr_debug("Unable to fill RX ring\n");
623 goto bail1;
624 }
625
626 /* Create the Tx host descriptor ring */
627 if ((ret = c2_tx_ring_alloc(&c2_port->tx_ring, c2_port->mem + rx_size,
628 c2_port->dma + rx_size,
629 c2dev->mmio_txp_ring))) {
630 pr_debug("Unable to create TX ring\n");
631 goto bail1;
632 }
633
634 /* Set the TX pointer to where we left off */
635 c2_port->tx_avail = c2_port->tx_ring.count - 1;
636 c2_port->tx_ring.to_use = c2_port->tx_ring.to_clean =
637 c2_port->tx_ring.start + c2dev->cur_tx;
638
639 /* missing: Initialize MAC */
640
641 BUG_ON(c2_port->tx_ring.to_use != c2_port->tx_ring.to_clean);
642
643 /* Reset the adapter, ensures the driver is in sync with the RXP */
644 c2_reset(c2_port);
645
646 /* Reset the READY bit in the sk_buff RXP headers & adapter HRXDQ */
647 for (i = 0, elem = c2_port->rx_ring.start; i < c2_port->rx_ring.count;
648 i++, elem++) {
649 rxp_hdr = (struct c2_rxp_hdr *) elem->skb->data;
650 rxp_hdr->flags = 0;
651 __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
652 elem->hw_desc + C2_RXP_FLAGS);
653 }
654
655 /* Enable network packets */
656 netif_start_queue(netdev);
657
658 /* Enable IRQ */
659 writel(0, c2dev->regs + C2_IDIS);
660 netimr0 = readl(c2dev->regs + C2_NIMR0);
661 netimr0 &= ~(C2_PCI_HTX_INT | C2_PCI_HRX_INT);
662 writel(netimr0, c2dev->regs + C2_NIMR0);
663
664 /* Tell the stack to ignore arp requests for ipaddrs bound to
665 * other interfaces. This is needed to prevent the host stack
666 * from responding to arp requests to the ipaddr bound on the
667 * rdma interface.
668 */
669 in_dev = in_dev_get(netdev);
670 IN_DEV_CONF_SET(in_dev, ARP_IGNORE, 1);
671 in_dev_put(in_dev);
672
673 return 0;
674
675bail1:
676 c2_rx_clean(c2_port);
677 kfree(c2_port->rx_ring.start);
678
679bail0:
680 pci_free_consistent(c2dev->pcidev, c2_port->mem_size, c2_port->mem,
681 c2_port->dma);
682
683 return ret;
684}
685
686static int c2_down(struct net_device *netdev)
687{
688 struct c2_port *c2_port = netdev_priv(netdev);
689 struct c2_dev *c2dev = c2_port->c2dev;
690
691 if (netif_msg_ifdown(c2_port))
692 pr_debug("%s: disabling interface\n",
693 netdev->name);
694
695 /* Wait for all the queued packets to get sent */
696 c2_tx_interrupt(netdev);
697
698 /* Disable network packets */
699 netif_stop_queue(netdev);
700
701 /* Disable IRQs by clearing the interrupt mask */
702 writel(1, c2dev->regs + C2_IDIS);
703 writel(0, c2dev->regs + C2_NIMR0);
704
705 /* missing: Stop transmitter */
706
707 /* missing: Stop receiver */
708
709 /* Reset the adapter, ensures the driver is in sync with the RXP */
710 c2_reset(c2_port);
711
712 /* missing: Turn off LEDs here */
713
714 /* Free all buffers in the host descriptor rings */
715 c2_tx_clean(c2_port);
716 c2_rx_clean(c2_port);
717
718 /* Free the host descriptor rings */
719 kfree(c2_port->rx_ring.start);
720 kfree(c2_port->tx_ring.start);
721 pci_free_consistent(c2dev->pcidev, c2_port->mem_size, c2_port->mem,
722 c2_port->dma);
723
724 return 0;
725}
726
727static void c2_reset(struct c2_port *c2_port)
728{
729 struct c2_dev *c2dev = c2_port->c2dev;
730 unsigned int cur_rx = c2dev->cur_rx;
731
732 /* Tell the hardware to quiesce */
733 C2_SET_CUR_RX(c2dev, cur_rx | C2_PCI_HRX_QUI);
734
735 /*
736 * The hardware will reset the C2_PCI_HRX_QUI bit once
737 * the RXP is quiesced. Wait 2 seconds for this.
738 */
739 ssleep(2);
740
741 cur_rx = C2_GET_CUR_RX(c2dev);
742
743 if (cur_rx & C2_PCI_HRX_QUI)
744 pr_debug("c2_reset: failed to quiesce the hardware!\n");
745
746 cur_rx &= ~C2_PCI_HRX_QUI;
747
748 c2dev->cur_rx = cur_rx;
749
750 pr_debug("Current RX: %u\n", c2dev->cur_rx);
751}
752
753static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
754{
755 struct c2_port *c2_port = netdev_priv(netdev);
756 struct c2_dev *c2dev = c2_port->c2dev;
757 struct c2_ring *tx_ring = &c2_port->tx_ring;
758 struct c2_element *elem;
759 dma_addr_t mapaddr;
760 u32 maplen;
761 unsigned long flags;
762 unsigned int i;
763
764 spin_lock_irqsave(&c2_port->tx_lock, flags);
765
766 if (unlikely(c2_port->tx_avail < (skb_shinfo(skb)->nr_frags + 1))) {
767 netif_stop_queue(netdev);
768 spin_unlock_irqrestore(&c2_port->tx_lock, flags);
769
770 pr_debug("%s: Tx ring full when queue awake!\n",
771 netdev->name);
772 return NETDEV_TX_BUSY;
773 }
774
775 maplen = skb_headlen(skb);
776 mapaddr =
777 pci_map_single(c2dev->pcidev, skb->data, maplen, PCI_DMA_TODEVICE);
778
779 elem = tx_ring->to_use;
780 elem->skb = skb;
781 elem->mapaddr = mapaddr;
782 elem->maplen = maplen;
783
784 /* Tell HW to xmit */
785 __raw_writeq((__force u64) cpu_to_be64(mapaddr),
786 elem->hw_desc + C2_TXP_ADDR);
787 __raw_writew((__force u16) cpu_to_be16(maplen),
788 elem->hw_desc + C2_TXP_LEN);
789 __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_READY),
790 elem->hw_desc + C2_TXP_FLAGS);
791
792 netdev->stats.tx_packets++;
793 netdev->stats.tx_bytes += maplen;
794
795 /* Loop thru additional data fragments and queue them */
796 if (skb_shinfo(skb)->nr_frags) {
797 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
798 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
799 maplen = skb_frag_size(frag);
800 mapaddr = skb_frag_dma_map(&c2dev->pcidev->dev, frag,
801 0, maplen, DMA_TO_DEVICE);
802 elem = elem->next;
803 elem->skb = NULL;
804 elem->mapaddr = mapaddr;
805 elem->maplen = maplen;
806
807 /* Tell HW to xmit */
808 __raw_writeq((__force u64) cpu_to_be64(mapaddr),
809 elem->hw_desc + C2_TXP_ADDR);
810 __raw_writew((__force u16) cpu_to_be16(maplen),
811 elem->hw_desc + C2_TXP_LEN);
812 __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_READY),
813 elem->hw_desc + C2_TXP_FLAGS);
814
815 netdev->stats.tx_packets++;
816 netdev->stats.tx_bytes += maplen;
817 }
818 }
819
820 tx_ring->to_use = elem->next;
821 c2_port->tx_avail -= (skb_shinfo(skb)->nr_frags + 1);
822
823 if (c2_port->tx_avail <= MAX_SKB_FRAGS + 1) {
824 netif_stop_queue(netdev);
825 if (netif_msg_tx_queued(c2_port))
826 pr_debug("%s: transmit queue full\n",
827 netdev->name);
828 }
829
830 spin_unlock_irqrestore(&c2_port->tx_lock, flags);
831
832 netdev->trans_start = jiffies;
833
834 return NETDEV_TX_OK;
835}
836
837static void c2_tx_timeout(struct net_device *netdev)
838{
839 struct c2_port *c2_port = netdev_priv(netdev);
840
841 if (netif_msg_timer(c2_port))
842 pr_debug("%s: tx timeout\n", netdev->name);
843
844 c2_tx_clean(c2_port);
845}
846
847static int c2_change_mtu(struct net_device *netdev, int new_mtu)
848{
849 int ret = 0;
850
851 if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
852 return -EINVAL;
853
854 netdev->mtu = new_mtu;
855
856 if (netif_running(netdev)) {
857 c2_down(netdev);
858
859 c2_up(netdev);
860 }
861
862 return ret;
863}
864
865static const struct net_device_ops c2_netdev = {
866 .ndo_open = c2_up,
867 .ndo_stop = c2_down,
868 .ndo_start_xmit = c2_xmit_frame,
869 .ndo_tx_timeout = c2_tx_timeout,
870 .ndo_change_mtu = c2_change_mtu,
871 .ndo_set_mac_address = eth_mac_addr,
872 .ndo_validate_addr = eth_validate_addr,
873};
874
875/* Initialize network device */
876static struct net_device *c2_devinit(struct c2_dev *c2dev,
877 void __iomem * mmio_addr)
878{
879 struct c2_port *c2_port = NULL;
880 struct net_device *netdev = alloc_etherdev(sizeof(*c2_port));
881
882 if (!netdev) {
883 pr_debug("c2_port etherdev alloc failed");
884 return NULL;
885 }
886
887 SET_NETDEV_DEV(netdev, &c2dev->pcidev->dev);
888
889 netdev->netdev_ops = &c2_netdev;
890 netdev->watchdog_timeo = C2_TX_TIMEOUT;
891 netdev->irq = c2dev->pcidev->irq;
892
893 c2_port = netdev_priv(netdev);
894 c2_port->netdev = netdev;
895 c2_port->c2dev = c2dev;
896 c2_port->msg_enable = netif_msg_init(debug, default_msg);
897 c2_port->tx_ring.count = C2_NUM_TX_DESC;
898 c2_port->rx_ring.count = C2_NUM_RX_DESC;
899
900 spin_lock_init(&c2_port->tx_lock);
901
902 /* Copy our 48-bit ethernet hardware address */
903 memcpy_fromio(netdev->dev_addr, mmio_addr + C2_REGS_ENADDR, 6);
904
905 /* Validate the MAC address */
906 if (!is_valid_ether_addr(netdev->dev_addr)) {
907 pr_debug("Invalid MAC Address\n");
908 pr_debug("%s: MAC %pM, IRQ %u\n", netdev->name,
909 netdev->dev_addr, netdev->irq);
910 free_netdev(netdev);
911 return NULL;
912 }
913
914 c2dev->netdev = netdev;
915
916 return netdev;
917}
918
919static int c2_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
920{
921 int ret = 0, i;
922 unsigned long reg0_start, reg0_flags, reg0_len;
923 unsigned long reg2_start, reg2_flags, reg2_len;
924 unsigned long reg4_start, reg4_flags, reg4_len;
925 unsigned kva_map_size;
926 struct net_device *netdev = NULL;
927 struct c2_dev *c2dev = NULL;
928 void __iomem *mmio_regs = NULL;
929
930 printk(KERN_INFO PFX "AMSO1100 Gigabit Ethernet driver v%s loaded\n",
931 DRV_VERSION);
932
933 /* Enable PCI device */
934 ret = pci_enable_device(pcidev);
935 if (ret) {
936 printk(KERN_ERR PFX "%s: Unable to enable PCI device\n",
937 pci_name(pcidev));
938 goto bail0;
939 }
940
941 reg0_start = pci_resource_start(pcidev, BAR_0);
942 reg0_len = pci_resource_len(pcidev, BAR_0);
943 reg0_flags = pci_resource_flags(pcidev, BAR_0);
944
945 reg2_start = pci_resource_start(pcidev, BAR_2);
946 reg2_len = pci_resource_len(pcidev, BAR_2);
947 reg2_flags = pci_resource_flags(pcidev, BAR_2);
948
949 reg4_start = pci_resource_start(pcidev, BAR_4);
950 reg4_len = pci_resource_len(pcidev, BAR_4);
951 reg4_flags = pci_resource_flags(pcidev, BAR_4);
952
953 pr_debug("BAR0 size = 0x%lX bytes\n", reg0_len);
954 pr_debug("BAR2 size = 0x%lX bytes\n", reg2_len);
955 pr_debug("BAR4 size = 0x%lX bytes\n", reg4_len);
956
957 /* Make sure PCI base addr are MMIO */
958 if (!(reg0_flags & IORESOURCE_MEM) ||
959 !(reg2_flags & IORESOURCE_MEM) || !(reg4_flags & IORESOURCE_MEM)) {
960 printk(KERN_ERR PFX "PCI regions not an MMIO resource\n");
961 ret = -ENODEV;
962 goto bail1;
963 }
964
965 /* Check for weird/broken PCI region reporting */
966 if ((reg0_len < C2_REG0_SIZE) ||
967 (reg2_len < C2_REG2_SIZE) || (reg4_len < C2_REG4_SIZE)) {
968 printk(KERN_ERR PFX "Invalid PCI region sizes\n");
969 ret = -ENODEV;
970 goto bail1;
971 }
972
973 /* Reserve PCI I/O and memory resources */
974 ret = pci_request_regions(pcidev, DRV_NAME);
975 if (ret) {
976 printk(KERN_ERR PFX "%s: Unable to request regions\n",
977 pci_name(pcidev));
978 goto bail1;
979 }
980
981 if ((sizeof(dma_addr_t) > 4)) {
982 ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64));
983 if (ret < 0) {
984 printk(KERN_ERR PFX "64b DMA configuration failed\n");
985 goto bail2;
986 }
987 } else {
988 ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
989 if (ret < 0) {
990 printk(KERN_ERR PFX "32b DMA configuration failed\n");
991 goto bail2;
992 }
993 }
994
995 /* Enables bus-mastering on the device */
996 pci_set_master(pcidev);
997
998 /* Remap the adapter PCI registers in BAR4 */
999 mmio_regs = ioremap_nocache(reg4_start + C2_PCI_REGS_OFFSET,
1000 sizeof(struct c2_adapter_pci_regs));
1001 if (!mmio_regs) {
1002 printk(KERN_ERR PFX
1003 "Unable to remap adapter PCI registers in BAR4\n");
1004 ret = -EIO;
1005 goto bail2;
1006 }
1007
1008 /* Validate PCI regs magic */
1009 for (i = 0; i < sizeof(c2_magic); i++) {
1010 if (c2_magic[i] != readb(mmio_regs + C2_REGS_MAGIC + i)) {
1011 printk(KERN_ERR PFX "Downlevel Firmware boot loader "
1012 "[%d/%Zd: got 0x%x, exp 0x%x]. Use the cc_flash "
1013 "utility to update your boot loader\n",
1014 i + 1, sizeof(c2_magic),
1015 readb(mmio_regs + C2_REGS_MAGIC + i),
1016 c2_magic[i]);
1017 printk(KERN_ERR PFX "Adapter not claimed\n");
1018 iounmap(mmio_regs);
1019 ret = -EIO;
1020 goto bail2;
1021 }
1022 }
1023
1024 /* Validate the adapter version */
1025 if (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_VERS)) != C2_VERSION) {
1026 printk(KERN_ERR PFX "Version mismatch "
1027 "[fw=%u, c2=%u], Adapter not claimed\n",
1028 be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_VERS)),
1029 C2_VERSION);
1030 ret = -EINVAL;
1031 iounmap(mmio_regs);
1032 goto bail2;
1033 }
1034
1035 /* Validate the adapter IVN */
1036 if (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_IVN)) != C2_IVN) {
1037 printk(KERN_ERR PFX "Downlevel FIrmware level. You should be using "
1038 "the OpenIB device support kit. "
1039 "[fw=0x%x, c2=0x%x], Adapter not claimed\n",
1040 be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_IVN)),
1041 C2_IVN);
1042 ret = -EINVAL;
1043 iounmap(mmio_regs);
1044 goto bail2;
1045 }
1046
1047 /* Allocate hardware structure */
1048 c2dev = (struct c2_dev *) ib_alloc_device(sizeof(*c2dev));
1049 if (!c2dev) {
1050 printk(KERN_ERR PFX "%s: Unable to alloc hardware struct\n",
1051 pci_name(pcidev));
1052 ret = -ENOMEM;
1053 iounmap(mmio_regs);
1054 goto bail2;
1055 }
1056
1057 memset(c2dev, 0, sizeof(*c2dev));
1058 spin_lock_init(&c2dev->lock);
1059 c2dev->pcidev = pcidev;
1060 c2dev->cur_tx = 0;
1061
1062 /* Get the last RX index */
1063 c2dev->cur_rx =
1064 (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_HRX_CUR)) -
1065 0xffffc000) / sizeof(struct c2_rxp_desc);
1066
1067 /* Request an interrupt line for the driver */
1068 ret = request_irq(pcidev->irq, c2_interrupt, IRQF_SHARED, DRV_NAME, c2dev);
1069 if (ret) {
1070 printk(KERN_ERR PFX "%s: requested IRQ %u is busy\n",
1071 pci_name(pcidev), pcidev->irq);
1072 iounmap(mmio_regs);
1073 goto bail3;
1074 }
1075
1076 /* Set driver specific data */
1077 pci_set_drvdata(pcidev, c2dev);
1078
1079 /* Initialize network device */
1080 if ((netdev = c2_devinit(c2dev, mmio_regs)) == NULL) {
1081 ret = -ENOMEM;
1082 iounmap(mmio_regs);
1083 goto bail4;
1084 }
1085
1086 /* Save off the actual size prior to unmapping mmio_regs */
1087 kva_map_size = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_PCI_WINSIZE));
1088
1089 /* Unmap the adapter PCI registers in BAR4 */
1090 iounmap(mmio_regs);
1091
1092 /* Register network device */
1093 ret = register_netdev(netdev);
1094 if (ret) {
1095 printk(KERN_ERR PFX "Unable to register netdev, ret = %d\n",
1096 ret);
1097 goto bail5;
1098 }
1099
1100 /* Disable network packets */
1101 netif_stop_queue(netdev);
1102
1103 /* Remap the adapter HRXDQ PA space to kernel VA space */
1104 c2dev->mmio_rxp_ring = ioremap_nocache(reg4_start + C2_RXP_HRXDQ_OFFSET,
1105 C2_RXP_HRXDQ_SIZE);
1106 if (!c2dev->mmio_rxp_ring) {
1107 printk(KERN_ERR PFX "Unable to remap MMIO HRXDQ region\n");
1108 ret = -EIO;
1109 goto bail6;
1110 }
1111
1112 /* Remap the adapter HTXDQ PA space to kernel VA space */
1113 c2dev->mmio_txp_ring = ioremap_nocache(reg4_start + C2_TXP_HTXDQ_OFFSET,
1114 C2_TXP_HTXDQ_SIZE);
1115 if (!c2dev->mmio_txp_ring) {
1116 printk(KERN_ERR PFX "Unable to remap MMIO HTXDQ region\n");
1117 ret = -EIO;
1118 goto bail7;
1119 }
1120
1121 /* Save off the current RX index in the last 4 bytes of the TXP Ring */
1122 C2_SET_CUR_RX(c2dev, c2dev->cur_rx);
1123
1124 /* Remap the PCI registers in adapter BAR0 to kernel VA space */
1125 c2dev->regs = ioremap_nocache(reg0_start, reg0_len);
1126 if (!c2dev->regs) {
1127 printk(KERN_ERR PFX "Unable to remap BAR0\n");
1128 ret = -EIO;
1129 goto bail8;
1130 }
1131
1132 /* Remap the PCI registers in adapter BAR4 to kernel VA space */
1133 c2dev->pa = reg4_start + C2_PCI_REGS_OFFSET;
1134 c2dev->kva = ioremap_nocache(reg4_start + C2_PCI_REGS_OFFSET,
1135 kva_map_size);
1136 if (!c2dev->kva) {
1137 printk(KERN_ERR PFX "Unable to remap BAR4\n");
1138 ret = -EIO;
1139 goto bail9;
1140 }
1141
1142 /* Print out the MAC address */
1143 pr_debug("%s: MAC %pM, IRQ %u\n", netdev->name, netdev->dev_addr,
1144 netdev->irq);
1145
1146 ret = c2_rnic_init(c2dev);
1147 if (ret) {
1148 printk(KERN_ERR PFX "c2_rnic_init failed: %d\n", ret);
1149 goto bail10;
1150 }
1151
1152 ret = c2_register_device(c2dev);
1153 if (ret)
1154 goto bail10;
1155
1156 return 0;
1157
1158 bail10:
1159 iounmap(c2dev->kva);
1160
1161 bail9:
1162 iounmap(c2dev->regs);
1163
1164 bail8:
1165 iounmap(c2dev->mmio_txp_ring);
1166
1167 bail7:
1168 iounmap(c2dev->mmio_rxp_ring);
1169
1170 bail6:
1171 unregister_netdev(netdev);
1172
1173 bail5:
1174 free_netdev(netdev);
1175
1176 bail4:
1177 free_irq(pcidev->irq, c2dev);
1178
1179 bail3:
1180 ib_dealloc_device(&c2dev->ibdev);
1181
1182 bail2:
1183 pci_release_regions(pcidev);
1184
1185 bail1:
1186 pci_disable_device(pcidev);
1187
1188 bail0:
1189 return ret;
1190}
1191
1192static void c2_remove(struct pci_dev *pcidev)
1193{
1194 struct c2_dev *c2dev = pci_get_drvdata(pcidev);
1195 struct net_device *netdev = c2dev->netdev;
1196
1197 /* Unregister with OpenIB */
1198 c2_unregister_device(c2dev);
1199
1200 /* Clean up the RNIC resources */
1201 c2_rnic_term(c2dev);
1202
1203 /* Remove network device from the kernel */
1204 unregister_netdev(netdev);
1205
1206 /* Free network device */
1207 free_netdev(netdev);
1208
1209 /* Free the interrupt line */
1210 free_irq(pcidev->irq, c2dev);
1211
1212 /* missing: Turn LEDs off here */
1213
1214 /* Unmap adapter PA space */
1215 iounmap(c2dev->kva);
1216 iounmap(c2dev->regs);
1217 iounmap(c2dev->mmio_txp_ring);
1218 iounmap(c2dev->mmio_rxp_ring);
1219
1220 /* Free the hardware structure */
1221 ib_dealloc_device(&c2dev->ibdev);
1222
1223 /* Release reserved PCI I/O and memory resources */
1224 pci_release_regions(pcidev);
1225
1226 /* Disable PCI device */
1227 pci_disable_device(pcidev);
1228
1229 /* Clear driver specific data */
1230 pci_set_drvdata(pcidev, NULL);
1231}
1232
1233static struct pci_driver c2_pci_driver = {
1234 .name = DRV_NAME,
1235 .id_table = c2_pci_table,
1236 .probe = c2_probe,
1237 .remove = c2_remove,
1238};
1239
1240module_pci_driver(c2_pci_driver);
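The c2.c file removed above managed each TX/RX ring as a circular singly linked list of host-side elements, each paired one-to-one with an adapter descriptor (see c2_tx_ring_alloc() and c2_rx_ring_alloc() in the deleted text). A stripped-down sketch of just that chaining, with simplified types:

#include <stdlib.h>

struct ring_elem {
	struct ring_elem *next;
	void *desc;			/* host-side descriptor (placeholder) */
};

/* Allocate count elements and chain them into a circle, the way the removed
 * ring allocators did for the hardware descriptor rings. */
static struct ring_elem *ring_alloc(unsigned int count)
{
	struct ring_elem *start = calloc(count, sizeof(*start));
	unsigned int i;

	if (!start)
		return NULL;
	for (i = 0; i < count; i++)
		start[i].next = (i == count - 1) ? start : &start[i + 1];
	return start;			/* to_use == to_clean == start initially */
}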
diff --git a/drivers/staging/rdma/amso1100/c2.h b/drivers/staging/rdma/amso1100/c2.h
deleted file mode 100644
index 21b565a91fd6..000000000000
--- a/drivers/staging/rdma/amso1100/c2.h
+++ /dev/null
@@ -1,547 +0,0 @@
1/*
2 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#ifndef __C2_H
35#define __C2_H
36
37#include <linux/netdevice.h>
38#include <linux/spinlock.h>
39#include <linux/kernel.h>
40#include <linux/pci.h>
41#include <linux/dma-mapping.h>
42#include <linux/idr.h>
43
44#include "c2_provider.h"
45#include "c2_mq.h"
46#include "c2_status.h"
47
48#define DRV_NAME "c2"
49#define DRV_VERSION "1.1"
50#define PFX DRV_NAME ": "
51
52#define BAR_0 0
53#define BAR_2 2
54#define BAR_4 4
55
56#define RX_BUF_SIZE (1536 + 8)
57#define ETH_JUMBO_MTU 9000
58#define C2_MAGIC "CEPHEUS"
59#define C2_VERSION 4
60#define C2_IVN (18 & 0x7fffffff)
61
62#define C2_REG0_SIZE (16 * 1024)
63#define C2_REG2_SIZE (2 * 1024 * 1024)
64#define C2_REG4_SIZE (256 * 1024 * 1024)
65#define C2_NUM_TX_DESC 341
66#define C2_NUM_RX_DESC 256
67#define C2_PCI_REGS_OFFSET (0x10000)
68#define C2_RXP_HRXDQ_OFFSET (((C2_REG4_SIZE)/2))
69#define C2_RXP_HRXDQ_SIZE (4096)
70#define C2_TXP_HTXDQ_OFFSET (((C2_REG4_SIZE)/2) + C2_RXP_HRXDQ_SIZE)
71#define C2_TXP_HTXDQ_SIZE (4096)
72#define C2_TX_TIMEOUT (6*HZ)
73
74/* CEPHEUS */
75static const u8 c2_magic[] = {
76 0x43, 0x45, 0x50, 0x48, 0x45, 0x55, 0x53
77};
78
79enum adapter_pci_regs {
80 C2_REGS_MAGIC = 0x0000,
81 C2_REGS_VERS = 0x0008,
82 C2_REGS_IVN = 0x000C,
83 C2_REGS_PCI_WINSIZE = 0x0010,
84 C2_REGS_Q0_QSIZE = 0x0014,
85 C2_REGS_Q0_MSGSIZE = 0x0018,
86 C2_REGS_Q0_POOLSTART = 0x001C,
87 C2_REGS_Q0_SHARED = 0x0020,
88 C2_REGS_Q1_QSIZE = 0x0024,
89 C2_REGS_Q1_MSGSIZE = 0x0028,
90 C2_REGS_Q1_SHARED = 0x0030,
91 C2_REGS_Q2_QSIZE = 0x0034,
92 C2_REGS_Q2_MSGSIZE = 0x0038,
93 C2_REGS_Q2_SHARED = 0x0040,
94 C2_REGS_ENADDR = 0x004C,
95 C2_REGS_RDMA_ENADDR = 0x0054,
96 C2_REGS_HRX_CUR = 0x006C,
97};
98
99struct c2_adapter_pci_regs {
100 char reg_magic[8];
101 u32 version;
102 u32 ivn;
103 u32 pci_window_size;
104 u32 q0_q_size;
105 u32 q0_msg_size;
106 u32 q0_pool_start;
107 u32 q0_shared;
108 u32 q1_q_size;
109 u32 q1_msg_size;
110 u32 q1_pool_start;
111 u32 q1_shared;
112 u32 q2_q_size;
113 u32 q2_msg_size;
114 u32 q2_pool_start;
115 u32 q2_shared;
116 u32 log_start;
117 u32 log_size;
118 u8 host_enaddr[8];
119 u8 rdma_enaddr[8];
120 u32 crash_entry;
121 u32 crash_ready[2];
122 u32 fw_txd_cur;
123 u32 fw_hrxd_cur;
124 u32 fw_rxd_cur;
125};
126
127enum pci_regs {
128 C2_HISR = 0x0000,
129 C2_DISR = 0x0004,
130 C2_HIMR = 0x0008,
131 C2_DIMR = 0x000C,
132 C2_NISR0 = 0x0010,
133 C2_NISR1 = 0x0014,
134 C2_NIMR0 = 0x0018,
135 C2_NIMR1 = 0x001C,
136 C2_IDIS = 0x0020,
137};
138
139enum {
140 C2_PCI_HRX_INT = 1 << 8,
141 C2_PCI_HTX_INT = 1 << 17,
142 C2_PCI_HRX_QUI = 1 << 31,
143};
144
145/*
146 * Cepheus registers in BAR0.
147 */
148struct c2_pci_regs {
149 u32 hostisr;
150 u32 dmaisr;
151 u32 hostimr;
152 u32 dmaimr;
153 u32 netisr0;
154 u32 netisr1;
155 u32 netimr0;
156 u32 netimr1;
157 u32 int_disable;
158};
159
160/* TXP flags */
161enum c2_txp_flags {
162 TXP_HTXD_DONE = 0,
163 TXP_HTXD_READY = 1 << 0,
164 TXP_HTXD_UNINIT = 1 << 1,
165};
166
167/* RXP flags */
168enum c2_rxp_flags {
169 RXP_HRXD_UNINIT = 0,
170 RXP_HRXD_READY = 1 << 0,
171 RXP_HRXD_DONE = 1 << 1,
172};
173
174/* RXP status */
175enum c2_rxp_status {
176 RXP_HRXD_ZERO = 0,
177 RXP_HRXD_OK = 1 << 0,
178 RXP_HRXD_BUF_OV = 1 << 1,
179};
180
181/* TXP descriptor fields */
182enum txp_desc {
183 C2_TXP_FLAGS = 0x0000,
184 C2_TXP_LEN = 0x0002,
185 C2_TXP_ADDR = 0x0004,
186};
187
188/* RXP descriptor fields */
189enum rxp_desc {
190 C2_RXP_FLAGS = 0x0000,
191 C2_RXP_STATUS = 0x0002,
192 C2_RXP_COUNT = 0x0004,
193 C2_RXP_LEN = 0x0006,
194 C2_RXP_ADDR = 0x0008,
195};
196
197struct c2_txp_desc {
198 u16 flags;
199 u16 len;
200 u64 addr;
201} __attribute__ ((packed));
202
203struct c2_rxp_desc {
204 u16 flags;
205 u16 status;
206 u16 count;
207 u16 len;
208 u64 addr;
209} __attribute__ ((packed));
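/*
 * Illustrative sketch (not part of the original driver): the C2_TXP_xxx
 * and C2_RXP_xxx field offsets above are intended to mirror the packed
 * descriptor layouts, which a compile-time check could assert.  Assumes
 * BUILD_BUG_ON()/offsetof() are reachable via the includes above.
 */
static inline void c2_check_desc_offsets(void)
{
	BUILD_BUG_ON(offsetof(struct c2_txp_desc, flags) != C2_TXP_FLAGS);
	BUILD_BUG_ON(offsetof(struct c2_txp_desc, len)   != C2_TXP_LEN);
	BUILD_BUG_ON(offsetof(struct c2_txp_desc, addr)  != C2_TXP_ADDR);

	BUILD_BUG_ON(offsetof(struct c2_rxp_desc, flags)  != C2_RXP_FLAGS);
	BUILD_BUG_ON(offsetof(struct c2_rxp_desc, status) != C2_RXP_STATUS);
	BUILD_BUG_ON(offsetof(struct c2_rxp_desc, count)  != C2_RXP_COUNT);
	BUILD_BUG_ON(offsetof(struct c2_rxp_desc, len)    != C2_RXP_LEN);
	BUILD_BUG_ON(offsetof(struct c2_rxp_desc, addr)   != C2_RXP_ADDR);
}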
210
211struct c2_rxp_hdr {
212 u16 flags;
213 u16 status;
214 u16 len;
215 u16 rsvd;
216} __attribute__ ((packed));
217
218struct c2_tx_desc {
219 u32 len;
220 u32 status;
221 dma_addr_t next_offset;
222};
223
224struct c2_rx_desc {
225 u32 len;
226 u32 status;
227 dma_addr_t next_offset;
228};
229
230struct c2_alloc {
231 u32 last;
232 u32 max;
233 spinlock_t lock;
234 unsigned long *table;
235};
236
237struct c2_array {
238 struct {
239 void **page;
240 int used;
241 } *page_list;
242};
243
244/*
245 * The MQ shared pointer pool is organized as a linked list of
246 * chunks. Each chunk contains a linked list of free shared pointers
247 * that can be allocated to a given user mode client.
248 *
249 */
250struct sp_chunk {
251 struct sp_chunk *next;
252 dma_addr_t dma_addr;
253 DEFINE_DMA_UNMAP_ADDR(mapping);
254 u16 head;
255 u16 shared_ptr[0];
256};
257
258struct c2_pd_table {
259 u32 last;
260 u32 max;
261 spinlock_t lock;
262 unsigned long *table;
263};
264
265struct c2_qp_table {
266 struct idr idr;
267 spinlock_t lock;
268};
269
270struct c2_element {
271 struct c2_element *next;
272 void *ht_desc; /* host descriptor */
273 void __iomem *hw_desc; /* hardware descriptor */
274 struct sk_buff *skb;
275 dma_addr_t mapaddr;
276 u32 maplen;
277};
278
279struct c2_ring {
280 struct c2_element *to_clean;
281 struct c2_element *to_use;
282 struct c2_element *start;
283 unsigned long count;
284};
285
286struct c2_dev {
287 struct ib_device ibdev;
288 void __iomem *regs;
289 void __iomem *mmio_txp_ring; /* remapped adapter memory for hw rings */
290 void __iomem *mmio_rxp_ring;
291 spinlock_t lock;
292 struct pci_dev *pcidev;
293 struct net_device *netdev;
294 struct net_device *pseudo_netdev;
295 unsigned int cur_tx;
296 unsigned int cur_rx;
297 u32 adapter_handle;
298 int device_cap_flags;
299 void __iomem *kva; /* KVA device memory */
300 unsigned long pa; /* PA device memory */
301 void **qptr_array;
302
303 struct kmem_cache *host_msg_cache;
304
305 struct list_head cca_link; /* adapter list */
306 struct list_head eh_wakeup_list; /* event wakeup list */
307 wait_queue_head_t req_vq_wo;
308
309 /* Cached RNIC properties */
310 struct ib_device_attr props;
311
312 struct c2_pd_table pd_table;
313 struct c2_qp_table qp_table;
314 int ports; /* num of GigE ports */
315 int devnum;
316 spinlock_t vqlock; /* sync vbs req MQ */
317
318 /* Verbs Queues */
319 struct c2_mq req_vq; /* Verbs Request MQ */
320 struct c2_mq rep_vq; /* Verbs Reply MQ */
321 struct c2_mq aeq; /* Async Events MQ */
322
323 /* Kernel client MQs */
324 struct sp_chunk *kern_mqsp_pool;
325
326 /* Device updates these values when posting messages to a host
327 * target queue */
328 u16 req_vq_shared;
329 u16 rep_vq_shared;
330 u16 aeq_shared;
331 u16 irq_claimed;
332
333 /*
334 * Shared host target pages for user-accessible MQs.
335 */
336 int hthead; /* index of first free entry */
337 void *htpages; /* kernel vaddr */
338 int htlen; /* length of htpages memory */
339 void *htuva; /* user mapped vaddr */
340 spinlock_t htlock; /* serialize allocation */
341
342 u64 adapter_hint_uva; /* access to the activity FIFO */
343
344 // spinlock_t aeq_lock;
345 // spinlock_t rnic_lock;
346
347 __be16 *hint_count;
348 dma_addr_t hint_count_dma;
349 u16 hints_read;
350
351 int init; /* TRUE if it's ready */
352 char ae_cache_name[16];
353 char vq_cache_name[16];
354};
355
356struct c2_port {
357 u32 msg_enable;
358 struct c2_dev *c2dev;
359 struct net_device *netdev;
360
361 spinlock_t tx_lock;
362 u32 tx_avail;
363 struct c2_ring tx_ring;
364 struct c2_ring rx_ring;
365
366 void *mem; /* PCI memory for host rings */
367 dma_addr_t dma;
368 unsigned long mem_size;
369
370 u32 rx_buf_size;
371};
372
373/*
374 * Activity FIFO registers in BAR0.
375 */
376#define PCI_BAR0_HOST_HINT 0x100
377#define PCI_BAR0_ADAPTER_HINT 0x2000
378
379/*
 380 * CQ arm flags written to the CQ's shared 'armed' byte.
381 */
382#define CQ_ARMED 0x01
383#define CQ_WAIT_FOR_DMA 0x80
384
385/*
386 * The format of a hint is as follows:
387 * Lower 16 bits are the count of hints for the queue.
 388 * Next 15 bits are the qp_index.
 389 * Uppermost bit depends on who reads it:
390 * If read by producer, then it means Full (1) or Not-Full (0)
391 * If read by consumer, then it means Empty (1) or Not-Empty (0)
392 */
 393#define C2_HINT_MAKE(q_index, hint_count) (((q_index) << 16) | (hint_count))
394#define C2_HINT_GET_INDEX(hint) (((hint) & 0x7FFF0000) >> 16)
395#define C2_HINT_GET_COUNT(hint) ((hint) & 0x0000FFFF)
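/*
 * Worked example of the packing above, with illustrative values only
 * (not taken from the driver): qp_index 3 with a hint count of 5 packs
 * to 0x00030005, and the accessors recover the two fields.
 */
static inline void c2_hint_layout_example(void)
{
	BUILD_BUG_ON(C2_HINT_MAKE(3, 5) != 0x00030005);
	BUILD_BUG_ON(C2_HINT_GET_INDEX(0x00030005) != 3);
	BUILD_BUG_ON(C2_HINT_GET_COUNT(0x00030005) != 5);
}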
396
397
398/*
399 * The following defines the offset in SDRAM for the c2_adapter_pci_regs_t
400 * struct.
401 */
402#define C2_ADAPTER_PCI_REGS_OFFSET 0x10000
403
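/*
 * Fallback 64-bit MMIO helpers for configurations that do not provide
 * readq/writeq: each 64-bit access is composed of two 32-bit accesses,
 * low word at addr and high word at addr + 4.
 */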
404#ifndef readq
405static inline u64 readq(const void __iomem * addr)
406{
407 u64 ret = readl(addr + 4);
408 ret <<= 32;
409 ret |= readl(addr);
410
411 return ret;
412}
413#endif
414
415#ifndef writeq
416static inline void __raw_writeq(u64 val, void __iomem * addr)
417{
418 __raw_writel((u32) (val), addr);
419 __raw_writel((u32) (val >> 32), (addr + 4));
420}
421#endif
422
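/*
 * The current RX index appears to be exchanged with the adapter as a
 * big-endian u32 kept at a fixed offset (4092) in the mapped TXP ring.
 */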
423#define C2_SET_CUR_RX(c2dev, cur_rx) \
424 __raw_writel((__force u32) cpu_to_be32(cur_rx), c2dev->mmio_txp_ring + 4092)
425
426#define C2_GET_CUR_RX(c2dev) \
427 be32_to_cpu((__force __be32) readl(c2dev->mmio_txp_ring + 4092))
428
429static inline struct c2_dev *to_c2dev(struct ib_device *ibdev)
430{
431 return container_of(ibdev, struct c2_dev, ibdev);
432}
433
434static inline int c2_errno(void *reply)
435{
436 switch (c2_wr_get_result(reply)) {
437 case C2_OK:
438 return 0;
439 case CCERR_NO_BUFS:
440 case CCERR_INSUFFICIENT_RESOURCES:
441 case CCERR_ZERO_RDMA_READ_RESOURCES:
442 return -ENOMEM;
443 case CCERR_MR_IN_USE:
444 case CCERR_QP_IN_USE:
445 return -EBUSY;
446 case CCERR_ADDR_IN_USE:
447 return -EADDRINUSE;
448 case CCERR_ADDR_NOT_AVAIL:
449 return -EADDRNOTAVAIL;
450 case CCERR_CONN_RESET:
451 return -ECONNRESET;
452 case CCERR_NOT_IMPLEMENTED:
453 case CCERR_INVALID_WQE:
454 return -ENOSYS;
455 case CCERR_QP_NOT_PRIVILEGED:
456 return -EPERM;
457 case CCERR_STACK_ERROR:
458 return -EPROTO;
459 case CCERR_ACCESS_VIOLATION:
460 case CCERR_BASE_AND_BOUNDS_VIOLATION:
461 return -EFAULT;
462 case CCERR_STAG_STATE_NOT_INVALID:
463 case CCERR_INVALID_ADDRESS:
464 case CCERR_INVALID_CQ:
465 case CCERR_INVALID_EP:
466 case CCERR_INVALID_MODIFIER:
467 case CCERR_INVALID_MTU:
468 case CCERR_INVALID_PD_ID:
469 case CCERR_INVALID_QP:
470 case CCERR_INVALID_RNIC:
471 case CCERR_INVALID_STAG:
472 return -EINVAL;
473 default:
474 return -EAGAIN;
475 }
476}
477
478/* Device */
479int c2_register_device(struct c2_dev *c2dev);
480void c2_unregister_device(struct c2_dev *c2dev);
481int c2_rnic_init(struct c2_dev *c2dev);
482void c2_rnic_term(struct c2_dev *c2dev);
483void c2_rnic_interrupt(struct c2_dev *c2dev);
484int c2_del_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask);
485int c2_add_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask);
486
487/* QPs */
488int c2_alloc_qp(struct c2_dev *c2dev, struct c2_pd *pd,
489 struct ib_qp_init_attr *qp_attrs, struct c2_qp *qp);
490void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp);
491struct ib_qp *c2_get_qp(struct ib_device *device, int qpn);
492int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
493 struct ib_qp_attr *attr, int attr_mask);
494int c2_qp_set_read_limits(struct c2_dev *c2dev, struct c2_qp *qp,
495 int ord, int ird);
496int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
497 struct ib_send_wr **bad_wr);
498int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
499 struct ib_recv_wr **bad_wr);
500void c2_init_qp_table(struct c2_dev *c2dev);
501void c2_cleanup_qp_table(struct c2_dev *c2dev);
502void c2_set_qp_state(struct c2_qp *, int);
503struct c2_qp *c2_find_qpn(struct c2_dev *c2dev, int qpn);
504
505/* PDs */
506int c2_pd_alloc(struct c2_dev *c2dev, int privileged, struct c2_pd *pd);
507void c2_pd_free(struct c2_dev *c2dev, struct c2_pd *pd);
508int c2_init_pd_table(struct c2_dev *c2dev);
509void c2_cleanup_pd_table(struct c2_dev *c2dev);
510
511/* CQs */
512int c2_init_cq(struct c2_dev *c2dev, int entries,
513 struct c2_ucontext *ctx, struct c2_cq *cq);
514void c2_free_cq(struct c2_dev *c2dev, struct c2_cq *cq);
515void c2_cq_event(struct c2_dev *c2dev, u32 mq_index);
516void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index);
517int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
518int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
519
520/* CM */
521int c2_llp_connect(struct iw_cm_id *cm_id,
522 struct iw_cm_conn_param *iw_param);
523int c2_llp_accept(struct iw_cm_id *cm_id,
524 struct iw_cm_conn_param *iw_param);
525int c2_llp_reject(struct iw_cm_id *cm_id, const void *pdata,
526 u8 pdata_len);
527int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog);
528int c2_llp_service_destroy(struct iw_cm_id *cm_id);
529
530/* MM */
531int c2_nsmr_register_phys_kern(struct c2_dev *c2dev, u64 *addr_list,
532 int page_size, int pbl_depth, u32 length,
533 u32 off, u64 *va, enum c2_acf acf,
534 struct c2_mr *mr);
535int c2_stag_dealloc(struct c2_dev *c2dev, u32 stag_index);
536
537/* AE */
538void c2_ae_event(struct c2_dev *c2dev, u32 mq_index);
539
540/* MQSP Allocator */
541int c2_init_mqsp_pool(struct c2_dev *c2dev, gfp_t gfp_mask,
542 struct sp_chunk **root);
543void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root);
544__be16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head,
545 dma_addr_t *dma_addr, gfp_t gfp_mask);
546void c2_free_mqsp(__be16* mqsp);
547#endif
diff --git a/drivers/staging/rdma/amso1100/c2_ae.c b/drivers/staging/rdma/amso1100/c2_ae.c
deleted file mode 100644
index eb7a92b2692f..000000000000
--- a/drivers/staging/rdma/amso1100/c2_ae.c
+++ /dev/null
@@ -1,327 +0,0 @@
1/*
2 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33#include "c2.h"
34#include <rdma/iw_cm.h>
35#include "c2_status.h"
36#include "c2_ae.h"
37
38static int c2_convert_cm_status(u32 c2_status)
39{
40 switch (c2_status) {
41 case C2_CONN_STATUS_SUCCESS:
42 return 0;
43 case C2_CONN_STATUS_REJECTED:
44 return -ENETRESET;
45 case C2_CONN_STATUS_REFUSED:
46 return -ECONNREFUSED;
47 case C2_CONN_STATUS_TIMEDOUT:
48 return -ETIMEDOUT;
49 case C2_CONN_STATUS_NETUNREACH:
50 return -ENETUNREACH;
51 case C2_CONN_STATUS_HOSTUNREACH:
52 return -EHOSTUNREACH;
53 case C2_CONN_STATUS_INVALID_RNIC:
54 return -EINVAL;
55 case C2_CONN_STATUS_INVALID_QP:
56 return -EINVAL;
57 case C2_CONN_STATUS_INVALID_QP_STATE:
58 return -EINVAL;
59 case C2_CONN_STATUS_ADDR_NOT_AVAIL:
60 return -EADDRNOTAVAIL;
61 default:
62 printk(KERN_ERR PFX
63 "%s - Unable to convert CM status: %d\n",
64 __func__, c2_status);
65 return -EIO;
66 }
67}
68
69static const char* to_event_str(int event)
70{
71 static const char* event_str[] = {
72 "CCAE_REMOTE_SHUTDOWN",
73 "CCAE_ACTIVE_CONNECT_RESULTS",
74 "CCAE_CONNECTION_REQUEST",
75 "CCAE_LLP_CLOSE_COMPLETE",
76 "CCAE_TERMINATE_MESSAGE_RECEIVED",
77 "CCAE_LLP_CONNECTION_RESET",
78 "CCAE_LLP_CONNECTION_LOST",
79 "CCAE_LLP_SEGMENT_SIZE_INVALID",
80 "CCAE_LLP_INVALID_CRC",
81 "CCAE_LLP_BAD_FPDU",
82 "CCAE_INVALID_DDP_VERSION",
83 "CCAE_INVALID_RDMA_VERSION",
84 "CCAE_UNEXPECTED_OPCODE",
85 "CCAE_INVALID_DDP_QUEUE_NUMBER",
86 "CCAE_RDMA_READ_NOT_ENABLED",
87 "CCAE_RDMA_WRITE_NOT_ENABLED",
88 "CCAE_RDMA_READ_TOO_SMALL",
89 "CCAE_NO_L_BIT",
90 "CCAE_TAGGED_INVALID_STAG",
91 "CCAE_TAGGED_BASE_BOUNDS_VIOLATION",
92 "CCAE_TAGGED_ACCESS_RIGHTS_VIOLATION",
93 "CCAE_TAGGED_INVALID_PD",
94 "CCAE_WRAP_ERROR",
95 "CCAE_BAD_CLOSE",
96 "CCAE_BAD_LLP_CLOSE",
97 "CCAE_INVALID_MSN_RANGE",
98 "CCAE_INVALID_MSN_GAP",
99 "CCAE_IRRQ_OVERFLOW",
100 "CCAE_IRRQ_MSN_GAP",
101 "CCAE_IRRQ_MSN_RANGE",
102 "CCAE_IRRQ_INVALID_STAG",
103 "CCAE_IRRQ_BASE_BOUNDS_VIOLATION",
104 "CCAE_IRRQ_ACCESS_RIGHTS_VIOLATION",
105 "CCAE_IRRQ_INVALID_PD",
106 "CCAE_IRRQ_WRAP_ERROR",
107 "CCAE_CQ_SQ_COMPLETION_OVERFLOW",
108 "CCAE_CQ_RQ_COMPLETION_ERROR",
109 "CCAE_QP_SRQ_WQE_ERROR",
110 "CCAE_QP_LOCAL_CATASTROPHIC_ERROR",
111 "CCAE_CQ_OVERFLOW",
112 "CCAE_CQ_OPERATION_ERROR",
113 "CCAE_SRQ_LIMIT_REACHED",
114 "CCAE_QP_RQ_LIMIT_REACHED",
115 "CCAE_SRQ_CATASTROPHIC_ERROR",
116 "CCAE_RNIC_CATASTROPHIC_ERROR"
117 };
118
119 if (event < CCAE_REMOTE_SHUTDOWN ||
120 event > CCAE_RNIC_CATASTROPHIC_ERROR)
121 return "<invalid event>";
122
123 event -= CCAE_REMOTE_SHUTDOWN;
124 return event_str[event];
125}
126
127static const char *to_qp_state_str(int state)
128{
129 switch (state) {
130 case C2_QP_STATE_IDLE:
131 return "C2_QP_STATE_IDLE";
132 case C2_QP_STATE_CONNECTING:
133 return "C2_QP_STATE_CONNECTING";
134 case C2_QP_STATE_RTS:
135 return "C2_QP_STATE_RTS";
136 case C2_QP_STATE_CLOSING:
137 return "C2_QP_STATE_CLOSING";
138 case C2_QP_STATE_TERMINATE:
139 return "C2_QP_STATE_TERMINATE";
140 case C2_QP_STATE_ERROR:
141 return "C2_QP_STATE_ERROR";
142 default:
143 return "<invalid QP state>";
144 }
145}
146
147void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)
148{
149 struct c2_mq *mq = c2dev->qptr_array[mq_index];
150 union c2wr *wr;
151 void *resource_user_context;
152 struct iw_cm_event cm_event;
153 struct ib_event ib_event;
154 enum c2_resource_indicator resource_indicator;
155 enum c2_event_id event_id;
156 unsigned long flags;
157 int status;
158 struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_event.local_addr;
159 struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_event.remote_addr;
160
161 /*
162 * retrieve the message
163 */
164 wr = c2_mq_consume(mq);
165 if (!wr)
166 return;
167
168 memset(&ib_event, 0, sizeof(ib_event));
169 memset(&cm_event, 0, sizeof(cm_event));
170
171 event_id = c2_wr_get_id(wr);
172 resource_indicator = be32_to_cpu(wr->ae.ae_generic.resource_type);
173 resource_user_context =
174 (void *) (unsigned long) wr->ae.ae_generic.user_context;
175
176 status = cm_event.status = c2_convert_cm_status(c2_wr_get_result(wr));
177
178 pr_debug("event received c2_dev=%p, event_id=%d, "
179 "resource_indicator=%d, user_context=%p, status = %d\n",
180 c2dev, event_id, resource_indicator, resource_user_context,
181 status);
182
183 switch (resource_indicator) {
184 case C2_RES_IND_QP:{
185
186 struct c2_qp *qp = resource_user_context;
187 struct iw_cm_id *cm_id = qp->cm_id;
188 struct c2wr_ae_active_connect_results *res;
189
190 if (!cm_id) {
191 pr_debug("event received, but cm_id is <nul>, qp=%p!\n",
192 qp);
193 goto ignore_it;
194 }
195 pr_debug("%s: event = %s, user_context=%llx, "
196 "resource_type=%x, "
197 "resource=%x, qp_state=%s\n",
198 __func__,
199 to_event_str(event_id),
200 (unsigned long long) wr->ae.ae_generic.user_context,
201 be32_to_cpu(wr->ae.ae_generic.resource_type),
202 be32_to_cpu(wr->ae.ae_generic.resource),
203 to_qp_state_str(be32_to_cpu(wr->ae.ae_generic.qp_state)));
204
205 c2_set_qp_state(qp, be32_to_cpu(wr->ae.ae_generic.qp_state));
206
207 switch (event_id) {
208 case CCAE_ACTIVE_CONNECT_RESULTS:
209 res = &wr->ae.ae_active_connect_results;
210 cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
211 laddr->sin_addr.s_addr = res->laddr;
212 raddr->sin_addr.s_addr = res->raddr;
213 laddr->sin_port = res->lport;
214 raddr->sin_port = res->rport;
215 if (status == 0) {
216 cm_event.private_data_len =
217 be32_to_cpu(res->private_data_length);
218 cm_event.private_data = res->private_data;
219 } else {
220 spin_lock_irqsave(&qp->lock, flags);
221 if (qp->cm_id) {
222 qp->cm_id->rem_ref(qp->cm_id);
223 qp->cm_id = NULL;
224 }
225 spin_unlock_irqrestore(&qp->lock, flags);
226 cm_event.private_data_len = 0;
227 cm_event.private_data = NULL;
228 }
229 if (cm_id->event_handler)
230 cm_id->event_handler(cm_id, &cm_event);
231 break;
232 case CCAE_TERMINATE_MESSAGE_RECEIVED:
233 case CCAE_CQ_SQ_COMPLETION_OVERFLOW:
234 ib_event.device = &c2dev->ibdev;
235 ib_event.element.qp = &qp->ibqp;
236 ib_event.event = IB_EVENT_QP_REQ_ERR;
237
238 if (qp->ibqp.event_handler)
239 qp->ibqp.event_handler(&ib_event,
240 qp->ibqp.
241 qp_context);
242 break;
243 case CCAE_BAD_CLOSE:
244 case CCAE_LLP_CLOSE_COMPLETE:
245 case CCAE_LLP_CONNECTION_RESET:
246 case CCAE_LLP_CONNECTION_LOST:
247 BUG_ON(cm_id->event_handler==(void*)0x6b6b6b6b);
248
249 spin_lock_irqsave(&qp->lock, flags);
250 if (qp->cm_id) {
251 qp->cm_id->rem_ref(qp->cm_id);
252 qp->cm_id = NULL;
253 }
254 spin_unlock_irqrestore(&qp->lock, flags);
255 cm_event.event = IW_CM_EVENT_CLOSE;
256 cm_event.status = 0;
257 if (cm_id->event_handler)
258 cm_id->event_handler(cm_id, &cm_event);
259 break;
260 default:
 261			pr_debug("%s:%d Unexpected event_id=%d on QP=%p, "
 262				"CM_ID=%p\n",
 263				__func__, __LINE__,
 264				event_id, qp, cm_id);
 265			BUG_ON(1);
266 break;
267 }
268 break;
269 }
270
271 case C2_RES_IND_EP:{
272
273 struct c2wr_ae_connection_request *req =
274 &wr->ae.ae_connection_request;
275 struct iw_cm_id *cm_id =
276 resource_user_context;
277
278 pr_debug("C2_RES_IND_EP event_id=%d\n", event_id);
279 if (event_id != CCAE_CONNECTION_REQUEST) {
280 pr_debug("%s: Invalid event_id: %d\n",
281 __func__, event_id);
282 break;
283 }
284 cm_event.event = IW_CM_EVENT_CONNECT_REQUEST;
285 cm_event.provider_data = (void*)(unsigned long)req->cr_handle;
286 laddr->sin_addr.s_addr = req->laddr;
287 raddr->sin_addr.s_addr = req->raddr;
288 laddr->sin_port = req->lport;
289 raddr->sin_port = req->rport;
290 cm_event.private_data_len =
291 be32_to_cpu(req->private_data_length);
292 cm_event.private_data = req->private_data;
293 /*
294 * Until ird/ord negotiation via MPAv2 support is added, send
295 * max supported values
296 */
297 cm_event.ird = cm_event.ord = 128;
298
299 if (cm_id->event_handler)
300 cm_id->event_handler(cm_id, &cm_event);
301 break;
302 }
303
304 case C2_RES_IND_CQ:{
305 struct c2_cq *cq =
306 resource_user_context;
307
308 pr_debug("IB_EVENT_CQ_ERR\n");
309 ib_event.device = &c2dev->ibdev;
310 ib_event.element.cq = &cq->ibcq;
311 ib_event.event = IB_EVENT_CQ_ERR;
312
313 if (cq->ibcq.event_handler)
314 cq->ibcq.event_handler(&ib_event,
315 cq->ibcq.cq_context);
316 break;
317 }
318
319 default:
320 printk("Bad resource indicator = %d\n",
321 resource_indicator);
322 break;
323 }
324
325 ignore_it:
326 c2_mq_free(mq);
327}
diff --git a/drivers/staging/rdma/amso1100/c2_ae.h b/drivers/staging/rdma/amso1100/c2_ae.h
deleted file mode 100644
index 3a065c33b83b..000000000000
--- a/drivers/staging/rdma/amso1100/c2_ae.h
+++ /dev/null
@@ -1,108 +0,0 @@
1/*
2 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33#ifndef _C2_AE_H_
34#define _C2_AE_H_
35
36/*
37 * WARNING: If you change this file, also bump C2_IVN_BASE
38 * in common/include/clustercore/c2_ivn.h.
39 */
40
41/*
42 * Asynchronous Event Identifiers
43 *
44 * These start at 0x80 only so it's obvious from inspection that
45 * they are not work-request statuses. This isn't critical.
46 *
47 * NOTE: these event id's must fit in eight bits.
48 */
49enum c2_event_id {
50 CCAE_REMOTE_SHUTDOWN = 0x80,
51 CCAE_ACTIVE_CONNECT_RESULTS,
52 CCAE_CONNECTION_REQUEST,
53 CCAE_LLP_CLOSE_COMPLETE,
54 CCAE_TERMINATE_MESSAGE_RECEIVED,
55 CCAE_LLP_CONNECTION_RESET,
56 CCAE_LLP_CONNECTION_LOST,
57 CCAE_LLP_SEGMENT_SIZE_INVALID,
58 CCAE_LLP_INVALID_CRC,
59 CCAE_LLP_BAD_FPDU,
60 CCAE_INVALID_DDP_VERSION,
61 CCAE_INVALID_RDMA_VERSION,
62 CCAE_UNEXPECTED_OPCODE,
63 CCAE_INVALID_DDP_QUEUE_NUMBER,
64 CCAE_RDMA_READ_NOT_ENABLED,
65 CCAE_RDMA_WRITE_NOT_ENABLED,
66 CCAE_RDMA_READ_TOO_SMALL,
67 CCAE_NO_L_BIT,
68 CCAE_TAGGED_INVALID_STAG,
69 CCAE_TAGGED_BASE_BOUNDS_VIOLATION,
70 CCAE_TAGGED_ACCESS_RIGHTS_VIOLATION,
71 CCAE_TAGGED_INVALID_PD,
72 CCAE_WRAP_ERROR,
73 CCAE_BAD_CLOSE,
74 CCAE_BAD_LLP_CLOSE,
75 CCAE_INVALID_MSN_RANGE,
76 CCAE_INVALID_MSN_GAP,
77 CCAE_IRRQ_OVERFLOW,
78 CCAE_IRRQ_MSN_GAP,
79 CCAE_IRRQ_MSN_RANGE,
80 CCAE_IRRQ_INVALID_STAG,
81 CCAE_IRRQ_BASE_BOUNDS_VIOLATION,
82 CCAE_IRRQ_ACCESS_RIGHTS_VIOLATION,
83 CCAE_IRRQ_INVALID_PD,
84 CCAE_IRRQ_WRAP_ERROR,
85 CCAE_CQ_SQ_COMPLETION_OVERFLOW,
86 CCAE_CQ_RQ_COMPLETION_ERROR,
87 CCAE_QP_SRQ_WQE_ERROR,
88 CCAE_QP_LOCAL_CATASTROPHIC_ERROR,
89 CCAE_CQ_OVERFLOW,
90 CCAE_CQ_OPERATION_ERROR,
91 CCAE_SRQ_LIMIT_REACHED,
92 CCAE_QP_RQ_LIMIT_REACHED,
93 CCAE_SRQ_CATASTROPHIC_ERROR,
94 CCAE_RNIC_CATASTROPHIC_ERROR
95/* WARNING If you add more id's, make sure their values fit in eight bits. */
96};
97
98/*
99 * Resource Indicators and Identifiers
100 */
101enum c2_resource_indicator {
102 C2_RES_IND_QP = 1,
103 C2_RES_IND_EP,
104 C2_RES_IND_CQ,
105 C2_RES_IND_SRQ,
106};
107
108#endif /* _C2_AE_H_ */
diff --git a/drivers/staging/rdma/amso1100/c2_alloc.c b/drivers/staging/rdma/amso1100/c2_alloc.c
deleted file mode 100644
index 039872dfabbc..000000000000
--- a/drivers/staging/rdma/amso1100/c2_alloc.c
+++ /dev/null
@@ -1,142 +0,0 @@
1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/errno.h>
35#include <linux/bitmap.h>
36
37#include "c2.h"
38
39static int c2_alloc_mqsp_chunk(struct c2_dev *c2dev, gfp_t gfp_mask,
40 struct sp_chunk **head)
41{
42 int i;
43 struct sp_chunk *new_head;
44 dma_addr_t dma_addr;
45
46 new_head = dma_alloc_coherent(&c2dev->pcidev->dev, PAGE_SIZE,
47 &dma_addr, gfp_mask);
48 if (new_head == NULL)
49 return -ENOMEM;
50
51 new_head->dma_addr = dma_addr;
52 dma_unmap_addr_set(new_head, mapping, new_head->dma_addr);
53
54 new_head->next = NULL;
55 new_head->head = 0;
56
57 /* build list where each index is the next free slot */
58 for (i = 0;
59 i < (PAGE_SIZE - sizeof(struct sp_chunk) -
60 sizeof(u16)) / sizeof(u16) - 1;
61 i++) {
62 new_head->shared_ptr[i] = i + 1;
63 }
64 /* terminate list */
65 new_head->shared_ptr[i] = 0xFFFF;
66
67 *head = new_head;
68 return 0;
69}
70
71int c2_init_mqsp_pool(struct c2_dev *c2dev, gfp_t gfp_mask,
72 struct sp_chunk **root)
73{
74 return c2_alloc_mqsp_chunk(c2dev, gfp_mask, root);
75}
76
77void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root)
78{
79 struct sp_chunk *next;
80
81 while (root) {
82 next = root->next;
83 dma_free_coherent(&c2dev->pcidev->dev, PAGE_SIZE, root,
84 dma_unmap_addr(root, mapping));
85 root = next;
86 }
87}
88
89__be16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head,
90 dma_addr_t *dma_addr, gfp_t gfp_mask)
91{
92 u16 mqsp;
93
94 while (head) {
95 mqsp = head->head;
96 if (mqsp != 0xFFFF) {
97 head->head = head->shared_ptr[mqsp];
98 break;
99 } else if (head->next == NULL) {
100 if (c2_alloc_mqsp_chunk(c2dev, gfp_mask, &head->next) ==
101 0) {
102 head = head->next;
103 mqsp = head->head;
104 head->head = head->shared_ptr[mqsp];
105 break;
106 } else
107 return NULL;
108 } else
109 head = head->next;
110 }
111 if (head) {
112 *dma_addr = head->dma_addr +
113 ((unsigned long) &(head->shared_ptr[mqsp]) -
114 (unsigned long) head);
115 pr_debug("%s addr %p dma_addr %llx\n", __func__,
116 &(head->shared_ptr[mqsp]), (unsigned long long) *dma_addr);
117 return (__force __be16 *) &(head->shared_ptr[mqsp]);
118 }
119 return NULL;
120}
121
122void c2_free_mqsp(__be16 *mqsp)
123{
124 struct sp_chunk *head;
125 u16 idx;
126
127 /* The chunk containing this ptr begins at the page boundary */
128 head = (struct sp_chunk *) ((unsigned long) mqsp & PAGE_MASK);
129
130 /* Link head to new mqsp */
131 *mqsp = (__force __be16) head->head;
132
 133	/* Compute the shared_ptr index: u16 offset into the page minus offsetof(struct sp_chunk, shared_ptr) / 2 */
134 idx = (offset_in_page(mqsp)) >> 1;
135 idx -= (unsigned long) &(((struct sp_chunk *) 0)->shared_ptr[0]) >> 1;
136
137 /* Point this index at the head */
138 head->shared_ptr[idx] = head->head;
139
140 /* Point head at this index */
141 head->head = idx;
142}
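/*
 * Rough usage sketch of the MQ shared-pointer pool API above
 * (hypothetical caller, not taken from the driver; error handling
 * trimmed, "c2dev" assumed to be a fully initialized device).
 */
static int c2_mqsp_example(struct c2_dev *c2dev)
{
	struct sp_chunk *pool;
	dma_addr_t dma;
	__be16 *sp;
	int err;

	err = c2_init_mqsp_pool(c2dev, GFP_KERNEL, &pool);
	if (err)
		return err;

	/* Grab one shared pointer and its DMA address for the adapter. */
	sp = c2_alloc_mqsp(c2dev, pool, &dma, GFP_KERNEL);
	if (!sp) {
		err = -ENOMEM;
		goto out;
	}

	/* ... hand dma to the adapter, read *sp on the host side ... */

	c2_free_mqsp(sp);
out:
	c2_free_mqsp_pool(c2dev, pool);
	return err;
}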
diff --git a/drivers/staging/rdma/amso1100/c2_cm.c b/drivers/staging/rdma/amso1100/c2_cm.c
deleted file mode 100644
index f8dbdb9e0f66..000000000000
--- a/drivers/staging/rdma/amso1100/c2_cm.c
+++ /dev/null
@@ -1,458 +0,0 @@
1/*
2 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 *
33 */
34#include <linux/slab.h>
35
36#include "c2.h"
37#include "c2_wr.h"
38#include "c2_vq.h"
39#include <rdma/iw_cm.h>
40
41int c2_llp_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
42{
43 struct c2_dev *c2dev = to_c2dev(cm_id->device);
44 struct ib_qp *ibqp;
45 struct c2_qp *qp;
46 struct c2wr_qp_connect_req *wr; /* variable size needs a malloc. */
47 struct c2_vq_req *vq_req;
48 int err;
49 struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
50
51 if (cm_id->remote_addr.ss_family != AF_INET)
52 return -ENOSYS;
53
54 ibqp = c2_get_qp(cm_id->device, iw_param->qpn);
55 if (!ibqp)
56 return -EINVAL;
57 qp = to_c2qp(ibqp);
58
59 /* Associate QP <--> CM_ID */
60 cm_id->provider_data = qp;
61 cm_id->add_ref(cm_id);
62 qp->cm_id = cm_id;
63
64 /*
 65	 * Reject private_data longer than the maximum supported length
66 */
67 if (iw_param->private_data_len > C2_MAX_PRIVATE_DATA_SIZE) {
68 err = -EINVAL;
69 goto bail0;
70 }
71 /*
72 * Set the rdma read limits
73 */
74 err = c2_qp_set_read_limits(c2dev, qp, iw_param->ord, iw_param->ird);
75 if (err)
76 goto bail0;
77
78 /*
79 * Create and send a WR_QP_CONNECT...
80 */
81 wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
82 if (!wr) {
83 err = -ENOMEM;
84 goto bail0;
85 }
86
87 vq_req = vq_req_alloc(c2dev);
88 if (!vq_req) {
89 err = -ENOMEM;
90 goto bail1;
91 }
92
93 c2_wr_set_id(wr, CCWR_QP_CONNECT);
94 wr->hdr.context = 0;
95 wr->rnic_handle = c2dev->adapter_handle;
96 wr->qp_handle = qp->adapter_handle;
97
98 wr->remote_addr = raddr->sin_addr.s_addr;
99 wr->remote_port = raddr->sin_port;
100
101 /*
 102	 * Move any private data from the caller's buf into
103 * the WR.
104 */
105 if (iw_param->private_data) {
106 wr->private_data_length =
107 cpu_to_be32(iw_param->private_data_len);
108 memcpy(&wr->private_data[0], iw_param->private_data,
109 iw_param->private_data_len);
110 } else
111 wr->private_data_length = 0;
112
113 /*
114 * Send WR to adapter. NOTE: There is no synch reply from
115 * the adapter.
116 */
117 err = vq_send_wr(c2dev, (union c2wr *) wr);
118 vq_req_free(c2dev, vq_req);
119
120 bail1:
121 kfree(wr);
122 bail0:
123 if (err) {
124 /*
125 * If we fail, release reference on QP and
126 * disassociate QP from CM_ID
127 */
128 cm_id->provider_data = NULL;
129 qp->cm_id = NULL;
130 cm_id->rem_ref(cm_id);
131 }
132 return err;
133}
134
135int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog)
136{
137 struct c2_dev *c2dev;
138 struct c2wr_ep_listen_create_req wr;
139 struct c2wr_ep_listen_create_rep *reply;
140 struct c2_vq_req *vq_req;
141 int err;
142 struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
143
144 if (cm_id->local_addr.ss_family != AF_INET)
145 return -ENOSYS;
146
147 c2dev = to_c2dev(cm_id->device);
148 if (c2dev == NULL)
149 return -EINVAL;
150
151 /*
152 * Allocate verbs request.
153 */
154 vq_req = vq_req_alloc(c2dev);
155 if (!vq_req)
156 return -ENOMEM;
157
158 /*
159 * Build the WR
160 */
161 c2_wr_set_id(&wr, CCWR_EP_LISTEN_CREATE);
162 wr.hdr.context = (u64) (unsigned long) vq_req;
163 wr.rnic_handle = c2dev->adapter_handle;
164 wr.local_addr = laddr->sin_addr.s_addr;
165 wr.local_port = laddr->sin_port;
166 wr.backlog = cpu_to_be32(backlog);
167 wr.user_context = (u64) (unsigned long) cm_id;
168
169 /*
170 * Reference the request struct. Dereferenced in the int handler.
171 */
172 vq_req_get(c2dev, vq_req);
173
174 /*
175 * Send WR to adapter
176 */
 177	err = vq_send_wr(c2dev, (union c2wr *) &wr);
178 if (err) {
179 vq_req_put(c2dev, vq_req);
180 goto bail0;
181 }
182
183 /*
184 * Wait for reply from adapter
185 */
186 err = vq_wait_for_reply(c2dev, vq_req);
187 if (err)
188 goto bail0;
189
190 /*
191 * Process reply
192 */
193 reply =
194 (struct c2wr_ep_listen_create_rep *) (unsigned long) vq_req->reply_msg;
195 if (!reply) {
196 err = -ENOMEM;
197 goto bail1;
198 }
199
200 if ((err = c2_errno(reply)) != 0)
201 goto bail1;
202
203 /*
204 * Keep the adapter handle. Used in subsequent destroy
205 */
206 cm_id->provider_data = (void*)(unsigned long) reply->ep_handle;
207
208 /*
209 * free vq stuff
210 */
211 vq_repbuf_free(c2dev, reply);
212 vq_req_free(c2dev, vq_req);
213
214 return 0;
215
216 bail1:
217 vq_repbuf_free(c2dev, reply);
218 bail0:
219 vq_req_free(c2dev, vq_req);
220 return err;
221}
222
223
224int c2_llp_service_destroy(struct iw_cm_id *cm_id)
225{
226
227 struct c2_dev *c2dev;
228 struct c2wr_ep_listen_destroy_req wr;
229 struct c2wr_ep_listen_destroy_rep *reply;
230 struct c2_vq_req *vq_req;
231 int err;
232
233 c2dev = to_c2dev(cm_id->device);
234 if (c2dev == NULL)
235 return -EINVAL;
236
237 /*
238 * Allocate verbs request.
239 */
240 vq_req = vq_req_alloc(c2dev);
241 if (!vq_req)
242 return -ENOMEM;
243
244 /*
245 * Build the WR
246 */
247 c2_wr_set_id(&wr, CCWR_EP_LISTEN_DESTROY);
248 wr.hdr.context = (unsigned long) vq_req;
249 wr.rnic_handle = c2dev->adapter_handle;
250 wr.ep_handle = (u32)(unsigned long)cm_id->provider_data;
251
252 /*
253 * reference the request struct. dereferenced in the int handler.
254 */
255 vq_req_get(c2dev, vq_req);
256
257 /*
258 * Send WR to adapter
259 */
 260	err = vq_send_wr(c2dev, (union c2wr *) &wr);
261 if (err) {
262 vq_req_put(c2dev, vq_req);
263 goto bail0;
264 }
265
266 /*
267 * Wait for reply from adapter
268 */
269 err = vq_wait_for_reply(c2dev, vq_req);
270 if (err)
271 goto bail0;
272
273 /*
274 * Process reply
275 */
 276	reply = (struct c2wr_ep_listen_destroy_rep *)(unsigned long)vq_req->reply_msg;
277 if (!reply) {
278 err = -ENOMEM;
279 goto bail0;
280 }
281
282 vq_repbuf_free(c2dev, reply);
283 bail0:
284 vq_req_free(c2dev, vq_req);
285 return err;
286}
287
288int c2_llp_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
289{
290 struct c2_dev *c2dev = to_c2dev(cm_id->device);
291 struct c2_qp *qp;
292 struct ib_qp *ibqp;
293 struct c2wr_cr_accept_req *wr; /* variable length WR */
294 struct c2_vq_req *vq_req;
295 struct c2wr_cr_accept_rep *reply; /* VQ Reply msg ptr. */
296 int err;
297
298 ibqp = c2_get_qp(cm_id->device, iw_param->qpn);
299 if (!ibqp)
300 return -EINVAL;
301 qp = to_c2qp(ibqp);
302
303 /* Set the RDMA read limits */
304 err = c2_qp_set_read_limits(c2dev, qp, iw_param->ord, iw_param->ird);
305 if (err)
306 goto bail0;
307
308 /* Allocate verbs request. */
309 vq_req = vq_req_alloc(c2dev);
310 if (!vq_req) {
311 err = -ENOMEM;
312 goto bail0;
313 }
314 vq_req->qp = qp;
315 vq_req->cm_id = cm_id;
316 vq_req->event = IW_CM_EVENT_ESTABLISHED;
317
318 wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
319 if (!wr) {
320 err = -ENOMEM;
321 goto bail1;
322 }
323
324 /* Build the WR */
325 c2_wr_set_id(wr, CCWR_CR_ACCEPT);
326 wr->hdr.context = (unsigned long) vq_req;
327 wr->rnic_handle = c2dev->adapter_handle;
328 wr->ep_handle = (u32) (unsigned long) cm_id->provider_data;
329 wr->qp_handle = qp->adapter_handle;
330
331 /* Replace the cr_handle with the QP after accept */
332 cm_id->provider_data = qp;
333 cm_id->add_ref(cm_id);
334 qp->cm_id = cm_id;
335
336 cm_id->provider_data = qp;
337
338 /* Validate private_data length */
339 if (iw_param->private_data_len > C2_MAX_PRIVATE_DATA_SIZE) {
340 err = -EINVAL;
341 goto bail1;
342 }
343
344 if (iw_param->private_data) {
345 wr->private_data_length = cpu_to_be32(iw_param->private_data_len);
346 memcpy(&wr->private_data[0],
347 iw_param->private_data, iw_param->private_data_len);
348 } else
349 wr->private_data_length = 0;
350
351 /* Reference the request struct. Dereferenced in the int handler. */
352 vq_req_get(c2dev, vq_req);
353
354 /* Send WR to adapter */
355 err = vq_send_wr(c2dev, (union c2wr *) wr);
356 if (err) {
357 vq_req_put(c2dev, vq_req);
358 goto bail1;
359 }
360
361 /* Wait for reply from adapter */
362 err = vq_wait_for_reply(c2dev, vq_req);
363 if (err)
364 goto bail1;
365
366 /* Check that reply is present */
367 reply = (struct c2wr_cr_accept_rep *) (unsigned long) vq_req->reply_msg;
368 if (!reply) {
369 err = -ENOMEM;
370 goto bail1;
371 }
372
373 err = c2_errno(reply);
374 vq_repbuf_free(c2dev, reply);
375
376 if (!err)
377 c2_set_qp_state(qp, C2_QP_STATE_RTS);
378 bail1:
379 kfree(wr);
380 vq_req_free(c2dev, vq_req);
381 bail0:
382 if (err) {
383 /*
384 * If we fail, release reference on QP and
385 * disassociate QP from CM_ID
386 */
387 cm_id->provider_data = NULL;
388 qp->cm_id = NULL;
389 cm_id->rem_ref(cm_id);
390 }
391 return err;
392}
393
394int c2_llp_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
395{
396 struct c2_dev *c2dev;
397 struct c2wr_cr_reject_req wr;
398 struct c2_vq_req *vq_req;
399 struct c2wr_cr_reject_rep *reply;
400 int err;
401
402 c2dev = to_c2dev(cm_id->device);
403
404 /*
405 * Allocate verbs request.
406 */
407 vq_req = vq_req_alloc(c2dev);
408 if (!vq_req)
409 return -ENOMEM;
410
411 /*
412 * Build the WR
413 */
414 c2_wr_set_id(&wr, CCWR_CR_REJECT);
415 wr.hdr.context = (unsigned long) vq_req;
416 wr.rnic_handle = c2dev->adapter_handle;
417 wr.ep_handle = (u32) (unsigned long) cm_id->provider_data;
418
419 /*
420 * reference the request struct. dereferenced in the int handler.
421 */
422 vq_req_get(c2dev, vq_req);
423
424 /*
425 * Send WR to adapter
426 */
 427	err = vq_send_wr(c2dev, (union c2wr *) &wr);
428 if (err) {
429 vq_req_put(c2dev, vq_req);
430 goto bail0;
431 }
432
433 /*
434 * Wait for reply from adapter
435 */
436 err = vq_wait_for_reply(c2dev, vq_req);
437 if (err)
438 goto bail0;
439
440 /*
441 * Process reply
442 */
443 reply = (struct c2wr_cr_reject_rep *) (unsigned long)
444 vq_req->reply_msg;
445 if (!reply) {
446 err = -ENOMEM;
447 goto bail0;
448 }
449 err = c2_errno(reply);
450 /*
451 * free vq stuff
452 */
453 vq_repbuf_free(c2dev, reply);
454
455 bail0:
456 vq_req_free(c2dev, vq_req);
457 return err;
458}
diff --git a/drivers/staging/rdma/amso1100/c2_cq.c b/drivers/staging/rdma/amso1100/c2_cq.c
deleted file mode 100644
index 7ad0c082485a..000000000000
--- a/drivers/staging/rdma/amso1100/c2_cq.c
+++ /dev/null
@@ -1,437 +0,0 @@
1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4 * Copyright (c) 2005 Cisco Systems, Inc. All rights reserved.
5 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
6 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
7 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
8 *
9 * This software is available to you under a choice of one of two
10 * licenses. You may choose to be licensed under the terms of the GNU
11 * General Public License (GPL) Version 2, available from the file
12 * COPYING in the main directory of this source tree, or the
13 * OpenIB.org BSD license below:
14 *
15 * Redistribution and use in source and binary forms, with or
16 * without modification, are permitted provided that the following
17 * conditions are met:
18 *
19 * - Redistributions of source code must retain the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer.
22 *
23 * - Redistributions in binary form must reproduce the above
24 * copyright notice, this list of conditions and the following
25 * disclaimer in the documentation and/or other materials
26 * provided with the distribution.
27 *
28 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
29 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
30 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
31 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
32 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
33 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
34 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35 * SOFTWARE.
36 *
37 */
38#include <linux/gfp.h>
39
40#include "c2.h"
41#include "c2_vq.h"
42#include "c2_status.h"
43
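/* One CQ message slot: sizeof(struct c2wr_ce) rounded up to a 32-byte multiple. */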
44#define C2_CQ_MSG_SIZE ((sizeof(struct c2wr_ce) + 32-1) & ~(32-1))
45
46static struct c2_cq *c2_cq_get(struct c2_dev *c2dev, int cqn)
47{
48 struct c2_cq *cq;
49 unsigned long flags;
50
51 spin_lock_irqsave(&c2dev->lock, flags);
52 cq = c2dev->qptr_array[cqn];
53 if (!cq) {
54 spin_unlock_irqrestore(&c2dev->lock, flags);
55 return NULL;
56 }
57 atomic_inc(&cq->refcount);
58 spin_unlock_irqrestore(&c2dev->lock, flags);
59 return cq;
60}
61
62static void c2_cq_put(struct c2_cq *cq)
63{
64 if (atomic_dec_and_test(&cq->refcount))
65 wake_up(&cq->wait);
66}
67
68void c2_cq_event(struct c2_dev *c2dev, u32 mq_index)
69{
70 struct c2_cq *cq;
71
72 cq = c2_cq_get(c2dev, mq_index);
73 if (!cq) {
 74		printk(KERN_WARNING PFX "discarding events on destroyed CQN=%d\n", mq_index);
75 return;
76 }
77
78 (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
79 c2_cq_put(cq);
80}
81
82void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index)
83{
84 struct c2_cq *cq;
85 struct c2_mq *q;
86
87 cq = c2_cq_get(c2dev, mq_index);
88 if (!cq)
89 return;
90
91 spin_lock_irq(&cq->lock);
92 q = &cq->mq;
93 if (q && !c2_mq_empty(q)) {
94 u16 priv = q->priv;
95 struct c2wr_ce *msg;
96
97 while (priv != be16_to_cpu(*q->shared)) {
98 msg = (struct c2wr_ce *)
99 (q->msg_pool.host + priv * q->msg_size);
100 if (msg->qp_user_context == (u64) (unsigned long) qp) {
101 msg->qp_user_context = (u64) 0;
102 }
103 priv = (priv + 1) % q->q_size;
104 }
105 }
106 spin_unlock_irq(&cq->lock);
107 c2_cq_put(cq);
108}
109
110static inline enum ib_wc_status c2_cqe_status_to_openib(u8 status)
111{
112 switch (status) {
113 case C2_OK:
114 return IB_WC_SUCCESS;
115 case CCERR_FLUSHED:
116 return IB_WC_WR_FLUSH_ERR;
117 case CCERR_BASE_AND_BOUNDS_VIOLATION:
118 return IB_WC_LOC_PROT_ERR;
119 case CCERR_ACCESS_VIOLATION:
120 return IB_WC_LOC_ACCESS_ERR;
121 case CCERR_TOTAL_LENGTH_TOO_BIG:
122 return IB_WC_LOC_LEN_ERR;
123 case CCERR_INVALID_WINDOW:
124 return IB_WC_MW_BIND_ERR;
125 default:
126 return IB_WC_GENERAL_ERR;
127 }
128}
129
130
131static inline int c2_poll_one(struct c2_dev *c2dev,
132 struct c2_cq *cq, struct ib_wc *entry)
133{
134 struct c2wr_ce *ce;
135 struct c2_qp *qp;
136 int is_recv = 0;
137
138 ce = c2_mq_consume(&cq->mq);
139 if (!ce) {
140 return -EAGAIN;
141 }
142
143 /*
144 * if the qp returned is null then this qp has already
 145	 * been freed and we are unable to process the completion.
146 * try pulling the next message
147 */
148 while ((qp =
149 (struct c2_qp *) (unsigned long) ce->qp_user_context) == NULL) {
150 c2_mq_free(&cq->mq);
151 ce = c2_mq_consume(&cq->mq);
152 if (!ce)
153 return -EAGAIN;
154 }
155
156 entry->status = c2_cqe_status_to_openib(c2_wr_get_result(ce));
157 entry->wr_id = ce->hdr.context;
158 entry->qp = &qp->ibqp;
159 entry->wc_flags = 0;
160 entry->slid = 0;
161 entry->sl = 0;
162 entry->src_qp = 0;
163 entry->dlid_path_bits = 0;
164 entry->pkey_index = 0;
165
166 switch (c2_wr_get_id(ce)) {
167 case C2_WR_TYPE_SEND:
168 entry->opcode = IB_WC_SEND;
169 break;
170 case C2_WR_TYPE_RDMA_WRITE:
171 entry->opcode = IB_WC_RDMA_WRITE;
172 break;
173 case C2_WR_TYPE_RDMA_READ:
174 entry->opcode = IB_WC_RDMA_READ;
175 break;
176 case C2_WR_TYPE_RECV:
177 entry->byte_len = be32_to_cpu(ce->bytes_rcvd);
178 entry->opcode = IB_WC_RECV;
179 is_recv = 1;
180 break;
181 default:
182 break;
183 }
184
185 /* consume the WQEs */
186 if (is_recv)
187 c2_mq_lconsume(&qp->rq_mq, 1);
188 else
189 c2_mq_lconsume(&qp->sq_mq,
190 be32_to_cpu(c2_wr_get_wqe_count(ce)) + 1);
191
192 /* free the message */
193 c2_mq_free(&cq->mq);
194
195 return 0;
196}
197
198int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
199{
200 struct c2_dev *c2dev = to_c2dev(ibcq->device);
201 struct c2_cq *cq = to_c2cq(ibcq);
202 unsigned long flags;
203 int npolled, err;
204
205 spin_lock_irqsave(&cq->lock, flags);
206
207 for (npolled = 0; npolled < num_entries; ++npolled) {
208
209 err = c2_poll_one(c2dev, cq, entry + npolled);
210 if (err)
211 break;
212 }
213
214 spin_unlock_irqrestore(&cq->lock, flags);
215
216 return npolled;
217}
218
219int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
220{
221 struct c2_mq_shared __iomem *shared;
222 struct c2_cq *cq;
223 unsigned long flags;
224 int ret = 0;
225
226 cq = to_c2cq(ibcq);
227 shared = cq->mq.peer;
228
229 if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_NEXT_COMP)
230 writeb(C2_CQ_NOTIFICATION_TYPE_NEXT, &shared->notification_type);
231 else if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
232 writeb(C2_CQ_NOTIFICATION_TYPE_NEXT_SE, &shared->notification_type);
233 else
234 return -EINVAL;
235
236 writeb(CQ_WAIT_FOR_DMA | CQ_ARMED, &shared->armed);
237
238 /*
239 * Now read back shared->armed to make the PCI
240 * write synchronous. This is necessary for
241 * correct cq notification semantics.
242 */
243 readb(&shared->armed);
244
245 if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
246 spin_lock_irqsave(&cq->lock, flags);
247 ret = !c2_mq_empty(&cq->mq);
248 spin_unlock_irqrestore(&cq->lock, flags);
249 }
250
251 return ret;
252}
253
254static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq)
255{
256 dma_free_coherent(&c2dev->pcidev->dev, mq->q_size * mq->msg_size,
257 mq->msg_pool.host, dma_unmap_addr(mq, mapping));
258}
259
260static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq,
261 size_t q_size, size_t msg_size)
262{
263 u8 *pool_start;
264
265 if (q_size > SIZE_MAX / msg_size)
266 return -EINVAL;
267
268 pool_start = dma_alloc_coherent(&c2dev->pcidev->dev, q_size * msg_size,
269 &mq->host_dma, GFP_KERNEL);
270 if (!pool_start)
271 return -ENOMEM;
272
273 c2_mq_rep_init(mq,
274 0, /* index (currently unknown) */
275 q_size,
276 msg_size,
277 pool_start,
278 NULL, /* peer (currently unknown) */
279 C2_MQ_HOST_TARGET);
280
281 dma_unmap_addr_set(mq, mapping, mq->host_dma);
282
283 return 0;
284}
285
286int c2_init_cq(struct c2_dev *c2dev, int entries,
287 struct c2_ucontext *ctx, struct c2_cq *cq)
288{
289 struct c2wr_cq_create_req wr;
290 struct c2wr_cq_create_rep *reply;
291 unsigned long peer_pa;
292 struct c2_vq_req *vq_req;
293 int err;
294
295 might_sleep();
296
297 cq->ibcq.cqe = entries - 1;
298 cq->is_kernel = !ctx;
299
300 /* Allocate a shared pointer */
301 cq->mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
302 &cq->mq.shared_dma, GFP_KERNEL);
303 if (!cq->mq.shared)
304 return -ENOMEM;
305
306 /* Allocate pages for the message pool */
307 err = c2_alloc_cq_buf(c2dev, &cq->mq, entries + 1, C2_CQ_MSG_SIZE);
308 if (err)
309 goto bail0;
310
311 vq_req = vq_req_alloc(c2dev);
312 if (!vq_req) {
313 err = -ENOMEM;
314 goto bail1;
315 }
316
317 memset(&wr, 0, sizeof(wr));
318 c2_wr_set_id(&wr, CCWR_CQ_CREATE);
319 wr.hdr.context = (unsigned long) vq_req;
320 wr.rnic_handle = c2dev->adapter_handle;
321 wr.msg_size = cpu_to_be32(cq->mq.msg_size);
322 wr.depth = cpu_to_be32(cq->mq.q_size);
323 wr.shared_ht = cpu_to_be64(cq->mq.shared_dma);
324 wr.msg_pool = cpu_to_be64(cq->mq.host_dma);
325 wr.user_context = (u64) (unsigned long) (cq);
326
327 vq_req_get(c2dev, vq_req);
328
 329	err = vq_send_wr(c2dev, (union c2wr *) &wr);
330 if (err) {
331 vq_req_put(c2dev, vq_req);
332 goto bail2;
333 }
334
335 err = vq_wait_for_reply(c2dev, vq_req);
336 if (err)
337 goto bail2;
338
339 reply = (struct c2wr_cq_create_rep *) (unsigned long) (vq_req->reply_msg);
340 if (!reply) {
341 err = -ENOMEM;
342 goto bail2;
343 }
344
345 if ((err = c2_errno(reply)) != 0)
346 goto bail3;
347
348 cq->adapter_handle = reply->cq_handle;
349 cq->mq.index = be32_to_cpu(reply->mq_index);
350
351 peer_pa = c2dev->pa + be32_to_cpu(reply->adapter_shared);
352 cq->mq.peer = ioremap_nocache(peer_pa, PAGE_SIZE);
353 if (!cq->mq.peer) {
354 err = -ENOMEM;
355 goto bail3;
356 }
357
358 vq_repbuf_free(c2dev, reply);
359 vq_req_free(c2dev, vq_req);
360
361 spin_lock_init(&cq->lock);
362 atomic_set(&cq->refcount, 1);
363 init_waitqueue_head(&cq->wait);
364
365 /*
366 * Use the MQ index allocated by the adapter to
367 * store the CQ in the qptr_array
368 */
369 cq->cqn = cq->mq.index;
370 c2dev->qptr_array[cq->cqn] = cq;
371
372 return 0;
373
374bail3:
375 vq_repbuf_free(c2dev, reply);
376bail2:
377 vq_req_free(c2dev, vq_req);
378bail1:
379 c2_free_cq_buf(c2dev, &cq->mq);
380bail0:
381 c2_free_mqsp(cq->mq.shared);
382
383 return err;
384}
385
386void c2_free_cq(struct c2_dev *c2dev, struct c2_cq *cq)
387{
388 int err;
389 struct c2_vq_req *vq_req;
390 struct c2wr_cq_destroy_req wr;
391 struct c2wr_cq_destroy_rep *reply;
392
393 might_sleep();
394
395 /* Clear CQ from the qptr array */
396 spin_lock_irq(&c2dev->lock);
397 c2dev->qptr_array[cq->mq.index] = NULL;
398 atomic_dec(&cq->refcount);
399 spin_unlock_irq(&c2dev->lock);
400
401 wait_event(cq->wait, !atomic_read(&cq->refcount));
402
403 vq_req = vq_req_alloc(c2dev);
404 if (!vq_req) {
405 goto bail0;
406 }
407
408 memset(&wr, 0, sizeof(wr));
409 c2_wr_set_id(&wr, CCWR_CQ_DESTROY);
410 wr.hdr.context = (unsigned long) vq_req;
411 wr.rnic_handle = c2dev->adapter_handle;
412 wr.cq_handle = cq->adapter_handle;
413
414 vq_req_get(c2dev, vq_req);
415
 416	err = vq_send_wr(c2dev, (union c2wr *) &wr);
417 if (err) {
418 vq_req_put(c2dev, vq_req);
419 goto bail1;
420 }
421
422 err = vq_wait_for_reply(c2dev, vq_req);
423 if (err)
424 goto bail1;
425
426 reply = (struct c2wr_cq_destroy_rep *) (unsigned long) (vq_req->reply_msg);
427 if (reply)
428 vq_repbuf_free(c2dev, reply);
429bail1:
430 vq_req_free(c2dev, vq_req);
431bail0:
432 if (cq->is_kernel) {
433 c2_free_cq_buf(c2dev, &cq->mq);
434 }
435
436 return;
437}
diff --git a/drivers/staging/rdma/amso1100/c2_intr.c b/drivers/staging/rdma/amso1100/c2_intr.c
deleted file mode 100644
index 74b32a971124..000000000000
--- a/drivers/staging/rdma/amso1100/c2_intr.c
+++ /dev/null
@@ -1,219 +0,0 @@
1/*
2 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33#include "c2.h"
34#include <rdma/iw_cm.h>
35#include "c2_vq.h"
36
37static void handle_mq(struct c2_dev *c2dev, u32 index);
38static void handle_vq(struct c2_dev *c2dev, u32 mq_index);
39
40/*
41 * Handle RNIC interrupts
42 */
43void c2_rnic_interrupt(struct c2_dev *c2dev)
44{
45 unsigned int mq_index;
46
47 while (c2dev->hints_read != be16_to_cpu(*c2dev->hint_count)) {
48 mq_index = readl(c2dev->regs + PCI_BAR0_HOST_HINT);
49 if (mq_index & 0x80000000) {
50 break;
51 }
52
53 c2dev->hints_read++;
54 handle_mq(c2dev, mq_index);
55 }
56
57}
58
59/*
60 * Top level MQ handler
61 */
62static void handle_mq(struct c2_dev *c2dev, u32 mq_index)
63{
64 if (c2dev->qptr_array[mq_index] == NULL) {
65 pr_debug("handle_mq: stray activity for mq_index=%d\n",
66 mq_index);
67 return;
68 }
69
70 switch (mq_index) {
71 case (0):
72 /*
73 * An index of 0 in the activity queue
74 * indicates the req vq now has messages
75 * available...
76 *
77 * Wake up any waiters waiting on req VQ
78 * message availability.
79 */
80 wake_up(&c2dev->req_vq_wo);
81 break;
82 case (1):
83 handle_vq(c2dev, mq_index);
84 break;
85 case (2):
86 /* We have to purge the VQ in case there are pending
87 * accept reply requests that would result in the
88 * generation of an ESTABLISHED event. If we don't
89 * generate these first, a CLOSE event could end up
90 * being delivered before the ESTABLISHED event.
91 */
92 handle_vq(c2dev, 1);
93
94 c2_ae_event(c2dev, mq_index);
95 break;
96 default:
97 /* There is no event synchronization between CQ events
 98		 * and AE or CM events. In fact, CQEs could be
 99		 * delivered for all of the I/O up to and including the
 100		 * FLUSH for a peer disconnect prior to the ESTABLISHED
 101		 * event being delivered to the app. The reason for this
 102		 * is that CM events are delivered on a thread, while AE
 103		 * and CQ events are delivered in interrupt context.
104 */
105 c2_cq_event(c2dev, mq_index);
106 break;
107 }
108
109 return;
110}
111
112/*
113 * Handles verbs WR replies.
114 */
115static void handle_vq(struct c2_dev *c2dev, u32 mq_index)
116{
117 void *adapter_msg, *reply_msg;
118 struct c2wr_hdr *host_msg;
119 struct c2wr_hdr tmp;
120 struct c2_mq *reply_vq;
121 struct c2_vq_req *req;
122 struct iw_cm_event cm_event;
123 int err;
124
125 reply_vq = c2dev->qptr_array[mq_index];
126
127 /*
128 * get next msg from mq_index into adapter_msg.
129 * don't free it yet.
130 */
131 adapter_msg = c2_mq_consume(reply_vq);
132 if (adapter_msg == NULL) {
133 return;
134 }
135
136 host_msg = vq_repbuf_alloc(c2dev);
137
138 /*
139 * If we can't get a host buffer, then we'll still
140 * wakeup the waiter, we just won't give him the msg.
141 * It is assumed the waiter will deal with this...
142 */
143 if (!host_msg) {
144 pr_debug("handle_vq: no repbufs!\n");
145
146 /*
147 * just copy the WR header into a local variable.
148 * this allows us to still demux on the context
149 */
150 host_msg = &tmp;
151 memcpy(host_msg, adapter_msg, sizeof(tmp));
152 reply_msg = NULL;
153 } else {
154 memcpy(host_msg, adapter_msg, reply_vq->msg_size);
155 reply_msg = host_msg;
156 }
157
158 /*
159 * consume the msg from the MQ
160 */
161 c2_mq_free(reply_vq);
162
163 /*
 164	 * wake up the waiter.
165 */
166 req = (struct c2_vq_req *) (unsigned long) host_msg->context;
167 if (req == NULL) {
168 /*
169 * We should never get here, as the adapter should
170 * never send us a reply that we're not expecting.
171 */
172 if (reply_msg != NULL)
173 vq_repbuf_free(c2dev, host_msg);
174 pr_debug("handle_vq: UNEXPECTEDLY got NULL req\n");
175 return;
176 }
177
178 if (reply_msg)
179 err = c2_errno(reply_msg);
180 else
181 err = -ENOMEM;
182
183 if (!err) switch (req->event) {
184 case IW_CM_EVENT_ESTABLISHED:
185 c2_set_qp_state(req->qp,
186 C2_QP_STATE_RTS);
187 /*
188 * Until ird/ord negotiation via MPAv2 support is added, send
189 * max supported values
190 */
191 cm_event.ird = cm_event.ord = 128;
192 case IW_CM_EVENT_CLOSE:
193
194 /*
195 * Deliver the CM event; the ESTABLISHED case above
196 * falls through to here after moving the QP to RTS.
197 */
198 cm_event.event = req->event;
199 cm_event.status = 0;
200 cm_event.local_addr = req->cm_id->local_addr;
201 cm_event.remote_addr = req->cm_id->remote_addr;
202 cm_event.private_data = NULL;
203 cm_event.private_data_len = 0;
204 req->cm_id->event_handler(req->cm_id, &cm_event);
205 break;
206 default:
207 break;
208 }
209
210 req->reply_msg = (u64) (unsigned long) (reply_msg);
211 atomic_set(&req->reply_ready, 1);
212 wake_up(&req->wait_object);
213
214 /*
215 * If the request was cancelled, then this put will
216 * free the vq_req memory...and reply_msg!!!
217 */
218 vq_req_put(c2dev, req);
219}
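
The "no reply buffer" fallback in handle_vq() above is worth isolating: when a full copy of the reply cannot be allocated, only the fixed header is copied onto the stack, which is still enough to locate the waiter and wake it with -ENOMEM. The sketch below models that pattern in userspace; the types and the simulate_oom flag are invented for the illustration, not the driver's API.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct hdr { uint64_t context; };
struct msg { struct hdr hdr; char payload[56]; };
struct waiter { int err; struct msg *reply; };

/* deliver one adapter message; simulate_oom forces the fallback path */
static void deliver(const struct msg *adapter_msg, int simulate_oom)
{
	struct msg *copy = simulate_oom ? NULL : malloc(sizeof(*copy));
	struct hdr tmp;
	const struct hdr *h;
	struct waiter *w;

	if (!copy) {
		/* header-only copy: still enough to find the waiter */
		memcpy(&tmp, &adapter_msg->hdr, sizeof(tmp));
		h = &tmp;
	} else {
		memcpy(copy, adapter_msg, sizeof(*copy));
		h = &copy->hdr;
	}

	w = (struct waiter *)(uintptr_t)h->context;
	w->reply = copy;
	w->err = copy ? 0 : -ENOMEM;
	/* the real handler would wake the waiter here */
}

int main(void)
{
	struct waiter w = { 0, NULL };
	struct msg m = { { 0 }, { 0 } };

	m.hdr.context = (uintptr_t)&w;
	deliver(&m, 1);
	printf("oom path:    err=%d reply=%p\n", w.err, (void *)w.reply);
	deliver(&m, 0);
	printf("normal path: err=%d reply=%p\n", w.err, (void *)w.reply);
	free(w.reply);
	return 0;
}
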
diff --git a/drivers/staging/rdma/amso1100/c2_mm.c b/drivers/staging/rdma/amso1100/c2_mm.c
deleted file mode 100644
index 25081e2913de..000000000000
--- a/drivers/staging/rdma/amso1100/c2_mm.c
+++ /dev/null
@@ -1,377 +0,0 @@
1/*
2 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33#include <linux/slab.h>
34
35#include "c2.h"
36#include "c2_vq.h"
37
38#define PBL_VIRT 1
39#define PBL_PHYS 2
40
41/*
42 * Send all the PBL messages to convey the remainder of the PBL
43 * Wait for the adapter's reply on the last one.
44 * This is indicated by setting the MEM_PBL_COMPLETE in the flags.
45 *
46 * NOTE: vq_req is _not_ freed by this function. The VQ Host
47 * Reply buffer _is_ freed by this function.
48 */
49static int
50send_pbl_messages(struct c2_dev *c2dev, __be32 stag_index,
51 unsigned long va, u32 pbl_depth,
52 struct c2_vq_req *vq_req, int pbl_type)
53{
54 u32 pbe_count; /* amt that fits in a PBL msg */
55 u32 count; /* amt in this PBL MSG. */
56 struct c2wr_nsmr_pbl_req *wr; /* PBL WR ptr */
57 struct c2wr_nsmr_pbl_rep *reply; /* reply ptr */
58 int err, pbl_virt, pbl_index, i;
59
60 switch (pbl_type) {
61 case PBL_VIRT:
62 pbl_virt = 1;
63 break;
64 case PBL_PHYS:
65 pbl_virt = 0;
66 break;
67 default:
68 return -EINVAL;
69 break;
70 }
71
72 pbe_count = (c2dev->req_vq.msg_size -
73 sizeof(struct c2wr_nsmr_pbl_req)) / sizeof(u64);
74 wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
75 if (!wr) {
76 return -ENOMEM;
77 }
78 c2_wr_set_id(wr, CCWR_NSMR_PBL);
79
80 /*
81 * Only the last PBL message will generate a reply from the verbs,
82 * so we set the context to 0 indicating there is no kernel verbs
83 * handler blocked awaiting this reply.
84 */
85 wr->hdr.context = 0;
86 wr->rnic_handle = c2dev->adapter_handle;
87 wr->stag_index = stag_index; /* already swapped */
88 wr->flags = 0;
89 pbl_index = 0;
90 while (pbl_depth) {
91 count = min(pbe_count, pbl_depth);
92 wr->addrs_length = cpu_to_be32(count);
93
94 /*
95 * If this is the last message, then reference the
96 * vq request struct because we're going to wait for a reply.
97 * Also mark this PBL msg as the last one.
98 */
99 if (count == pbl_depth) {
100 /*
101 * reference the request struct. dereferenced in the
102 * int handler.
103 */
104 vq_req_get(c2dev, vq_req);
105 wr->flags = cpu_to_be32(MEM_PBL_COMPLETE);
106
107 /*
108 * This is the last PBL message.
109 * Set the context to our VQ Request Object so we can
110 * wait for the reply.
111 */
112 wr->hdr.context = (unsigned long) vq_req;
113 }
114
115 /*
116 * If pbl_virt is set then va is a virtual address
117 * that describes a virtually contiguous memory
118 * allocation. The wr needs the start of each virtual page
119 * to be converted to the corresponding physical address
120 * of the page. If pbl_virt is not set then va is an array
121 * of physical addresses and there is no conversion to do.
122 * Just fill in the wr with what is in the array.
123 */
124 for (i = 0; i < count; i++) {
125 if (pbl_virt) {
126 va += PAGE_SIZE;
127 } else {
128 wr->paddrs[i] =
129 cpu_to_be64(((u64 *)va)[pbl_index + i]);
130 }
131 }
132
133 /*
134 * Send WR to adapter
135 */
136 err = vq_send_wr(c2dev, (union c2wr *) wr);
137 if (err) {
138 if (count == pbl_depth) { /* ref was only taken for the last message */
139 vq_req_put(c2dev, vq_req);
140 }
141 goto bail0;
142 }
143 pbl_depth -= count;
144 pbl_index += count;
145 }
146
147 /*
148 * Now wait for the reply...
149 */
150 err = vq_wait_for_reply(c2dev, vq_req);
151 if (err) {
152 goto bail0;
153 }
154
155 /*
156 * Process reply
157 */
158 reply = (struct c2wr_nsmr_pbl_rep *) (unsigned long) vq_req->reply_msg;
159 if (!reply) {
160 err = -ENOMEM;
161 goto bail0;
162 }
163
164 err = c2_errno(reply);
165
166 vq_repbuf_free(c2dev, reply);
167bail0:
168 kfree(wr);
169 return err;
170}
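
The chunking performed by send_pbl_messages() above reduces to splitting pbl_depth entries into messages of at most pbe_count entries each and flagging only the final chunk as complete, so the adapter replies exactly once. Here is a standalone check of that arithmetic; PBE_COUNT and the sample depth are assumptions chosen for the example, and no hardware is involved.

#include <assert.h>
#include <stdio.h>

#define PBE_COUNT 3	/* entries that fit in one message (assumed) */

int main(void)
{
	unsigned int pbl_depth = 8, pbl_index = 0, msgs = 0;

	while (pbl_depth) {
		unsigned int count = pbl_depth < PBE_COUNT ? pbl_depth
							   : PBE_COUNT;
		int last = (count == pbl_depth);

		printf("msg %u: entries [%u..%u)%s\n", msgs, pbl_index,
		       pbl_index + count, last ? " (MEM_PBL_COMPLETE)" : "");
		pbl_depth -= count;
		pbl_index += count;
		msgs++;
	}
	assert(msgs == 3 && pbl_index == 8);
	return 0;
}
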
171
172#define C2_PBL_MAX_DEPTH 131072
173int
174c2_nsmr_register_phys_kern(struct c2_dev *c2dev, u64 *addr_list,
175 int page_size, int pbl_depth, u32 length,
176 u32 offset, u64 *va, enum c2_acf acf,
177 struct c2_mr *mr)
178{
179 struct c2_vq_req *vq_req;
180 struct c2wr_nsmr_register_req *wr;
181 struct c2wr_nsmr_register_rep *reply;
182 u16 flags;
183 int i, pbe_count, count;
184 int err;
185
186 if (!va || !length || !addr_list || !pbl_depth)
187 return -EINTR;
188
189 /*
190 * Verify PBL depth is within rnic max
191 */
192 if (pbl_depth > C2_PBL_MAX_DEPTH) {
193 return -EINTR;
194 }
195
196 /*
197 * allocate verbs request object
198 */
199 vq_req = vq_req_alloc(c2dev);
200 if (!vq_req)
201 return -ENOMEM;
202
203 wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
204 if (!wr) {
205 err = -ENOMEM;
206 goto bail0;
207 }
208
209 /*
210 * build the WR
211 */
212 c2_wr_set_id(wr, CCWR_NSMR_REGISTER);
213 wr->hdr.context = (unsigned long) vq_req;
214 wr->rnic_handle = c2dev->adapter_handle;
215
216 flags = (acf | MEM_VA_BASED | MEM_REMOTE);
217
218 /*
219 * compute how many pbes can fit in the message
220 */
221 pbe_count = (c2dev->req_vq.msg_size -
222 sizeof(struct c2wr_nsmr_register_req)) / sizeof(u64);
223
224 if (pbl_depth <= pbe_count) {
225 flags |= MEM_PBL_COMPLETE;
226 }
227 wr->flags = cpu_to_be16(flags);
228 wr->stag_key = 0; //stag_key;
229 wr->va = cpu_to_be64(*va);
230 wr->pd_id = mr->pd->pd_id;
231 wr->pbe_size = cpu_to_be32(page_size);
232 wr->length = cpu_to_be32(length);
233 wr->pbl_depth = cpu_to_be32(pbl_depth);
234 wr->fbo = cpu_to_be32(offset);
235 count = min(pbl_depth, pbe_count);
236 wr->addrs_length = cpu_to_be32(count);
237
238 /*
239 * fill out the PBL for this message
240 */
241 for (i = 0; i < count; i++) {
242 wr->paddrs[i] = cpu_to_be64(addr_list[i]);
243 }
244
245 /*
246 * reference the request struct
247 */
248 vq_req_get(c2dev, vq_req);
249
250 /*
251 * send the WR to the adapter
252 */
253 err = vq_send_wr(c2dev, (union c2wr *) wr);
254 if (err) {
255 vq_req_put(c2dev, vq_req);
256 goto bail1;
257 }
258
259 /*
260 * wait for reply from adapter
261 */
262 err = vq_wait_for_reply(c2dev, vq_req);
263 if (err) {
264 goto bail1;
265 }
266
267 /*
268 * process reply
269 */
270 reply =
271 (struct c2wr_nsmr_register_rep *) (unsigned long) (vq_req->reply_msg);
272 if (!reply) {
273 err = -ENOMEM;
274 goto bail1;
275 }
276 if ((err = c2_errno(reply))) {
277 goto bail2;
278 }
279 //*p_pb_entries = be32_to_cpu(reply->pbl_depth);
280 mr->ibmr.lkey = mr->ibmr.rkey = be32_to_cpu(reply->stag_index);
281 vq_repbuf_free(c2dev, reply);
282
283 /*
284 * if there are still more PBEs we need to send them to
285 * the adapter and wait for a reply on the final one.
286 * reuse vq_req for this purpose.
287 */
288 pbl_depth -= count;
289 if (pbl_depth) {
290
291 vq_req->reply_msg = (unsigned long) NULL;
292 atomic_set(&vq_req->reply_ready, 0);
293 err = send_pbl_messages(c2dev,
294 cpu_to_be32(mr->ibmr.lkey),
295 (unsigned long) &addr_list[i],
296 pbl_depth, vq_req, PBL_PHYS);
297 if (err) {
298 goto bail1;
299 }
300 }
301
302 vq_req_free(c2dev, vq_req);
303 kfree(wr);
304
305 return err;
306
307bail2:
308 vq_repbuf_free(c2dev, reply);
309bail1:
310 kfree(wr);
311bail0:
312 vq_req_free(c2dev, vq_req);
313 return err;
314}
315
316int c2_stag_dealloc(struct c2_dev *c2dev, u32 stag_index)
317{
318 struct c2_vq_req *vq_req; /* verbs request object */
319 struct c2wr_stag_dealloc_req wr; /* work request */
320 struct c2wr_stag_dealloc_rep *reply; /* WR reply */
321 int err;
322
323
324 /*
325 * allocate verbs request object
326 */
327 vq_req = vq_req_alloc(c2dev);
328 if (!vq_req) {
329 return -ENOMEM;
330 }
331
332 /*
333 * Build the WR
334 */
335 c2_wr_set_id(&wr, CCWR_STAG_DEALLOC);
336 wr.hdr.context = (u64) (unsigned long) vq_req;
337 wr.rnic_handle = c2dev->adapter_handle;
338 wr.stag_index = cpu_to_be32(stag_index);
339
340 /*
341 * reference the request struct. dereferenced in the int handler.
342 */
343 vq_req_get(c2dev, vq_req);
344
345 /*
346 * Send WR to adapter
347 */
348 err = vq_send_wr(c2dev, (union c2wr *) & wr);
349 if (err) {
350 vq_req_put(c2dev, vq_req);
351 goto bail0;
352 }
353
354 /*
355 * Wait for reply from adapter
356 */
357 err = vq_wait_for_reply(c2dev, vq_req);
358 if (err) {
359 goto bail0;
360 }
361
362 /*
363 * Process reply
364 */
365 reply = (struct c2wr_stag_dealloc_rep *) (unsigned long) vq_req->reply_msg;
366 if (!reply) {
367 err = -ENOMEM;
368 goto bail0;
369 }
370
371 err = c2_errno(reply);
372
373 vq_repbuf_free(c2dev, reply);
374bail0:
375 vq_req_free(c2dev, vq_req);
376 return err;
377}
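
Every verbs call in this file follows the same request life cycle: allocate a request, take an extra reference for the reply handler, send the work request, wait for the reply, read the status out of the reply, and unwind with goto labels on failure. The compilable outline below mirrors that calling pattern with the adapter interaction stubbed out; req_alloc(), send_wr() and the other helpers are local stand-ins, not the driver's vq_* functions, and the reference handling is simplified accordingly.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* local stand-ins, not the driver's vq_* implementation */
struct vq_req { int refcount; };

static struct vq_req *req_alloc(void)
{
	struct vq_req *r = calloc(1, sizeof(*r));

	if (r)
		r->refcount = 1;
	return r;
}

static void req_get(struct vq_req *r) { r->refcount++; }

static void req_put(struct vq_req *r)
{
	if (--r->refcount == 0)
		free(r);
}

static int send_wr(void)        { return 0; }	/* pretend the post succeeded */
static int wait_for_reply(void) { return 0; }	/* pretend a reply came back  */
static int reply_status(void)   { return 0; }	/* status carried in reply    */

static int do_verbs_call(void)
{
	struct vq_req *req = req_alloc();
	int err;

	if (!req)
		return -ENOMEM;

	req_get(req);		/* extra ref for the reply handler */
	err = send_wr();
	if (err) {
		req_put(req);	/* handler will never run, drop its ref */
		goto bail;
	}

	err = wait_for_reply();
	if (err) {
		req_put(req);	/* reply never consumed: reclaim its ref */
		goto bail;
	}

	req_put(req);		/* stands in for the handler's put */
	err = reply_status();
bail:
	req_put(req);		/* drop our own reference */
	return err;
}

int main(void)
{
	printf("do_verbs_call() = %d\n", do_verbs_call());
	return 0;
}
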
diff --git a/drivers/staging/rdma/amso1100/c2_mq.c b/drivers/staging/rdma/amso1100/c2_mq.c
deleted file mode 100644
index 7827fb8bdb10..000000000000
--- a/drivers/staging/rdma/amso1100/c2_mq.c
+++ /dev/null
@@ -1,175 +0,0 @@
1/*
2 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33#include "c2.h"
34#include "c2_mq.h"
35
36void *c2_mq_alloc(struct c2_mq *q)
37{
38 BUG_ON(q->magic != C2_MQ_MAGIC);
39 BUG_ON(q->type != C2_MQ_ADAPTER_TARGET);
40
41 if (c2_mq_full(q)) {
42 return NULL;
43 } else {
44#ifdef DEBUG
45 struct c2wr_hdr *m =
46 (struct c2wr_hdr *) (q->msg_pool.host + q->priv * q->msg_size);
47#ifdef CCMSGMAGIC
48 BUG_ON(m->magic != be32_to_cpu(~CCWR_MAGIC));
49 m->magic = cpu_to_be32(CCWR_MAGIC);
50#endif
51 return m;
52#else
53 return q->msg_pool.host + q->priv * q->msg_size;
54#endif
55 }
56}
57
58void c2_mq_produce(struct c2_mq *q)
59{
60 BUG_ON(q->magic != C2_MQ_MAGIC);
61 BUG_ON(q->type != C2_MQ_ADAPTER_TARGET);
62
63 if (!c2_mq_full(q)) {
64 q->priv = (q->priv + 1) % q->q_size;
65 q->hint_count++;
66 /* Update peer's offset. */
67 __raw_writew((__force u16) cpu_to_be16(q->priv), &q->peer->shared);
68 }
69}
70
71void *c2_mq_consume(struct c2_mq *q)
72{
73 BUG_ON(q->magic != C2_MQ_MAGIC);
74 BUG_ON(q->type != C2_MQ_HOST_TARGET);
75
76 if (c2_mq_empty(q)) {
77 return NULL;
78 } else {
79#ifdef DEBUG
80 struct c2wr_hdr *m = (struct c2wr_hdr *)
81 (q->msg_pool.host + q->priv * q->msg_size);
82#ifdef CCMSGMAGIC
83 BUG_ON(m->magic != be32_to_cpu(CCWR_MAGIC));
84#endif
85 return m;
86#else
87 return q->msg_pool.host + q->priv * q->msg_size;
88#endif
89 }
90}
91
92void c2_mq_free(struct c2_mq *q)
93{
94 BUG_ON(q->magic != C2_MQ_MAGIC);
95 BUG_ON(q->type != C2_MQ_HOST_TARGET);
96
97 if (!c2_mq_empty(q)) {
98
99#ifdef CCMSGMAGIC
100 {
101 struct c2wr_hdr __iomem *m = (struct c2wr_hdr __iomem *)
102 (q->msg_pool.adapter + q->priv * q->msg_size);
103 __raw_writel(cpu_to_be32(~CCWR_MAGIC), &m->magic);
104 }
105#endif
106 q->priv = (q->priv + 1) % q->q_size;
107 /* Update peer's offset. */
108 __raw_writew((__force u16) cpu_to_be16(q->priv), &q->peer->shared);
109 }
110}
111
112
113void c2_mq_lconsume(struct c2_mq *q, u32 wqe_count)
114{
115 BUG_ON(q->magic != C2_MQ_MAGIC);
116 BUG_ON(q->type != C2_MQ_ADAPTER_TARGET);
117
118 while (wqe_count--) {
119 BUG_ON(c2_mq_empty(q));
120 *q->shared = cpu_to_be16((be16_to_cpu(*q->shared)+1) % q->q_size);
121 }
122}
123
124#if 0
125u32 c2_mq_count(struct c2_mq *q)
126{
127 s32 count;
128
129 if (q->type == C2_MQ_HOST_TARGET)
130 count = be16_to_cpu(*q->shared) - q->priv;
131 else
132 count = q->priv - be16_to_cpu(*q->shared);
133
134 if (count < 0)
135 count += q->q_size;
136
137 return (u32) count;
138}
139#endif /* 0 */
140
141void c2_mq_req_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size,
142 u8 __iomem *pool_start, u16 __iomem *peer, u32 type)
143{
144 BUG_ON(!q->shared);
145
146 /* This code assumes the byte swapping has already been done! */
147 q->index = index;
148 q->q_size = q_size;
149 q->msg_size = msg_size;
150 q->msg_pool.adapter = pool_start;
151 q->peer = (struct c2_mq_shared __iomem *) peer;
152 q->magic = C2_MQ_MAGIC;
153 q->type = type;
154 q->priv = 0;
155 q->hint_count = 0;
156 return;
157}
158
159void c2_mq_rep_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size,
160 u8 *pool_start, u16 __iomem *peer, u32 type)
161{
162 BUG_ON(!q->shared);
163
164 /* This code assumes the byte swapping has already been done! */
165 q->index = index;
166 q->q_size = q_size;
167 q->msg_size = msg_size;
168 q->msg_pool.host = pool_start;
169 q->peer = (struct c2_mq_shared __iomem *) peer;
170 q->magic = C2_MQ_MAGIC;
171 q->type = type;
172 q->priv = 0;
173 q->hint_count = 0;
174 return;
175}
diff --git a/drivers/staging/rdma/amso1100/c2_mq.h b/drivers/staging/rdma/amso1100/c2_mq.h
deleted file mode 100644
index 8e1b4d13409e..000000000000
--- a/drivers/staging/rdma/amso1100/c2_mq.h
+++ /dev/null
@@ -1,106 +0,0 @@
1/*
2 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#ifndef _C2_MQ_H_
35#define _C2_MQ_H_
36#include <linux/kernel.h>
37#include <linux/dma-mapping.h>
38#include "c2_wr.h"
39
40enum c2_shared_regs {
41
42 C2_SHARED_ARMED = 0x10,
43 C2_SHARED_NOTIFY = 0x18,
44 C2_SHARED_SHARED = 0x40,
45};
46
47struct c2_mq_shared {
48 u16 unused1;
49 u8 armed;
50 u8 notification_type;
51 u32 unused2;
52 u16 shared;
53 /* Pad to 64 bytes. */
54 u8 pad[64 - sizeof(u16) - 2 * sizeof(u8) - sizeof(u32) - sizeof(u16)];
55};
56
57enum c2_mq_type {
58 C2_MQ_HOST_TARGET = 1,
59 C2_MQ_ADAPTER_TARGET = 2,
60};
61
62/*
63 * c2_mq_t is for kernel-mode MQs like the VQs and the AEQ.
64 * c2_user_mq_t (which is the same format) is for user-mode MQs...
65 */
66#define C2_MQ_MAGIC 0x4d512020 /* 'MQ ' */
67struct c2_mq {
68 u32 magic;
69 union {
70 u8 *host;
71 u8 __iomem *adapter;
72 } msg_pool;
73 dma_addr_t host_dma;
74 DEFINE_DMA_UNMAP_ADDR(mapping);
75 u16 hint_count;
76 u16 priv;
77 struct c2_mq_shared __iomem *peer;
78 __be16 *shared;
79 dma_addr_t shared_dma;
80 u32 q_size;
81 u32 msg_size;
82 u32 index;
83 enum c2_mq_type type;
84};
85
86static __inline__ int c2_mq_empty(struct c2_mq *q)
87{
88 return q->priv == be16_to_cpu(*q->shared);
89}
90
91static __inline__ int c2_mq_full(struct c2_mq *q)
92{
93 return q->priv == (be16_to_cpu(*q->shared) + q->q_size - 1) % q->q_size;
94}
95
96void c2_mq_lconsume(struct c2_mq *q, u32 wqe_count);
97void *c2_mq_alloc(struct c2_mq *q);
98void c2_mq_produce(struct c2_mq *q);
99void *c2_mq_consume(struct c2_mq *q);
100void c2_mq_free(struct c2_mq *q);
101void c2_mq_req_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size,
102 u8 __iomem *pool_start, u16 __iomem *peer, u32 type);
103void c2_mq_rep_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size,
104 u8 *pool_start, u16 __iomem *peer, u32 type);
105
106#endif /* _C2_MQ_H_ */
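
The index arithmetic behind c2_mq_empty() and c2_mq_full() above is the standard one-slot-spare ring: the queue is empty when the two indices are equal and full when the write index is one slot behind the read index, so a queue of q_size slots holds q_size - 1 messages. This userspace check runs the same formulas with priv as the write index and shared as the read index (in the driver the roles depend on the queue direction, but the arithmetic is identical); Q_SIZE is an arbitrary example value.

#include <assert.h>
#include <stdio.h>

#define Q_SIZE 4u

static unsigned int priv, shared;	/* local index / peer's index */

static int mq_empty(void) { return priv == shared; }
static int mq_full(void)  { return priv == (shared + Q_SIZE - 1) % Q_SIZE; }

int main(void)
{
	unsigned int produced = 0;

	assert(mq_empty());
	while (!mq_full()) {		/* producer side advances "priv"   */
		priv = (priv + 1) % Q_SIZE;
		produced++;
	}
	assert(produced == Q_SIZE - 1);	/* one slot always stays unused    */

	while (!mq_empty())		/* consumer side advances "shared" */
		shared = (shared + 1) % Q_SIZE;

	printf("capacity for Q_SIZE=%u is %u messages\n", Q_SIZE, produced);
	return 0;
}
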
diff --git a/drivers/staging/rdma/amso1100/c2_pd.c b/drivers/staging/rdma/amso1100/c2_pd.c
deleted file mode 100644
index f3e81dc357bb..000000000000
--- a/drivers/staging/rdma/amso1100/c2_pd.c
+++ /dev/null
@@ -1,90 +0,0 @@
1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Cisco Systems. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
5 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 */
35
36#include <linux/init.h>
37#include <linux/slab.h>
38#include <linux/errno.h>
39
40#include "c2.h"
41#include "c2_provider.h"
42
43int c2_pd_alloc(struct c2_dev *c2dev, int privileged, struct c2_pd *pd)
44{
45 u32 obj;
46 int ret = 0;
47
48 spin_lock(&c2dev->pd_table.lock);
49 obj = find_next_zero_bit(c2dev->pd_table.table, c2dev->pd_table.max,
50 c2dev->pd_table.last);
51 if (obj >= c2dev->pd_table.max)
52 obj = find_first_zero_bit(c2dev->pd_table.table,
53 c2dev->pd_table.max);
54 if (obj < c2dev->pd_table.max) {
55 pd->pd_id = obj;
56 __set_bit(obj, c2dev->pd_table.table);
57 c2dev->pd_table.last = obj+1;
58 if (c2dev->pd_table.last >= c2dev->pd_table.max)
59 c2dev->pd_table.last = 0;
60 } else
61 ret = -ENOMEM;
62 spin_unlock(&c2dev->pd_table.lock);
63 return ret;
64}
65
66void c2_pd_free(struct c2_dev *c2dev, struct c2_pd *pd)
67{
68 spin_lock(&c2dev->pd_table.lock);
69 __clear_bit(pd->pd_id, c2dev->pd_table.table);
70 spin_unlock(&c2dev->pd_table.lock);
71}
72
73int c2_init_pd_table(struct c2_dev *c2dev)
74{
75
76 c2dev->pd_table.last = 0;
77 c2dev->pd_table.max = c2dev->props.max_pd;
78 spin_lock_init(&c2dev->pd_table.lock);
79 c2dev->pd_table.table = kmalloc(BITS_TO_LONGS(c2dev->props.max_pd) *
80 sizeof(long), GFP_KERNEL);
81 if (!c2dev->pd_table.table)
82 return -ENOMEM;
83 bitmap_zero(c2dev->pd_table.table, c2dev->props.max_pd);
84 return 0;
85}
86
87void c2_cleanup_pd_table(struct c2_dev *c2dev)
88{
89 kfree(c2dev->pd_table.table);
90}
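
c2_pd_alloc() above implements a round-robin ID allocator: it searches for a free bit starting at the point of the last allocation, wraps to the beginning if nothing is found, and remembers where to resume next time so recently freed IDs are not reused immediately. A minimal userspace model of that policy, with the kernel bitmap helpers replaced by a plain array, is shown below; MAX_IDS and the helper names are chosen for the example only.

#include <assert.h>
#include <stdio.h>

#define MAX_IDS 4

static char used[MAX_IDS];
static unsigned int last;

static int alloc_id(void)
{
	unsigned int i, obj;

	for (i = 0; i < MAX_IDS; i++) {
		obj = (last + i) % MAX_IDS;	/* wraps like the second find */
		if (!used[obj]) {
			used[obj] = 1;
			last = (obj + 1) % MAX_IDS;
			return (int)obj;
		}
	}
	return -1;				/* table exhausted (-ENOMEM)  */
}

static void free_id(int obj) { used[obj] = 0; }

int main(void)
{
	assert(alloc_id() == 0 && alloc_id() == 1);
	free_id(0);
	/* next search starts after id 1, so 2 and 3 come before the freed 0 */
	assert(alloc_id() == 2 && alloc_id() == 3 && alloc_id() == 0);
	assert(alloc_id() == -1);
	printf("round-robin allocation order verified\n");
	return 0;
}
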
diff --git a/drivers/staging/rdma/amso1100/c2_provider.c b/drivers/staging/rdma/amso1100/c2_provider.c
deleted file mode 100644
index de8d10e1bde3..000000000000
--- a/drivers/staging/rdma/amso1100/c2_provider.c
+++ /dev/null
@@ -1,862 +0,0 @@
1/*
2 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 *
33 */
34
35#include <linux/module.h>
36#include <linux/moduleparam.h>
37#include <linux/pci.h>
38#include <linux/netdevice.h>
39#include <linux/etherdevice.h>
40#include <linux/inetdevice.h>
41#include <linux/delay.h>
42#include <linux/ethtool.h>
43#include <linux/mii.h>
44#include <linux/if_vlan.h>
45#include <linux/crc32.h>
46#include <linux/in.h>
47#include <linux/ip.h>
48#include <linux/tcp.h>
49#include <linux/init.h>
50#include <linux/dma-mapping.h>
51#include <linux/if_arp.h>
52#include <linux/vmalloc.h>
53#include <linux/slab.h>
54
55#include <asm/io.h>
56#include <asm/irq.h>
57#include <asm/byteorder.h>
58
59#include <rdma/ib_smi.h>
60#include <rdma/ib_umem.h>
61#include <rdma/ib_user_verbs.h>
62#include "c2.h"
63#include "c2_provider.h"
64#include "c2_user.h"
65
66static int c2_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
67 struct ib_udata *uhw)
68{
69 struct c2_dev *c2dev = to_c2dev(ibdev);
70
71 pr_debug("%s:%u\n", __func__, __LINE__);
72
73 if (uhw->inlen || uhw->outlen)
74 return -EINVAL;
75
76 *props = c2dev->props;
77 return 0;
78}
79
80static int c2_query_port(struct ib_device *ibdev,
81 u8 port, struct ib_port_attr *props)
82{
83 pr_debug("%s:%u\n", __func__, __LINE__);
84
85 props->max_mtu = IB_MTU_4096;
86 props->lid = 0;
87 props->lmc = 0;
88 props->sm_lid = 0;
89 props->sm_sl = 0;
90 props->state = IB_PORT_ACTIVE;
91 props->phys_state = 0;
92 props->port_cap_flags =
93 IB_PORT_CM_SUP |
94 IB_PORT_REINIT_SUP |
95 IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
96 props->gid_tbl_len = 1;
97 props->pkey_tbl_len = 1;
98 props->qkey_viol_cntr = 0;
99 props->active_width = 1;
100 props->active_speed = IB_SPEED_SDR;
101
102 return 0;
103}
104
105static int c2_query_pkey(struct ib_device *ibdev,
106 u8 port, u16 index, u16 * pkey)
107{
108 pr_debug("%s:%u\n", __func__, __LINE__);
109 *pkey = 0;
110 return 0;
111}
112
113static int c2_query_gid(struct ib_device *ibdev, u8 port,
114 int index, union ib_gid *gid)
115{
116 struct c2_dev *c2dev = to_c2dev(ibdev);
117
118 pr_debug("%s:%u\n", __func__, __LINE__);
119 memset(&(gid->raw[0]), 0, sizeof(gid->raw));
120 memcpy(&(gid->raw[0]), c2dev->pseudo_netdev->dev_addr, 6);
121
122 return 0;
123}
124
125/* Allocate the user context data structure. This keeps track
126 * of all objects associated with a particular user-mode client.
127 */
128static struct ib_ucontext *c2_alloc_ucontext(struct ib_device *ibdev,
129 struct ib_udata *udata)
130{
131 struct c2_ucontext *context;
132
133 pr_debug("%s:%u\n", __func__, __LINE__);
134 context = kmalloc(sizeof(*context), GFP_KERNEL);
135 if (!context)
136 return ERR_PTR(-ENOMEM);
137
138 return &context->ibucontext;
139}
140
141static int c2_dealloc_ucontext(struct ib_ucontext *context)
142{
143 pr_debug("%s:%u\n", __func__, __LINE__);
144 kfree(context);
145 return 0;
146}
147
148static int c2_mmap_uar(struct ib_ucontext *context, struct vm_area_struct *vma)
149{
150 pr_debug("%s:%u\n", __func__, __LINE__);
151 return -ENOSYS;
152}
153
154static struct ib_pd *c2_alloc_pd(struct ib_device *ibdev,
155 struct ib_ucontext *context,
156 struct ib_udata *udata)
157{
158 struct c2_pd *pd;
159 int err;
160
161 pr_debug("%s:%u\n", __func__, __LINE__);
162
163 pd = kmalloc(sizeof(*pd), GFP_KERNEL);
164 if (!pd)
165 return ERR_PTR(-ENOMEM);
166
167 err = c2_pd_alloc(to_c2dev(ibdev), !context, pd);
168 if (err) {
169 kfree(pd);
170 return ERR_PTR(err);
171 }
172
173 if (context) {
174 if (ib_copy_to_udata(udata, &pd->pd_id, sizeof(__u32))) {
175 c2_pd_free(to_c2dev(ibdev), pd);
176 kfree(pd);
177 return ERR_PTR(-EFAULT);
178 }
179 }
180
181 return &pd->ibpd;
182}
183
184static int c2_dealloc_pd(struct ib_pd *pd)
185{
186 pr_debug("%s:%u\n", __func__, __LINE__);
187 c2_pd_free(to_c2dev(pd->device), to_c2pd(pd));
188 kfree(pd);
189
190 return 0;
191}
192
193static struct ib_ah *c2_ah_create(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
194{
195 pr_debug("%s:%u\n", __func__, __LINE__);
196 return ERR_PTR(-ENOSYS);
197}
198
199static int c2_ah_destroy(struct ib_ah *ah)
200{
201 pr_debug("%s:%u\n", __func__, __LINE__);
202 return -ENOSYS;
203}
204
205static void c2_add_ref(struct ib_qp *ibqp)
206{
207 struct c2_qp *qp;
208 BUG_ON(!ibqp);
209 qp = to_c2qp(ibqp);
210 atomic_inc(&qp->refcount);
211}
212
213static void c2_rem_ref(struct ib_qp *ibqp)
214{
215 struct c2_qp *qp;
216 BUG_ON(!ibqp);
217 qp = to_c2qp(ibqp);
218 if (atomic_dec_and_test(&qp->refcount))
219 wake_up(&qp->wait);
220}
221
222struct ib_qp *c2_get_qp(struct ib_device *device, int qpn)
223{
224 struct c2_dev* c2dev = to_c2dev(device);
225 struct c2_qp *qp;
226
227 qp = c2_find_qpn(c2dev, qpn);
228 pr_debug("%s Returning QP=%p for QPN=%d, device=%p, refcount=%d\n",
229 __func__, qp, qpn, device,
230 (qp?atomic_read(&qp->refcount):0));
231
232 return (qp?&qp->ibqp:NULL);
233}
234
235static struct ib_qp *c2_create_qp(struct ib_pd *pd,
236 struct ib_qp_init_attr *init_attr,
237 struct ib_udata *udata)
238{
239 struct c2_qp *qp;
240 int err;
241
242 pr_debug("%s:%u\n", __func__, __LINE__);
243
244 if (init_attr->create_flags)
245 return ERR_PTR(-EINVAL);
246
247 switch (init_attr->qp_type) {
248 case IB_QPT_RC:
249 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
250 if (!qp) {
251 pr_debug("%s: Unable to allocate QP\n", __func__);
252 return ERR_PTR(-ENOMEM);
253 }
254 spin_lock_init(&qp->lock);
255 if (pd->uobject) {
256 /* userspace specific */
257 }
258
259 err = c2_alloc_qp(to_c2dev(pd->device),
260 to_c2pd(pd), init_attr, qp);
261
262 if (err && pd->uobject) {
263 /* userspace specific */
264 }
265
266 break;
267 default:
268 pr_debug("%s: Invalid QP type: %d\n", __func__,
269 init_attr->qp_type);
270 return ERR_PTR(-EINVAL);
271 }
272
273 if (err) {
274 kfree(qp);
275 return ERR_PTR(err);
276 }
277
278 return &qp->ibqp;
279}
280
281static int c2_destroy_qp(struct ib_qp *ib_qp)
282{
283 struct c2_qp *qp = to_c2qp(ib_qp);
284
285 pr_debug("%s:%u qp=%p,qp->state=%d\n",
286 __func__, __LINE__, ib_qp, qp->state);
287 c2_free_qp(to_c2dev(ib_qp->device), qp);
288 kfree(qp);
289 return 0;
290}
291
292static struct ib_cq *c2_create_cq(struct ib_device *ibdev,
293 const struct ib_cq_init_attr *attr,
294 struct ib_ucontext *context,
295 struct ib_udata *udata)
296{
297 int entries = attr->cqe;
298 struct c2_cq *cq;
299 int err;
300
301 if (attr->flags)
302 return ERR_PTR(-EINVAL);
303
304 cq = kmalloc(sizeof(*cq), GFP_KERNEL);
305 if (!cq) {
306 pr_debug("%s: Unable to allocate CQ\n", __func__);
307 return ERR_PTR(-ENOMEM);
308 }
309
310 err = c2_init_cq(to_c2dev(ibdev), entries, NULL, cq);
311 if (err) {
312 pr_debug("%s: error initializing CQ\n", __func__);
313 kfree(cq);
314 return ERR_PTR(err);
315 }
316
317 return &cq->ibcq;
318}
319
320static int c2_destroy_cq(struct ib_cq *ib_cq)
321{
322 struct c2_cq *cq = to_c2cq(ib_cq);
323
324 pr_debug("%s:%u\n", __func__, __LINE__);
325
326 c2_free_cq(to_c2dev(ib_cq->device), cq);
327 kfree(cq);
328
329 return 0;
330}
331
332static inline u32 c2_convert_access(int acc)
333{
334 return (acc & IB_ACCESS_REMOTE_WRITE ? C2_ACF_REMOTE_WRITE : 0) |
335 (acc & IB_ACCESS_REMOTE_READ ? C2_ACF_REMOTE_READ : 0) |
336 (acc & IB_ACCESS_LOCAL_WRITE ? C2_ACF_LOCAL_WRITE : 0) |
337 C2_ACF_LOCAL_READ | C2_ACF_WINDOW_BIND;
338}
339
340static struct ib_mr *c2_get_dma_mr(struct ib_pd *pd, int acc)
341{
342 struct c2_mr *mr;
343 u64 *page_list;
344 const u32 total_len = 0xffffffff; /* AMSO1100 limit */
345 int err, page_shift, pbl_depth, i;
346 u64 kva = 0;
347
348 pr_debug("%s:%u\n", __func__, __LINE__);
349
350 /*
351 * This is a map of all phy mem...use a 32k page_shift.
352 */
353 page_shift = PAGE_SHIFT + 3;
354 pbl_depth = ALIGN(total_len, BIT(page_shift)) >> page_shift;
355
356 page_list = vmalloc(sizeof(u64) * pbl_depth);
357 if (!page_list) {
358 pr_debug("couldn't vmalloc page_list of size %zd\n",
359 (sizeof(u64) * pbl_depth));
360 return ERR_PTR(-ENOMEM);
361 }
362
363 for (i = 0; i < pbl_depth; i++)
364 page_list[i] = (i << page_shift);
365
366 mr = kmalloc(sizeof(*mr), GFP_KERNEL);
367 if (!mr) {
368 vfree(page_list);
369 return ERR_PTR(-ENOMEM);
370 }
371
372 mr->pd = to_c2pd(pd);
373 mr->umem = NULL;
374 pr_debug("%s - page shift %d, pbl_depth %d, total_len %u, "
375 "*iova_start %llx, first pa %llx, last pa %llx\n",
376 __func__, page_shift, pbl_depth, total_len,
377 (unsigned long long) kva,
378 (unsigned long long) page_list[0],
379 (unsigned long long) page_list[pbl_depth-1]);
380 err = c2_nsmr_register_phys_kern(to_c2dev(pd->device), page_list,
381 BIT(page_shift), pbl_depth,
382 total_len, 0, &kva,
383 c2_convert_access(acc), mr);
384 vfree(page_list);
385 if (err) {
386 kfree(mr);
387 return ERR_PTR(err);
388 }
389
390 return &mr->ibmr;
391}
392
393static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
394 u64 virt, int acc, struct ib_udata *udata)
395{
396 u64 *pages;
397 u64 kva = 0;
398 int shift, n, len;
399 int i, k, entry;
400 int err = 0;
401 struct scatterlist *sg;
402 struct c2_pd *c2pd = to_c2pd(pd);
403 struct c2_mr *c2mr;
404
405 pr_debug("%s:%u\n", __func__, __LINE__);
406
407 c2mr = kmalloc(sizeof(*c2mr), GFP_KERNEL);
408 if (!c2mr)
409 return ERR_PTR(-ENOMEM);
410 c2mr->pd = c2pd;
411
412 c2mr->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
413 if (IS_ERR(c2mr->umem)) {
414 err = PTR_ERR(c2mr->umem);
415 kfree(c2mr);
416 return ERR_PTR(err);
417 }
418
419 shift = ffs(c2mr->umem->page_size) - 1;
420 n = c2mr->umem->nmap;
421
422 pages = kmalloc_array(n, sizeof(u64), GFP_KERNEL);
423 if (!pages) {
424 err = -ENOMEM;
425 goto err;
426 }
427
428 i = 0;
429 for_each_sg(c2mr->umem->sg_head.sgl, sg, c2mr->umem->nmap, entry) {
430 len = sg_dma_len(sg) >> shift;
431 for (k = 0; k < len; ++k) {
432 pages[i++] =
433 sg_dma_address(sg) +
434 (c2mr->umem->page_size * k);
435 }
436 }
437
438 kva = virt;
439 err = c2_nsmr_register_phys_kern(to_c2dev(pd->device),
440 pages,
441 c2mr->umem->page_size,
442 i,
443 length,
444 ib_umem_offset(c2mr->umem),
445 &kva,
446 c2_convert_access(acc),
447 c2mr);
448 kfree(pages);
449 if (err)
450 goto err;
451 return &c2mr->ibmr;
452
453err:
454 ib_umem_release(c2mr->umem);
455 kfree(c2mr);
456 return ERR_PTR(err);
457}
458
459static int c2_dereg_mr(struct ib_mr *ib_mr)
460{
461 struct c2_mr *mr = to_c2mr(ib_mr);
462 int err;
463
464 pr_debug("%s:%u\n", __func__, __LINE__);
465
466 err = c2_stag_dealloc(to_c2dev(ib_mr->device), ib_mr->lkey);
467 if (err)
468 pr_debug("c2_stag_dealloc failed: %d\n", err);
469 else {
470 if (mr->umem)
471 ib_umem_release(mr->umem);
472 kfree(mr);
473 }
474
475 return err;
476}
477
478static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
479 char *buf)
480{
481 struct c2_dev *c2dev = container_of(dev, struct c2_dev, ibdev.dev);
482 pr_debug("%s:%u\n", __func__, __LINE__);
483 return sprintf(buf, "%x\n", c2dev->props.hw_ver);
484}
485
486static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr,
487 char *buf)
488{
489 struct c2_dev *c2dev = container_of(dev, struct c2_dev, ibdev.dev);
490 pr_debug("%s:%u\n", __func__, __LINE__);
491 return sprintf(buf, "%x.%x.%x\n",
492 (int) (c2dev->props.fw_ver >> 32),
493 (int) (c2dev->props.fw_ver >> 16) & 0xffff,
494 (int) (c2dev->props.fw_ver & 0xffff));
495}
496
497static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
498 char *buf)
499{
500 pr_debug("%s:%u\n", __func__, __LINE__);
501 return sprintf(buf, "AMSO1100\n");
502}
503
504static ssize_t show_board(struct device *dev, struct device_attribute *attr,
505 char *buf)
506{
507 pr_debug("%s:%u\n", __func__, __LINE__);
508 return sprintf(buf, "%.*s\n", 32, "AMSO1100 Board ID");
509}
510
511static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
512static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
513static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
514static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
515
516static struct device_attribute *c2_dev_attributes[] = {
517 &dev_attr_hw_rev,
518 &dev_attr_fw_ver,
519 &dev_attr_hca_type,
520 &dev_attr_board_id
521};
522
523static int c2_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
524 int attr_mask, struct ib_udata *udata)
525{
526 int err;
527
528 err =
529 c2_qp_modify(to_c2dev(ibqp->device), to_c2qp(ibqp), attr,
530 attr_mask);
531
532 return err;
533}
534
535static int c2_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
536{
537 pr_debug("%s:%u\n", __func__, __LINE__);
538 return -ENOSYS;
539}
540
541static int c2_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
542{
543 pr_debug("%s:%u\n", __func__, __LINE__);
544 return -ENOSYS;
545}
546
547static int c2_process_mad(struct ib_device *ibdev,
548 int mad_flags,
549 u8 port_num,
550 const struct ib_wc *in_wc,
551 const struct ib_grh *in_grh,
552 const struct ib_mad_hdr *in_mad,
553 size_t in_mad_size,
554 struct ib_mad_hdr *out_mad,
555 size_t *out_mad_size,
556 u16 *out_mad_pkey_index)
557{
558 pr_debug("%s:%u\n", __func__, __LINE__);
559 return -ENOSYS;
560}
561
562static int c2_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
563{
564 pr_debug("%s:%u\n", __func__, __LINE__);
565
566 /* Request a connection */
567 return c2_llp_connect(cm_id, iw_param);
568}
569
570static int c2_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
571{
572 pr_debug("%s:%u\n", __func__, __LINE__);
573
574 /* Accept the new connection */
575 return c2_llp_accept(cm_id, iw_param);
576}
577
578static int c2_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
579{
580 pr_debug("%s:%u\n", __func__, __LINE__);
581
582 return c2_llp_reject(cm_id, pdata, pdata_len);
583}
584
585static int c2_service_create(struct iw_cm_id *cm_id, int backlog)
586{
587 int err;
588
589 pr_debug("%s:%u\n", __func__, __LINE__);
590 err = c2_llp_service_create(cm_id, backlog);
591 pr_debug("%s:%u err=%d\n",
592 __func__, __LINE__,
593 err);
594 return err;
595}
596
597static int c2_service_destroy(struct iw_cm_id *cm_id)
598{
599 pr_debug("%s:%u\n", __func__, __LINE__);
600
601 return c2_llp_service_destroy(cm_id);
602}
603
604static int c2_pseudo_up(struct net_device *netdev)
605{
606 struct in_device *ind;
607 struct c2_dev *c2dev = netdev->ml_priv;
608
609 ind = in_dev_get(netdev);
610 if (!ind)
611 return 0;
612
613 pr_debug("adding...\n");
614 for_ifa(ind) {
615#ifdef DEBUG
616 u8 *ip = (u8 *) & ifa->ifa_address;
617
618 pr_debug("%s: %d.%d.%d.%d\n",
619 ifa->ifa_label, ip[0], ip[1], ip[2], ip[3]);
620#endif
621 c2_add_addr(c2dev, ifa->ifa_address, ifa->ifa_mask);
622 }
623 endfor_ifa(ind);
624 in_dev_put(ind);
625
626 return 0;
627}
628
629static int c2_pseudo_down(struct net_device *netdev)
630{
631 struct in_device *ind;
632 struct c2_dev *c2dev = netdev->ml_priv;
633
634 ind = in_dev_get(netdev);
635 if (!ind)
636 return 0;
637
638 pr_debug("deleting...\n");
639 for_ifa(ind) {
640#ifdef DEBUG
641 u8 *ip = (u8 *) & ifa->ifa_address;
642
643 pr_debug("%s: %d.%d.%d.%d\n",
644 ifa->ifa_label, ip[0], ip[1], ip[2], ip[3]);
645#endif
646 c2_del_addr(c2dev, ifa->ifa_address, ifa->ifa_mask);
647 }
648 endfor_ifa(ind);
649 in_dev_put(ind);
650
651 return 0;
652}
653
654static int c2_pseudo_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
655{
656 kfree_skb(skb);
657 return NETDEV_TX_OK;
658}
659
660static int c2_pseudo_change_mtu(struct net_device *netdev, int new_mtu)
661{
662 if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
663 return -EINVAL;
664
665 netdev->mtu = new_mtu;
666
667 /* TODO: Tell rnic about new rmda interface mtu */
668 return 0;
669}
670
671static const struct net_device_ops c2_pseudo_netdev_ops = {
672 .ndo_open = c2_pseudo_up,
673 .ndo_stop = c2_pseudo_down,
674 .ndo_start_xmit = c2_pseudo_xmit_frame,
675 .ndo_change_mtu = c2_pseudo_change_mtu,
676 .ndo_validate_addr = eth_validate_addr,
677};
678
679static void setup(struct net_device *netdev)
680{
681 netdev->netdev_ops = &c2_pseudo_netdev_ops;
682
683 netdev->watchdog_timeo = 0;
684 netdev->type = ARPHRD_ETHER;
685 netdev->mtu = 1500;
686 netdev->hard_header_len = ETH_HLEN;
687 netdev->addr_len = ETH_ALEN;
688 netdev->tx_queue_len = 0;
689 netdev->flags |= IFF_NOARP;
690}
691
692static struct net_device *c2_pseudo_netdev_init(struct c2_dev *c2dev)
693{
694 char name[IFNAMSIZ];
695 struct net_device *netdev;
696
697 /* change ethxxx to iwxxx */
698 strcpy(name, "iw");
699 strcat(name, &c2dev->netdev->name[3]);
700 netdev = alloc_netdev(0, name, NET_NAME_UNKNOWN, setup);
701 if (!netdev) {
702 printk(KERN_ERR PFX "%s - etherdev alloc failed\n",
703 __func__);
704 return NULL;
705 }
706
707 netdev->ml_priv = c2dev;
708
709 SET_NETDEV_DEV(netdev, &c2dev->pcidev->dev);
710
711 memcpy_fromio(netdev->dev_addr, c2dev->kva + C2_REGS_RDMA_ENADDR, 6);
712
713 /* Print out the MAC address */
714 pr_debug("%s: MAC %pM\n", netdev->name, netdev->dev_addr);
715
716#if 0
717 /* Disable network packets */
718 netif_stop_queue(netdev);
719#endif
720 return netdev;
721}
722
723static int c2_port_immutable(struct ib_device *ibdev, u8 port_num,
724 struct ib_port_immutable *immutable)
725{
726 struct ib_port_attr attr;
727 int err;
728
729 err = c2_query_port(ibdev, port_num, &attr);
730 if (err)
731 return err;
732
733 immutable->pkey_tbl_len = attr.pkey_tbl_len;
734 immutable->gid_tbl_len = attr.gid_tbl_len;
735 immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
736
737 return 0;
738}
739
740int c2_register_device(struct c2_dev *dev)
741{
742 int ret = -ENOMEM;
743 int i;
744
745 /* Register pseudo network device */
746 dev->pseudo_netdev = c2_pseudo_netdev_init(dev);
747 if (!dev->pseudo_netdev)
748 goto out;
749
750 ret = register_netdev(dev->pseudo_netdev);
751 if (ret)
752 goto out_free_netdev;
753
754 pr_debug("%s:%u\n", __func__, __LINE__);
755 strlcpy(dev->ibdev.name, "amso%d", IB_DEVICE_NAME_MAX);
756 dev->ibdev.owner = THIS_MODULE;
757 dev->ibdev.uverbs_cmd_mask =
758 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
759 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
760 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
761 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
762 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
763 (1ull << IB_USER_VERBS_CMD_REG_MR) |
764 (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
765 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
766 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
767 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
768 (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
769 (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
770 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
771 (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
772 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
773 (1ull << IB_USER_VERBS_CMD_POST_SEND) |
774 (1ull << IB_USER_VERBS_CMD_POST_RECV);
775
776 dev->ibdev.node_type = RDMA_NODE_RNIC;
777 memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
778 memcpy(&dev->ibdev.node_guid, dev->pseudo_netdev->dev_addr, 6);
779 dev->ibdev.phys_port_cnt = 1;
780 dev->ibdev.num_comp_vectors = 1;
781 dev->ibdev.dma_device = &dev->pcidev->dev;
782 dev->ibdev.query_device = c2_query_device;
783 dev->ibdev.query_port = c2_query_port;
784 dev->ibdev.query_pkey = c2_query_pkey;
785 dev->ibdev.query_gid = c2_query_gid;
786 dev->ibdev.alloc_ucontext = c2_alloc_ucontext;
787 dev->ibdev.dealloc_ucontext = c2_dealloc_ucontext;
788 dev->ibdev.mmap = c2_mmap_uar;
789 dev->ibdev.alloc_pd = c2_alloc_pd;
790 dev->ibdev.dealloc_pd = c2_dealloc_pd;
791 dev->ibdev.create_ah = c2_ah_create;
792 dev->ibdev.destroy_ah = c2_ah_destroy;
793 dev->ibdev.create_qp = c2_create_qp;
794 dev->ibdev.modify_qp = c2_modify_qp;
795 dev->ibdev.destroy_qp = c2_destroy_qp;
796 dev->ibdev.create_cq = c2_create_cq;
797 dev->ibdev.destroy_cq = c2_destroy_cq;
798 dev->ibdev.poll_cq = c2_poll_cq;
799 dev->ibdev.get_dma_mr = c2_get_dma_mr;
800 dev->ibdev.reg_user_mr = c2_reg_user_mr;
801 dev->ibdev.dereg_mr = c2_dereg_mr;
802 dev->ibdev.get_port_immutable = c2_port_immutable;
803
804 dev->ibdev.alloc_fmr = NULL;
805 dev->ibdev.unmap_fmr = NULL;
806 dev->ibdev.dealloc_fmr = NULL;
807 dev->ibdev.map_phys_fmr = NULL;
808
809 dev->ibdev.attach_mcast = c2_multicast_attach;
810 dev->ibdev.detach_mcast = c2_multicast_detach;
811 dev->ibdev.process_mad = c2_process_mad;
812
813 dev->ibdev.req_notify_cq = c2_arm_cq;
814 dev->ibdev.post_send = c2_post_send;
815 dev->ibdev.post_recv = c2_post_receive;
816
817 dev->ibdev.iwcm = kmalloc(sizeof(*dev->ibdev.iwcm), GFP_KERNEL);
818 if (dev->ibdev.iwcm == NULL) {
819 ret = -ENOMEM;
820 goto out_unregister_netdev;
821 }
822 dev->ibdev.iwcm->add_ref = c2_add_ref;
823 dev->ibdev.iwcm->rem_ref = c2_rem_ref;
824 dev->ibdev.iwcm->get_qp = c2_get_qp;
825 dev->ibdev.iwcm->connect = c2_connect;
826 dev->ibdev.iwcm->accept = c2_accept;
827 dev->ibdev.iwcm->reject = c2_reject;
828 dev->ibdev.iwcm->create_listen = c2_service_create;
829 dev->ibdev.iwcm->destroy_listen = c2_service_destroy;
830
831 ret = ib_register_device(&dev->ibdev, NULL);
832 if (ret)
833 goto out_free_iwcm;
834
835 for (i = 0; i < ARRAY_SIZE(c2_dev_attributes); ++i) {
836 ret = device_create_file(&dev->ibdev.dev,
837 c2_dev_attributes[i]);
838 if (ret)
839 goto out_unregister_ibdev;
840 }
841 goto out;
842
843out_unregister_ibdev:
844 ib_unregister_device(&dev->ibdev);
845out_free_iwcm:
846 kfree(dev->ibdev.iwcm);
847out_unregister_netdev:
848 unregister_netdev(dev->pseudo_netdev);
849out_free_netdev:
850 free_netdev(dev->pseudo_netdev);
851out:
852 pr_debug("%s:%u ret=%d\n", __func__, __LINE__, ret);
853 return ret;
854}
855
856void c2_unregister_device(struct c2_dev *dev)
857{
858 pr_debug("%s:%u\n", __func__, __LINE__);
859 unregister_netdev(dev->pseudo_netdev);
860 free_netdev(dev->pseudo_netdev);
861 ib_unregister_device(&dev->ibdev);
862}
diff --git a/drivers/staging/rdma/amso1100/c2_provider.h b/drivers/staging/rdma/amso1100/c2_provider.h
deleted file mode 100644
index bf189987711f..000000000000
--- a/drivers/staging/rdma/amso1100/c2_provider.h
+++ /dev/null
@@ -1,182 +0,0 @@
1/*
2 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 *
33 */
34
35#ifndef C2_PROVIDER_H
36#define C2_PROVIDER_H
37#include <linux/inetdevice.h>
38
39#include <rdma/ib_verbs.h>
40#include <rdma/ib_pack.h>
41
42#include "c2_mq.h"
43#include <rdma/iw_cm.h>
44
45#define C2_MPT_FLAG_ATOMIC (1 << 14)
46#define C2_MPT_FLAG_REMOTE_WRITE (1 << 13)
47#define C2_MPT_FLAG_REMOTE_READ (1 << 12)
48#define C2_MPT_FLAG_LOCAL_WRITE (1 << 11)
49#define C2_MPT_FLAG_LOCAL_READ (1 << 10)
50
51struct c2_buf_list {
52 void *buf;
53 DEFINE_DMA_UNMAP_ADDR(mapping);
54};
55
56
57/* The user context keeps track of objects allocated for a
58 * particular user-mode client. */
59struct c2_ucontext {
60 struct ib_ucontext ibucontext;
61};
62
63struct c2_mtt;
64
65/* All objects associated with a PD are kept in the
66 * associated user context if present.
67 */
68struct c2_pd {
69 struct ib_pd ibpd;
70 u32 pd_id;
71};
72
73struct c2_mr {
74 struct ib_mr ibmr;
75 struct c2_pd *pd;
76 struct ib_umem *umem;
77};
78
79struct c2_av;
80
81enum c2_ah_type {
82 C2_AH_ON_HCA,
83 C2_AH_PCI_POOL,
84 C2_AH_KMALLOC
85};
86
87struct c2_ah {
88 struct ib_ah ibah;
89};
90
91struct c2_cq {
92 struct ib_cq ibcq;
93 spinlock_t lock;
94 atomic_t refcount;
95 int cqn;
96 int is_kernel;
97 wait_queue_head_t wait;
98
99 u32 adapter_handle;
100 struct c2_mq mq;
101};
102
103struct c2_wq {
104 spinlock_t lock;
105};
106struct iw_cm_id;
107struct c2_qp {
108 struct ib_qp ibqp;
109 struct iw_cm_id *cm_id;
110 spinlock_t lock;
111 atomic_t refcount;
112 wait_queue_head_t wait;
113 int qpn;
114
115 u32 adapter_handle;
116 u32 send_sgl_depth;
117 u32 recv_sgl_depth;
118 u32 rdma_write_sgl_depth;
119 u8 state;
120
121 struct c2_mq sq_mq;
122 struct c2_mq rq_mq;
123};
124
125struct c2_cr_query_attrs {
126 u32 local_addr;
127 u32 remote_addr;
128 u16 local_port;
129 u16 remote_port;
130};
131
132static inline struct c2_pd *to_c2pd(struct ib_pd *ibpd)
133{
134 return container_of(ibpd, struct c2_pd, ibpd);
135}
136
137static inline struct c2_ucontext *to_c2ucontext(struct ib_ucontext *ibucontext)
138{
139 return container_of(ibucontext, struct c2_ucontext, ibucontext);
140}
141
142static inline struct c2_mr *to_c2mr(struct ib_mr *ibmr)
143{
144 return container_of(ibmr, struct c2_mr, ibmr);
145}
146
147
148static inline struct c2_ah *to_c2ah(struct ib_ah *ibah)
149{
150 return container_of(ibah, struct c2_ah, ibah);
151}
152
153static inline struct c2_cq *to_c2cq(struct ib_cq *ibcq)
154{
155 return container_of(ibcq, struct c2_cq, ibcq);
156}
157
158static inline struct c2_qp *to_c2qp(struct ib_qp *ibqp)
159{
160 return container_of(ibqp, struct c2_qp, ibqp);
161}
162
163static inline int is_rnic_addr(struct net_device *netdev, u32 addr)
164{
165 struct in_device *ind;
166 int ret = 0;
167
168 ind = in_dev_get(netdev);
169 if (!ind)
170 return 0;
171
172 for_ifa(ind) {
173 if (ifa->ifa_address == addr) {
174 ret = 1;
175 break;
176 }
177 }
178 endfor_ifa(ind);
179 in_dev_put(ind);
180 return ret;
181}
182#endif /* C2_PROVIDER_H */
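
The to_c2pd()/to_c2qp() helpers in this header are the usual container_of idiom: given a pointer to an embedded member, recover the enclosing structure by subtracting the member's offset. A freestanding version is sketched below; the *_stub types and the local container_of macro are invented for the illustration (the kernel provides its own container_of).

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ib_pd_stub { int dummy; };
struct c2_pd_stub {
	struct ib_pd_stub ibpd;	/* embedded "base class" member */
	unsigned int pd_id;
};

static struct c2_pd_stub *to_c2pd_stub(struct ib_pd_stub *ibpd)
{
	return container_of(ibpd, struct c2_pd_stub, ibpd);
}

int main(void)
{
	struct c2_pd_stub pd = { .pd_id = 7 };
	struct ib_pd_stub *base = &pd.ibpd;

	assert(to_c2pd_stub(base) == &pd);
	printf("pd_id via container_of: %u\n", to_c2pd_stub(base)->pd_id);
	return 0;
}
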
diff --git a/drivers/staging/rdma/amso1100/c2_qp.c b/drivers/staging/rdma/amso1100/c2_qp.c
deleted file mode 100644
index ca364dbe369c..000000000000
--- a/drivers/staging/rdma/amso1100/c2_qp.c
+++ /dev/null
@@ -1,1024 +0,0 @@
1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Cisco Systems. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
5 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
6 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
7 *
8 * This software is available to you under a choice of one of two
9 * licenses. You may choose to be licensed under the terms of the GNU
10 * General Public License (GPL) Version 2, available from the file
11 * COPYING in the main directory of this source tree, or the
12 * OpenIB.org BSD license below:
13 *
14 * Redistribution and use in source and binary forms, with or
15 * without modification, are permitted provided that the following
16 * conditions are met:
17 *
18 * - Redistributions of source code must retain the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer.
21 *
22 * - Redistributions in binary form must reproduce the above
23 * copyright notice, this list of conditions and the following
24 * disclaimer in the documentation and/or other materials
25 * provided with the distribution.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
28 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
29 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
30 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
31 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
32 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
33 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 * SOFTWARE.
35 *
36 */
37
38#include <linux/delay.h>
39#include <linux/gfp.h>
40
41#include "c2.h"
42#include "c2_vq.h"
43#include "c2_status.h"
44
45#define C2_MAX_ORD_PER_QP 128
46#define C2_MAX_IRD_PER_QP 128
47
48#define C2_HINT_MAKE(q_index, hint_count) (((q_index) << 16) | hint_count)
49#define C2_HINT_GET_INDEX(hint) (((hint) & 0x7FFF0000) >> 16)
50#define C2_HINT_GET_COUNT(hint) ((hint) & 0x0000FFFF)
51
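
A quick worked check of the hint packing macros just above (queue index in the upper bits, count in the lower 16, with the index mask stopping short of bit 31): the macros are copied verbatim and the sample values are arbitrary.

#include <assert.h>
#include <stdio.h>

#define C2_HINT_MAKE(q_index, hint_count) (((q_index) << 16) | hint_count)
#define C2_HINT_GET_INDEX(hint) (((hint) & 0x7FFF0000) >> 16)
#define C2_HINT_GET_COUNT(hint) ((hint) & 0x0000FFFF)

int main(void)
{
	unsigned int hint = C2_HINT_MAKE(2, 5);

	assert(hint == 0x00020005);
	assert(C2_HINT_GET_INDEX(hint) == 2);
	assert(C2_HINT_GET_COUNT(hint) == 5);
	printf("hint=0x%08x index=%u count=%u\n", hint,
	       C2_HINT_GET_INDEX(hint), C2_HINT_GET_COUNT(hint));
	return 0;
}
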
52#define NO_SUPPORT -1
53static const u8 c2_opcode[] = {
54 [IB_WR_SEND] = C2_WR_TYPE_SEND,
55 [IB_WR_SEND_WITH_IMM] = NO_SUPPORT,
56 [IB_WR_RDMA_WRITE] = C2_WR_TYPE_RDMA_WRITE,
57 [IB_WR_RDMA_WRITE_WITH_IMM] = NO_SUPPORT,
58 [IB_WR_RDMA_READ] = C2_WR_TYPE_RDMA_READ,
59 [IB_WR_ATOMIC_CMP_AND_SWP] = NO_SUPPORT,
60 [IB_WR_ATOMIC_FETCH_AND_ADD] = NO_SUPPORT,
61};
62
63static int to_c2_state(enum ib_qp_state ib_state)
64{
65 switch (ib_state) {
66 case IB_QPS_RESET:
67 return C2_QP_STATE_IDLE;
68 case IB_QPS_RTS:
69 return C2_QP_STATE_RTS;
70 case IB_QPS_SQD:
71 return C2_QP_STATE_CLOSING;
72 case IB_QPS_SQE:
73 return C2_QP_STATE_CLOSING;
74 case IB_QPS_ERR:
75 return C2_QP_STATE_ERROR;
76 default:
77 return -1;
78 }
79}
80
81static int to_ib_state(enum c2_qp_state c2_state)
82{
83 switch (c2_state) {
84 case C2_QP_STATE_IDLE:
85 return IB_QPS_RESET;
86 case C2_QP_STATE_CONNECTING:
87 return IB_QPS_RTR;
88 case C2_QP_STATE_RTS:
89 return IB_QPS_RTS;
90 case C2_QP_STATE_CLOSING:
91 return IB_QPS_SQD;
92 case C2_QP_STATE_ERROR:
93 return IB_QPS_ERR;
94 case C2_QP_STATE_TERMINATE:
95 return IB_QPS_SQE;
96 default:
97 return -1;
98 }
99}
100
101static const char *to_ib_state_str(int ib_state)
102{
103 static const char *state_str[] = {
104 "IB_QPS_RESET",
105 "IB_QPS_INIT",
106 "IB_QPS_RTR",
107 "IB_QPS_RTS",
108 "IB_QPS_SQD",
109 "IB_QPS_SQE",
110 "IB_QPS_ERR"
111 };
112 if (ib_state < IB_QPS_RESET ||
113 ib_state > IB_QPS_ERR)
114 return "<invalid IB QP state>";
115
116 ib_state -= IB_QPS_RESET;
117 return state_str[ib_state];
118}
119
120void c2_set_qp_state(struct c2_qp *qp, int c2_state)
121{
122 int new_state = to_ib_state(c2_state);
123
124 pr_debug("%s: qp[%p] state modify %s --> %s\n",
125 __func__,
126 qp,
127 to_ib_state_str(qp->state),
128 to_ib_state_str(new_state));
129 qp->state = new_state;
130}
131
132#define C2_QP_NO_ATTR_CHANGE 0xFFFFFFFF
133
134int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
135 struct ib_qp_attr *attr, int attr_mask)
136{
137 struct c2wr_qp_modify_req wr;
138 struct c2wr_qp_modify_rep *reply;
139 struct c2_vq_req *vq_req;
140 unsigned long flags;
141 u8 next_state;
142 int err;
143
144 pr_debug("%s:%d qp=%p, %s --> %s\n",
145 __func__, __LINE__,
146 qp,
147 to_ib_state_str(qp->state),
148 to_ib_state_str(attr->qp_state));
149
150 vq_req = vq_req_alloc(c2dev);
151 if (!vq_req)
152 return -ENOMEM;
153
154 c2_wr_set_id(&wr, CCWR_QP_MODIFY);
155 wr.hdr.context = (unsigned long) vq_req;
156 wr.rnic_handle = c2dev->adapter_handle;
157 wr.qp_handle = qp->adapter_handle;
158 wr.ord = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
159 wr.ird = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
160 wr.sq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
161 wr.rq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
162
163 if (attr_mask & IB_QP_STATE) {
164 /* Ensure the state is valid */
165 if (attr->qp_state < 0 || attr->qp_state > IB_QPS_ERR) {
166 err = -EINVAL;
167 goto bail0;
168 }
169
170 wr.next_qp_state = cpu_to_be32(to_c2_state(attr->qp_state));
171
172 if (attr->qp_state == IB_QPS_ERR) {
173 spin_lock_irqsave(&qp->lock, flags);
174 if (qp->cm_id && qp->state == IB_QPS_RTS) {
175 pr_debug("Generating CLOSE event for QP-->ERR, "
176 "qp=%p, cm_id=%p\n",qp,qp->cm_id);
177 /* Generate a CLOSE event */
178 vq_req->cm_id = qp->cm_id;
179 vq_req->event = IW_CM_EVENT_CLOSE;
180 }
181 spin_unlock_irqrestore(&qp->lock, flags);
182 }
183 next_state = attr->qp_state;
184
185 } else if (attr_mask & IB_QP_CUR_STATE) {
186
187 if (attr->cur_qp_state != IB_QPS_RTR &&
188 attr->cur_qp_state != IB_QPS_RTS &&
189 attr->cur_qp_state != IB_QPS_SQD &&
190 attr->cur_qp_state != IB_QPS_SQE) {
191 err = -EINVAL;
192 goto bail0;
193 } else
194 wr.next_qp_state =
195 cpu_to_be32(to_c2_state(attr->cur_qp_state));
196
197 next_state = attr->cur_qp_state;
198
199 } else {
200 err = 0;
201 goto bail0;
202 }
203
204 /* reference the request struct */
205 vq_req_get(c2dev, vq_req);
206
207 err = vq_send_wr(c2dev, (union c2wr *) & wr);
208 if (err) {
209 vq_req_put(c2dev, vq_req);
210 goto bail0;
211 }
212
213 err = vq_wait_for_reply(c2dev, vq_req);
214 if (err)
215 goto bail0;
216
217 reply = (struct c2wr_qp_modify_rep *) (unsigned long) vq_req->reply_msg;
218 if (!reply) {
219 err = -ENOMEM;
220 goto bail0;
221 }
222
223 err = c2_errno(reply);
224 if (!err)
225 qp->state = next_state;
226#ifdef DEBUG
227 else
228 pr_debug("%s: c2_errno=%d\n", __func__, err);
229#endif
230 /*
231 * If we're going to error and generating the event here, then
232 * we need to remove the reference because there will be no
233 * close event generated by the adapter
234 */
235 spin_lock_irqsave(&qp->lock, flags);
236 if (vq_req->event==IW_CM_EVENT_CLOSE && qp->cm_id) {
237 qp->cm_id->rem_ref(qp->cm_id);
238 qp->cm_id = NULL;
239 }
240 spin_unlock_irqrestore(&qp->lock, flags);
241
242 vq_repbuf_free(c2dev, reply);
243bail0:
244 vq_req_free(c2dev, vq_req);
245
246 pr_debug("%s:%d qp=%p, cur_state=%s\n",
247 __func__, __LINE__,
248 qp,
249 to_ib_state_str(qp->state));
250 return err;
251}
252
253int c2_qp_set_read_limits(struct c2_dev *c2dev, struct c2_qp *qp,
254 int ord, int ird)
255{
256 struct c2wr_qp_modify_req wr;
257 struct c2wr_qp_modify_rep *reply;
258 struct c2_vq_req *vq_req;
259 int err;
260
261 vq_req = vq_req_alloc(c2dev);
262 if (!vq_req)
263 return -ENOMEM;
264
265 c2_wr_set_id(&wr, CCWR_QP_MODIFY);
266 wr.hdr.context = (unsigned long) vq_req;
267 wr.rnic_handle = c2dev->adapter_handle;
268 wr.qp_handle = qp->adapter_handle;
269 wr.ord = cpu_to_be32(ord);
270 wr.ird = cpu_to_be32(ird);
271 wr.sq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
272 wr.rq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
273 wr.next_qp_state = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
274
275 /* reference the request struct */
276 vq_req_get(c2dev, vq_req);
277
278 err = vq_send_wr(c2dev, (union c2wr *) & wr);
279 if (err) {
280 vq_req_put(c2dev, vq_req);
281 goto bail0;
282 }
283
284 err = vq_wait_for_reply(c2dev, vq_req);
285 if (err)
286 goto bail0;
287
288 reply = (struct c2wr_qp_modify_rep *) (unsigned long)
289 vq_req->reply_msg;
290 if (!reply) {
291 err = -ENOMEM;
292 goto bail0;
293 }
294
295 err = c2_errno(reply);
296 vq_repbuf_free(c2dev, reply);
297bail0:
298 vq_req_free(c2dev, vq_req);
299 return err;
300}
301
302static int destroy_qp(struct c2_dev *c2dev, struct c2_qp *qp)
303{
304 struct c2_vq_req *vq_req;
305 struct c2wr_qp_destroy_req wr;
306 struct c2wr_qp_destroy_rep *reply;
307 unsigned long flags;
308 int err;
309
310 /*
311 * Allocate a verb request message
312 */
313 vq_req = vq_req_alloc(c2dev);
314 if (!vq_req) {
315 return -ENOMEM;
316 }
317
318 /*
319 * Initialize the WR
320 */
321 c2_wr_set_id(&wr, CCWR_QP_DESTROY);
322 wr.hdr.context = (unsigned long) vq_req;
323 wr.rnic_handle = c2dev->adapter_handle;
324 wr.qp_handle = qp->adapter_handle;
325
326 /*
327 * reference the request struct. dereferenced in the int handler.
328 */
329 vq_req_get(c2dev, vq_req);
330
331 spin_lock_irqsave(&qp->lock, flags);
332 if (qp->cm_id && qp->state == IB_QPS_RTS) {
333 pr_debug("destroy_qp: generating CLOSE event for QP-->ERR, "
334			 "qp=%p, cm_id=%p\n", qp, qp->cm_id);
335		/* Generate a CLOSE event */
336 vq_req->qp = qp;
337 vq_req->cm_id = qp->cm_id;
338 vq_req->event = IW_CM_EVENT_CLOSE;
339 }
340 spin_unlock_irqrestore(&qp->lock, flags);
341
342 /*
343 * Send WR to adapter
344 */
345 err = vq_send_wr(c2dev, (union c2wr *) & wr);
346 if (err) {
347 vq_req_put(c2dev, vq_req);
348 goto bail0;
349 }
350
351 /*
352 * Wait for reply from adapter
353 */
354 err = vq_wait_for_reply(c2dev, vq_req);
355 if (err) {
356 goto bail0;
357 }
358
359 /*
360 * Process reply
361 */
362 reply = (struct c2wr_qp_destroy_rep *) (unsigned long) (vq_req->reply_msg);
363 if (!reply) {
364 err = -ENOMEM;
365 goto bail0;
366 }
367
368 spin_lock_irqsave(&qp->lock, flags);
369 if (qp->cm_id) {
370 qp->cm_id->rem_ref(qp->cm_id);
371 qp->cm_id = NULL;
372 }
373 spin_unlock_irqrestore(&qp->lock, flags);
374
375 vq_repbuf_free(c2dev, reply);
376bail0:
377 vq_req_free(c2dev, vq_req);
378 return err;
379}
380
381static int c2_alloc_qpn(struct c2_dev *c2dev, struct c2_qp *qp)
382{
383 int ret;
384
385 idr_preload(GFP_KERNEL);
386 spin_lock_irq(&c2dev->qp_table.lock);
387
388 ret = idr_alloc_cyclic(&c2dev->qp_table.idr, qp, 0, 0, GFP_NOWAIT);
389 if (ret >= 0)
390 qp->qpn = ret;
391
392 spin_unlock_irq(&c2dev->qp_table.lock);
393 idr_preload_end();
394 return ret < 0 ? ret : 0;
395}
396
397static void c2_free_qpn(struct c2_dev *c2dev, int qpn)
398{
399 spin_lock_irq(&c2dev->qp_table.lock);
400 idr_remove(&c2dev->qp_table.idr, qpn);
401 spin_unlock_irq(&c2dev->qp_table.lock);
402}
403
404struct c2_qp *c2_find_qpn(struct c2_dev *c2dev, int qpn)
405{
406 unsigned long flags;
407 struct c2_qp *qp;
408
409 spin_lock_irqsave(&c2dev->qp_table.lock, flags);
410 qp = idr_find(&c2dev->qp_table.idr, qpn);
411 spin_unlock_irqrestore(&c2dev->qp_table.lock, flags);
412 return qp;
413}
414
415int c2_alloc_qp(struct c2_dev *c2dev,
416 struct c2_pd *pd,
417 struct ib_qp_init_attr *qp_attrs, struct c2_qp *qp)
418{
419 struct c2wr_qp_create_req wr;
420 struct c2wr_qp_create_rep *reply;
421 struct c2_vq_req *vq_req;
422 struct c2_cq *send_cq = to_c2cq(qp_attrs->send_cq);
423 struct c2_cq *recv_cq = to_c2cq(qp_attrs->recv_cq);
424 unsigned long peer_pa;
425 u32 q_size, msg_size, mmap_size;
426 void __iomem *mmap;
427 int err;
428
429 err = c2_alloc_qpn(c2dev, qp);
430 if (err)
431 return err;
432 qp->ibqp.qp_num = qp->qpn;
433 qp->ibqp.qp_type = IB_QPT_RC;
434
435 /* Allocate the SQ and RQ shared pointers */
436 qp->sq_mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
437 &qp->sq_mq.shared_dma, GFP_KERNEL);
438 if (!qp->sq_mq.shared) {
439 err = -ENOMEM;
440 goto bail0;
441 }
442
443 qp->rq_mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
444 &qp->rq_mq.shared_dma, GFP_KERNEL);
445 if (!qp->rq_mq.shared) {
446 err = -ENOMEM;
447 goto bail1;
448 }
449
450 /* Allocate the verbs request */
451 vq_req = vq_req_alloc(c2dev);
452 if (vq_req == NULL) {
453 err = -ENOMEM;
454 goto bail2;
455 }
456
457 /* Initialize the work request */
458 memset(&wr, 0, sizeof(wr));
459 c2_wr_set_id(&wr, CCWR_QP_CREATE);
460 wr.hdr.context = (unsigned long) vq_req;
461 wr.rnic_handle = c2dev->adapter_handle;
462 wr.sq_cq_handle = send_cq->adapter_handle;
463 wr.rq_cq_handle = recv_cq->adapter_handle;
464 wr.sq_depth = cpu_to_be32(qp_attrs->cap.max_send_wr + 1);
465 wr.rq_depth = cpu_to_be32(qp_attrs->cap.max_recv_wr + 1);
466 wr.srq_handle = 0;
467 wr.flags = cpu_to_be32(QP_RDMA_READ | QP_RDMA_WRITE | QP_MW_BIND |
468 QP_ZERO_STAG | QP_RDMA_READ_RESPONSE);
469 wr.send_sgl_depth = cpu_to_be32(qp_attrs->cap.max_send_sge);
470 wr.recv_sgl_depth = cpu_to_be32(qp_attrs->cap.max_recv_sge);
471 wr.rdma_write_sgl_depth = cpu_to_be32(qp_attrs->cap.max_send_sge);
472 wr.shared_sq_ht = cpu_to_be64(qp->sq_mq.shared_dma);
473 wr.shared_rq_ht = cpu_to_be64(qp->rq_mq.shared_dma);
474 wr.ord = cpu_to_be32(C2_MAX_ORD_PER_QP);
475 wr.ird = cpu_to_be32(C2_MAX_IRD_PER_QP);
476 wr.pd_id = pd->pd_id;
477 wr.user_context = (unsigned long) qp;
478
479 vq_req_get(c2dev, vq_req);
480
481 /* Send the WR to the adapter */
482 err = vq_send_wr(c2dev, (union c2wr *) & wr);
483 if (err) {
484 vq_req_put(c2dev, vq_req);
485 goto bail3;
486 }
487
488 /* Wait for the verb reply */
489 err = vq_wait_for_reply(c2dev, vq_req);
490 if (err) {
491 goto bail3;
492 }
493
494 /* Process the reply */
495 reply = (struct c2wr_qp_create_rep *) (unsigned long) (vq_req->reply_msg);
496 if (!reply) {
497 err = -ENOMEM;
498 goto bail3;
499 }
500
501 if ((err = c2_wr_get_result(reply)) != 0) {
502 goto bail4;
503 }
504
505 /* Fill in the kernel QP struct */
506 atomic_set(&qp->refcount, 1);
507 qp->adapter_handle = reply->qp_handle;
508 qp->state = IB_QPS_RESET;
509 qp->send_sgl_depth = qp_attrs->cap.max_send_sge;
510 qp->rdma_write_sgl_depth = qp_attrs->cap.max_send_sge;
511 qp->recv_sgl_depth = qp_attrs->cap.max_recv_sge;
512 init_waitqueue_head(&qp->wait);
513
514 /* Initialize the SQ MQ */
515 q_size = be32_to_cpu(reply->sq_depth);
516 msg_size = be32_to_cpu(reply->sq_msg_size);
517 peer_pa = c2dev->pa + be32_to_cpu(reply->sq_mq_start);
518 mmap_size = PAGE_ALIGN(sizeof(struct c2_mq_shared) + msg_size * q_size);
519 mmap = ioremap_nocache(peer_pa, mmap_size);
520 if (!mmap) {
521 err = -ENOMEM;
522 goto bail5;
523 }
524
525 c2_mq_req_init(&qp->sq_mq,
526 be32_to_cpu(reply->sq_mq_index),
527 q_size,
528 msg_size,
529 mmap + sizeof(struct c2_mq_shared), /* pool start */
530 mmap, /* peer */
531 C2_MQ_ADAPTER_TARGET);
532
533 /* Initialize the RQ mq */
534 q_size = be32_to_cpu(reply->rq_depth);
535 msg_size = be32_to_cpu(reply->rq_msg_size);
536 peer_pa = c2dev->pa + be32_to_cpu(reply->rq_mq_start);
537 mmap_size = PAGE_ALIGN(sizeof(struct c2_mq_shared) + msg_size * q_size);
538 mmap = ioremap_nocache(peer_pa, mmap_size);
539 if (!mmap) {
540 err = -ENOMEM;
541 goto bail6;
542 }
543
544 c2_mq_req_init(&qp->rq_mq,
545 be32_to_cpu(reply->rq_mq_index),
546 q_size,
547 msg_size,
548 mmap + sizeof(struct c2_mq_shared), /* pool start */
549 mmap, /* peer */
550 C2_MQ_ADAPTER_TARGET);
551
552 vq_repbuf_free(c2dev, reply);
553 vq_req_free(c2dev, vq_req);
554
555 return 0;
556
557bail6:
558 iounmap(qp->sq_mq.peer);
559bail5:
560 destroy_qp(c2dev, qp);
561bail4:
562 vq_repbuf_free(c2dev, reply);
563bail3:
564 vq_req_free(c2dev, vq_req);
565bail2:
566 c2_free_mqsp(qp->rq_mq.shared);
567bail1:
568 c2_free_mqsp(qp->sq_mq.shared);
569bail0:
570 c2_free_qpn(c2dev, qp->qpn);
571 return err;
572}
573
574static inline void c2_lock_cqs(struct c2_cq *send_cq, struct c2_cq *recv_cq)
575{
576 if (send_cq == recv_cq)
577 spin_lock_irq(&send_cq->lock);
578 else if (send_cq > recv_cq) {
579 spin_lock_irq(&send_cq->lock);
580 spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
581 } else {
582 spin_lock_irq(&recv_cq->lock);
583 spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
584 }
585}
586
587static inline void c2_unlock_cqs(struct c2_cq *send_cq, struct c2_cq *recv_cq)
588{
589 if (send_cq == recv_cq)
590 spin_unlock_irq(&send_cq->lock);
591 else if (send_cq > recv_cq) {
592 spin_unlock(&recv_cq->lock);
593 spin_unlock_irq(&send_cq->lock);
594 } else {
595 spin_unlock(&send_cq->lock);
596 spin_unlock_irq(&recv_cq->lock);
597 }
598}
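
c2_lock_cqs()/c2_unlock_cqs() above avoid ABBA deadlocks by always taking the two CQ locks in a fixed order, chosen here by comparing the CQ pointers, so any two paths that lock the same pair of CQs acquire them in the same sequence. A minimal userspace sketch of the same idea (not part of this driver; pthread mutexes, hypothetical names, and the common if formally non-portable trick of ordering by pointer value):

#include <pthread.h>

/* Lock two mutexes in a globally consistent (address) order so two
 * callers locking the same pair can never deadlock against each other. */
static void lock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
	if (a == b) {
		pthread_mutex_lock(a);
	} else if (a < b) {
		pthread_mutex_lock(a);
		pthread_mutex_lock(b);
	} else {
		pthread_mutex_lock(b);
		pthread_mutex_lock(a);
	}
}

static void unlock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
	if (a == b) {
		pthread_mutex_unlock(a);
	} else {
		/* Unlock order does not affect correctness. */
		pthread_mutex_unlock(a);
		pthread_mutex_unlock(b);
	}
}
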
599
600void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp)
601{
602 struct c2_cq *send_cq;
603 struct c2_cq *recv_cq;
604
605 send_cq = to_c2cq(qp->ibqp.send_cq);
606 recv_cq = to_c2cq(qp->ibqp.recv_cq);
607
608 /*
609 * Lock CQs here, so that CQ polling code can do QP lookup
610 * without taking a lock.
611 */
612 c2_lock_cqs(send_cq, recv_cq);
613 c2_free_qpn(c2dev, qp->qpn);
614 c2_unlock_cqs(send_cq, recv_cq);
615
616 /*
617 * Destroy qp in the rnic...
618 */
619 destroy_qp(c2dev, qp);
620
621 /*
622 * Mark any unreaped CQEs as null and void.
623 */
624 c2_cq_clean(c2dev, qp, send_cq->cqn);
625 if (send_cq != recv_cq)
626 c2_cq_clean(c2dev, qp, recv_cq->cqn);
627 /*
628 * Unmap the MQs and return the shared pointers
629 * to the message pool.
630 */
631 iounmap(qp->sq_mq.peer);
632 iounmap(qp->rq_mq.peer);
633 c2_free_mqsp(qp->sq_mq.shared);
634 c2_free_mqsp(qp->rq_mq.shared);
635
636 atomic_dec(&qp->refcount);
637 wait_event(qp->wait, !atomic_read(&qp->refcount));
638}
639
640/*
641 * Function: move_sgl
642 *
643 * Description:
644 * Move an SGL from the user's work request struct into a CCIL Work Request
645 * message, swapping to WR byte order and ensuring the total length doesn't
646 * overflow.
647 *
648 * IN:
649 * dst - ptr to CCIL Work Request message SGL memory.
650 * src - ptr to the consumer's SGL memory.
651 *
652 * OUT: none
653 *
654 * Return:
655 * CCIL status codes.
656 */
657static int
658move_sgl(struct c2_data_addr * dst, struct ib_sge *src, int count, u32 * p_len,
659 u8 * actual_count)
660{
661 u32 tot = 0; /* running total */
662 u8 acount = 0; /* running total non-0 len sge's */
663
664 while (count > 0) {
665 /*
666 * If the addition of this SGE causes the
667 * total SGL length to exceed 2^32-1, then
668 * fail-n-bail.
669 *
670 * If the current total plus the next element length
671 * wraps, then it will go negative and be less than the
672 * current total...
673 */
674 if ((tot + src->length) < tot) {
675 return -EINVAL;
676 }
677 /*
678 * Bug: 1456 (as well as 1498 & 1643)
679 * Skip over any sge's supplied with len=0
680 */
681 if (src->length) {
682 tot += src->length;
683 dst->stag = cpu_to_be32(src->lkey);
684 dst->to = cpu_to_be64(src->addr);
685 dst->length = cpu_to_be32(src->length);
686 dst++;
687 acount++;
688 }
689 src++;
690 count--;
691 }
692
693 if (acount == 0) {
694 /*
695 * Bug: 1476 (as well as 1498, 1456 and 1643)
696 * Setup the SGL in the WR to make it easier for the RNIC.
697 * This way, the FW doesn't have to deal with special cases.
698 * Setting length=0 should be sufficient.
699 */
700 dst->stag = 0;
701 dst->to = 0;
702 dst->length = 0;
703 }
704
705 *p_len = tot;
706 *actual_count = acount;
707 return 0;
708}
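
The `(tot + src->length) < tot` test above is the standard unsigned wraparound check: u32 arithmetic is modulo 2^32, so the sum is smaller than either operand exactly when the addition overflowed. A self-contained sketch of the same check, with hypothetical names:

#include <stdint.h>
#include <stdbool.h>

/* Returns true if a + b would wrap past UINT32_MAX; otherwise stores
 * the sum in *sum and returns false. */
static bool u32_add_overflows(uint32_t a, uint32_t b, uint32_t *sum)
{
	if (a + b < a)		/* modulo-2^32 sum wrapped around */
		return true;
	*sum = a + b;
	return false;
}
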
709
710/*
711 * Function: c2_activity (private function)
712 *
713 * Description:
714 * Post an mq index to the host->adapter activity fifo.
715 *
716 * IN:
717 * c2dev - ptr to c2dev structure
718 * mq_index - mq index to post
719 * shared - value most recently written to shared
720 *
721 * OUT:
722 *
723 * Return:
724 * none
725 */
726static inline void c2_activity(struct c2_dev *c2dev, u32 mq_index, u16 shared)
727{
728 /*
729 * First read the register to see if the FIFO is full, and if so,
730 * spin until it's not. This isn't perfect -- there is no
731 * synchronization among the clients of the register, but in
732 * practice it prevents multiple CPUs from hammering the bus
733 * with PCI RETRY. Note that when this does happen, the card
734 * cannot get on the bus and the card and system hang in a
735 * deadlock -- thus the need for this code. [TOT]
736 */
737 while (readl(c2dev->regs + PCI_BAR0_ADAPTER_HINT) & 0x80000000)
738 udelay(10);
739
740 __raw_writel(C2_HINT_MAKE(mq_index, shared),
741 c2dev->regs + PCI_BAR0_ADAPTER_HINT);
742}
743
744/*
745 * Function: qp_wr_post
746 *
747 * Description:
748 * This in-line function allocates an MQ msg, then moves the host-copy of
749 * the completed WR into msg. Then it posts the message.
750 *
751 * IN:
752 * q - ptr to user MQ.
753 * wr - ptr to host-copy of the WR.
754 * qp - ptr to user qp
755 * size - Number of bytes to post. Assumed to be divisible by 4.
756 *
757 * OUT: none
758 *
759 * Return:
760 * CCIL status codes.
761 */
762static int qp_wr_post(struct c2_mq *q, union c2wr * wr, struct c2_qp *qp, u32 size)
763{
764 union c2wr *msg;
765
766 msg = c2_mq_alloc(q);
767 if (msg == NULL) {
768 return -EINVAL;
769 }
770#ifdef CCMSGMAGIC
771 ((c2wr_hdr_t *) wr)->magic = cpu_to_be32(CCWR_MAGIC);
772#endif
773
774 /*
775 * Since all header fields in the WR are the same as the
776 * CQE, set the following so the adapter need not.
777 */
778 c2_wr_set_result(wr, CCERR_PENDING);
779
780 /*
781 * Copy the wr down to the adapter
782 */
783 memcpy((void *) msg, (void *) wr, size);
784
785 c2_mq_produce(q);
786 return 0;
787}
788
789
790int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
791 struct ib_send_wr **bad_wr)
792{
793 struct c2_dev *c2dev = to_c2dev(ibqp->device);
794 struct c2_qp *qp = to_c2qp(ibqp);
795 union c2wr wr;
796 unsigned long lock_flags;
797 int err = 0;
798
799 u32 flags;
800 u32 tot_len;
801 u8 actual_sge_count;
802 u32 msg_size;
803
804 if (qp->state > IB_QPS_RTS) {
805 err = -EINVAL;
806 goto out;
807 }
808
809 while (ib_wr) {
810
811 flags = 0;
812 wr.sqwr.sq_hdr.user_hdr.hdr.context = ib_wr->wr_id;
813 if (ib_wr->send_flags & IB_SEND_SIGNALED) {
814 flags |= SQ_SIGNALED;
815 }
816
817 switch (ib_wr->opcode) {
818 case IB_WR_SEND:
819 case IB_WR_SEND_WITH_INV:
820 if (ib_wr->opcode == IB_WR_SEND) {
821 if (ib_wr->send_flags & IB_SEND_SOLICITED)
822 c2_wr_set_id(&wr, C2_WR_TYPE_SEND_SE);
823 else
824 c2_wr_set_id(&wr, C2_WR_TYPE_SEND);
825 wr.sqwr.send.remote_stag = 0;
826 } else {
827 if (ib_wr->send_flags & IB_SEND_SOLICITED)
828 c2_wr_set_id(&wr, C2_WR_TYPE_SEND_SE_INV);
829 else
830 c2_wr_set_id(&wr, C2_WR_TYPE_SEND_INV);
831 wr.sqwr.send.remote_stag =
832 cpu_to_be32(ib_wr->ex.invalidate_rkey);
833 }
834
835 msg_size = sizeof(struct c2wr_send_req) +
836 sizeof(struct c2_data_addr) * ib_wr->num_sge;
837 if (ib_wr->num_sge > qp->send_sgl_depth) {
838 err = -EINVAL;
839 break;
840 }
841 if (ib_wr->send_flags & IB_SEND_FENCE) {
842 flags |= SQ_READ_FENCE;
843 }
844 err = move_sgl((struct c2_data_addr *) & (wr.sqwr.send.data),
845 ib_wr->sg_list,
846 ib_wr->num_sge,
847 &tot_len, &actual_sge_count);
848 wr.sqwr.send.sge_len = cpu_to_be32(tot_len);
849 c2_wr_set_sge_count(&wr, actual_sge_count);
850 break;
851 case IB_WR_RDMA_WRITE:
852 c2_wr_set_id(&wr, C2_WR_TYPE_RDMA_WRITE);
853 msg_size = sizeof(struct c2wr_rdma_write_req) +
854 (sizeof(struct c2_data_addr) * ib_wr->num_sge);
855 if (ib_wr->num_sge > qp->rdma_write_sgl_depth) {
856 err = -EINVAL;
857 break;
858 }
859 if (ib_wr->send_flags & IB_SEND_FENCE) {
860 flags |= SQ_READ_FENCE;
861 }
862 wr.sqwr.rdma_write.remote_stag =
863 cpu_to_be32(rdma_wr(ib_wr)->rkey);
864 wr.sqwr.rdma_write.remote_to =
865 cpu_to_be64(rdma_wr(ib_wr)->remote_addr);
866 err = move_sgl((struct c2_data_addr *)
867 & (wr.sqwr.rdma_write.data),
868 ib_wr->sg_list,
869 ib_wr->num_sge,
870 &tot_len, &actual_sge_count);
871 wr.sqwr.rdma_write.sge_len = cpu_to_be32(tot_len);
872 c2_wr_set_sge_count(&wr, actual_sge_count);
873 break;
874 case IB_WR_RDMA_READ:
875 c2_wr_set_id(&wr, C2_WR_TYPE_RDMA_READ);
876 msg_size = sizeof(struct c2wr_rdma_read_req);
877
878			/* iWARP only supports 1 SGE for RDMA reads */
879 if (ib_wr->num_sge > 1) {
880 err = -EINVAL;
881 break;
882 }
883
884 /*
885 * Move the local and remote stag/to/len into the WR.
886 */
887 wr.sqwr.rdma_read.local_stag =
888 cpu_to_be32(ib_wr->sg_list->lkey);
889 wr.sqwr.rdma_read.local_to =
890 cpu_to_be64(ib_wr->sg_list->addr);
891 wr.sqwr.rdma_read.remote_stag =
892 cpu_to_be32(rdma_wr(ib_wr)->rkey);
893 wr.sqwr.rdma_read.remote_to =
894 cpu_to_be64(rdma_wr(ib_wr)->remote_addr);
895 wr.sqwr.rdma_read.length =
896 cpu_to_be32(ib_wr->sg_list->length);
897 break;
898 default:
899 /* error */
900 msg_size = 0;
901 err = -EINVAL;
902 break;
903 }
904
905 /*
906 * If we had an error on the last wr build, then
907 * break out. Possible errors include bogus WR
908 * type, and a bogus SGL length...
909 */
910 if (err) {
911 break;
912 }
913
914 /*
915 * Store flags
916 */
917 c2_wr_set_flags(&wr, flags);
918
919 /*
920 * Post the puppy!
921 */
922 spin_lock_irqsave(&qp->lock, lock_flags);
923 err = qp_wr_post(&qp->sq_mq, &wr, qp, msg_size);
924 if (err) {
925 spin_unlock_irqrestore(&qp->lock, lock_flags);
926 break;
927 }
928
929 /*
930 * Enqueue mq index to activity FIFO.
931 */
932 c2_activity(c2dev, qp->sq_mq.index, qp->sq_mq.hint_count);
933 spin_unlock_irqrestore(&qp->lock, lock_flags);
934
935 ib_wr = ib_wr->next;
936 }
937
938out:
939 if (err)
940 *bad_wr = ib_wr;
941 return err;
942}
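
For context, a kernel consumer would normally reach c2_post_send() through the generic verbs entry point rather than calling it directly. The following is a hypothetical sketch (not from this driver) of posting one signaled, single-SGE send on an already-connected QP; `dma_addr`, `len` and `lkey` are assumed to describe a DMA-mapped, registered buffer, and all setup and error handling is omitted:

#include <rdma/ib_verbs.h>

static int post_one_send(struct ib_qp *qp, u64 dma_addr, u32 len, u32 lkey)
{
	struct ib_sge sge = {
		.addr   = dma_addr,	/* DMA address of the payload */
		.length = len,
		.lkey   = lkey,		/* local key of the registered MR */
	};
	struct ib_send_wr wr = {
		.wr_id      = 1,	/* opaque cookie returned in the CQE */
		.sg_list    = &sge,
		.num_sge    = 1,
		.opcode     = IB_WR_SEND,
		.send_flags = IB_SEND_SIGNALED,
	};
	struct ib_send_wr *bad_wr;

	/* Dispatches to the provider's post_send, e.g. c2_post_send(). */
	return ib_post_send(qp, &wr, &bad_wr);
}
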
943
944int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
945 struct ib_recv_wr **bad_wr)
946{
947 struct c2_dev *c2dev = to_c2dev(ibqp->device);
948 struct c2_qp *qp = to_c2qp(ibqp);
949 union c2wr wr;
950 unsigned long lock_flags;
951 int err = 0;
952
953 if (qp->state > IB_QPS_RTS) {
954 err = -EINVAL;
955 goto out;
956 }
957
958 /*
959 * Try and post each work request
960 */
961 while (ib_wr) {
962 u32 tot_len;
963 u8 actual_sge_count;
964
965 if (ib_wr->num_sge > qp->recv_sgl_depth) {
966 err = -EINVAL;
967 break;
968 }
969
970 /*
971 * Create local host-copy of the WR
972 */
973 wr.rqwr.rq_hdr.user_hdr.hdr.context = ib_wr->wr_id;
974 c2_wr_set_id(&wr, CCWR_RECV);
975 c2_wr_set_flags(&wr, 0);
976
977 /* sge_count is limited to eight bits. */
978 BUG_ON(ib_wr->num_sge >= 256);
979 err = move_sgl((struct c2_data_addr *) & (wr.rqwr.data),
980 ib_wr->sg_list,
981 ib_wr->num_sge, &tot_len, &actual_sge_count);
982 c2_wr_set_sge_count(&wr, actual_sge_count);
983
984 /*
985 * If we had an error on the last wr build, then
986 * break out. Possible errors include bogus WR
987 * type, and a bogus SGL length...
988 */
989 if (err) {
990 break;
991 }
992
993 spin_lock_irqsave(&qp->lock, lock_flags);
994 err = qp_wr_post(&qp->rq_mq, &wr, qp, qp->rq_mq.msg_size);
995 if (err) {
996 spin_unlock_irqrestore(&qp->lock, lock_flags);
997 break;
998 }
999
1000 /*
1001 * Enqueue mq index to activity FIFO
1002 */
1003 c2_activity(c2dev, qp->rq_mq.index, qp->rq_mq.hint_count);
1004 spin_unlock_irqrestore(&qp->lock, lock_flags);
1005
1006 ib_wr = ib_wr->next;
1007 }
1008
1009out:
1010 if (err)
1011 *bad_wr = ib_wr;
1012 return err;
1013}
1014
1015void c2_init_qp_table(struct c2_dev *c2dev)
1016{
1017 spin_lock_init(&c2dev->qp_table.lock);
1018 idr_init(&c2dev->qp_table.idr);
1019}
1020
1021void c2_cleanup_qp_table(struct c2_dev *c2dev)
1022{
1023 idr_destroy(&c2dev->qp_table.idr);
1024}
diff --git a/drivers/staging/rdma/amso1100/c2_rnic.c b/drivers/staging/rdma/amso1100/c2_rnic.c
deleted file mode 100644
index 5e65c6d07ca4..000000000000
--- a/drivers/staging/rdma/amso1100/c2_rnic.c
+++ /dev/null
@@ -1,652 +0,0 @@
1/*
2 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 *
33 */
34
35
36#include <linux/module.h>
37#include <linux/moduleparam.h>
38#include <linux/pci.h>
39#include <linux/netdevice.h>
40#include <linux/etherdevice.h>
41#include <linux/delay.h>
42#include <linux/ethtool.h>
43#include <linux/mii.h>
44#include <linux/if_vlan.h>
45#include <linux/crc32.h>
46#include <linux/in.h>
47#include <linux/ip.h>
48#include <linux/tcp.h>
49#include <linux/init.h>
50#include <linux/dma-mapping.h>
51#include <linux/mm.h>
52#include <linux/inet.h>
53#include <linux/vmalloc.h>
54#include <linux/slab.h>
55
56#include <linux/route.h>
57
58#include <asm/io.h>
59#include <asm/irq.h>
60#include <asm/byteorder.h>
61#include <rdma/ib_smi.h>
62#include "c2.h"
63#include "c2_vq.h"
64
65/* Device capabilities */
66#define C2_MIN_PAGESIZE 1024
67
68#define C2_MAX_MRS 32768
69#define C2_MAX_QPS 16000
70#define C2_MAX_WQE_SZ 256
71#define C2_MAX_QP_WR ((128*1024)/C2_MAX_WQE_SZ)
72#define C2_MAX_SGES 4
73#define C2_MAX_SGE_RD 1
74#define C2_MAX_CQS 32768
75#define C2_MAX_CQES 4096
76#define C2_MAX_PDS 16384
77
78/*
79 * Send the adapter INIT message to the amso1100
80 */
81static int c2_adapter_init(struct c2_dev *c2dev)
82{
83 struct c2wr_init_req wr;
84
85 memset(&wr, 0, sizeof(wr));
86 c2_wr_set_id(&wr, CCWR_INIT);
87 wr.hdr.context = 0;
88 wr.hint_count = cpu_to_be64(c2dev->hint_count_dma);
89 wr.q0_host_shared = cpu_to_be64(c2dev->req_vq.shared_dma);
90 wr.q1_host_shared = cpu_to_be64(c2dev->rep_vq.shared_dma);
91 wr.q1_host_msg_pool = cpu_to_be64(c2dev->rep_vq.host_dma);
92 wr.q2_host_shared = cpu_to_be64(c2dev->aeq.shared_dma);
93 wr.q2_host_msg_pool = cpu_to_be64(c2dev->aeq.host_dma);
94
95 /* Post the init message */
96 return vq_send_wr(c2dev, (union c2wr *) & wr);
97}
98
99/*
100 * Send the adapter TERM message to the amso1100
101 */
102static void c2_adapter_term(struct c2_dev *c2dev)
103{
104 struct c2wr_init_req wr;
105
106 memset(&wr, 0, sizeof(wr));
107 c2_wr_set_id(&wr, CCWR_TERM);
108 wr.hdr.context = 0;
109
110	/* Post the TERM message */
111 vq_send_wr(c2dev, (union c2wr *) & wr);
112 c2dev->init = 0;
113
114 return;
115}
116
117/*
118 * Query the adapter
119 */
120static int c2_rnic_query(struct c2_dev *c2dev, struct ib_device_attr *props)
121{
122 struct c2_vq_req *vq_req;
123 struct c2wr_rnic_query_req wr;
124 struct c2wr_rnic_query_rep *reply;
125 int err;
126
127 vq_req = vq_req_alloc(c2dev);
128 if (!vq_req)
129 return -ENOMEM;
130
131 c2_wr_set_id(&wr, CCWR_RNIC_QUERY);
132 wr.hdr.context = (unsigned long) vq_req;
133 wr.rnic_handle = c2dev->adapter_handle;
134
135 vq_req_get(c2dev, vq_req);
136
137 err = vq_send_wr(c2dev, (union c2wr *) &wr);
138 if (err) {
139 vq_req_put(c2dev, vq_req);
140 goto bail1;
141 }
142
143 err = vq_wait_for_reply(c2dev, vq_req);
144 if (err)
145 goto bail1;
146
147 reply =
148 (struct c2wr_rnic_query_rep *) (unsigned long) (vq_req->reply_msg);
149 if (!reply)
150 err = -ENOMEM;
151 else
152 err = c2_errno(reply);
153 if (err)
154 goto bail2;
155
156 props->fw_ver =
157 ((u64)be32_to_cpu(reply->fw_ver_major) << 32) |
158 ((be32_to_cpu(reply->fw_ver_minor) & 0xFFFF) << 16) |
159 (be32_to_cpu(reply->fw_ver_patch) & 0xFFFF);
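	/*
	 * Illustrative example (made-up version numbers): firmware 3.1.2
	 * packs as ((u64)3 << 32) | (1 << 16) | 2 == 0x0000000300010002.
	 */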
160 memcpy(&props->sys_image_guid, c2dev->netdev->dev_addr, 6);
161 props->max_mr_size = 0xFFFFFFFF;
162 props->page_size_cap = ~(C2_MIN_PAGESIZE-1);
163 props->vendor_id = be32_to_cpu(reply->vendor_id);
164 props->vendor_part_id = be32_to_cpu(reply->part_number);
165 props->hw_ver = be32_to_cpu(reply->hw_version);
166 props->max_qp = be32_to_cpu(reply->max_qps);
167 props->max_qp_wr = be32_to_cpu(reply->max_qp_depth);
168 props->device_cap_flags = c2dev->device_cap_flags;
169 props->max_sge = C2_MAX_SGES;
170 props->max_sge_rd = C2_MAX_SGE_RD;
171 props->max_cq = be32_to_cpu(reply->max_cqs);
172 props->max_cqe = be32_to_cpu(reply->max_cq_depth);
173 props->max_mr = be32_to_cpu(reply->max_mrs);
174 props->max_pd = be32_to_cpu(reply->max_pds);
175 props->max_qp_rd_atom = be32_to_cpu(reply->max_qp_ird);
176 props->max_ee_rd_atom = 0;
177 props->max_res_rd_atom = be32_to_cpu(reply->max_global_ird);
178 props->max_qp_init_rd_atom = be32_to_cpu(reply->max_qp_ord);
179 props->max_ee_init_rd_atom = 0;
180 props->atomic_cap = IB_ATOMIC_NONE;
181 props->max_ee = 0;
182 props->max_rdd = 0;
183 props->max_mw = be32_to_cpu(reply->max_mws);
184 props->max_raw_ipv6_qp = 0;
185 props->max_raw_ethy_qp = 0;
186 props->max_mcast_grp = 0;
187 props->max_mcast_qp_attach = 0;
188 props->max_total_mcast_qp_attach = 0;
189 props->max_ah = 0;
190 props->max_fmr = 0;
191 props->max_map_per_fmr = 0;
192 props->max_srq = 0;
193 props->max_srq_wr = 0;
194 props->max_srq_sge = 0;
195 props->max_pkeys = 0;
196 props->local_ca_ack_delay = 0;
197
198 bail2:
199 vq_repbuf_free(c2dev, reply);
200
201 bail1:
202 vq_req_free(c2dev, vq_req);
203 return err;
204}
205
206/*
207 * Add an IP address to the RNIC interface
208 */
209int c2_add_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask)
210{
211 struct c2_vq_req *vq_req;
212 struct c2wr_rnic_setconfig_req *wr;
213 struct c2wr_rnic_setconfig_rep *reply;
214 struct c2_netaddr netaddr;
215 int err, len;
216
217 vq_req = vq_req_alloc(c2dev);
218 if (!vq_req)
219 return -ENOMEM;
220
221 len = sizeof(struct c2_netaddr);
222 wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
223 if (!wr) {
224 err = -ENOMEM;
225 goto bail0;
226 }
227
228 c2_wr_set_id(wr, CCWR_RNIC_SETCONFIG);
229 wr->hdr.context = (unsigned long) vq_req;
230 wr->rnic_handle = c2dev->adapter_handle;
231 wr->option = cpu_to_be32(C2_CFG_ADD_ADDR);
232
233 netaddr.ip_addr = inaddr;
234 netaddr.netmask = inmask;
235 netaddr.mtu = 0;
236
237 memcpy(wr->data, &netaddr, len);
238
239 vq_req_get(c2dev, vq_req);
240
241 err = vq_send_wr(c2dev, (union c2wr *) wr);
242 if (err) {
243 vq_req_put(c2dev, vq_req);
244 goto bail1;
245 }
246
247 err = vq_wait_for_reply(c2dev, vq_req);
248 if (err)
249 goto bail1;
250
251 reply =
252 (struct c2wr_rnic_setconfig_rep *) (unsigned long) (vq_req->reply_msg);
253 if (!reply) {
254 err = -ENOMEM;
255 goto bail1;
256 }
257
258 err = c2_errno(reply);
259 vq_repbuf_free(c2dev, reply);
260
261bail1:
262 kfree(wr);
263bail0:
264 vq_req_free(c2dev, vq_req);
265 return err;
266}
267
268/*
269 * Delete an IP address from the RNIC interface
270 */
271int c2_del_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask)
272{
273 struct c2_vq_req *vq_req;
274 struct c2wr_rnic_setconfig_req *wr;
275 struct c2wr_rnic_setconfig_rep *reply;
276 struct c2_netaddr netaddr;
277 int err, len;
278
279 vq_req = vq_req_alloc(c2dev);
280 if (!vq_req)
281 return -ENOMEM;
282
283 len = sizeof(struct c2_netaddr);
284 wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
285 if (!wr) {
286 err = -ENOMEM;
287 goto bail0;
288 }
289
290 c2_wr_set_id(wr, CCWR_RNIC_SETCONFIG);
291 wr->hdr.context = (unsigned long) vq_req;
292 wr->rnic_handle = c2dev->adapter_handle;
293 wr->option = cpu_to_be32(C2_CFG_DEL_ADDR);
294
295 netaddr.ip_addr = inaddr;
296 netaddr.netmask = inmask;
297 netaddr.mtu = 0;
298
299 memcpy(wr->data, &netaddr, len);
300
301 vq_req_get(c2dev, vq_req);
302
303 err = vq_send_wr(c2dev, (union c2wr *) wr);
304 if (err) {
305 vq_req_put(c2dev, vq_req);
306 goto bail1;
307 }
308
309 err = vq_wait_for_reply(c2dev, vq_req);
310 if (err)
311 goto bail1;
312
313 reply =
314 (struct c2wr_rnic_setconfig_rep *) (unsigned long) (vq_req->reply_msg);
315 if (!reply) {
316 err = -ENOMEM;
317 goto bail1;
318 }
319
320 err = c2_errno(reply);
321 vq_repbuf_free(c2dev, reply);
322
323bail1:
324 kfree(wr);
325bail0:
326 vq_req_free(c2dev, vq_req);
327 return err;
328}
329
330/*
331 * Open a single RNIC instance to use with all
332 * low level openib calls
333 */
334static int c2_rnic_open(struct c2_dev *c2dev)
335{
336 struct c2_vq_req *vq_req;
337 union c2wr wr;
338 struct c2wr_rnic_open_rep *reply;
339 int err;
340
341 vq_req = vq_req_alloc(c2dev);
342 if (vq_req == NULL) {
343 return -ENOMEM;
344 }
345
346 memset(&wr, 0, sizeof(wr));
347 c2_wr_set_id(&wr, CCWR_RNIC_OPEN);
348 wr.rnic_open.req.hdr.context = (unsigned long) (vq_req);
349 wr.rnic_open.req.flags = cpu_to_be16(RNIC_PRIV_MODE);
350 wr.rnic_open.req.port_num = cpu_to_be16(0);
351 wr.rnic_open.req.user_context = (unsigned long) c2dev;
352
353 vq_req_get(c2dev, vq_req);
354
355 err = vq_send_wr(c2dev, &wr);
356 if (err) {
357 vq_req_put(c2dev, vq_req);
358 goto bail0;
359 }
360
361 err = vq_wait_for_reply(c2dev, vq_req);
362 if (err) {
363 goto bail0;
364 }
365
366 reply = (struct c2wr_rnic_open_rep *) (unsigned long) (vq_req->reply_msg);
367 if (!reply) {
368 err = -ENOMEM;
369 goto bail0;
370 }
371
372 if ((err = c2_errno(reply)) != 0) {
373 goto bail1;
374 }
375
376 c2dev->adapter_handle = reply->rnic_handle;
377
378bail1:
379 vq_repbuf_free(c2dev, reply);
380bail0:
381 vq_req_free(c2dev, vq_req);
382 return err;
383}
384
385/*
386 * Close the RNIC instance
387 */
388static int c2_rnic_close(struct c2_dev *c2dev)
389{
390 struct c2_vq_req *vq_req;
391 union c2wr wr;
392 struct c2wr_rnic_close_rep *reply;
393 int err;
394
395 vq_req = vq_req_alloc(c2dev);
396 if (vq_req == NULL) {
397 return -ENOMEM;
398 }
399
400 memset(&wr, 0, sizeof(wr));
401 c2_wr_set_id(&wr, CCWR_RNIC_CLOSE);
402 wr.rnic_close.req.hdr.context = (unsigned long) vq_req;
403 wr.rnic_close.req.rnic_handle = c2dev->adapter_handle;
404
405 vq_req_get(c2dev, vq_req);
406
407 err = vq_send_wr(c2dev, &wr);
408 if (err) {
409 vq_req_put(c2dev, vq_req);
410 goto bail0;
411 }
412
413 err = vq_wait_for_reply(c2dev, vq_req);
414 if (err) {
415 goto bail0;
416 }
417
418 reply = (struct c2wr_rnic_close_rep *) (unsigned long) (vq_req->reply_msg);
419 if (!reply) {
420 err = -ENOMEM;
421 goto bail0;
422 }
423
424 if ((err = c2_errno(reply)) != 0) {
425 goto bail1;
426 }
427
428 c2dev->adapter_handle = 0;
429
430bail1:
431 vq_repbuf_free(c2dev, reply);
432bail0:
433 vq_req_free(c2dev, vq_req);
434 return err;
435}
436
437/*
438 * Called by c2_probe to initialize the RNIC. This principally
439 * involves initializing the various limits and resource pools that
440 * comprise the RNIC instance.
441 */
442int c2_rnic_init(struct c2_dev *c2dev)
443{
444 int err;
445 u32 qsize, msgsize;
446 void *q1_pages;
447 void *q2_pages;
448 void __iomem *mmio_regs;
449
450 /* Device capabilities */
451 c2dev->device_cap_flags =
452 (IB_DEVICE_RESIZE_MAX_WR |
453 IB_DEVICE_CURR_QP_STATE_MOD |
454 IB_DEVICE_SYS_IMAGE_GUID |
455 IB_DEVICE_LOCAL_DMA_LKEY |
456 IB_DEVICE_MEM_WINDOW);
457
458 /* Allocate the qptr_array */
459 c2dev->qptr_array = vzalloc(C2_MAX_CQS * sizeof(void *));
460 if (!c2dev->qptr_array) {
461 return -ENOMEM;
462 }
463
464 /* Initialize the qptr_array */
465 c2dev->qptr_array[0] = (void *) &c2dev->req_vq;
466 c2dev->qptr_array[1] = (void *) &c2dev->rep_vq;
467 c2dev->qptr_array[2] = (void *) &c2dev->aeq;
468
469 /* Initialize data structures */
470 init_waitqueue_head(&c2dev->req_vq_wo);
471 spin_lock_init(&c2dev->vqlock);
472 spin_lock_init(&c2dev->lock);
473
474 /* Allocate MQ shared pointer pool for kernel clients. User
475 * mode client pools are hung off the user context
476 */
477 err = c2_init_mqsp_pool(c2dev, GFP_KERNEL, &c2dev->kern_mqsp_pool);
478 if (err) {
479 goto bail0;
480 }
481
482 /* Allocate shared pointers for Q0, Q1, and Q2 from
483 * the shared pointer pool.
484 */
485
486 c2dev->hint_count = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
487 &c2dev->hint_count_dma,
488 GFP_KERNEL);
489 c2dev->req_vq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
490 &c2dev->req_vq.shared_dma,
491 GFP_KERNEL);
492 c2dev->rep_vq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
493 &c2dev->rep_vq.shared_dma,
494 GFP_KERNEL);
495 c2dev->aeq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
496 &c2dev->aeq.shared_dma, GFP_KERNEL);
497 if (!c2dev->hint_count || !c2dev->req_vq.shared ||
498 !c2dev->rep_vq.shared || !c2dev->aeq.shared) {
499 err = -ENOMEM;
500 goto bail1;
501 }
502
503 mmio_regs = c2dev->kva;
504 /* Initialize the Verbs Request Queue */
505 c2_mq_req_init(&c2dev->req_vq, 0,
506 be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_QSIZE)),
507 be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_MSGSIZE)),
508 mmio_regs +
509 be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_POOLSTART)),
510 mmio_regs +
511 be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_SHARED)),
512 C2_MQ_ADAPTER_TARGET);
513
514 /* Initialize the Verbs Reply Queue */
515 qsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q1_QSIZE));
516 msgsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q1_MSGSIZE));
517 q1_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
518 &c2dev->rep_vq.host_dma, GFP_KERNEL);
519 if (!q1_pages) {
520 err = -ENOMEM;
521 goto bail1;
522 }
523 dma_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma);
524 pr_debug("%s rep_vq va %p dma %llx\n", __func__, q1_pages,
525 (unsigned long long) c2dev->rep_vq.host_dma);
526 c2_mq_rep_init(&c2dev->rep_vq,
527 1,
528 qsize,
529 msgsize,
530 q1_pages,
531 mmio_regs +
532 be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q1_SHARED)),
533 C2_MQ_HOST_TARGET);
534
535	/* Initialize the Asynchronous Event Queue */
536 qsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q2_QSIZE));
537 msgsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q2_MSGSIZE));
538 q2_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
539 &c2dev->aeq.host_dma, GFP_KERNEL);
540 if (!q2_pages) {
541 err = -ENOMEM;
542 goto bail2;
543 }
544 dma_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma);
545 pr_debug("%s aeq va %p dma %llx\n", __func__, q2_pages,
546 (unsigned long long) c2dev->aeq.host_dma);
547 c2_mq_rep_init(&c2dev->aeq,
548 2,
549 qsize,
550 msgsize,
551 q2_pages,
552 mmio_regs +
553 be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q2_SHARED)),
554 C2_MQ_HOST_TARGET);
555
556 /* Initialize the verbs request allocator */
557 err = vq_init(c2dev);
558 if (err)
559 goto bail3;
560
561 /* Enable interrupts on the adapter */
562 writel(0, c2dev->regs + C2_IDIS);
563
564 /* create the WR init message */
565 err = c2_adapter_init(c2dev);
566 if (err)
567 goto bail4;
568 c2dev->init++;
569
570 /* open an adapter instance */
571 err = c2_rnic_open(c2dev);
572 if (err)
573 goto bail4;
574
575	/* Initialize the cached adapter limits */
576 err = c2_rnic_query(c2dev, &c2dev->props);
577 if (err)
578 goto bail5;
579
580 /* Initialize the PD pool */
581 err = c2_init_pd_table(c2dev);
582 if (err)
583 goto bail5;
584
585 /* Initialize the QP pool */
586 c2_init_qp_table(c2dev);
587 return 0;
588
589bail5:
590 c2_rnic_close(c2dev);
591bail4:
592 vq_term(c2dev);
593bail3:
594 dma_free_coherent(&c2dev->pcidev->dev,
595 c2dev->aeq.q_size * c2dev->aeq.msg_size,
596 q2_pages, dma_unmap_addr(&c2dev->aeq, mapping));
597bail2:
598 dma_free_coherent(&c2dev->pcidev->dev,
599 c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
600 q1_pages, dma_unmap_addr(&c2dev->rep_vq, mapping));
601bail1:
602 c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
603bail0:
604 vfree(c2dev->qptr_array);
605
606 return err;
607}
608
609/*
610 * Called by c2_remove to cleanup the RNIC resources.
611 */
612void c2_rnic_term(struct c2_dev *c2dev)
613{
614
615 /* Close the open adapter instance */
616 c2_rnic_close(c2dev);
617
618 /* Send the TERM message to the adapter */
619 c2_adapter_term(c2dev);
620
621 /* Disable interrupts on the adapter */
622 writel(1, c2dev->regs + C2_IDIS);
623
624 /* Free the QP pool */
625 c2_cleanup_qp_table(c2dev);
626
627 /* Free the PD pool */
628 c2_cleanup_pd_table(c2dev);
629
630 /* Free the verbs request allocator */
631 vq_term(c2dev);
632
633	/* Free the asynchronous event queue */
634 dma_free_coherent(&c2dev->pcidev->dev,
635 c2dev->aeq.q_size * c2dev->aeq.msg_size,
636 c2dev->aeq.msg_pool.host,
637 dma_unmap_addr(&c2dev->aeq, mapping));
638
639 /* Free the verbs reply queue */
640 dma_free_coherent(&c2dev->pcidev->dev,
641 c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
642 c2dev->rep_vq.msg_pool.host,
643 dma_unmap_addr(&c2dev->rep_vq, mapping));
644
645 /* Free the MQ shared pointer pool */
646 c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
647
648 /* Free the qptr_array */
649 vfree(c2dev->qptr_array);
650
651 return;
652}
diff --git a/drivers/staging/rdma/amso1100/c2_status.h b/drivers/staging/rdma/amso1100/c2_status.h
deleted file mode 100644
index 6ee4aa92d875..000000000000
--- a/drivers/staging/rdma/amso1100/c2_status.h
+++ /dev/null
@@ -1,158 +0,0 @@
1/*
2 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33#ifndef _C2_STATUS_H_
34#define _C2_STATUS_H_
35
36/*
37 * Verbs Status Codes
38 */
39enum c2_status {
40 C2_OK = 0, /* This must be zero */
41 CCERR_INSUFFICIENT_RESOURCES = 1,
42 CCERR_INVALID_MODIFIER = 2,
43 CCERR_INVALID_MODE = 3,
44 CCERR_IN_USE = 4,
45 CCERR_INVALID_RNIC = 5,
46 CCERR_INTERRUPTED_OPERATION = 6,
47 CCERR_INVALID_EH = 7,
48 CCERR_INVALID_CQ = 8,
49 CCERR_CQ_EMPTY = 9,
50 CCERR_NOT_IMPLEMENTED = 10,
51 CCERR_CQ_DEPTH_TOO_SMALL = 11,
52 CCERR_PD_IN_USE = 12,
53 CCERR_INVALID_PD = 13,
54 CCERR_INVALID_SRQ = 14,
55 CCERR_INVALID_ADDRESS = 15,
56 CCERR_INVALID_NETMASK = 16,
57 CCERR_INVALID_QP = 17,
58 CCERR_INVALID_QP_STATE = 18,
59 CCERR_TOO_MANY_WRS_POSTED = 19,
60 CCERR_INVALID_WR_TYPE = 20,
61 CCERR_INVALID_SGL_LENGTH = 21,
62 CCERR_INVALID_SQ_DEPTH = 22,
63 CCERR_INVALID_RQ_DEPTH = 23,
64 CCERR_INVALID_ORD = 24,
65 CCERR_INVALID_IRD = 25,
66 CCERR_QP_ATTR_CANNOT_CHANGE = 26,
67 CCERR_INVALID_STAG = 27,
68 CCERR_QP_IN_USE = 28,
69 CCERR_OUTSTANDING_WRS = 29,
70 CCERR_STAG_IN_USE = 30,
71 CCERR_INVALID_STAG_INDEX = 31,
72 CCERR_INVALID_SGL_FORMAT = 32,
73 CCERR_ADAPTER_TIMEOUT = 33,
74 CCERR_INVALID_CQ_DEPTH = 34,
75 CCERR_INVALID_PRIVATE_DATA_LENGTH = 35,
76 CCERR_INVALID_EP = 36,
77 CCERR_MR_IN_USE = CCERR_STAG_IN_USE,
78 CCERR_FLUSHED = 38,
79 CCERR_INVALID_WQE = 39,
80 CCERR_LOCAL_QP_CATASTROPHIC_ERROR = 40,
81 CCERR_REMOTE_TERMINATION_ERROR = 41,
82 CCERR_BASE_AND_BOUNDS_VIOLATION = 42,
83 CCERR_ACCESS_VIOLATION = 43,
84 CCERR_INVALID_PD_ID = 44,
85 CCERR_WRAP_ERROR = 45,
86 CCERR_INV_STAG_ACCESS_ERROR = 46,
87 CCERR_ZERO_RDMA_READ_RESOURCES = 47,
88 CCERR_QP_NOT_PRIVILEGED = 48,
89 CCERR_STAG_STATE_NOT_INVALID = 49,
90 CCERR_INVALID_PAGE_SIZE = 50,
91 CCERR_INVALID_BUFFER_SIZE = 51,
92 CCERR_INVALID_PBE = 52,
93 CCERR_INVALID_FBO = 53,
94 CCERR_INVALID_LENGTH = 54,
95 CCERR_INVALID_ACCESS_RIGHTS = 55,
96 CCERR_PBL_TOO_BIG = 56,
97 CCERR_INVALID_VA = 57,
98 CCERR_INVALID_REGION = 58,
99 CCERR_INVALID_WINDOW = 59,
100 CCERR_TOTAL_LENGTH_TOO_BIG = 60,
101 CCERR_INVALID_QP_ID = 61,
102 CCERR_ADDR_IN_USE = 62,
103 CCERR_ADDR_NOT_AVAIL = 63,
104 CCERR_NET_DOWN = 64,
105 CCERR_NET_UNREACHABLE = 65,
106 CCERR_CONN_ABORTED = 66,
107 CCERR_CONN_RESET = 67,
108 CCERR_NO_BUFS = 68,
109 CCERR_CONN_TIMEDOUT = 69,
110 CCERR_CONN_REFUSED = 70,
111 CCERR_HOST_UNREACHABLE = 71,
112 CCERR_INVALID_SEND_SGL_DEPTH = 72,
113 CCERR_INVALID_RECV_SGL_DEPTH = 73,
114 CCERR_INVALID_RDMA_WRITE_SGL_DEPTH = 74,
115 CCERR_INSUFFICIENT_PRIVILEGES = 75,
116 CCERR_STACK_ERROR = 76,
117 CCERR_INVALID_VERSION = 77,
118 CCERR_INVALID_MTU = 78,
119 CCERR_INVALID_IMAGE = 79,
120	CCERR_PENDING = 98,	/* not an error; used internally by adapter */
121 CCERR_DEFER = 99, /* not an error; used internally by adapter */
122 CCERR_FAILED_WRITE = 100,
123 CCERR_FAILED_ERASE = 101,
124 CCERR_FAILED_VERIFICATION = 102,
125 CCERR_NOT_FOUND = 103,
126
127};
128
129/*
130 * CCAE_ACTIVE_CONNECT_RESULTS status result codes.
131 */
132enum c2_connect_status {
133 C2_CONN_STATUS_SUCCESS = C2_OK,
134 C2_CONN_STATUS_NO_MEM = CCERR_INSUFFICIENT_RESOURCES,
135 C2_CONN_STATUS_TIMEDOUT = CCERR_CONN_TIMEDOUT,
136 C2_CONN_STATUS_REFUSED = CCERR_CONN_REFUSED,
137 C2_CONN_STATUS_NETUNREACH = CCERR_NET_UNREACHABLE,
138 C2_CONN_STATUS_HOSTUNREACH = CCERR_HOST_UNREACHABLE,
139 C2_CONN_STATUS_INVALID_RNIC = CCERR_INVALID_RNIC,
140 C2_CONN_STATUS_INVALID_QP = CCERR_INVALID_QP,
141 C2_CONN_STATUS_INVALID_QP_STATE = CCERR_INVALID_QP_STATE,
142 C2_CONN_STATUS_REJECTED = CCERR_CONN_RESET,
143 C2_CONN_STATUS_ADDR_NOT_AVAIL = CCERR_ADDR_NOT_AVAIL,
144};
145
146/*
147 * Flash programming status codes.
148 */
149enum c2_flash_status {
150 C2_FLASH_STATUS_SUCCESS = 0x0000,
151 C2_FLASH_STATUS_VERIFY_ERR = 0x0002,
152 C2_FLASH_STATUS_IMAGE_ERR = 0x0004,
153 C2_FLASH_STATUS_ECLBS = 0x0400,
154 C2_FLASH_STATUS_PSLBS = 0x0800,
155 C2_FLASH_STATUS_VPENS = 0x1000,
156};
157
158#endif /* _C2_STATUS_H_ */
diff --git a/drivers/staging/rdma/amso1100/c2_user.h b/drivers/staging/rdma/amso1100/c2_user.h
deleted file mode 100644
index 7e9e7ad65467..000000000000
--- a/drivers/staging/rdma/amso1100/c2_user.h
+++ /dev/null
@@ -1,82 +0,0 @@
1/*
2 * Copyright (c) 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Cisco Systems. All rights reserved.
4 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 *
34 */
35
36#ifndef C2_USER_H
37#define C2_USER_H
38
39#include <linux/types.h>
40
41/*
42 * Make sure that all structs defined in this file remain laid out so
43 * that they pack the same way on 32-bit and 64-bit architectures (to
44 * avoid incompatibility between 32-bit userspace and 64-bit kernels).
45 * In particular do not use pointer types -- pass pointers in __u64
46 * instead.
47 */
48
49struct c2_alloc_ucontext_resp {
50 __u32 qp_tab_size;
51 __u32 uarc_size;
52};
53
54struct c2_alloc_pd_resp {
55 __u32 pdn;
56 __u32 reserved;
57};
58
59struct c2_create_cq {
60 __u32 lkey;
61 __u32 pdn;
62 __u64 arm_db_page;
63 __u64 set_db_page;
64 __u32 arm_db_index;
65 __u32 set_db_index;
66};
67
68struct c2_create_cq_resp {
69 __u32 cqn;
70 __u32 reserved;
71};
72
73struct c2_create_qp {
74 __u32 lkey;
75 __u32 reserved;
76 __u64 sq_db_page;
77 __u64 rq_db_page;
78 __u32 sq_db_index;
79 __u32 rq_db_index;
80};
81
82#endif /* C2_USER_H */
diff --git a/drivers/staging/rdma/amso1100/c2_vq.c b/drivers/staging/rdma/amso1100/c2_vq.c
deleted file mode 100644
index 2ec716fb2edb..000000000000
--- a/drivers/staging/rdma/amso1100/c2_vq.c
+++ /dev/null
@@ -1,260 +0,0 @@
1/*
2 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33#include <linux/slab.h>
34#include <linux/spinlock.h>
35
36#include "c2_vq.h"
37#include "c2_provider.h"
38
39/*
40 * Verbs Request Objects:
41 *
42 * VQ Request Objects are allocated by the kernel verbs handlers.
43 * They contain a wait object, a refcnt, an atomic bool indicating that the
44 * adapter has replied, and a copy of the verb reply work request.
45 * A pointer to the VQ Request Object is passed down in the context
46 * field of the work request message, and reflected back by the adapter
47 * in the verbs reply message. The function handle_vq() in the interrupt
48 * path will use this pointer to:
49 * 1) append a copy of the verbs reply message
50 * 2) mark that the reply is ready
51 * 3) wake up the kernel verbs handler blocked awaiting the reply.
52 *
53 *
54 * The kernel verbs handlers do a "get" to put a 2nd reference on the
55 * VQ Request object. If the kernel verbs handler exits before the adapter
56 * can respond, this extra reference will keep the VQ Request object around
57 * until the adapter's reply can be processed. The reason we need this is
58 * because a pointer to this object is stuffed into the context field of
59 * the verbs work request message, and reflected back in the reply message.
60 * It is used in the interrupt handler (handle_vq()) to wake up the appropriate
61 * kernel verb handler that is blocked awaiting the verb reply.
62 * So handle_vq() will do a "put" on the object when it's done accessing it.
63 * NOTE: If we guarantee that the kernel verb handler will never bail before
64 * getting the reply, then we don't need these refcnts.
65 *
66 *
67 * VQ Request objects are freed by the kernel verbs handlers only
68 * after the verb has been processed, or when the adapter fails and
69 * does not reply.
70 *
71 *
72 * Verbs Reply Buffers:
73 *
74 * VQ Reply bufs are local host memory copies of an
75 * outstanding Verb Request reply
76 * message. They are always allocated by the kernel verbs handlers, and _may_ be
77 * freed by either the kernel verbs handler -or- the interrupt handler. The
78 * kernel verbs handler _must_ free the repbuf, then free the vq request object
79 * in that order.
80 */
81
82int vq_init(struct c2_dev *c2dev)
83{
84 sprintf(c2dev->vq_cache_name, "c2-vq:dev%c",
85 (char) ('0' + c2dev->devnum));
86 c2dev->host_msg_cache =
87 kmem_cache_create(c2dev->vq_cache_name, c2dev->rep_vq.msg_size, 0,
88 SLAB_HWCACHE_ALIGN, NULL);
89 if (c2dev->host_msg_cache == NULL) {
90 return -ENOMEM;
91 }
92 return 0;
93}
94
95void vq_term(struct c2_dev *c2dev)
96{
97 kmem_cache_destroy(c2dev->host_msg_cache);
98}
99
100/* vq_req_alloc - allocate a VQ Request Object and initialize it.
101 * The refcnt is set to 1.
102 */
103struct c2_vq_req *vq_req_alloc(struct c2_dev *c2dev)
104{
105 struct c2_vq_req *r;
106
107 r = kmalloc(sizeof(struct c2_vq_req), GFP_KERNEL);
108 if (r) {
109 init_waitqueue_head(&r->wait_object);
110 r->reply_msg = 0;
111 r->event = 0;
112 r->cm_id = NULL;
113 r->qp = NULL;
114 atomic_set(&r->refcnt, 1);
115 atomic_set(&r->reply_ready, 0);
116 }
117 return r;
118}
119
120
121/* vq_req_free - free the VQ Request Object. It is assumed the verbs handler
122 * has already freed the VQ Reply Buffer if it existed.
123 */
124void vq_req_free(struct c2_dev *c2dev, struct c2_vq_req *r)
125{
126 r->reply_msg = 0;
127 if (atomic_dec_and_test(&r->refcnt)) {
128 kfree(r);
129 }
130}
131
132/* vq_req_get - reference a VQ Request Object. Done
133 * only in the kernel verbs handlers.
134 */
135void vq_req_get(struct c2_dev *c2dev, struct c2_vq_req *r)
136{
137 atomic_inc(&r->refcnt);
138}
139
140
141/* vq_req_put - dereference and potentially free a VQ Request Object.
142 *
143 * This is only called by handle_vq() in the
144 * interrupt handler when it is done processing
145 * a verb reply message. If the associated
146 * kernel verbs handler has already bailed,
147 * then this put will actually free the VQ
148 * Request object _and_ the VQ Reply Buffer
149 * if it exists.
150 */
151void vq_req_put(struct c2_dev *c2dev, struct c2_vq_req *r)
152{
153 if (atomic_dec_and_test(&r->refcnt)) {
154 if (r->reply_msg != 0)
155 vq_repbuf_free(c2dev,
156 (void *) (unsigned long) r->reply_msg);
157 kfree(r);
158 }
159}
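
vq_req_get()/vq_req_put() above implement the two-reference scheme described in the comment at the top of this file: the verbs handler holds one reference, the reply path takes a second, and whichever side drops the last reference frees the object. A self-contained userspace sketch of the same lifecycle, using C11 atomics and hypothetical names:

#include <stdatomic.h>
#include <stdlib.h>

struct req {
	atomic_int refcnt;
	/* ... request state ... */
};

static struct req *req_alloc(void)
{
	struct req *r = calloc(1, sizeof(*r));

	if (r)
		atomic_init(&r->refcnt, 1);	/* caller's reference */
	return r;
}

static void req_get(struct req *r)
{
	atomic_fetch_add(&r->refcnt, 1);	/* e.g. before handing to the reply path */
}

static void req_put(struct req *r)
{
	/* Whoever drops the last reference frees, as in vq_req_put()/vq_req_free(). */
	if (atomic_fetch_sub(&r->refcnt, 1) == 1)
		free(r);
}
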
160
161
162/*
163 * vq_repbuf_alloc - allocate a VQ Reply Buffer.
164 */
165void *vq_repbuf_alloc(struct c2_dev *c2dev)
166{
167 return kmem_cache_alloc(c2dev->host_msg_cache, GFP_ATOMIC);
168}
169
170/*
171 * vq_send_wr - post a verbs request message to the Verbs Request Queue.
172 * If a message is not available in the MQ, then block until one is available.
173 * NOTE: handle_mq() in interrupt context will wake up threads blocked here.
174 * When the adapter drains the Verbs Request Queue,
175 * it inserts MQ index 0 in to the
176 * adapter->host activity fifo and interrupts the host.
177 */
178int vq_send_wr(struct c2_dev *c2dev, union c2wr *wr)
179{
180 void *msg;
181 wait_queue_t __wait;
182
183 /*
184 * grab adapter vq lock
185 */
186 spin_lock(&c2dev->vqlock);
187
188 /*
189 * allocate msg
190 */
191 msg = c2_mq_alloc(&c2dev->req_vq);
192
193 /*
194	 * If we cannot get a msg, then we'll wait.
195	 * When a message becomes available, the int handler will wake_up()
196 * any waiters.
197 */
198 while (msg == NULL) {
199 pr_debug("%s:%d no available msg in VQ, waiting...\n",
200 __func__, __LINE__);
201 init_waitqueue_entry(&__wait, current);
202 add_wait_queue(&c2dev->req_vq_wo, &__wait);
203 spin_unlock(&c2dev->vqlock);
204 for (;;) {
205 set_current_state(TASK_INTERRUPTIBLE);
206 if (!c2_mq_full(&c2dev->req_vq)) {
207 break;
208 }
209 if (!signal_pending(current)) {
210 schedule_timeout(1 * HZ); /* 1 second... */
211 continue;
212 }
213 set_current_state(TASK_RUNNING);
214 remove_wait_queue(&c2dev->req_vq_wo, &__wait);
215 return -EINTR;
216 }
217 set_current_state(TASK_RUNNING);
218 remove_wait_queue(&c2dev->req_vq_wo, &__wait);
219 spin_lock(&c2dev->vqlock);
220 msg = c2_mq_alloc(&c2dev->req_vq);
221 }
222
223 /*
224 * copy wr into adapter msg
225 */
226 memcpy(msg, wr, c2dev->req_vq.msg_size);
227
228 /*
229 * post msg
230 */
231 c2_mq_produce(&c2dev->req_vq);
232
233 /*
234 * release adapter vq lock
235 */
236 spin_unlock(&c2dev->vqlock);
237 return 0;
238}
239
240
241/*
242 * vq_wait_for_reply - block until the adapter posts a Verb Reply Message.
243 */
244int vq_wait_for_reply(struct c2_dev *c2dev, struct c2_vq_req *req)
245{
246 if (!wait_event_timeout(req->wait_object,
247 atomic_read(&req->reply_ready),
248 60*HZ))
249 return -ETIMEDOUT;
250
251 return 0;
252}
253
254/*
255 * vq_repbuf_free - Free a Verbs Reply Buffer.
256 */
257void vq_repbuf_free(struct c2_dev *c2dev, void *reply)
258{
259 kmem_cache_free(c2dev->host_msg_cache, reply);
260}
diff --git a/drivers/staging/rdma/amso1100/c2_vq.h b/drivers/staging/rdma/amso1100/c2_vq.h
deleted file mode 100644
index c1f6cef60213..000000000000
--- a/drivers/staging/rdma/amso1100/c2_vq.h
+++ /dev/null
@@ -1,63 +0,0 @@
1/*
2 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33#ifndef _C2_VQ_H_
34#define _C2_VQ_H_
35#include <linux/sched.h>
36#include "c2.h"
37#include "c2_wr.h"
38#include "c2_provider.h"
39
40struct c2_vq_req {
41 u64 reply_msg; /* ptr to reply msg */
42 wait_queue_head_t wait_object; /* wait object for vq reqs */
43 atomic_t reply_ready; /* set when reply is ready */
44 atomic_t refcnt; /* used to cancel WRs... */
45 int event;
46 struct iw_cm_id *cm_id;
47 struct c2_qp *qp;
48};
49
50int vq_init(struct c2_dev *c2dev);
51void vq_term(struct c2_dev *c2dev);
52
53struct c2_vq_req *vq_req_alloc(struct c2_dev *c2dev);
54void vq_req_free(struct c2_dev *c2dev, struct c2_vq_req *req);
55void vq_req_get(struct c2_dev *c2dev, struct c2_vq_req *req);
56void vq_req_put(struct c2_dev *c2dev, struct c2_vq_req *req);
57int vq_send_wr(struct c2_dev *c2dev, union c2wr * wr);
58
59void *vq_repbuf_alloc(struct c2_dev *c2dev);
60void vq_repbuf_free(struct c2_dev *c2dev, void *reply);
61
62int vq_wait_for_reply(struct c2_dev *c2dev, struct c2_vq_req *req);
63#endif /* _C2_VQ_H_ */
diff --git a/drivers/staging/rdma/amso1100/c2_wr.h b/drivers/staging/rdma/amso1100/c2_wr.h
deleted file mode 100644
index 8d4b4ca463ca..000000000000
--- a/drivers/staging/rdma/amso1100/c2_wr.h
+++ /dev/null
@@ -1,1520 +0,0 @@
1/*
2 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33#ifndef _C2_WR_H_
34#define _C2_WR_H_
35
36#ifdef CCDEBUG
37#define CCWR_MAGIC 0xb07700b0
38#endif
39
40#define C2_QP_NO_ATTR_CHANGE 0xFFFFFFFF
41
42/* Maximum allowed size in bytes of private_data exchange
43 * on connect.
44 */
45#define C2_MAX_PRIVATE_DATA_SIZE 200
46
47/*
48 * These types are shared among the adapter, host, and CCIL consumer.
49 */
50enum c2_cq_notification_type {
51 C2_CQ_NOTIFICATION_TYPE_NONE = 1,
52 C2_CQ_NOTIFICATION_TYPE_NEXT,
53 C2_CQ_NOTIFICATION_TYPE_NEXT_SE
54};
55
56enum c2_setconfig_cmd {
57 C2_CFG_ADD_ADDR = 1,
58 C2_CFG_DEL_ADDR = 2,
59 C2_CFG_ADD_ROUTE = 3,
60 C2_CFG_DEL_ROUTE = 4
61};
62
63enum c2_getconfig_cmd {
64 C2_GETCONFIG_ROUTES = 1,
65 C2_GETCONFIG_ADDRS
66};
67
68/*
69 * CCIL Work Request Identifiers
70 */
71enum c2wr_ids {
72 CCWR_RNIC_OPEN = 1,
73 CCWR_RNIC_QUERY,
74 CCWR_RNIC_SETCONFIG,
75 CCWR_RNIC_GETCONFIG,
76 CCWR_RNIC_CLOSE,
77 CCWR_CQ_CREATE,
78 CCWR_CQ_QUERY,
79 CCWR_CQ_MODIFY,
80 CCWR_CQ_DESTROY,
81 CCWR_QP_CONNECT,
82 CCWR_PD_ALLOC,
83 CCWR_PD_DEALLOC,
84 CCWR_SRQ_CREATE,
85 CCWR_SRQ_QUERY,
86 CCWR_SRQ_MODIFY,
87 CCWR_SRQ_DESTROY,
88 CCWR_QP_CREATE,
89 CCWR_QP_QUERY,
90 CCWR_QP_MODIFY,
91 CCWR_QP_DESTROY,
92 CCWR_NSMR_STAG_ALLOC,
93 CCWR_NSMR_REGISTER,
94 CCWR_NSMR_PBL,
95 CCWR_STAG_DEALLOC,
96 CCWR_NSMR_REREGISTER,
97 CCWR_SMR_REGISTER,
98 CCWR_MR_QUERY,
99 CCWR_MW_ALLOC,
100 CCWR_MW_QUERY,
101 CCWR_EP_CREATE,
102 CCWR_EP_GETOPT,
103 CCWR_EP_SETOPT,
104 CCWR_EP_DESTROY,
105 CCWR_EP_BIND,
106 CCWR_EP_CONNECT,
107 CCWR_EP_LISTEN,
108 CCWR_EP_SHUTDOWN,
109 CCWR_EP_LISTEN_CREATE,
110 CCWR_EP_LISTEN_DESTROY,
111 CCWR_EP_QUERY,
112 CCWR_CR_ACCEPT,
113 CCWR_CR_REJECT,
114 CCWR_CONSOLE,
115 CCWR_TERM,
116 CCWR_FLASH_INIT,
117 CCWR_FLASH,
118 CCWR_BUF_ALLOC,
119 CCWR_BUF_FREE,
120 CCWR_FLASH_WRITE,
121 CCWR_INIT, /* WARNING: Don't move this ever again! */
122
123
124
125 /* Add new IDs here */
126
127
128
129 /*
130 * WARNING: CCWR_LAST must always be the last verbs id defined!
131 * All the preceding IDs are fixed, and must not change.
132 * You can add new IDs, but must not remove or reorder
133 * any IDs. If you do, YOU will ruin any hope of
134 * compatibility between versions.
135 */
136 CCWR_LAST,
137
138 /*
139 * Start over at 1 so that arrays indexed by user wr id's
140 * begin at 1. This is OK since the verbs and user wr id's
141 * are always used on disjoint sets of queues.
142 */
143 /*
144 * The order of the CCWR_SEND_XX verbs must
145 * match the order of the RDMA_OPs
146 */
147 CCWR_SEND = 1,
148 CCWR_SEND_INV,
149 CCWR_SEND_SE,
150 CCWR_SEND_SE_INV,
151 CCWR_RDMA_WRITE,
152 CCWR_RDMA_READ,
153 CCWR_RDMA_READ_INV,
154 CCWR_MW_BIND,
155 CCWR_NSMR_FASTREG,
156 CCWR_STAG_INVALIDATE,
157 CCWR_RECV,
158 CCWR_NOP,
159 CCWR_UNIMPL,
160/* WARNING: This must always be the last user wr id defined! */
161};
162#define RDMA_SEND_OPCODE_FROM_WR_ID(x) (x+2)
163
164/*
165 * SQ/RQ Work Request Types
166 */
167enum c2_wr_type {
168 C2_WR_TYPE_SEND = CCWR_SEND,
169 C2_WR_TYPE_SEND_SE = CCWR_SEND_SE,
170 C2_WR_TYPE_SEND_INV = CCWR_SEND_INV,
171 C2_WR_TYPE_SEND_SE_INV = CCWR_SEND_SE_INV,
172 C2_WR_TYPE_RDMA_WRITE = CCWR_RDMA_WRITE,
173 C2_WR_TYPE_RDMA_READ = CCWR_RDMA_READ,
174 C2_WR_TYPE_RDMA_READ_INV_STAG = CCWR_RDMA_READ_INV,
175 C2_WR_TYPE_BIND_MW = CCWR_MW_BIND,
176 C2_WR_TYPE_FASTREG_NSMR = CCWR_NSMR_FASTREG,
177 C2_WR_TYPE_INV_STAG = CCWR_STAG_INVALIDATE,
178 C2_WR_TYPE_RECV = CCWR_RECV,
179 C2_WR_TYPE_NOP = CCWR_NOP,
180};
181
182struct c2_netaddr {
183 __be32 ip_addr;
184 __be32 netmask;
185 u32 mtu;
186};
187
188struct c2_route {
189 u32 ip_addr; /* 0 indicates the default route */
190 u32 netmask; /* netmask associated with dst */
191 u32 flags;
192 union {
193 u32 ipaddr; /* address of the nexthop interface */
194 u8 enaddr[6];
195 } nexthop;
196};
197
198/*
199 * A Scatter Gather Entry.
200 */
201struct c2_data_addr {
202 __be32 stag;
203 __be32 length;
204 __be64 to;
205};
206
207/*
208 * MR and MW flags used by the consumer, RI, and RNIC.
209 */
210enum c2_mm_flags {
211 MEM_REMOTE = 0x0001, /* allow mw binds with remote access. */
212 MEM_VA_BASED = 0x0002, /* Not Zero-based */
213 MEM_PBL_COMPLETE = 0x0004, /* PBL array is complete in this msg */
214 MEM_LOCAL_READ = 0x0008, /* allow local reads */
215 MEM_LOCAL_WRITE = 0x0010, /* allow local writes */
216 MEM_REMOTE_READ = 0x0020, /* allow remote reads */
217 MEM_REMOTE_WRITE = 0x0040, /* allow remote writes */
218 MEM_WINDOW_BIND = 0x0080, /* binds allowed */
219 MEM_SHARED = 0x0100, /* set if MR is shared */
220 MEM_STAG_VALID = 0x0200 /* set if STAG is in valid state */
221};
222
223/*
224 * CCIL API ACF flags defined in terms of the low level mem flags.
225 * This minimizes translation needed in the user API
226 */
227enum c2_acf {
228 C2_ACF_LOCAL_READ = MEM_LOCAL_READ,
229 C2_ACF_LOCAL_WRITE = MEM_LOCAL_WRITE,
230 C2_ACF_REMOTE_READ = MEM_REMOTE_READ,
231 C2_ACF_REMOTE_WRITE = MEM_REMOTE_WRITE,
232 C2_ACF_WINDOW_BIND = MEM_WINDOW_BIND
233};
234
235/*
236 * Image types of objects written to flash
237 */
238#define C2_FLASH_IMG_BITFILE 1
239#define C2_FLASH_IMG_OPTION_ROM 2
240#define C2_FLASH_IMG_VPD 3
241
242/*
243 * To fix bug 1815 we define the maximum allowable size of the
244 * terminate message (per the IETF spec; refer to the IETF
245 * protocol specification, section 12.1.6, page 64).
246 * The message is prefixed by 20 bytes of DDP info.
247 *
248 * Then the message has 6 bytes for the terminate control
249 * and DDP segment length info plus a DDP header (either
250 * 14 or 18 bytes) plus 28 bytes for the RDMA header.
251 * Thus the max size is:
252 * 20 + (6 + 18 + 28) = 72
253 */
254#define C2_MAX_TERMINATE_MESSAGE_SIZE (72)
255
256/*
257 * Build String Length. It must be the same as C2_BUILD_STR_LEN in ccil_api.h
258 */
259#define WR_BUILD_STR_LEN 64
260
261/*
262 * WARNING: All of these structs need to align any 64-bit types on
263 * 64-bit boundaries!  64-bit types include u64 and __be64.
264 */
265
266/*
267 * Clustercore Work Request Header. Be sensitive to field layout
268 * and alignment.
269 */
270struct c2wr_hdr {
271 /* wqe_count is part of the cqe. It is put here so the
272 * adapter can write to it while the wr is pending without
273 * clobbering part of the wr. This word need not be dma'd
274 * from the host to adapter by libccil, but we copy it anyway
275 * to make the memcpy to the adapter better aligned.
276 */
277 __be32 wqe_count;
278
279 /* Put these fields next so that later 32- and 64-bit
280 * quantities are naturally aligned.
281 */
282 u8 id;
283 u8 result; /* adapter -> host */
284 u8 sge_count; /* host -> adapter */
285 u8 flags; /* host -> adapter */
286
287 u64 context;
288#ifdef CCMSGMAGIC
289 u32 magic;
290 u32 pad;
291#endif
292} __attribute__((packed));
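
As an illustration of the alignment warning above (this check is not in the
original header), a compile-time assertion along these lines could be placed in
any .c file that includes c2_wr.h to catch a 64-bit member landing off an
8-byte offset:

#include <linux/bug.h>		/* BUILD_BUG_ON */
#include <linux/stddef.h>	/* offsetof */

static inline void c2wr_hdr_layout_check(void)
{
	/* wqe_count (4 bytes) is followed by four 1-byte fields... */
	BUILD_BUG_ON(offsetof(struct c2wr_hdr, id) != 4);
	/* ...so the u64 context starts on an 8-byte boundary */
	BUILD_BUG_ON(offsetof(struct c2wr_hdr, context) != 8);
}
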
293
294/*
295 *------------------------ RNIC ------------------------
296 */
297
298/*
299 * WR_RNIC_OPEN
300 */
301
302/*
303 * Flags for the RNIC WRs
304 */
305enum c2_rnic_flags {
306 RNIC_IRD_STATIC = 0x0001,
307 RNIC_ORD_STATIC = 0x0002,
308 RNIC_QP_STATIC = 0x0004,
309 RNIC_SRQ_SUPPORTED = 0x0008,
310 RNIC_PBL_BLOCK_MODE = 0x0010,
311 RNIC_SRQ_MODEL_ARRIVAL = 0x0020,
312 RNIC_CQ_OVF_DETECTED = 0x0040,
313 RNIC_PRIV_MODE = 0x0080
314};
315
316struct c2wr_rnic_open_req {
317 struct c2wr_hdr hdr;
318 u64 user_context;
319 __be16 flags; /* See enum c2_rnic_flags */
320 __be16 port_num;
321} __attribute__((packed));
322
323struct c2wr_rnic_open_rep {
324 struct c2wr_hdr hdr;
325 u32 rnic_handle;
326} __attribute__((packed));
327
328union c2wr_rnic_open {
329 struct c2wr_rnic_open_req req;
330 struct c2wr_rnic_open_rep rep;
331} __attribute__((packed));
332
333struct c2wr_rnic_query_req {
334 struct c2wr_hdr hdr;
335 u32 rnic_handle;
336} __attribute__((packed));
337
338/*
339 * WR_RNIC_QUERY
340 */
341struct c2wr_rnic_query_rep {
342 struct c2wr_hdr hdr;
343 u64 user_context;
344 __be32 vendor_id;
345 __be32 part_number;
346 __be32 hw_version;
347 __be32 fw_ver_major;
348 __be32 fw_ver_minor;
349 __be32 fw_ver_patch;
350 char fw_ver_build_str[WR_BUILD_STR_LEN];
351 __be32 max_qps;
352 __be32 max_qp_depth;
353 u32 max_srq_depth;
354 u32 max_send_sgl_depth;
355 u32 max_rdma_sgl_depth;
356 __be32 max_cqs;
357 __be32 max_cq_depth;
358 u32 max_cq_event_handlers;
359 __be32 max_mrs;
360 u32 max_pbl_depth;
361 __be32 max_pds;
362 __be32 max_global_ird;
363 u32 max_global_ord;
364 __be32 max_qp_ird;
365 __be32 max_qp_ord;
366 u32 flags;
367 __be32 max_mws;
368 u32 pbe_range_low;
369 u32 pbe_range_high;
370 u32 max_srqs;
371 u32 page_size;
372} __attribute__((packed));
373
374union c2wr_rnic_query {
375 struct c2wr_rnic_query_req req;
376 struct c2wr_rnic_query_rep rep;
377} __attribute__((packed));
378
379/*
380 * WR_RNIC_GETCONFIG
381 */
382
383struct c2wr_rnic_getconfig_req {
384 struct c2wr_hdr hdr;
385 u32 rnic_handle;
386 u32 option; /* see c2_getconfig_cmd_t */
387 u64 reply_buf;
388 u32 reply_buf_len;
389} __attribute__((packed)) ;
390
391struct c2wr_rnic_getconfig_rep {
392 struct c2wr_hdr hdr;
393 u32 option; /* see c2_getconfig_cmd_t */
394 u32 count_len; /* length of the number of addresses configured */
395} __attribute__((packed)) ;
396
397union c2wr_rnic_getconfig {
398 struct c2wr_rnic_getconfig_req req;
399 struct c2wr_rnic_getconfig_rep rep;
400} __attribute__((packed)) ;
401
402/*
403 * WR_RNIC_SETCONFIG
404 */
405struct c2wr_rnic_setconfig_req {
406 struct c2wr_hdr hdr;
407 u32 rnic_handle;
408 __be32 option; /* See c2_setconfig_cmd_t */
409 /* variable data and pad. See c2_netaddr and c2_route */
410 u8 data[0];
411} __attribute__((packed)) ;
412
413struct c2wr_rnic_setconfig_rep {
414 struct c2wr_hdr hdr;
415} __attribute__((packed)) ;
416
417union c2wr_rnic_setconfig {
418 struct c2wr_rnic_setconfig_req req;
419 struct c2wr_rnic_setconfig_rep rep;
420} __attribute__((packed)) ;
421
422/*
423 * WR_RNIC_CLOSE
424 */
425struct c2wr_rnic_close_req {
426 struct c2wr_hdr hdr;
427 u32 rnic_handle;
428} __attribute__((packed)) ;
429
430struct c2wr_rnic_close_rep {
431 struct c2wr_hdr hdr;
432} __attribute__((packed)) ;
433
434union c2wr_rnic_close {
435 struct c2wr_rnic_close_req req;
436 struct c2wr_rnic_close_rep rep;
437} __attribute__((packed)) ;
438
439/*
440 *------------------------ CQ ------------------------
441 */
442struct c2wr_cq_create_req {
443 struct c2wr_hdr hdr;
444 __be64 shared_ht;
445 u64 user_context;
446 __be64 msg_pool;
447 u32 rnic_handle;
448 __be32 msg_size;
449 __be32 depth;
450} __attribute__((packed)) ;
451
452struct c2wr_cq_create_rep {
453 struct c2wr_hdr hdr;
454 __be32 mq_index;
455 __be32 adapter_shared;
456 u32 cq_handle;
457} __attribute__((packed)) ;
458
459union c2wr_cq_create {
460 struct c2wr_cq_create_req req;
461 struct c2wr_cq_create_rep rep;
462} __attribute__((packed)) ;
463
464struct c2wr_cq_modify_req {
465 struct c2wr_hdr hdr;
466 u32 rnic_handle;
467 u32 cq_handle;
468 u32 new_depth;
469 u64 new_msg_pool;
470} __attribute__((packed)) ;
471
472struct c2wr_cq_modify_rep {
473 struct c2wr_hdr hdr;
474} __attribute__((packed)) ;
475
476union c2wr_cq_modify {
477 struct c2wr_cq_modify_req req;
478 struct c2wr_cq_modify_rep rep;
479} __attribute__((packed)) ;
480
481struct c2wr_cq_destroy_req {
482 struct c2wr_hdr hdr;
483 u32 rnic_handle;
484 u32 cq_handle;
485} __attribute__((packed)) ;
486
487struct c2wr_cq_destroy_rep {
488 struct c2wr_hdr hdr;
489} __attribute__((packed)) ;
490
491union c2wr_cq_destroy {
492 struct c2wr_cq_destroy_req req;
493 struct c2wr_cq_destroy_rep rep;
494} __attribute__((packed)) ;
495
496/*
497 *------------------------ PD ------------------------
498 */
499struct c2wr_pd_alloc_req {
500 struct c2wr_hdr hdr;
501 u32 rnic_handle;
502 u32 pd_id;
503} __attribute__((packed)) ;
504
505struct c2wr_pd_alloc_rep {
506 struct c2wr_hdr hdr;
507} __attribute__((packed)) ;
508
509union c2wr_pd_alloc {
510 struct c2wr_pd_alloc_req req;
511 struct c2wr_pd_alloc_rep rep;
512} __attribute__((packed)) ;
513
514struct c2wr_pd_dealloc_req {
515 struct c2wr_hdr hdr;
516 u32 rnic_handle;
517 u32 pd_id;
518} __attribute__((packed)) ;
519
520struct c2wr_pd_dealloc_rep {
521 struct c2wr_hdr hdr;
522} __attribute__((packed)) ;
523
524union c2wr_pd_dealloc {
525 struct c2wr_pd_dealloc_req req;
526 struct c2wr_pd_dealloc_rep rep;
527} __attribute__((packed)) ;
528
529/*
530 *------------------------ SRQ ------------------------
531 */
532struct c2wr_srq_create_req {
533 struct c2wr_hdr hdr;
534 u64 shared_ht;
535 u64 user_context;
536 u32 rnic_handle;
537 u32 srq_depth;
538 u32 srq_limit;
539 u32 sgl_depth;
540 u32 pd_id;
541} __attribute__((packed)) ;
542
543struct c2wr_srq_create_rep {
544 struct c2wr_hdr hdr;
545 u32 srq_depth;
546 u32 sgl_depth;
547 u32 msg_size;
548 u32 mq_index;
549 u32 mq_start;
550 u32 srq_handle;
551} __attribute__((packed)) ;
552
553union c2wr_srq_create {
554 struct c2wr_srq_create_req req;
555 struct c2wr_srq_create_rep rep;
556} __attribute__((packed)) ;
557
558struct c2wr_srq_destroy_req {
559 struct c2wr_hdr hdr;
560 u32 rnic_handle;
561 u32 srq_handle;
562} __attribute__((packed)) ;
563
564struct c2wr_srq_destroy_rep {
565 struct c2wr_hdr hdr;
566} __attribute__((packed)) ;
567
568union c2wr_srq_destroy {
569 struct c2wr_srq_destroy_req req;
570 struct c2wr_srq_destroy_rep rep;
571} __attribute__((packed)) ;
572
573/*
574 *------------------------ QP ------------------------
575 */
576enum c2wr_qp_flags {
577 QP_RDMA_READ = 0x00000001, /* RDMA read enabled? */
578 QP_RDMA_WRITE = 0x00000002, /* RDMA write enabled? */
579 QP_MW_BIND = 0x00000004, /* MWs enabled */
580 QP_ZERO_STAG = 0x00000008, /* enabled? */
581 QP_REMOTE_TERMINATION = 0x00000010, /* remote end terminated */
582 QP_RDMA_READ_RESPONSE = 0x00000020 /* Remote RDMA read */
583 /* enabled? */
584};
585
586struct c2wr_qp_create_req {
587 struct c2wr_hdr hdr;
588 __be64 shared_sq_ht;
589 __be64 shared_rq_ht;
590 u64 user_context;
591 u32 rnic_handle;
592 u32 sq_cq_handle;
593 u32 rq_cq_handle;
594 __be32 sq_depth;
595 __be32 rq_depth;
596 u32 srq_handle;
597 u32 srq_limit;
598 __be32 flags; /* see enum c2wr_qp_flags */
599 __be32 send_sgl_depth;
600 __be32 recv_sgl_depth;
601 __be32 rdma_write_sgl_depth;
602 __be32 ord;
603 __be32 ird;
604 u32 pd_id;
605} __attribute__((packed)) ;
606
607struct c2wr_qp_create_rep {
608 struct c2wr_hdr hdr;
609 __be32 sq_depth;
610 __be32 rq_depth;
611 u32 send_sgl_depth;
612 u32 recv_sgl_depth;
613 u32 rdma_write_sgl_depth;
614 u32 ord;
615 u32 ird;
616 __be32 sq_msg_size;
617 __be32 sq_mq_index;
618 __be32 sq_mq_start;
619 __be32 rq_msg_size;
620 __be32 rq_mq_index;
621 __be32 rq_mq_start;
622 u32 qp_handle;
623} __attribute__((packed)) ;
624
625union c2wr_qp_create {
626 struct c2wr_qp_create_req req;
627 struct c2wr_qp_create_rep rep;
628} __attribute__((packed)) ;
629
630struct c2wr_qp_query_req {
631 struct c2wr_hdr hdr;
632 u32 rnic_handle;
633 u32 qp_handle;
634} __attribute__((packed)) ;
635
636struct c2wr_qp_query_rep {
637 struct c2wr_hdr hdr;
638 u64 user_context;
639 u32 rnic_handle;
640 u32 sq_depth;
641 u32 rq_depth;
642 u32 send_sgl_depth;
643 u32 rdma_write_sgl_depth;
644 u32 recv_sgl_depth;
645 u32 ord;
646 u32 ird;
647 u16 qp_state;
648 u16 flags; /* see c2wr_qp_flags_t */
649 u32 qp_id;
650 u32 local_addr;
651 u32 remote_addr;
652 u16 local_port;
653 u16 remote_port;
654 u32 terminate_msg_length; /* 0 if not present */
655 u8 data[0];
656 /* Terminate Message in-line here. */
657} __attribute__((packed)) ;
658
659union c2wr_qp_query {
660 struct c2wr_qp_query_req req;
661 struct c2wr_qp_query_rep rep;
662} __attribute__((packed)) ;
663
664struct c2wr_qp_modify_req {
665 struct c2wr_hdr hdr;
666 u64 stream_msg;
667 u32 stream_msg_length;
668 u32 rnic_handle;
669 u32 qp_handle;
670 __be32 next_qp_state;
671 __be32 ord;
672 __be32 ird;
673 __be32 sq_depth;
674 __be32 rq_depth;
675 u32 llp_ep_handle;
676} __attribute__((packed)) ;
677
678struct c2wr_qp_modify_rep {
679 struct c2wr_hdr hdr;
680 u32 ord;
681 u32 ird;
682 u32 sq_depth;
683 u32 rq_depth;
684 u32 sq_msg_size;
685 u32 sq_mq_index;
686 u32 sq_mq_start;
687 u32 rq_msg_size;
688 u32 rq_mq_index;
689 u32 rq_mq_start;
690} __attribute__((packed)) ;
691
692union c2wr_qp_modify {
693 struct c2wr_qp_modify_req req;
694 struct c2wr_qp_modify_rep rep;
695} __attribute__((packed)) ;
696
697struct c2wr_qp_destroy_req {
698 struct c2wr_hdr hdr;
699 u32 rnic_handle;
700 u32 qp_handle;
701} __attribute__((packed)) ;
702
703struct c2wr_qp_destroy_rep {
704 struct c2wr_hdr hdr;
705} __attribute__((packed)) ;
706
707union c2wr_qp_destroy {
708 struct c2wr_qp_destroy_req req;
709 struct c2wr_qp_destroy_rep rep;
710} __attribute__((packed)) ;
711
712/*
713 * The CCWR_QP_CONNECT msg is posted on the verbs request queue. It can
714 * only be posted when a QP is in IDLE state. After the connect request is
715 * submitted to the LLP, the adapter moves the QP to CONNECT_PENDING state.
716 * No synchronous reply from the adapter to this WR.  The results of the
717 * connection attempt are passed back in the async event
718 * CCAE_ACTIVE_CONNECT_RESULTS; see c2wr_ae_active_connect_results_t.
719 */
720struct c2wr_qp_connect_req {
721 struct c2wr_hdr hdr;
722 u32 rnic_handle;
723 u32 qp_handle;
724 __be32 remote_addr;
725 __be16 remote_port;
726 u16 pad;
727 __be32 private_data_length;
728 u8 private_data[0]; /* Private data in-line. */
729} __attribute__((packed)) ;
730
731struct c2wr_qp_connect {
732 struct c2wr_qp_connect_req req;
733 /* no synchronous reply. */
734} __attribute__((packed)) ;
735
736
737/*
738 *------------------------ MM ------------------------
739 */
740
741struct c2wr_nsmr_stag_alloc_req {
742 struct c2wr_hdr hdr;
743 u32 rnic_handle;
744 u32 pbl_depth;
745 u32 pd_id;
746 u32 flags;
747} __attribute__((packed)) ;
748
749struct c2wr_nsmr_stag_alloc_rep {
750 struct c2wr_hdr hdr;
751 u32 pbl_depth;
752 u32 stag_index;
753} __attribute__((packed)) ;
754
755union c2wr_nsmr_stag_alloc {
756 struct c2wr_nsmr_stag_alloc_req req;
757 struct c2wr_nsmr_stag_alloc_rep rep;
758} __attribute__((packed)) ;
759
760struct c2wr_nsmr_register_req {
761 struct c2wr_hdr hdr;
762 __be64 va;
763 u32 rnic_handle;
764 __be16 flags;
765 u8 stag_key;
766 u8 pad;
767 u32 pd_id;
768 __be32 pbl_depth;
769 __be32 pbe_size;
770 __be32 fbo;
771 __be32 length;
772 __be32 addrs_length;
773 /* array of paddrs (must be aligned on a 64bit boundary) */
774 __be64 paddrs[0];
775} __attribute__((packed)) ;
776
777struct c2wr_nsmr_register_rep {
778 struct c2wr_hdr hdr;
779 u32 pbl_depth;
780 __be32 stag_index;
781} __attribute__((packed)) ;
782
783union c2wr_nsmr_register {
784 struct c2wr_nsmr_register_req req;
785 struct c2wr_nsmr_register_rep rep;
786} __attribute__((packed)) ;
787
788struct c2wr_nsmr_pbl_req {
789 struct c2wr_hdr hdr;
790 u32 rnic_handle;
791 __be32 flags;
792 __be32 stag_index;
793 __be32 addrs_length;
794 /* array of paddrs (must be aligned on a 64bit boundary) */
795 __be64 paddrs[0];
796} __attribute__((packed)) ;
797
798struct c2wr_nsmr_pbl_rep {
799 struct c2wr_hdr hdr;
800} __attribute__((packed)) ;
801
802union c2wr_nsmr_pbl {
803 struct c2wr_nsmr_pbl_req req;
804 struct c2wr_nsmr_pbl_rep rep;
805} __attribute__((packed)) ;
806
807struct c2wr_mr_query_req {
808 struct c2wr_hdr hdr;
809 u32 rnic_handle;
810 u32 stag_index;
811} __attribute__((packed)) ;
812
813struct c2wr_mr_query_rep {
814 struct c2wr_hdr hdr;
815 u8 stag_key;
816 u8 pad[3];
817 u32 pd_id;
818 u32 flags;
819 u32 pbl_depth;
820} __attribute__((packed)) ;
821
822union c2wr_mr_query {
823 struct c2wr_mr_query_req req;
824 struct c2wr_mr_query_rep rep;
825} __attribute__((packed)) ;
826
827struct c2wr_mw_query_req {
828 struct c2wr_hdr hdr;
829 u32 rnic_handle;
830 u32 stag_index;
831} __attribute__((packed)) ;
832
833struct c2wr_mw_query_rep {
834 struct c2wr_hdr hdr;
835 u8 stag_key;
836 u8 pad[3];
837 u32 pd_id;
838 u32 flags;
839} __attribute__((packed)) ;
840
841union c2wr_mw_query {
842 struct c2wr_mw_query_req req;
843 struct c2wr_mw_query_rep rep;
844} __attribute__((packed)) ;
845
846
847struct c2wr_stag_dealloc_req {
848 struct c2wr_hdr hdr;
849 u32 rnic_handle;
850 __be32 stag_index;
851} __attribute__((packed)) ;
852
853struct c2wr_stag_dealloc_rep {
854 struct c2wr_hdr hdr;
855} __attribute__((packed)) ;
856
857union c2wr_stag_dealloc {
858 struct c2wr_stag_dealloc_req req;
859 struct c2wr_stag_dealloc_rep rep;
860} __attribute__((packed)) ;
861
862struct c2wr_nsmr_reregister_req {
863 struct c2wr_hdr hdr;
864 u64 va;
865 u32 rnic_handle;
866 u16 flags;
867 u8 stag_key;
868 u8 pad;
869 u32 stag_index;
870 u32 pd_id;
871 u32 pbl_depth;
872 u32 pbe_size;
873 u32 fbo;
874 u32 length;
875 u32 addrs_length;
876 u32 pad1;
877 /* array of paddrs (must be aligned on a 64bit boundary) */
878 u64 paddrs[0];
879} __attribute__((packed)) ;
880
881struct c2wr_nsmr_reregister_rep {
882 struct c2wr_hdr hdr;
883 u32 pbl_depth;
884 u32 stag_index;
885} __attribute__((packed)) ;
886
887union c2wr_nsmr_reregister {
888 struct c2wr_nsmr_reregister_req req;
889 struct c2wr_nsmr_reregister_rep rep;
890} __attribute__((packed)) ;
891
892struct c2wr_smr_register_req {
893 struct c2wr_hdr hdr;
894 u64 va;
895 u32 rnic_handle;
896 u16 flags;
897 u8 stag_key;
898 u8 pad;
899 u32 stag_index;
900 u32 pd_id;
901} __attribute__((packed)) ;
902
903struct c2wr_smr_register_rep {
904 struct c2wr_hdr hdr;
905 u32 stag_index;
906} __attribute__((packed)) ;
907
908union c2wr_smr_register {
909 struct c2wr_smr_register_req req;
910 struct c2wr_smr_register_rep rep;
911} __attribute__((packed)) ;
912
913struct c2wr_mw_alloc_req {
914 struct c2wr_hdr hdr;
915 u32 rnic_handle;
916 u32 pd_id;
917} __attribute__((packed)) ;
918
919struct c2wr_mw_alloc_rep {
920 struct c2wr_hdr hdr;
921 u32 stag_index;
922} __attribute__((packed)) ;
923
924union c2wr_mw_alloc {
925 struct c2wr_mw_alloc_req req;
926 struct c2wr_mw_alloc_rep rep;
927} __attribute__((packed)) ;
928
929/*
930 *------------------------ WRs -----------------------
931 */
932
933struct c2wr_user_hdr {
934 struct c2wr_hdr hdr; /* Has status and WR Type */
935} __attribute__((packed)) ;
936
937enum c2_qp_state {
938 C2_QP_STATE_IDLE = 0x01,
939 C2_QP_STATE_CONNECTING = 0x02,
940 C2_QP_STATE_RTS = 0x04,
941 C2_QP_STATE_CLOSING = 0x08,
942 C2_QP_STATE_TERMINATE = 0x10,
943 C2_QP_STATE_ERROR = 0x20,
944};
945
946/* Completion queue entry. */
947struct c2wr_ce {
948 struct c2wr_hdr hdr; /* Has status and WR Type */
949 u64 qp_user_context; /* c2_user_qp_t * */
950 u32 qp_state; /* Current QP State */
951 u32 handle; /* QPID or EP Handle */
952 __be32 bytes_rcvd; /* valid for RECV WCs */
953 u32 stag;
954} __attribute__((packed)) ;
955
956
957/*
958 * Flags used for all post-sq WRs. These must fit in the flags
959 * field of the struct c2wr_hdr (eight bits).
960 */
961enum {
962 SQ_SIGNALED = 0x01,
963 SQ_READ_FENCE = 0x02,
964 SQ_FENCE = 0x04,
965};
966
967/*
968 * Common fields for all post-sq WRs. Namely the standard header and a
969 * secondary header with fields common to all post-sq WRs.
970 */
971struct c2_sq_hdr {
972 struct c2wr_user_hdr user_hdr;
973} __attribute__((packed));
974
975/*
976 * Same as above but for post-rq WRs.
977 */
978struct c2_rq_hdr {
979 struct c2wr_user_hdr user_hdr;
980} __attribute__((packed));
981
982/*
983 * use the same struct for all sends.
984 */
985struct c2wr_send_req {
986 struct c2_sq_hdr sq_hdr;
987 __be32 sge_len;
988 __be32 remote_stag;
989 u8 data[0]; /* SGE array */
990} __attribute__((packed));
991
992union c2wr_send {
993 struct c2wr_send_req req;
994 struct c2wr_ce rep;
995} __attribute__((packed));
996
997struct c2wr_rdma_write_req {
998 struct c2_sq_hdr sq_hdr;
999 __be64 remote_to;
1000 __be32 remote_stag;
1001 __be32 sge_len;
1002 u8 data[0]; /* SGE array */
1003} __attribute__((packed));
1004
1005union c2wr_rdma_write {
1006 struct c2wr_rdma_write_req req;
1007 struct c2wr_ce rep;
1008} __attribute__((packed));
1009
1010struct c2wr_rdma_read_req {
1011 struct c2_sq_hdr sq_hdr;
1012 __be64 local_to;
1013 __be64 remote_to;
1014 __be32 local_stag;
1015 __be32 remote_stag;
1016 __be32 length;
1017} __attribute__((packed));
1018
1019union c2wr_rdma_read {
1020 struct c2wr_rdma_read_req req;
1021 struct c2wr_ce rep;
1022} __attribute__((packed));
1023
1024struct c2wr_mw_bind_req {
1025 struct c2_sq_hdr sq_hdr;
1026 u64 va;
1027 u8 stag_key;
1028 u8 pad[3];
1029 u32 mw_stag_index;
1030 u32 mr_stag_index;
1031 u32 length;
1032 u32 flags;
1033} __attribute__((packed));
1034
1035union c2wr_mw_bind {
1036 struct c2wr_mw_bind_req req;
1037 struct c2wr_ce rep;
1038} __attribute__((packed));
1039
1040struct c2wr_nsmr_fastreg_req {
1041 struct c2_sq_hdr sq_hdr;
1042 u64 va;
1043 u8 stag_key;
1044 u8 pad[3];
1045 u32 stag_index;
1046 u32 pbe_size;
1047 u32 fbo;
1048 u32 length;
1049 u32 addrs_length;
1050 /* array of paddrs (must be aligned on a 64bit boundary) */
1051 u64 paddrs[0];
1052} __attribute__((packed));
1053
1054union c2wr_nsmr_fastreg {
1055 struct c2wr_nsmr_fastreg_req req;
1056 struct c2wr_ce rep;
1057} __attribute__((packed));
1058
1059struct c2wr_stag_invalidate_req {
1060 struct c2_sq_hdr sq_hdr;
1061 u8 stag_key;
1062 u8 pad[3];
1063 u32 stag_index;
1064} __attribute__((packed));
1065
1066union c2wr_stag_invalidate {
1067 struct c2wr_stag_invalidate_req req;
1068 struct c2wr_ce rep;
1069} __attribute__((packed));
1070
1071union c2wr_sqwr {
1072 struct c2_sq_hdr sq_hdr;
1073 struct c2wr_send_req send;
1074 struct c2wr_send_req send_se;
1075 struct c2wr_send_req send_inv;
1076 struct c2wr_send_req send_se_inv;
1077 struct c2wr_rdma_write_req rdma_write;
1078 struct c2wr_rdma_read_req rdma_read;
1079 struct c2wr_mw_bind_req mw_bind;
1080 struct c2wr_nsmr_fastreg_req nsmr_fastreg;
1081 struct c2wr_stag_invalidate_req stag_inv;
1082} __attribute__((packed));
1083
1084
1085/*
1086 * RQ WRs
1087 */
1088struct c2wr_rqwr {
1089 struct c2_rq_hdr rq_hdr;
1090 u8 data[0]; /* array of SGEs */
1091} __attribute__((packed));
1092
1093union c2wr_recv {
1094 struct c2wr_rqwr req;
1095 struct c2wr_ce rep;
1096} __attribute__((packed));
1097
1098/*
1099 * All AEs start with this header. Most AEs only need to convey the
1100 * information in the header. Some, like LLP connection events, need
1101 * more info.  The union typedef c2wr_ae_t has all the possible AEs.
1102 *
1103 * hdr.context is the user_context from the rnic_open WR.  NULL if this
1104 * is not affiliated with an rnic.
1105 *
1106 * hdr.id is the AE identifier (e.g. CCAE_REMOTE_SHUTDOWN,
1107 * CCAE_LLP_CLOSE_COMPLETE)
1108 *
1109 * resource_type is one of: C2_RES_IND_QP, C2_RES_IND_CQ, C2_RES_IND_SRQ
1110 *
1111 * user_context is the context passed down when the host created the resource.
1112 */
1113struct c2wr_ae_hdr {
1114 struct c2wr_hdr hdr;
1115 u64 user_context; /* user context for this res. */
1116 __be32 resource_type; /* see enum c2_resource_indicator */
1117 __be32 resource; /* handle for resource */
1118 __be32 qp_state; /* current QP State */
1119} __attribute__((packed));
1120
1121/*
1122 * After submitting the CCAE_ACTIVE_CONNECT_RESULTS message on the AEQ,
1123 * the adapter moves the QP into RTS state
1124 */
1125struct c2wr_ae_active_connect_results {
1126 struct c2wr_ae_hdr ae_hdr;
1127 __be32 laddr;
1128 __be32 raddr;
1129 __be16 lport;
1130 __be16 rport;
1131 __be32 private_data_length;
1132 u8 private_data[0]; /* data is in-line in the msg. */
1133} __attribute__((packed));
1134
1135/*
1136 * When connections are established by the stack (and the private data
1137 * MPA frame is received), the adapter will generate an event to the host.
1138 * The details of the connection, any private data, and the new connection
1139 * request handle are passed up via the CCAE_CONNECTION_REQUEST msg on the
1140 * AE queue:
1141 */
1142struct c2wr_ae_connection_request {
1143 struct c2wr_ae_hdr ae_hdr;
1144 u32 cr_handle; /* connreq handle (sock ptr) */
1145 __be32 laddr;
1146 __be32 raddr;
1147 __be16 lport;
1148 __be16 rport;
1149 __be32 private_data_length;
1150 u8 private_data[0]; /* data is in-line in the msg. */
1151} __attribute__((packed));
1152
1153union c2wr_ae {
1154 struct c2wr_ae_hdr ae_generic;
1155 struct c2wr_ae_active_connect_results ae_active_connect_results;
1156 struct c2wr_ae_connection_request ae_connection_request;
1157} __attribute__((packed));
1158
1159struct c2wr_init_req {
1160 struct c2wr_hdr hdr;
1161 __be64 hint_count;
1162 __be64 q0_host_shared;
1163 __be64 q1_host_shared;
1164 __be64 q1_host_msg_pool;
1165 __be64 q2_host_shared;
1166 __be64 q2_host_msg_pool;
1167} __attribute__((packed));
1168
1169struct c2wr_init_rep {
1170 struct c2wr_hdr hdr;
1171} __attribute__((packed));
1172
1173union c2wr_init {
1174 struct c2wr_init_req req;
1175 struct c2wr_init_rep rep;
1176} __attribute__((packed));
1177
1178/*
1179 * For upgrading flash.
1180 */
1181
1182struct c2wr_flash_init_req {
1183 struct c2wr_hdr hdr;
1184 u32 rnic_handle;
1185} __attribute__((packed));
1186
1187struct c2wr_flash_init_rep {
1188 struct c2wr_hdr hdr;
1189 u32 adapter_flash_buf_offset;
1190 u32 adapter_flash_len;
1191} __attribute__((packed));
1192
1193union c2wr_flash_init {
1194 struct c2wr_flash_init_req req;
1195 struct c2wr_flash_init_rep rep;
1196} __attribute__((packed));
1197
1198struct c2wr_flash_req {
1199 struct c2wr_hdr hdr;
1200 u32 rnic_handle;
1201 u32 len;
1202} __attribute__((packed));
1203
1204struct c2wr_flash_rep {
1205 struct c2wr_hdr hdr;
1206 u32 status;
1207} __attribute__((packed));
1208
1209union c2wr_flash {
1210 struct c2wr_flash_req req;
1211 struct c2wr_flash_rep rep;
1212} __attribute__((packed));
1213
1214struct c2wr_buf_alloc_req {
1215 struct c2wr_hdr hdr;
1216 u32 rnic_handle;
1217 u32 size;
1218} __attribute__((packed));
1219
1220struct c2wr_buf_alloc_rep {
1221 struct c2wr_hdr hdr;
1222 u32 offset; /* 0 if mem not available */
1223 u32 size; /* 0 if mem not available */
1224} __attribute__((packed));
1225
1226union c2wr_buf_alloc {
1227 struct c2wr_buf_alloc_req req;
1228 struct c2wr_buf_alloc_rep rep;
1229} __attribute__((packed));
1230
1231struct c2wr_buf_free_req {
1232 struct c2wr_hdr hdr;
1233 u32 rnic_handle;
1234 u32 offset; /* Must match value from alloc */
1235 u32 size; /* Must match value from alloc */
1236} __attribute__((packed));
1237
1238struct c2wr_buf_free_rep {
1239 struct c2wr_hdr hdr;
1240} __attribute__((packed));
1241
1242union c2wr_buf_free {
1243 struct c2wr_buf_free_req req;
1244 struct c2wr_ce rep;
1245} __attribute__((packed));
1246
1247struct c2wr_flash_write_req {
1248 struct c2wr_hdr hdr;
1249 u32 rnic_handle;
1250 u32 offset;
1251 u32 size;
1252 u32 type;
1253 u32 flags;
1254} __attribute__((packed));
1255
1256struct c2wr_flash_write_rep {
1257 struct c2wr_hdr hdr;
1258 u32 status;
1259} __attribute__((packed));
1260
1261union c2wr_flash_write {
1262 struct c2wr_flash_write_req req;
1263 struct c2wr_flash_write_rep rep;
1264} __attribute__((packed));
1265
1266/*
1267 * Messages for LLP connection setup.
1268 */
1269
1270/*
1271 * Listen Request. This allocates a listening endpoint to allow passive
1272 * connection setup. Newly established LLP connections are passed up
1273 * via an AE. See c2wr_ae_connection_request_t
1274 */
1275struct c2wr_ep_listen_create_req {
1276 struct c2wr_hdr hdr;
1277 u64 user_context; /* returned in AEs. */
1278 u32 rnic_handle;
1279 __be32 local_addr; /* local addr, or 0 */
1280 __be16 local_port; /* 0 means "pick one" */
1281 u16 pad;
1282 __be32 backlog; /* traditional TCP listen backlog */
1283} __attribute__((packed));
1284
1285struct c2wr_ep_listen_create_rep {
1286 struct c2wr_hdr hdr;
1287 u32 ep_handle; /* handle to new listening ep */
1288 u16 local_port; /* resulting port... */
1289 u16 pad;
1290} __attribute__((packed));
1291
1292union c2wr_ep_listen_create {
1293 struct c2wr_ep_listen_create_req req;
1294 struct c2wr_ep_listen_create_rep rep;
1295} __attribute__((packed));
1296
1297struct c2wr_ep_listen_destroy_req {
1298 struct c2wr_hdr hdr;
1299 u32 rnic_handle;
1300 u32 ep_handle;
1301} __attribute__((packed));
1302
1303struct c2wr_ep_listen_destroy_rep {
1304 struct c2wr_hdr hdr;
1305} __attribute__((packed));
1306
1307union c2wr_ep_listen_destroy {
1308 struct c2wr_ep_listen_destroy_req req;
1309 struct c2wr_ep_listen_destroy_rep rep;
1310} __attribute__((packed));
1311
1312struct c2wr_ep_query_req {
1313 struct c2wr_hdr hdr;
1314 u32 rnic_handle;
1315 u32 ep_handle;
1316} __attribute__((packed));
1317
1318struct c2wr_ep_query_rep {
1319 struct c2wr_hdr hdr;
1320 u32 rnic_handle;
1321 u32 local_addr;
1322 u32 remote_addr;
1323 u16 local_port;
1324 u16 remote_port;
1325} __attribute__((packed));
1326
1327union c2wr_ep_query {
1328 struct c2wr_ep_query_req req;
1329 struct c2wr_ep_query_rep rep;
1330} __attribute__((packed));
1331
1332
1333/*
1334 * The host passes this down to indicate acceptance of a pending iWARP
1335 * connection. The cr_handle was obtained from the CONNECTION_REQUEST
1336 * AE passed up by the adapter. See c2wr_ae_connection_request_t.
1337 */
1338struct c2wr_cr_accept_req {
1339 struct c2wr_hdr hdr;
1340 u32 rnic_handle;
1341 u32 qp_handle; /* QP to bind to this LLP conn */
1342 u32 ep_handle; /* LLP handle to accept */
1343 __be32 private_data_length;
1344 u8 private_data[0]; /* data in-line in msg. */
1345} __attribute__((packed));
1346
1347/*
1348 * adapter sends reply when private data is successfully submitted to
1349 * the LLP.
1350 */
1351struct c2wr_cr_accept_rep {
1352 struct c2wr_hdr hdr;
1353} __attribute__((packed));
1354
1355union c2wr_cr_accept {
1356 struct c2wr_cr_accept_req req;
1357 struct c2wr_cr_accept_rep rep;
1358} __attribute__((packed));
1359
1360/*
1361 * The host sends this down if a given iWARP connection request was
1362 * rejected by the consumer. The cr_handle was obtained from a
1363 * previous c2wr_ae_connection_request_t AE sent by the adapter.
1364 */
1365struct c2wr_cr_reject_req {
1366 struct c2wr_hdr hdr;
1367 u32 rnic_handle;
1368 u32 ep_handle; /* LLP handle to reject */
1369} __attribute__((packed));
1370
1371/*
1372 * Dunno if this is needed, but we'll add it for now. The adapter will
1373 * send the reject_reply after the LLP endpoint has been destroyed.
1374 */
1375struct c2wr_cr_reject_rep {
1376 struct c2wr_hdr hdr;
1377} __attribute__((packed));
1378
1379union c2wr_cr_reject {
1380 struct c2wr_cr_reject_req req;
1381 struct c2wr_cr_reject_rep rep;
1382} __attribute__((packed));
1383
1384/*
1385 * console command. Used to implement a debug console over the verbs
1386 * request and reply queues.
1387 */
1388
1389/*
1390 * Console request message. It contains:
1391 * - message hdr with id = CCWR_CONSOLE
1392 * - the physaddr/len of host memory to be used for the reply.
1393 * - the command string, e.g. "netstat -s" or "zoneinfo"
1394 */
1395struct c2wr_console_req {
1396 struct c2wr_hdr hdr; /* id = CCWR_CONSOLE */
1397 u64 reply_buf; /* pinned host buf for reply */
1398 u32 reply_buf_len; /* length of reply buffer */
1399 u8 command[0]; /* NUL terminated ascii string */
1400 /* containing the command req */
1401} __attribute__((packed));
1402
1403/*
1404 * flags used in the console reply.
1405 */
1406enum c2_console_flags {
1407 CONS_REPLY_TRUNCATED = 0x00000001 /* reply was truncated */
1408} __attribute__((packed));
1409
1410/*
1411 * Console reply message.
1412 * hdr.result contains the c2_status_t error if the reply was _not_ generated,
1413 * or C2_OK if the reply was generated.
1414 */
1415struct c2wr_console_rep {
1416 struct c2wr_hdr hdr; /* id = CCWR_CONSOLE */
1417 u32 flags;
1418} __attribute__((packed));
1419
1420union c2wr_console {
1421 struct c2wr_console_req req;
1422 struct c2wr_console_rep rep;
1423} __attribute__((packed));
1424
1425
1426/*
1427 * Giant union with all WRs. Makes life easier...
1428 */
1429union c2wr {
1430 struct c2wr_hdr hdr;
1431 struct c2wr_user_hdr user_hdr;
1432 union c2wr_rnic_open rnic_open;
1433 union c2wr_rnic_query rnic_query;
1434 union c2wr_rnic_getconfig rnic_getconfig;
1435 union c2wr_rnic_setconfig rnic_setconfig;
1436 union c2wr_rnic_close rnic_close;
1437 union c2wr_cq_create cq_create;
1438 union c2wr_cq_modify cq_modify;
1439 union c2wr_cq_destroy cq_destroy;
1440 union c2wr_pd_alloc pd_alloc;
1441 union c2wr_pd_dealloc pd_dealloc;
1442 union c2wr_srq_create srq_create;
1443 union c2wr_srq_destroy srq_destroy;
1444 union c2wr_qp_create qp_create;
1445 union c2wr_qp_query qp_query;
1446 union c2wr_qp_modify qp_modify;
1447 union c2wr_qp_destroy qp_destroy;
1448 struct c2wr_qp_connect qp_connect;
1449 union c2wr_nsmr_stag_alloc nsmr_stag_alloc;
1450 union c2wr_nsmr_register nsmr_register;
1451 union c2wr_nsmr_pbl nsmr_pbl;
1452 union c2wr_mr_query mr_query;
1453 union c2wr_mw_query mw_query;
1454 union c2wr_stag_dealloc stag_dealloc;
1455 union c2wr_sqwr sqwr;
1456 struct c2wr_rqwr rqwr;
1457 struct c2wr_ce ce;
1458 union c2wr_ae ae;
1459 union c2wr_init init;
1460 union c2wr_ep_listen_create ep_listen_create;
1461 union c2wr_ep_listen_destroy ep_listen_destroy;
1462 union c2wr_cr_accept cr_accept;
1463 union c2wr_cr_reject cr_reject;
1464 union c2wr_console console;
1465 union c2wr_flash_init flash_init;
1466 union c2wr_flash flash;
1467 union c2wr_buf_alloc buf_alloc;
1468 union c2wr_buf_free buf_free;
1469 union c2wr_flash_write flash_write;
1470} __attribute__((packed));
1471
1472
1473/*
1474 * Accessors for the wr fields that are packed together tightly to
1475 * reduce the wr message size. The wr arguments are void* so that
1476 * either a union c2wr *, a struct c2wr_hdr *, or a pointer to any of the
1477 * types in the union c2wr can be passed in.
1478 */
1479static __inline__ u8 c2_wr_get_id(void *wr)
1480{
1481 return ((struct c2wr_hdr *) wr)->id;
1482}
1483static __inline__ void c2_wr_set_id(void *wr, u8 id)
1484{
1485 ((struct c2wr_hdr *) wr)->id = id;
1486}
1487static __inline__ u8 c2_wr_get_result(void *wr)
1488{
1489 return ((struct c2wr_hdr *) wr)->result;
1490}
1491static __inline__ void c2_wr_set_result(void *wr, u8 result)
1492{
1493 ((struct c2wr_hdr *) wr)->result = result;
1494}
1495static __inline__ u8 c2_wr_get_flags(void *wr)
1496{
1497 return ((struct c2wr_hdr *) wr)->flags;
1498}
1499static __inline__ void c2_wr_set_flags(void *wr, u8 flags)
1500{
1501 ((struct c2wr_hdr *) wr)->flags = flags;
1502}
1503static __inline__ u8 c2_wr_get_sge_count(void *wr)
1504{
1505 return ((struct c2wr_hdr *) wr)->sge_count;
1506}
1507static __inline__ void c2_wr_set_sge_count(void *wr, u8 sge_count)
1508{
1509 ((struct c2wr_hdr *) wr)->sge_count = sge_count;
1510}
1511static __inline__ __be32 c2_wr_get_wqe_count(void *wr)
1512{
1513 return ((struct c2wr_hdr *) wr)->wqe_count;
1514}
1515static __inline__ void c2_wr_set_wqe_count(void *wr, u32 wqe_count)
1516{
1517 ((struct c2wr_hdr *) wr)->wqe_count = wqe_count;
1518}
1519
1520#endif /* _C2_WR_H_ */
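
A short illustrative helper (hypothetical, not from the original driver) showing
why the accessors above take void *: the same calls work on any WR flavour,
because every message begins with struct c2wr_hdr.

static inline void example_init_send_hdr(union c2wr_sqwr *sqwr, u8 sge_count)
{
	c2_wr_set_id(sqwr, CCWR_SEND);		/* any WR pointer is accepted */
	c2_wr_set_flags(sqwr, SQ_SIGNALED);	/* ask for a completion entry */
	c2_wr_set_sge_count(sqwr, sge_count);
}
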
diff --git a/drivers/staging/rdma/ehca/Kconfig b/drivers/staging/rdma/ehca/Kconfig
deleted file mode 100644
index 3fadd2ad6426..000000000000
--- a/drivers/staging/rdma/ehca/Kconfig
+++ /dev/null
@@ -1,10 +0,0 @@
1config INFINIBAND_EHCA
2 tristate "eHCA support"
3 depends on IBMEBUS
4 ---help---
5 This driver supports the deprecated IBM pSeries eHCA InfiniBand
6 adapter.
7
8 To compile the driver as a module, choose M here. The module
9 will be called ib_ehca.
10
diff --git a/drivers/staging/rdma/ehca/Makefile b/drivers/staging/rdma/ehca/Makefile
deleted file mode 100644
index 74d284e46a40..000000000000
--- a/drivers/staging/rdma/ehca/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
1# Authors: Heiko J Schick <schickhj@de.ibm.com>
2# Christoph Raisch <raisch@de.ibm.com>
3# Joachim Fenkes <fenkes@de.ibm.com>
4#
5# Copyright (c) 2005 IBM Corporation
6#
7# All rights reserved.
8#
9# This source code is distributed under a dual license of GPL v2.0 and OpenIB BSD.
10
11obj-$(CONFIG_INFINIBAND_EHCA) += ib_ehca.o
12
13ib_ehca-objs = ehca_main.o ehca_hca.o ehca_mcast.o ehca_pd.o ehca_av.o ehca_eq.o \
14 ehca_cq.o ehca_qp.o ehca_sqp.o ehca_mrmw.o ehca_reqs.o ehca_irq.o \
15 ehca_uverbs.o ipz_pt_fn.o hcp_if.o hcp_phyp.o
16
diff --git a/drivers/staging/rdma/ehca/TODO b/drivers/staging/rdma/ehca/TODO
deleted file mode 100644
index 199a4a600142..000000000000
--- a/drivers/staging/rdma/ehca/TODO
+++ /dev/null
@@ -1,4 +0,0 @@
19/2015
2
3The ehca driver has been deprecated and moved to drivers/staging/rdma.
4It will be removed in the 4.6 merge window.
diff --git a/drivers/staging/rdma/ehca/ehca_av.c b/drivers/staging/rdma/ehca/ehca_av.c
deleted file mode 100644
index 94e088c2d989..000000000000
--- a/drivers/staging/rdma/ehca/ehca_av.c
+++ /dev/null
@@ -1,279 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * address vector functions
5 *
6 * Authors: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
7 * Khadija Souissi <souissik@de.ibm.com>
8 * Reinhard Ernst <rernst@de.ibm.com>
9 * Christoph Raisch <raisch@de.ibm.com>
10 *
11 * Copyright (c) 2005 IBM Corporation
12 *
13 * All rights reserved.
14 *
15 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
16 * BSD.
17 *
18 * OpenIB BSD License
19 *
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions are met:
22 *
23 * Redistributions of source code must retain the above copyright notice, this
24 * list of conditions and the following disclaimer.
25 *
26 * Redistributions in binary form must reproduce the above copyright notice,
27 * this list of conditions and the following disclaimer in the documentation
28 * and/or other materials
29 * provided with the distribution.
30 *
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
32 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
33 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
34 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
35 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
36 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
37 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
38 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
39 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
40 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGE.
42 */
43
44#include <linux/slab.h>
45
46#include "ehca_tools.h"
47#include "ehca_iverbs.h"
48#include "hcp_if.h"
49
50static struct kmem_cache *av_cache;
51
52int ehca_calc_ipd(struct ehca_shca *shca, int port,
53 enum ib_rate path_rate, u32 *ipd)
54{
55 int path = ib_rate_to_mult(path_rate);
56 int link, ret;
57 struct ib_port_attr pa;
58
59 if (path_rate == IB_RATE_PORT_CURRENT) {
60 *ipd = 0;
61 return 0;
62 }
63
64 if (unlikely(path < 0)) {
65 ehca_err(&shca->ib_device, "Invalid static rate! path_rate=%x",
66 path_rate);
67 return -EINVAL;
68 }
69
70 ret = ehca_query_port(&shca->ib_device, port, &pa);
71 if (unlikely(ret < 0)) {
72 ehca_err(&shca->ib_device, "Failed to query port ret=%i", ret);
73 return ret;
74 }
75
76 link = ib_width_enum_to_int(pa.active_width) * pa.active_speed;
77
78 if (path >= link)
79 /* no need to throttle if path faster than link */
80 *ipd = 0;
81 else
82 /* IPD = round((link / path) - 1) */
83 *ipd = ((link + (path >> 1)) / path) - 1;
84
85 return 0;
86}
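
A worked example of the integer rounding above, with illustrative numbers
(standard IB width/rate multipliers assumed):

/*
 * Example: a 4X SDR port gives link = ib_width_enum_to_int(IB_WIDTH_4X) *
 * active_speed = 4 * 1 = 4.  Requesting IB_RATE_2_5_GBPS gives path =
 * ib_rate_to_mult(IB_RATE_2_5_GBPS) = 1, so
 *	ipd = ((4 + (1 >> 1)) / 1) - 1 = 3
 * i.e. three inter-packet delay units.  Requesting IB_RATE_10_GBPS (path = 4)
 * on the same link hits the path >= link case above, so ipd stays 0.
 */
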
87
88struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
89{
90 int ret;
91 struct ehca_av *av;
92 struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
93 ib_device);
94
95 av = kmem_cache_alloc(av_cache, GFP_KERNEL);
96 if (!av) {
97 ehca_err(pd->device, "Out of memory pd=%p ah_attr=%p",
98 pd, ah_attr);
99 return ERR_PTR(-ENOMEM);
100 }
101
102 av->av.sl = ah_attr->sl;
103 av->av.dlid = ah_attr->dlid;
104 av->av.slid_path_bits = ah_attr->src_path_bits;
105
106 if (ehca_static_rate < 0) {
107 u32 ipd;
108
109 if (ehca_calc_ipd(shca, ah_attr->port_num,
110 ah_attr->static_rate, &ipd)) {
111 ret = -EINVAL;
112 goto create_ah_exit1;
113 }
114 av->av.ipd = ipd;
115 } else
116 av->av.ipd = ehca_static_rate;
117
118 av->av.lnh = ah_attr->ah_flags;
119 av->av.grh.word_0 = EHCA_BMASK_SET(GRH_IPVERSION_MASK, 6);
120 av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_TCLASS_MASK,
121 ah_attr->grh.traffic_class);
122 av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_FLOWLABEL_MASK,
123 ah_attr->grh.flow_label);
124 av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_HOPLIMIT_MASK,
125 ah_attr->grh.hop_limit);
126 av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_NEXTHEADER_MASK, 0x1B);
127 /* set sgid in grh.word_1 */
128 if (ah_attr->ah_flags & IB_AH_GRH) {
129 int rc;
130 struct ib_port_attr port_attr;
131 union ib_gid gid;
132
133 memset(&port_attr, 0, sizeof(port_attr));
134 rc = ehca_query_port(pd->device, ah_attr->port_num,
135 &port_attr);
136 if (rc) { /* invalid port number */
137 ret = -EINVAL;
138 ehca_err(pd->device, "Invalid port number "
139 "ehca_query_port() returned %x "
140 "pd=%p ah_attr=%p", rc, pd, ah_attr);
141 goto create_ah_exit1;
142 }
143 memset(&gid, 0, sizeof(gid));
144 rc = ehca_query_gid(pd->device,
145 ah_attr->port_num,
146 ah_attr->grh.sgid_index, &gid);
147 if (rc) {
148 ret = -EINVAL;
149 ehca_err(pd->device, "Failed to retrieve sgid "
150 "ehca_query_gid() returned %x "
151 "pd=%p ah_attr=%p", rc, pd, ah_attr);
152 goto create_ah_exit1;
153 }
154 memcpy(&av->av.grh.word_1, &gid, sizeof(gid));
155 }
156 av->av.pmtu = shca->max_mtu;
157
158 /* dgid comes in grh.word_3 */
159 memcpy(&av->av.grh.word_3, &ah_attr->grh.dgid,
160 sizeof(ah_attr->grh.dgid));
161
162 return &av->ib_ah;
163
164create_ah_exit1:
165 kmem_cache_free(av_cache, av);
166
167 return ERR_PTR(ret);
168}
169
170int ehca_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
171{
172 struct ehca_av *av;
173 struct ehca_ud_av new_ehca_av;
174 struct ehca_shca *shca = container_of(ah->pd->device, struct ehca_shca,
175 ib_device);
176
177 memset(&new_ehca_av, 0, sizeof(new_ehca_av));
178 new_ehca_av.sl = ah_attr->sl;
179 new_ehca_av.dlid = ah_attr->dlid;
180 new_ehca_av.slid_path_bits = ah_attr->src_path_bits;
181 new_ehca_av.ipd = ah_attr->static_rate;
182 new_ehca_av.lnh = EHCA_BMASK_SET(GRH_FLAG_MASK,
183 (ah_attr->ah_flags & IB_AH_GRH) > 0);
184 new_ehca_av.grh.word_0 = EHCA_BMASK_SET(GRH_TCLASS_MASK,
185 ah_attr->grh.traffic_class);
186 new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_FLOWLABEL_MASK,
187 ah_attr->grh.flow_label);
188 new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_HOPLIMIT_MASK,
189 ah_attr->grh.hop_limit);
190 new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_NEXTHEADER_MASK, 0x1b);
191
192 /* set sgid in grh.word_1 */
193 if (ah_attr->ah_flags & IB_AH_GRH) {
194 int rc;
195 struct ib_port_attr port_attr;
196 union ib_gid gid;
197
198 memset(&port_attr, 0, sizeof(port_attr));
199 rc = ehca_query_port(ah->device, ah_attr->port_num,
200 &port_attr);
201 if (rc) { /* invalid port number */
202 ehca_err(ah->device, "Invalid port number "
203 "ehca_query_port() returned %x "
204 "ah=%p ah_attr=%p port_num=%x",
205 rc, ah, ah_attr, ah_attr->port_num);
206 return -EINVAL;
207 }
208 memset(&gid, 0, sizeof(gid));
209 rc = ehca_query_gid(ah->device,
210 ah_attr->port_num,
211 ah_attr->grh.sgid_index, &gid);
212 if (rc) {
213 ehca_err(ah->device, "Failed to retrieve sgid "
214 "ehca_query_gid() returned %x "
215 "ah=%p ah_attr=%p port_num=%x "
216 "sgid_index=%x",
217 rc, ah, ah_attr, ah_attr->port_num,
218 ah_attr->grh.sgid_index);
219 return -EINVAL;
220 }
221 memcpy(&new_ehca_av.grh.word_1, &gid, sizeof(gid));
222 }
223
224 new_ehca_av.pmtu = shca->max_mtu;
225
226 memcpy(&new_ehca_av.grh.word_3, &ah_attr->grh.dgid,
227 sizeof(ah_attr->grh.dgid));
228
229 av = container_of(ah, struct ehca_av, ib_ah);
230 av->av = new_ehca_av;
231
232 return 0;
233}
234
235int ehca_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
236{
237 struct ehca_av *av = container_of(ah, struct ehca_av, ib_ah);
238
239 memcpy(&ah_attr->grh.dgid, &av->av.grh.word_3,
240 sizeof(ah_attr->grh.dgid));
241 ah_attr->sl = av->av.sl;
242
243 ah_attr->dlid = av->av.dlid;
244
245 ah_attr->src_path_bits = av->av.slid_path_bits;
246 ah_attr->static_rate = av->av.ipd;
247 ah_attr->ah_flags = EHCA_BMASK_GET(GRH_FLAG_MASK, av->av.lnh);
248 ah_attr->grh.traffic_class = EHCA_BMASK_GET(GRH_TCLASS_MASK,
249 av->av.grh.word_0);
250 ah_attr->grh.hop_limit = EHCA_BMASK_GET(GRH_HOPLIMIT_MASK,
251 av->av.grh.word_0);
252 ah_attr->grh.flow_label = EHCA_BMASK_GET(GRH_FLOWLABEL_MASK,
253 av->av.grh.word_0);
254
255 return 0;
256}
257
258int ehca_destroy_ah(struct ib_ah *ah)
259{
260 kmem_cache_free(av_cache, container_of(ah, struct ehca_av, ib_ah));
261
262 return 0;
263}
264
265int ehca_init_av_cache(void)
266{
267 av_cache = kmem_cache_create("ehca_cache_av",
268 sizeof(struct ehca_av), 0,
269 SLAB_HWCACHE_ALIGN,
270 NULL);
271 if (!av_cache)
272 return -ENOMEM;
273 return 0;
274}
275
276void ehca_cleanup_av_cache(void)
277{
278 kmem_cache_destroy(av_cache);
279}
diff --git a/drivers/staging/rdma/ehca/ehca_classes.h b/drivers/staging/rdma/ehca/ehca_classes.h
deleted file mode 100644
index e8c3387d7aaa..000000000000
--- a/drivers/staging/rdma/ehca/ehca_classes.h
+++ /dev/null
@@ -1,481 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * Struct definition for eHCA internal structures
5 *
6 * Authors: Heiko J Schick <schickhj@de.ibm.com>
7 * Christoph Raisch <raisch@de.ibm.com>
8 * Joachim Fenkes <fenkes@de.ibm.com>
9 *
10 * Copyright (c) 2005 IBM Corporation
11 *
12 * All rights reserved.
13 *
14 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
15 * BSD.
16 *
17 * OpenIB BSD License
18 *
19 * Redistribution and use in source and binary forms, with or without
20 * modification, are permitted provided that the following conditions are met:
21 *
22 * Redistributions of source code must retain the above copyright notice, this
23 * list of conditions and the following disclaimer.
24 *
25 * Redistributions in binary form must reproduce the above copyright notice,
26 * this list of conditions and the following disclaimer in the documentation
27 * and/or other materials
28 * provided with the distribution.
29 *
30 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
31 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
32 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
33 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
34 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
35 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
36 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
37 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
38 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
39 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
40 * POSSIBILITY OF SUCH DAMAGE.
41 */
42
43#ifndef __EHCA_CLASSES_H__
44#define __EHCA_CLASSES_H__
45
46struct ehca_module;
47struct ehca_qp;
48struct ehca_cq;
49struct ehca_eq;
50struct ehca_mr;
51struct ehca_mw;
52struct ehca_pd;
53struct ehca_av;
54
55#include <linux/wait.h>
56#include <linux/mutex.h>
57
58#include <rdma/ib_verbs.h>
59#include <rdma/ib_user_verbs.h>
60
61#ifdef CONFIG_PPC64
62#include "ehca_classes_pSeries.h"
63#endif
64#include "ipz_pt_fn.h"
65#include "ehca_qes.h"
66#include "ehca_irq.h"
67
68#define EHCA_EQE_CACHE_SIZE 20
69#define EHCA_MAX_NUM_QUEUES 0xffff
70
71struct ehca_eqe_cache_entry {
72 struct ehca_eqe *eqe;
73 struct ehca_cq *cq;
74};
75
76struct ehca_eq {
77 u32 length;
78 struct ipz_queue ipz_queue;
79 struct ipz_eq_handle ipz_eq_handle;
80 struct work_struct work;
81 struct h_galpas galpas;
82 int is_initialized;
83 struct ehca_pfeq pf;
84 spinlock_t spinlock;
85 struct tasklet_struct interrupt_task;
86 u32 ist;
87 spinlock_t irq_spinlock;
88 struct ehca_eqe_cache_entry eqe_cache[EHCA_EQE_CACHE_SIZE];
89};
90
91struct ehca_sma_attr {
92 u16 lid, lmc, sm_sl, sm_lid;
93 u16 pkey_tbl_len, pkeys[16];
94};
95
96struct ehca_sport {
97 struct ib_cq *ibcq_aqp1;
98 struct ib_qp *ibqp_sqp[2];
99 /* lock to serialize modify_qp() calls for sqp in normal
100 * and irq path (when event PORT_ACTIVE is received first time)
101 */
102 spinlock_t mod_sqp_lock;
103 enum ib_port_state port_state;
104 struct ehca_sma_attr saved_attr;
105 u32 pma_qp_nr;
106};
107
108#define HCA_CAP_MR_PGSIZE_4K 0x80000000
109#define HCA_CAP_MR_PGSIZE_64K 0x40000000
110#define HCA_CAP_MR_PGSIZE_1M 0x20000000
111#define HCA_CAP_MR_PGSIZE_16M 0x10000000
112
113struct ehca_shca {
114 struct ib_device ib_device;
115 struct platform_device *ofdev;
116 u8 num_ports;
117 int hw_level;
118 struct list_head shca_list;
119 struct ipz_adapter_handle ipz_hca_handle;
120 struct ehca_sport sport[2];
121 struct ehca_eq eq;
122 struct ehca_eq neq;
123 struct ehca_mr *maxmr;
124 struct ehca_pd *pd;
125 struct h_galpas galpas;
126 struct mutex modify_mutex;
127 u64 hca_cap;
128 /* MR pgsize: bit 0-3 means 4K, 64K, 1M, 16M respectively */
129 u32 hca_cap_mr_pgsize;
130 int max_mtu;
131 int max_num_qps;
132 int max_num_cqs;
133 atomic_t num_cqs;
134 atomic_t num_qps;
135};
136
137struct ehca_pd {
138 struct ib_pd ib_pd;
139 struct ipz_pd fw_pd;
140 /* small queue mgmt */
141 struct mutex lock;
142 struct list_head free[2];
143 struct list_head full[2];
144};
145
146enum ehca_ext_qp_type {
147 EQPT_NORMAL = 0,
148 EQPT_LLQP = 1,
149 EQPT_SRQBASE = 2,
150 EQPT_SRQ = 3,
151};
152
153/* struct to cache modify_qp()'s parms for GSI/SMI qp */
154struct ehca_mod_qp_parm {
155 int mask;
156 struct ib_qp_attr attr;
157};
158
159#define EHCA_MOD_QP_PARM_MAX 4
160
161#define QMAP_IDX_MASK 0xFFFFULL
162
163/* struct for tracking if cqes have been reported to the application */
164struct ehca_qmap_entry {
165 u16 app_wr_id;
166 u8 reported;
167 u8 cqe_req;
168};
169
170struct ehca_queue_map {
171 struct ehca_qmap_entry *map;
172 unsigned int entries;
173 unsigned int tail;
174 unsigned int left_to_poll;
175 unsigned int next_wqe_idx; /* Idx to first wqe to be flushed */
176};
177
178/* function to calculate the next index for the qmap */
179static inline unsigned int next_index(unsigned int cur_index, unsigned int limit)
180{
181 unsigned int temp = cur_index + 1;
182 return (temp == limit) ? 0 : temp;
183}
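
next_index() turns the qmap into a ring: the index advances by one and wraps back to zero once it reaches the queue limit, so no modulo is needed on the hot path. A minimal sketch of the wrap-around behaviour (the helper name and the limit of 4 are hypothetical, shown only for illustration):

/* Illustrative only: stepping a 4-entry qmap yields 1, 2, 3, 0, 1, 2;
 * the value 'limit' itself is never returned. */
static inline unsigned int qmap_walk_example(void)
{
	unsigned int idx = 0;
	unsigned int i;

	for (i = 0; i < 6; i++)
		idx = next_index(idx, 4);	/* wraps from 3 back to 0 */

	return idx;	/* 2 after six steps */
}
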
184
185struct ehca_qp {
186 union {
187 struct ib_qp ib_qp;
188 struct ib_srq ib_srq;
189 };
190 u32 qp_type;
191 enum ehca_ext_qp_type ext_type;
192 enum ib_qp_state state;
193 struct ipz_queue ipz_squeue;
194 struct ehca_queue_map sq_map;
195 struct ipz_queue ipz_rqueue;
196 struct ehca_queue_map rq_map;
197 struct h_galpas galpas;
198 u32 qkey;
199 u32 real_qp_num;
200 u32 token;
201 spinlock_t spinlock_s;
202 spinlock_t spinlock_r;
203 u32 sq_max_inline_data_size;
204 struct ipz_qp_handle ipz_qp_handle;
205 struct ehca_pfqp pf;
206 struct ib_qp_init_attr init_attr;
207 struct ehca_cq *send_cq;
208 struct ehca_cq *recv_cq;
209 unsigned int sqerr_purgeflag;
210 struct hlist_node list_entries;
211 /* array to cache modify_qp()'s parms for GSI/SMI qp */
212 struct ehca_mod_qp_parm *mod_qp_parm;
213 int mod_qp_parm_idx;
214 /* mmap counter for resources mapped into user space */
215 u32 mm_count_squeue;
216 u32 mm_count_rqueue;
217 u32 mm_count_galpa;
218 /* unsolicited ack circumvention */
219 int unsol_ack_circ;
220 int mtu_shift;
221 u32 message_count;
222 u32 packet_count;
223 atomic_t nr_events; /* events seen */
224 wait_queue_head_t wait_completion;
225 int mig_armed;
226 struct list_head sq_err_node;
227 struct list_head rq_err_node;
228};
229
230#define IS_SRQ(qp) (qp->ext_type == EQPT_SRQ)
231#define HAS_SQ(qp) (qp->ext_type != EQPT_SRQ)
232#define HAS_RQ(qp) (qp->ext_type != EQPT_SRQBASE)
233
234/* must be power of 2 */
235#define QP_HASHTAB_LEN 8
236
237struct ehca_cq {
238 struct ib_cq ib_cq;
239 struct ipz_queue ipz_queue;
240 struct h_galpas galpas;
241 spinlock_t spinlock;
242 u32 cq_number;
243 u32 token;
244 u32 nr_of_entries;
245 struct ipz_cq_handle ipz_cq_handle;
246 struct ehca_pfcq pf;
247 spinlock_t cb_lock;
248 struct hlist_head qp_hashtab[QP_HASHTAB_LEN];
249 struct list_head entry;
250 u32 nr_callbacks; /* #events assigned to cpu by scaling code */
251 atomic_t nr_events; /* #events seen */
252 wait_queue_head_t wait_completion;
253 spinlock_t task_lock;
254 /* mmap counter for resources mapped into user space */
255 u32 mm_count_queue;
256 u32 mm_count_galpa;
257 struct list_head sqp_err_list;
258 struct list_head rqp_err_list;
259};
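
Note that struct ehca_cq (like ehca_qp, ehca_pd and ehca_mr) embeds its generic verbs object as a member, so the driver recovers its private structure from the struct ib_cq pointer handed in by the core with container_of(), as ehca_destroy_cq() does further below. A short sketch of that pattern (the helper name is illustrative, not part of the driver):

/* Sketch of the embed-and-container_of pattern used throughout the driver. */
static inline struct ehca_cq *to_ehca_cq_example(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct ehca_cq, ib_cq);
}
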
260
261enum ehca_mr_flag {
262 EHCA_MR_FLAG_FMR = 0x80000000, /* FMR, created with ehca_alloc_fmr */
263 EHCA_MR_FLAG_MAXMR = 0x40000000, /* max-MR */
264};
265
266struct ehca_mr {
267 union {
268 struct ib_mr ib_mr; /* must always be first in ehca_mr */
269 struct ib_fmr ib_fmr; /* must always be first in ehca_mr */
270 } ib;
271 struct ib_umem *umem;
272 spinlock_t mrlock;
273
274 enum ehca_mr_flag flags;
275 u32 num_kpages; /* number of kernel pages */
276 u32 num_hwpages; /* number of hw pages to form MR */
277 u64 hwpage_size; /* hw page size used for this MR */
278 int acl; /* ACL (stored here for usage in reregister) */
279 u64 *start; /* virtual start address (stored here for */
280 /* usage in reregister) */
281 u64 size; /* size (stored here for usage in reregister) */
282 u32 fmr_page_size; /* page size for FMR */
283 u32 fmr_max_pages; /* max pages for FMR */
284 u32 fmr_max_maps; /* max outstanding maps for FMR */
285 u32 fmr_map_cnt; /* map counter for FMR */
286 /* fw specific data */
287 struct ipz_mrmw_handle ipz_mr_handle; /* MR handle for h-calls */
288 struct h_galpas galpas;
289};
290
291struct ehca_mw {
292 struct ib_mw ib_mw; /* gen2 mw, must always be first in ehca_mw */
293 spinlock_t mwlock;
294
295 u8 never_bound; /* indication MW was never bound */
296 struct ipz_mrmw_handle ipz_mw_handle; /* MW handle for h-calls */
297 struct h_galpas galpas;
298};
299
300enum ehca_mr_pgi_type {
301 EHCA_MR_PGI_PHYS = 1, /* type of ehca_reg_phys_mr,
302 * ehca_rereg_phys_mr,
303 * ehca_reg_internal_maxmr */
304 EHCA_MR_PGI_USER = 2, /* type of ehca_reg_user_mr */
305 EHCA_MR_PGI_FMR = 3 /* type of ehca_map_phys_fmr */
306};
307
308struct ehca_mr_pginfo {
309 enum ehca_mr_pgi_type type;
310 u64 num_kpages;
311 u64 kpage_cnt;
312 u64 hwpage_size; /* hw page size used for this MR */
313 u64 num_hwpages; /* number of hw pages */
314 u64 hwpage_cnt; /* counter for hw pages */
315 u64 next_hwpage; /* next hw page in buffer/chunk/listelem */
316
317 union {
318 struct { /* type EHCA_MR_PGI_PHYS section */
319 u64 addr;
320 u16 size;
321 } phy;
322 struct { /* type EHCA_MR_PGI_USER section */
323 struct ib_umem *region;
324 struct scatterlist *next_sg;
325 u64 next_nmap;
326 } usr;
327 struct { /* type EHCA_MR_PGI_FMR section */
328 u64 fmr_pgsize;
329 u64 *page_list;
330 u64 next_listelem;
331 } fmr;
332 } u;
333};
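
struct ehca_mr_pginfo is effectively a tagged union: the 'type' field selects which member of 'u' is valid, so any consumer has to switch on it before touching the union. A minimal, hypothetical consumer sketch under that assumption:

/* Illustrative only: pick a page address according to pginfo->type. */
static u64 pginfo_first_addr_example(struct ehca_mr_pginfo *pginfo)
{
	switch (pginfo->type) {
	case EHCA_MR_PGI_PHYS:
		return pginfo->u.phy.addr;
	case EHCA_MR_PGI_FMR:
		return pginfo->u.fmr.page_list ? pginfo->u.fmr.page_list[0] : 0;
	case EHCA_MR_PGI_USER:
	default:
		return 0;	/* user regions are walked via the scatterlist */
	}
}
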
334
335/* output parameters for MR/FMR hipz calls */
336struct ehca_mr_hipzout_parms {
337 struct ipz_mrmw_handle handle;
338 u32 lkey;
339 u32 rkey;
340 u64 len;
341 u64 vaddr;
342 u32 acl;
343};
344
345/* output parameters for MW hipz calls */
346struct ehca_mw_hipzout_parms {
347 struct ipz_mrmw_handle handle;
348 u32 rkey;
349};
350
351struct ehca_av {
352 struct ib_ah ib_ah;
353 struct ehca_ud_av av;
354};
355
356struct ehca_ucontext {
357 struct ib_ucontext ib_ucontext;
358};
359
360int ehca_init_pd_cache(void);
361void ehca_cleanup_pd_cache(void);
362int ehca_init_cq_cache(void);
363void ehca_cleanup_cq_cache(void);
364int ehca_init_qp_cache(void);
365void ehca_cleanup_qp_cache(void);
366int ehca_init_av_cache(void);
367void ehca_cleanup_av_cache(void);
368int ehca_init_mrmw_cache(void);
369void ehca_cleanup_mrmw_cache(void);
370int ehca_init_small_qp_cache(void);
371void ehca_cleanup_small_qp_cache(void);
372
373extern rwlock_t ehca_qp_idr_lock;
374extern rwlock_t ehca_cq_idr_lock;
375extern struct idr ehca_qp_idr;
376extern struct idr ehca_cq_idr;
377extern spinlock_t shca_list_lock;
378
379extern int ehca_static_rate;
380extern int ehca_port_act_time;
381extern bool ehca_use_hp_mr;
382extern bool ehca_scaling_code;
383extern int ehca_lock_hcalls;
384extern int ehca_nr_ports;
385extern int ehca_max_cq;
386extern int ehca_max_qp;
387
388struct ipzu_queue_resp {
389 u32 qe_size; /* queue entry size */
390 u32 act_nr_of_sg;
391 u32 queue_length; /* queue length allocated in bytes */
392 u32 pagesize;
393 u32 toggle_state;
394 u32 offset; /* save offset within a page for small_qp */
395};
396
397struct ehca_create_cq_resp {
398 u32 cq_number;
399 u32 token;
400 struct ipzu_queue_resp ipz_queue;
401 u32 fw_handle_ofs;
402 u32 dummy;
403};
404
405struct ehca_create_qp_resp {
406 u32 qp_num;
407 u32 token;
408 u32 qp_type;
409 u32 ext_type;
410 u32 qkey;
411 /* qp_num assigned by ehca: sqp0/1 may have got different numbers */
412 u32 real_qp_num;
413 u32 fw_handle_ofs;
414 u32 dummy;
415 struct ipzu_queue_resp ipz_squeue;
416 struct ipzu_queue_resp ipz_rqueue;
417};
418
419struct ehca_alloc_cq_parms {
420 u32 nr_cqe;
421 u32 act_nr_of_entries;
422 u32 act_pages;
423 struct ipz_eq_handle eq_handle;
424};
425
426enum ehca_service_type {
427 ST_RC = 0,
428 ST_UC = 1,
429 ST_RD = 2,
430 ST_UD = 3,
431};
432
433enum ehca_ll_comp_flags {
434 LLQP_SEND_COMP = 0x20,
435 LLQP_RECV_COMP = 0x40,
436 LLQP_COMP_MASK = 0x60,
437};
438
439struct ehca_alloc_queue_parms {
440 /* input parameters */
441 int max_wr;
442 int max_sge;
443 int page_size;
444 int is_small;
445
446 /* output parameters */
447 u16 act_nr_wqes;
448 u8 act_nr_sges;
449 u32 queue_size; /* bytes for small queues, pages otherwise */
450};
451
452struct ehca_alloc_qp_parms {
453 struct ehca_alloc_queue_parms squeue;
454 struct ehca_alloc_queue_parms rqueue;
455
456 /* input parameters */
457 enum ehca_service_type servicetype;
458 int qp_storage;
459 int sigtype;
460 enum ehca_ext_qp_type ext_type;
461 enum ehca_ll_comp_flags ll_comp_flags;
462 int ud_av_l_key_ctl;
463
464 u32 token;
465 struct ipz_eq_handle eq_handle;
466 struct ipz_pd pd;
467 struct ipz_cq_handle send_cq_handle, recv_cq_handle;
468
469 u32 srq_qpn, srq_token, srq_limit;
470
471 /* output parameters */
472 u32 real_qp_num;
473 struct ipz_qp_handle qp_handle;
474 struct h_galpas galpas;
475};
476
477int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp);
478int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int qp_num);
479struct ehca_qp *ehca_cq_get_qp(struct ehca_cq *cq, int qp_num);
480
481#endif
diff --git a/drivers/staging/rdma/ehca/ehca_classes_pSeries.h b/drivers/staging/rdma/ehca/ehca_classes_pSeries.h
deleted file mode 100644
index 689c35786dd2..000000000000
--- a/drivers/staging/rdma/ehca/ehca_classes_pSeries.h
+++ /dev/null
@@ -1,208 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * pSeries interface definitions
5 *
6 * Authors: Waleri Fomin <fomin@de.ibm.com>
7 * Christoph Raisch <raisch@de.ibm.com>
8 *
9 * Copyright (c) 2005 IBM Corporation
10 *
11 * All rights reserved.
12 *
13 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
14 * BSD.
15 *
16 * OpenIB BSD License
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are met:
20 *
21 * Redistributions of source code must retain the above copyright notice, this
22 * list of conditions and the following disclaimer.
23 *
24 * Redistributions in binary form must reproduce the above copyright notice,
25 * this list of conditions and the following disclaimer in the documentation
26 * and/or other materials
27 * provided with the distribution.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
30 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
33 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
34 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
35 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
36 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
37 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
38 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
39 * POSSIBILITY OF SUCH DAMAGE.
40 */
41
42#ifndef __EHCA_CLASSES_PSERIES_H__
43#define __EHCA_CLASSES_PSERIES_H__
44
45#include "hcp_phyp.h"
46#include "ipz_pt_fn.h"
47
48
49struct ehca_pfqp {
50 struct ipz_qpt sqpt;
51 struct ipz_qpt rqpt;
52};
53
54struct ehca_pfcq {
55 struct ipz_qpt qpt;
56 u32 cqnr;
57};
58
59struct ehca_pfeq {
60 struct ipz_qpt qpt;
61 struct h_galpa galpa;
62 u32 eqnr;
63};
64
65struct ipz_adapter_handle {
66 u64 handle;
67};
68
69struct ipz_cq_handle {
70 u64 handle;
71};
72
73struct ipz_eq_handle {
74 u64 handle;
75};
76
77struct ipz_qp_handle {
78 u64 handle;
79};
80struct ipz_mrmw_handle {
81 u64 handle;
82};
83
84struct ipz_pd {
85 u32 value;
86};
87
88struct hcp_modify_qp_control_block {
89 u32 qkey; /* 00 */
90 u32 rdd; /* reliable datagram domain */
91 u32 send_psn; /* 02 */
92 u32 receive_psn; /* 03 */
93 u32 prim_phys_port; /* 04 */
94 u32 alt_phys_port; /* 05 */
95 u32 prim_p_key_idx; /* 06 */
96 u32 alt_p_key_idx; /* 07 */
97 u32 rdma_atomic_ctrl; /* 08 */
98 u32 qp_state; /* 09 */
99 u32 reserved_10; /* 10 */
100 u32 rdma_nr_atomic_resp_res; /* 11 */
101 u32 path_migration_state; /* 12 */
102 u32 rdma_atomic_outst_dest_qp; /* 13 */
103 u32 dest_qp_nr; /* 14 */
104 u32 min_rnr_nak_timer_field; /* 15 */
105 u32 service_level; /* 16 */
106 u32 send_grh_flag; /* 17 */
107 u32 retry_count; /* 18 */
108 u32 timeout; /* 19 */
109 u32 path_mtu; /* 20 */
110 u32 max_static_rate; /* 21 */
111 u32 dlid; /* 22 */
112 u32 rnr_retry_count; /* 23 */
113 u32 source_path_bits; /* 24 */
114 u32 traffic_class; /* 25 */
115 u32 hop_limit; /* 26 */
116 u32 source_gid_idx; /* 27 */
117 u32 flow_label; /* 28 */
118 u32 reserved_29; /* 29 */
119 union { /* 30 */
120 u64 dw[2];
121 u8 byte[16];
122 } dest_gid;
123 u32 service_level_al; /* 34 */
124 u32 send_grh_flag_al; /* 35 */
125 u32 retry_count_al; /* 36 */
126 u32 timeout_al; /* 37 */
127 u32 max_static_rate_al; /* 38 */
128 u32 dlid_al; /* 39 */
129 u32 rnr_retry_count_al; /* 40 */
130 u32 source_path_bits_al; /* 41 */
131 u32 traffic_class_al; /* 42 */
132 u32 hop_limit_al; /* 43 */
133 u32 source_gid_idx_al; /* 44 */
134 u32 flow_label_al; /* 45 */
135 u32 reserved_46; /* 46 */
136 u32 reserved_47; /* 47 */
137 union { /* 48 */
138 u64 dw[2];
139 u8 byte[16];
140 } dest_gid_al;
141 u32 max_nr_outst_send_wr; /* 52 */
142 u32 max_nr_outst_recv_wr; /* 53 */
143 u32 disable_ete_credit_check; /* 54 */
144 u32 qp_number; /* 55 */
145 u64 send_queue_handle; /* 56 */
146 u64 recv_queue_handle; /* 58 */
147 u32 actual_nr_sges_in_sq_wqe; /* 60 */
148 u32 actual_nr_sges_in_rq_wqe; /* 61 */
149 u32 qp_enable; /* 62 */
150 u32 curr_srq_limit; /* 63 */
151 u64 qp_aff_asyn_ev_log_reg; /* 64 */
152 u64 shared_rq_hndl; /* 66 */
153 u64 trigg_doorbell_qp_hndl; /* 68 */
154 u32 reserved_70_127[58]; /* 70 */
155};
156
157#define MQPCB_MASK_QKEY EHCA_BMASK_IBM( 0, 0)
158#define MQPCB_MASK_SEND_PSN EHCA_BMASK_IBM( 2, 2)
159#define MQPCB_MASK_RECEIVE_PSN EHCA_BMASK_IBM( 3, 3)
160#define MQPCB_MASK_PRIM_PHYS_PORT EHCA_BMASK_IBM( 4, 4)
161#define MQPCB_PRIM_PHYS_PORT EHCA_BMASK_IBM(24, 31)
162#define MQPCB_MASK_ALT_PHYS_PORT EHCA_BMASK_IBM( 5, 5)
163#define MQPCB_MASK_PRIM_P_KEY_IDX EHCA_BMASK_IBM( 6, 6)
164#define MQPCB_PRIM_P_KEY_IDX EHCA_BMASK_IBM(24, 31)
165#define MQPCB_MASK_ALT_P_KEY_IDX EHCA_BMASK_IBM( 7, 7)
166#define MQPCB_MASK_RDMA_ATOMIC_CTRL EHCA_BMASK_IBM( 8, 8)
167#define MQPCB_MASK_QP_STATE EHCA_BMASK_IBM( 9, 9)
168#define MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES EHCA_BMASK_IBM(11, 11)
169#define MQPCB_MASK_PATH_MIGRATION_STATE EHCA_BMASK_IBM(12, 12)
170#define MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP EHCA_BMASK_IBM(13, 13)
171#define MQPCB_MASK_DEST_QP_NR EHCA_BMASK_IBM(14, 14)
172#define MQPCB_MASK_MIN_RNR_NAK_TIMER_FIELD EHCA_BMASK_IBM(15, 15)
173#define MQPCB_MASK_SERVICE_LEVEL EHCA_BMASK_IBM(16, 16)
174#define MQPCB_MASK_SEND_GRH_FLAG EHCA_BMASK_IBM(17, 17)
175#define MQPCB_MASK_RETRY_COUNT EHCA_BMASK_IBM(18, 18)
176#define MQPCB_MASK_TIMEOUT EHCA_BMASK_IBM(19, 19)
177#define MQPCB_MASK_PATH_MTU EHCA_BMASK_IBM(20, 20)
178#define MQPCB_MASK_MAX_STATIC_RATE EHCA_BMASK_IBM(21, 21)
179#define MQPCB_MASK_DLID EHCA_BMASK_IBM(22, 22)
180#define MQPCB_MASK_RNR_RETRY_COUNT EHCA_BMASK_IBM(23, 23)
181#define MQPCB_MASK_SOURCE_PATH_BITS EHCA_BMASK_IBM(24, 24)
182#define MQPCB_MASK_TRAFFIC_CLASS EHCA_BMASK_IBM(25, 25)
183#define MQPCB_MASK_HOP_LIMIT EHCA_BMASK_IBM(26, 26)
184#define MQPCB_MASK_SOURCE_GID_IDX EHCA_BMASK_IBM(27, 27)
185#define MQPCB_MASK_FLOW_LABEL EHCA_BMASK_IBM(28, 28)
186#define MQPCB_MASK_DEST_GID EHCA_BMASK_IBM(30, 30)
187#define MQPCB_MASK_SERVICE_LEVEL_AL EHCA_BMASK_IBM(31, 31)
188#define MQPCB_MASK_SEND_GRH_FLAG_AL EHCA_BMASK_IBM(32, 32)
189#define MQPCB_MASK_RETRY_COUNT_AL EHCA_BMASK_IBM(33, 33)
190#define MQPCB_MASK_TIMEOUT_AL EHCA_BMASK_IBM(34, 34)
191#define MQPCB_MASK_MAX_STATIC_RATE_AL EHCA_BMASK_IBM(35, 35)
192#define MQPCB_MASK_DLID_AL EHCA_BMASK_IBM(36, 36)
193#define MQPCB_MASK_RNR_RETRY_COUNT_AL EHCA_BMASK_IBM(37, 37)
194#define MQPCB_MASK_SOURCE_PATH_BITS_AL EHCA_BMASK_IBM(38, 38)
195#define MQPCB_MASK_TRAFFIC_CLASS_AL EHCA_BMASK_IBM(39, 39)
196#define MQPCB_MASK_HOP_LIMIT_AL EHCA_BMASK_IBM(40, 40)
197#define MQPCB_MASK_SOURCE_GID_IDX_AL EHCA_BMASK_IBM(41, 41)
198#define MQPCB_MASK_FLOW_LABEL_AL EHCA_BMASK_IBM(42, 42)
199#define MQPCB_MASK_DEST_GID_AL EHCA_BMASK_IBM(44, 44)
200#define MQPCB_MASK_MAX_NR_OUTST_SEND_WR EHCA_BMASK_IBM(45, 45)
201#define MQPCB_MASK_MAX_NR_OUTST_RECV_WR EHCA_BMASK_IBM(46, 46)
202#define MQPCB_MASK_DISABLE_ETE_CREDIT_CHECK EHCA_BMASK_IBM(47, 47)
203#define MQPCB_MASK_QP_ENABLE EHCA_BMASK_IBM(48, 48)
204#define MQPCB_MASK_CURR_SRQ_LIMIT EHCA_BMASK_IBM(49, 49)
205#define MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG EHCA_BMASK_IBM(50, 50)
206#define MQPCB_MASK_SHARED_RQ_HNDL EHCA_BMASK_IBM(51, 51)
207
208#endif /* __EHCA_CLASSES_PSERIES_H__ */
diff --git a/drivers/staging/rdma/ehca/ehca_cq.c b/drivers/staging/rdma/ehca/ehca_cq.c
deleted file mode 100644
index 1aa7931fe860..000000000000
--- a/drivers/staging/rdma/ehca/ehca_cq.c
+++ /dev/null
@@ -1,397 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * Completion queue handling
5 *
6 * Authors: Waleri Fomin <fomin@de.ibm.com>
7 * Khadija Souissi <souissi@de.ibm.com>
8 * Reinhard Ernst <rernst@de.ibm.com>
9 * Heiko J Schick <schickhj@de.ibm.com>
10 * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
11 *
12 *
13 * Copyright (c) 2005 IBM Corporation
14 *
15 * All rights reserved.
16 *
17 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
18 * BSD.
19 *
20 * OpenIB BSD License
21 *
22 * Redistribution and use in source and binary forms, with or without
23 * modification, are permitted provided that the following conditions are met:
24 *
25 * Redistributions of source code must retain the above copyright notice, this
26 * list of conditions and the following disclaimer.
27 *
28 * Redistributions in binary form must reproduce the above copyright notice,
29 * this list of conditions and the following disclaimer in the documentation
30 * and/or other materials
31 * provided with the distribution.
32 *
33 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
34 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
35 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
36 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
37 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
38 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
39 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
40 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
41 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
42 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
43 * POSSIBILITY OF SUCH DAMAGE.
44 */
45
46#include <linux/slab.h>
47
48#include "ehca_iverbs.h"
49#include "ehca_classes.h"
50#include "ehca_irq.h"
51#include "hcp_if.h"
52
53static struct kmem_cache *cq_cache;
54
55int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp)
56{
57 unsigned int qp_num = qp->real_qp_num;
58 unsigned int key = qp_num & (QP_HASHTAB_LEN-1);
59 unsigned long flags;
60
61 spin_lock_irqsave(&cq->spinlock, flags);
62 hlist_add_head(&qp->list_entries, &cq->qp_hashtab[key]);
63 spin_unlock_irqrestore(&cq->spinlock, flags);
64
65 ehca_dbg(cq->ib_cq.device, "cq_num=%x real_qp_num=%x",
66 cq->cq_number, qp_num);
67
68 return 0;
69}
70
71int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int real_qp_num)
72{
73 int ret = -EINVAL;
74 unsigned int key = real_qp_num & (QP_HASHTAB_LEN-1);
75 struct hlist_node *iter;
76 struct ehca_qp *qp;
77 unsigned long flags;
78
79 spin_lock_irqsave(&cq->spinlock, flags);
80 hlist_for_each(iter, &cq->qp_hashtab[key]) {
81 qp = hlist_entry(iter, struct ehca_qp, list_entries);
82 if (qp->real_qp_num == real_qp_num) {
83 hlist_del(iter);
84 ehca_dbg(cq->ib_cq.device,
85 "removed qp from cq .cq_num=%x real_qp_num=%x",
86 cq->cq_number, real_qp_num);
87 ret = 0;
88 break;
89 }
90 }
91 spin_unlock_irqrestore(&cq->spinlock, flags);
92 if (ret)
93 ehca_err(cq->ib_cq.device,
94 "qp not found cq_num=%x real_qp_num=%x",
95 cq->cq_number, real_qp_num);
96
97 return ret;
98}
99
100struct ehca_qp *ehca_cq_get_qp(struct ehca_cq *cq, int real_qp_num)
101{
102 struct ehca_qp *ret = NULL;
103 unsigned int key = real_qp_num & (QP_HASHTAB_LEN-1);
104 struct hlist_node *iter;
105 struct ehca_qp *qp;
106 hlist_for_each(iter, &cq->qp_hashtab[key]) {
107 qp = hlist_entry(iter, struct ehca_qp, list_entries);
108 if (qp->real_qp_num == real_qp_num) {
109 ret = qp;
110 break;
111 }
112 }
113 return ret;
114}
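
All three hash-table helpers above derive the bucket from real_qp_num with a bitwise AND. That only works because QP_HASHTAB_LEN is a power of two (as the comment next to its definition requires), which makes the mask equivalent to a modulo. A small sketch of that equivalence (the helper name is illustrative):

/* Illustrative only: with QP_HASHTAB_LEN == 8, masking and modulo
 * select the same bucket for any real_qp_num. */
static inline unsigned int qp_hash_bucket_example(unsigned int real_qp_num)
{
	unsigned int by_mask = real_qp_num & (QP_HASHTAB_LEN - 1);
	unsigned int by_mod  = real_qp_num % QP_HASHTAB_LEN;

	return (by_mask == by_mod) ? by_mask : by_mod;	/* always equal */
}
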
115
116struct ib_cq *ehca_create_cq(struct ib_device *device,
117 const struct ib_cq_init_attr *attr,
118 struct ib_ucontext *context,
119 struct ib_udata *udata)
120{
121 int cqe = attr->cqe;
122 static const u32 additional_cqe = 20;
123 struct ib_cq *cq;
124 struct ehca_cq *my_cq;
125 struct ehca_shca *shca =
126 container_of(device, struct ehca_shca, ib_device);
127 struct ipz_adapter_handle adapter_handle;
128 struct ehca_alloc_cq_parms param; /* h_call's out parameters */
129 struct h_galpa gal;
130 void *vpage;
131 u32 counter;
132 u64 rpage, cqx_fec, h_ret;
133 int rc, i;
134 unsigned long flags;
135
136 if (attr->flags)
137 return ERR_PTR(-EINVAL);
138
139 if (cqe >= 0xFFFFFFFF - 64 - additional_cqe)
140 return ERR_PTR(-EINVAL);
141
142 if (!atomic_add_unless(&shca->num_cqs, 1, shca->max_num_cqs)) {
143 ehca_err(device, "Unable to create CQ, max number of %i "
144 "CQs reached.", shca->max_num_cqs);
145 ehca_err(device, "To increase the maximum number of CQs "
146 "use the number_of_cqs module parameter.\n");
147 return ERR_PTR(-ENOSPC);
148 }
149
150 my_cq = kmem_cache_zalloc(cq_cache, GFP_KERNEL);
151 if (!my_cq) {
152 ehca_err(device, "Out of memory for ehca_cq struct device=%p",
153 device);
154 atomic_dec(&shca->num_cqs);
155 return ERR_PTR(-ENOMEM);
156 }
157
158 memset(&param, 0, sizeof(struct ehca_alloc_cq_parms));
159
160 spin_lock_init(&my_cq->spinlock);
161 spin_lock_init(&my_cq->cb_lock);
162 spin_lock_init(&my_cq->task_lock);
163 atomic_set(&my_cq->nr_events, 0);
164 init_waitqueue_head(&my_cq->wait_completion);
165
166 cq = &my_cq->ib_cq;
167
168 adapter_handle = shca->ipz_hca_handle;
169 param.eq_handle = shca->eq.ipz_eq_handle;
170
171 idr_preload(GFP_KERNEL);
172 write_lock_irqsave(&ehca_cq_idr_lock, flags);
173 rc = idr_alloc(&ehca_cq_idr, my_cq, 0, 0x2000000, GFP_NOWAIT);
174 write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
175 idr_preload_end();
176
177 if (rc < 0) {
178 cq = ERR_PTR(-ENOMEM);
179 ehca_err(device, "Can't allocate new idr entry. device=%p",
180 device);
181 goto create_cq_exit1;
182 }
183 my_cq->token = rc;
184
185 /*
186 * CQs maximum depth is 4GB-64, but we need additional 20 as buffer
187 * for receiving errors CQEs.
188 */
189 param.nr_cqe = cqe + additional_cqe;
190 h_ret = hipz_h_alloc_resource_cq(adapter_handle, my_cq, &param);
191
192 if (h_ret != H_SUCCESS) {
193 ehca_err(device, "hipz_h_alloc_resource_cq() failed "
194 "h_ret=%lli device=%p", h_ret, device);
195 cq = ERR_PTR(ehca2ib_return_code(h_ret));
196 goto create_cq_exit2;
197 }
198
199 rc = ipz_queue_ctor(NULL, &my_cq->ipz_queue, param.act_pages,
200 EHCA_PAGESIZE, sizeof(struct ehca_cqe), 0, 0);
201 if (!rc) {
202 ehca_err(device, "ipz_queue_ctor() failed ipz_rc=%i device=%p",
203 rc, device);
204 cq = ERR_PTR(-EINVAL);
205 goto create_cq_exit3;
206 }
207
208 for (counter = 0; counter < param.act_pages; counter++) {
209 vpage = ipz_qpageit_get_inc(&my_cq->ipz_queue);
210 if (!vpage) {
211 ehca_err(device, "ipz_qpageit_get_inc() "
212 "returns NULL device=%p", device);
213 cq = ERR_PTR(-EAGAIN);
214 goto create_cq_exit4;
215 }
216 rpage = __pa(vpage);
217
218 h_ret = hipz_h_register_rpage_cq(adapter_handle,
219 my_cq->ipz_cq_handle,
220 &my_cq->pf,
221 0,
222 0,
223 rpage,
224 1,
225 my_cq->galpas.
226 kernel);
227
228 if (h_ret < H_SUCCESS) {
229 ehca_err(device, "hipz_h_register_rpage_cq() failed "
230 "ehca_cq=%p cq_num=%x h_ret=%lli counter=%i "
231 "act_pages=%i", my_cq, my_cq->cq_number,
232 h_ret, counter, param.act_pages);
233 cq = ERR_PTR(-EINVAL);
234 goto create_cq_exit4;
235 }
236
237 if (counter == (param.act_pages - 1)) {
238 vpage = ipz_qpageit_get_inc(&my_cq->ipz_queue);
239 if ((h_ret != H_SUCCESS) || vpage) {
240 ehca_err(device, "Registration of pages not "
241 "complete ehca_cq=%p cq_num=%x "
242 "h_ret=%lli", my_cq, my_cq->cq_number,
243 h_ret);
244 cq = ERR_PTR(-EAGAIN);
245 goto create_cq_exit4;
246 }
247 } else {
248 if (h_ret != H_PAGE_REGISTERED) {
249 ehca_err(device, "Registration of page failed "
250 "ehca_cq=%p cq_num=%x h_ret=%lli "
251 "counter=%i act_pages=%i",
252 my_cq, my_cq->cq_number,
253 h_ret, counter, param.act_pages);
254 cq = ERR_PTR(-ENOMEM);
255 goto create_cq_exit4;
256 }
257 }
258 }
259
260 ipz_qeit_reset(&my_cq->ipz_queue);
261
262 gal = my_cq->galpas.kernel;
263 cqx_fec = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_fec));
264 ehca_dbg(device, "ehca_cq=%p cq_num=%x CQX_FEC=%llx",
265 my_cq, my_cq->cq_number, cqx_fec);
266
267 my_cq->ib_cq.cqe = my_cq->nr_of_entries =
268 param.act_nr_of_entries - additional_cqe;
269 my_cq->cq_number = (my_cq->ipz_cq_handle.handle) & 0xffff;
270
271 for (i = 0; i < QP_HASHTAB_LEN; i++)
272 INIT_HLIST_HEAD(&my_cq->qp_hashtab[i]);
273
274 INIT_LIST_HEAD(&my_cq->sqp_err_list);
275 INIT_LIST_HEAD(&my_cq->rqp_err_list);
276
277 if (context) {
278 struct ipz_queue *ipz_queue = &my_cq->ipz_queue;
279 struct ehca_create_cq_resp resp;
280 memset(&resp, 0, sizeof(resp));
281 resp.cq_number = my_cq->cq_number;
282 resp.token = my_cq->token;
283 resp.ipz_queue.qe_size = ipz_queue->qe_size;
284 resp.ipz_queue.act_nr_of_sg = ipz_queue->act_nr_of_sg;
285 resp.ipz_queue.queue_length = ipz_queue->queue_length;
286 resp.ipz_queue.pagesize = ipz_queue->pagesize;
287 resp.ipz_queue.toggle_state = ipz_queue->toggle_state;
288 resp.fw_handle_ofs = (u32)
289 (my_cq->galpas.user.fw_handle & (PAGE_SIZE - 1));
290 if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
291 ehca_err(device, "Copy to udata failed.");
292 cq = ERR_PTR(-EFAULT);
293 goto create_cq_exit4;
294 }
295 }
296
297 return cq;
298
299create_cq_exit4:
300 ipz_queue_dtor(NULL, &my_cq->ipz_queue);
301
302create_cq_exit3:
303 h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1);
304 if (h_ret != H_SUCCESS)
305 ehca_err(device, "hipz_h_destroy_cq() failed ehca_cq=%p "
306 "cq_num=%x h_ret=%lli", my_cq, my_cq->cq_number, h_ret);
307
308create_cq_exit2:
309 write_lock_irqsave(&ehca_cq_idr_lock, flags);
310 idr_remove(&ehca_cq_idr, my_cq->token);
311 write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
312
313create_cq_exit1:
314 kmem_cache_free(cq_cache, my_cq);
315
316 atomic_dec(&shca->num_cqs);
317 return cq;
318}
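
The CQ token above is allocated with the kernel's preloaded-IDR pattern: preallocate with GFP_KERNEL while sleeping is still allowed, then take the write lock and allocate the ID with GFP_NOWAIT. A condensed sketch of that sequence, with error handling elided (the helper name is illustrative):

/* Condensed form of the token allocation performed in ehca_create_cq(). */
static int alloc_cq_token_example(struct ehca_cq *my_cq)
{
	unsigned long flags;
	int id;

	idr_preload(GFP_KERNEL);	/* may sleep: done before taking the lock */
	write_lock_irqsave(&ehca_cq_idr_lock, flags);
	id = idr_alloc(&ehca_cq_idr, my_cq, 0, 0x2000000, GFP_NOWAIT);
	write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
	idr_preload_end();

	return id;	/* negative errno on failure, otherwise the token */
}
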
319
320int ehca_destroy_cq(struct ib_cq *cq)
321{
322 u64 h_ret;
323 struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
324 int cq_num = my_cq->cq_number;
325 struct ib_device *device = cq->device;
326 struct ehca_shca *shca = container_of(device, struct ehca_shca,
327 ib_device);
328 struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
329 unsigned long flags;
330
331 if (cq->uobject) {
332 if (my_cq->mm_count_galpa || my_cq->mm_count_queue) {
333 ehca_err(device, "Resources still referenced in "
334 "user space cq_num=%x", my_cq->cq_number);
335 return -EINVAL;
336 }
337 }
338
339 /*
340 * remove the CQ from the idr first to make sure
341 * no more interrupt tasklets will touch this CQ
342 */
343 write_lock_irqsave(&ehca_cq_idr_lock, flags);
344 idr_remove(&ehca_cq_idr, my_cq->token);
345 write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
346
347 /* now wait until all pending events have completed */
348 wait_event(my_cq->wait_completion, !atomic_read(&my_cq->nr_events));
349
350 /* nobody's using our CQ any longer -- we can destroy it */
351 h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 0);
352 if (h_ret == H_R_STATE) {
353 /* cq in err: read err data and destroy it forcibly */
354 ehca_dbg(device, "ehca_cq=%p cq_num=%x resource=%llx in err "
355 "state. Try to delete it forcibly.",
356 my_cq, cq_num, my_cq->ipz_cq_handle.handle);
357 ehca_error_data(shca, my_cq, my_cq->ipz_cq_handle.handle);
358 h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1);
359 if (h_ret == H_SUCCESS)
360 ehca_dbg(device, "cq_num=%x deleted successfully.",
361 cq_num);
362 }
363 if (h_ret != H_SUCCESS) {
364 ehca_err(device, "hipz_h_destroy_cq() failed h_ret=%lli "
365 "ehca_cq=%p cq_num=%x", h_ret, my_cq, cq_num);
366 return ehca2ib_return_code(h_ret);
367 }
368 ipz_queue_dtor(NULL, &my_cq->ipz_queue);
369 kmem_cache_free(cq_cache, my_cq);
370
371 atomic_dec(&shca->num_cqs);
372 return 0;
373}
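
Teardown relies on a simple reference/wakeup protocol: event handlers increment cq->nr_events for as long as they hold a pointer to the CQ, and ehca_destroy_cq() first unpublishes the CQ from the IDR and then sleeps until that counter drops to zero. A hedged sketch of both halves of the protocol (helper names are illustrative; the real event-side code lives in ehca_irq.c):

/* Event side: pin the CQ while handling an event, wake the destroyer
 * when the last pending event completes. */
static void cq_event_get_example(struct ehca_cq *cq)
{
	atomic_inc(&cq->nr_events);
}

static void cq_event_put_example(struct ehca_cq *cq)
{
	if (atomic_dec_and_test(&cq->nr_events))
		wake_up(&cq->wait_completion);
}

/* Destroy side: block until no event handler still references the CQ. */
static void cq_wait_idle_example(struct ehca_cq *cq)
{
	wait_event(cq->wait_completion, !atomic_read(&cq->nr_events));
}
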
374
375int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
376{
377 /* TODO: proper resize needs to be done */
378 ehca_err(cq->device, "not implemented yet");
379
380 return -EFAULT;
381}
382
383int ehca_init_cq_cache(void)
384{
385 cq_cache = kmem_cache_create("ehca_cache_cq",
386 sizeof(struct ehca_cq), 0,
387 SLAB_HWCACHE_ALIGN,
388 NULL);
389 if (!cq_cache)
390 return -ENOMEM;
391 return 0;
392}
393
394void ehca_cleanup_cq_cache(void)
395{
396 kmem_cache_destroy(cq_cache);
397}
diff --git a/drivers/staging/rdma/ehca/ehca_eq.c b/drivers/staging/rdma/ehca/ehca_eq.c
deleted file mode 100644
index 90da6747d395..000000000000
--- a/drivers/staging/rdma/ehca/ehca_eq.c
+++ /dev/null
@@ -1,189 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * Event queue handling
5 *
6 * Authors: Waleri Fomin <fomin@de.ibm.com>
7 * Khadija Souissi <souissi@de.ibm.com>
8 * Reinhard Ernst <rernst@de.ibm.com>
9 * Heiko J Schick <schickhj@de.ibm.com>
10 * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
11 *
12 *
13 * Copyright (c) 2005 IBM Corporation
14 *
15 * All rights reserved.
16 *
17 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
18 * BSD.
19 *
20 * OpenIB BSD License
21 *
22 * Redistribution and use in source and binary forms, with or without
23 * modification, are permitted provided that the following conditions are met:
24 *
25 * Redistributions of source code must retain the above copyright notice, this
26 * list of conditions and the following disclaimer.
27 *
28 * Redistributions in binary form must reproduce the above copyright notice,
29 * this list of conditions and the following disclaimer in the documentation
30 * and/or other materials
31 * provided with the distribution.
32 *
33 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
34 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
35 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
36 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
37 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
38 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
39 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
40 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
41 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
42 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
43 * POSSIBILITY OF SUCH DAMAGE.
44 */
45
46#include "ehca_classes.h"
47#include "ehca_irq.h"
48#include "ehca_iverbs.h"
49#include "ehca_qes.h"
50#include "hcp_if.h"
51#include "ipz_pt_fn.h"
52
53int ehca_create_eq(struct ehca_shca *shca,
54 struct ehca_eq *eq,
55 const enum ehca_eq_type type, const u32 length)
56{
57 int ret;
58 u64 h_ret;
59 u32 nr_pages;
60 u32 i;
61 void *vpage;
62 struct ib_device *ib_dev = &shca->ib_device;
63
64 spin_lock_init(&eq->spinlock);
65 spin_lock_init(&eq->irq_spinlock);
66 eq->is_initialized = 0;
67
68 if (type != EHCA_EQ && type != EHCA_NEQ) {
69 ehca_err(ib_dev, "Invalid EQ type %x. eq=%p", type, eq);
70 return -EINVAL;
71 }
72 if (!length) {
73 ehca_err(ib_dev, "EQ length must not be zero. eq=%p", eq);
74 return -EINVAL;
75 }
76
77 h_ret = hipz_h_alloc_resource_eq(shca->ipz_hca_handle,
78 &eq->pf,
79 type,
80 length,
81 &eq->ipz_eq_handle,
82 &eq->length,
83 &nr_pages, &eq->ist);
84
85 if (h_ret != H_SUCCESS) {
86 ehca_err(ib_dev, "Can't allocate EQ/NEQ. eq=%p", eq);
87 return -EINVAL;
88 }
89
90 ret = ipz_queue_ctor(NULL, &eq->ipz_queue, nr_pages,
91 EHCA_PAGESIZE, sizeof(struct ehca_eqe), 0, 0);
92 if (!ret) {
93 ehca_err(ib_dev, "Can't allocate EQ pages eq=%p", eq);
94 goto create_eq_exit1;
95 }
96
97 for (i = 0; i < nr_pages; i++) {
98 u64 rpage;
99
100 vpage = ipz_qpageit_get_inc(&eq->ipz_queue);
101 if (!vpage)
102 goto create_eq_exit2;
103
104 rpage = __pa(vpage);
105 h_ret = hipz_h_register_rpage_eq(shca->ipz_hca_handle,
106 eq->ipz_eq_handle,
107 &eq->pf,
108 0, 0, rpage, 1);
109
110 if (i == (nr_pages - 1)) {
111 /* last page */
112 vpage = ipz_qpageit_get_inc(&eq->ipz_queue);
113 if (h_ret != H_SUCCESS || vpage)
114 goto create_eq_exit2;
115 } else {
116 if (h_ret != H_PAGE_REGISTERED)
117 goto create_eq_exit2;
118 }
119 }
120
121 ipz_qeit_reset(&eq->ipz_queue);
122
123 /* register interrupt handlers and initialize work queues */
124 if (type == EHCA_EQ) {
125 tasklet_init(&eq->interrupt_task, ehca_tasklet_eq, (long)shca);
126
127 ret = ibmebus_request_irq(eq->ist, ehca_interrupt_eq,
128 0, "ehca_eq",
129 (void *)shca);
130 if (ret < 0)
131 ehca_err(ib_dev, "Can't map interrupt handler.");
132 } else if (type == EHCA_NEQ) {
133 tasklet_init(&eq->interrupt_task, ehca_tasklet_neq, (long)shca);
134
135 ret = ibmebus_request_irq(eq->ist, ehca_interrupt_neq,
136 0, "ehca_neq",
137 (void *)shca);
138 if (ret < 0)
139 ehca_err(ib_dev, "Can't map interrupt handler.");
140 }
141
142 eq->is_initialized = 1;
143
144 return 0;
145
146create_eq_exit2:
147 ipz_queue_dtor(NULL, &eq->ipz_queue);
148
149create_eq_exit1:
150 hipz_h_destroy_eq(shca->ipz_hca_handle, eq);
151
152 return -EINVAL;
153}
154
155void *ehca_poll_eq(struct ehca_shca *shca, struct ehca_eq *eq)
156{
157 unsigned long flags;
158 void *eqe;
159
160 spin_lock_irqsave(&eq->spinlock, flags);
161 eqe = ipz_eqit_eq_get_inc_valid(&eq->ipz_queue);
162 spin_unlock_irqrestore(&eq->spinlock, flags);
163
164 return eqe;
165}
166
167int ehca_destroy_eq(struct ehca_shca *shca, struct ehca_eq *eq)
168{
169 unsigned long flags;
170 u64 h_ret;
171
172 ibmebus_free_irq(eq->ist, (void *)shca);
173
174 spin_lock_irqsave(&shca_list_lock, flags);
175 eq->is_initialized = 0;
176 spin_unlock_irqrestore(&shca_list_lock, flags);
177
178 tasklet_kill(&eq->interrupt_task);
179
180 h_ret = hipz_h_destroy_eq(shca->ipz_hca_handle, eq);
181
182 if (h_ret != H_SUCCESS) {
183 ehca_err(&shca->ib_device, "Can't free EQ resources.");
184 return -EINVAL;
185 }
186 ipz_queue_dtor(NULL, &eq->ipz_queue);
187
188 return 0;
189}
diff --git a/drivers/staging/rdma/ehca/ehca_hca.c b/drivers/staging/rdma/ehca/ehca_hca.c
deleted file mode 100644
index e8b1bb65797a..000000000000
--- a/drivers/staging/rdma/ehca/ehca_hca.c
+++ /dev/null
@@ -1,414 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * HCA query functions
5 *
6 * Authors: Heiko J Schick <schickhj@de.ibm.com>
7 * Christoph Raisch <raisch@de.ibm.com>
8 *
9 * Copyright (c) 2005 IBM Corporation
10 *
11 * All rights reserved.
12 *
13 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
14 * BSD.
15 *
16 * OpenIB BSD License
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are met:
20 *
21 * Redistributions of source code must retain the above copyright notice, this
22 * list of conditions and the following disclaimer.
23 *
24 * Redistributions in binary form must reproduce the above copyright notice,
25 * this list of conditions and the following disclaimer in the documentation
26 * and/or other materials
27 * provided with the distribution.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
30 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
33 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
34 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
35 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
36 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
37 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
38 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
39 * POSSIBILITY OF SUCH DAMAGE.
40 */
41
42#include <linux/gfp.h>
43
44#include "ehca_tools.h"
45#include "ehca_iverbs.h"
46#include "hcp_if.h"
47
48static unsigned int limit_uint(unsigned int value)
49{
50 return min_t(unsigned int, value, INT_MAX);
51}
52
53int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
54 struct ib_udata *uhw)
55{
56 int i, ret = 0;
57 struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
58 ib_device);
59 struct hipz_query_hca *rblock;
60
61 static const u32 cap_mapping[] = {
62 IB_DEVICE_RESIZE_MAX_WR, HCA_CAP_WQE_RESIZE,
63 IB_DEVICE_BAD_PKEY_CNTR, HCA_CAP_BAD_P_KEY_CTR,
64 IB_DEVICE_BAD_QKEY_CNTR, HCA_CAP_Q_KEY_VIOL_CTR,
65 IB_DEVICE_RAW_MULTI, HCA_CAP_RAW_PACKET_MCAST,
66 IB_DEVICE_AUTO_PATH_MIG, HCA_CAP_AUTO_PATH_MIG,
67 IB_DEVICE_CHANGE_PHY_PORT, HCA_CAP_SQD_RTS_PORT_CHANGE,
68 IB_DEVICE_UD_AV_PORT_ENFORCE, HCA_CAP_AH_PORT_NR_CHECK,
69 IB_DEVICE_CURR_QP_STATE_MOD, HCA_CAP_CUR_QP_STATE_MOD,
70 IB_DEVICE_SHUTDOWN_PORT, HCA_CAP_SHUTDOWN_PORT,
71 IB_DEVICE_INIT_TYPE, HCA_CAP_INIT_TYPE,
72 IB_DEVICE_PORT_ACTIVE_EVENT, HCA_CAP_PORT_ACTIVE_EVENT,
73 };
74
75 if (uhw->inlen || uhw->outlen)
76 return -EINVAL;
77
78 rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
79 if (!rblock) {
80 ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
81 return -ENOMEM;
82 }
83
84 if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) {
85 ehca_err(&shca->ib_device, "Can't query device properties");
86 ret = -EINVAL;
87 goto query_device1;
88 }
89
90 memset(props, 0, sizeof(struct ib_device_attr));
91 props->page_size_cap = shca->hca_cap_mr_pgsize;
92 props->fw_ver = rblock->hw_ver;
93 props->max_mr_size = rblock->max_mr_size;
94 props->vendor_id = rblock->vendor_id >> 8;
95 props->vendor_part_id = rblock->vendor_part_id >> 16;
96 props->hw_ver = rblock->hw_ver;
97 props->max_qp = limit_uint(rblock->max_qp);
98 props->max_qp_wr = limit_uint(rblock->max_wqes_wq);
99 props->max_sge = limit_uint(rblock->max_sge);
100 props->max_sge_rd = limit_uint(rblock->max_sge_rd);
101 props->max_cq = limit_uint(rblock->max_cq);
102 props->max_cqe = limit_uint(rblock->max_cqe);
103 props->max_mr = limit_uint(rblock->max_mr);
104 props->max_mw = limit_uint(rblock->max_mw);
105 props->max_pd = limit_uint(rblock->max_pd);
106 props->max_ah = limit_uint(rblock->max_ah);
107 props->max_ee = limit_uint(rblock->max_rd_ee_context);
108 props->max_rdd = limit_uint(rblock->max_rd_domain);
109 props->max_fmr = limit_uint(rblock->max_mr);
110 props->max_qp_rd_atom = limit_uint(rblock->max_rr_qp);
111 props->max_ee_rd_atom = limit_uint(rblock->max_rr_ee_context);
112 props->max_res_rd_atom = limit_uint(rblock->max_rr_hca);
113 props->max_qp_init_rd_atom = limit_uint(rblock->max_act_wqs_qp);
114 props->max_ee_init_rd_atom = limit_uint(rblock->max_act_wqs_ee_context);
115
116 if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
117 props->max_srq = limit_uint(props->max_qp);
118 props->max_srq_wr = limit_uint(props->max_qp_wr);
119 props->max_srq_sge = 3;
120 }
121
122 props->max_pkeys = 16;
123 /* Some FW versions say 0 here; insert sensible value in that case */
124 props->local_ca_ack_delay = rblock->local_ca_ack_delay ?
125 min_t(u8, rblock->local_ca_ack_delay, 255) : 12;
126 props->max_raw_ipv6_qp = limit_uint(rblock->max_raw_ipv6_qp);
127 props->max_raw_ethy_qp = limit_uint(rblock->max_raw_ethy_qp);
128 props->max_mcast_grp = limit_uint(rblock->max_mcast_grp);
129 props->max_mcast_qp_attach = limit_uint(rblock->max_mcast_qp_attach);
130 props->max_total_mcast_qp_attach
131 = limit_uint(rblock->max_total_mcast_qp_attach);
132
133 /* translate device capabilities */
134 props->device_cap_flags = IB_DEVICE_SYS_IMAGE_GUID |
135 IB_DEVICE_RC_RNR_NAK_GEN | IB_DEVICE_N_NOTIFY_CQ;
136 for (i = 0; i < ARRAY_SIZE(cap_mapping); i += 2)
137 if (rblock->hca_cap_indicators & cap_mapping[i + 1])
138 props->device_cap_flags |= cap_mapping[i];
139
140query_device1:
141 ehca_free_fw_ctrlblock(rblock);
142
143 return ret;
144}
145
146static enum ib_mtu map_mtu(struct ehca_shca *shca, u32 fw_mtu)
147{
148 switch (fw_mtu) {
149 case 0x1:
150 return IB_MTU_256;
151 case 0x2:
152 return IB_MTU_512;
153 case 0x3:
154 return IB_MTU_1024;
155 case 0x4:
156 return IB_MTU_2048;
157 case 0x5:
158 return IB_MTU_4096;
159 default:
160 ehca_err(&shca->ib_device, "Unknown MTU size: %x.",
161 fw_mtu);
162 return 0;
163 }
164}
165
166static u8 map_number_of_vls(struct ehca_shca *shca, u32 vl_cap)
167{
168 switch (vl_cap) {
169 case 0x1:
170 return 1;
171 case 0x2:
172 return 2;
173 case 0x3:
174 return 4;
175 case 0x4:
176 return 8;
177 case 0x5:
178 return 15;
179 default:
180		ehca_err(&shca->ib_device, "invalid VL capability: %x.",
181 vl_cap);
182 return 0;
183 }
184}
185
186int ehca_query_port(struct ib_device *ibdev,
187 u8 port, struct ib_port_attr *props)
188{
189 int ret = 0;
190 u64 h_ret;
191 struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
192 ib_device);
193 struct hipz_query_port *rblock;
194
195 rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
196 if (!rblock) {
197 ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
198 return -ENOMEM;
199 }
200
201 h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
202 if (h_ret != H_SUCCESS) {
203 ehca_err(&shca->ib_device, "Can't query port properties");
204 ret = -EINVAL;
205 goto query_port1;
206 }
207
208 memset(props, 0, sizeof(struct ib_port_attr));
209
210 props->active_mtu = props->max_mtu = map_mtu(shca, rblock->max_mtu);
211 props->port_cap_flags = rblock->capability_mask;
212 props->gid_tbl_len = rblock->gid_tbl_len;
213 if (rblock->max_msg_sz)
214 props->max_msg_sz = rblock->max_msg_sz;
215 else
216 props->max_msg_sz = 0x1 << 31;
217 props->bad_pkey_cntr = rblock->bad_pkey_cntr;
218 props->qkey_viol_cntr = rblock->qkey_viol_cntr;
219 props->pkey_tbl_len = rblock->pkey_tbl_len;
220 props->lid = rblock->lid;
221 props->sm_lid = rblock->sm_lid;
222 props->lmc = rblock->lmc;
223 props->sm_sl = rblock->sm_sl;
224 props->subnet_timeout = rblock->subnet_timeout;
225 props->init_type_reply = rblock->init_type_reply;
226 props->max_vl_num = map_number_of_vls(shca, rblock->vl_cap);
227
228 if (rblock->state && rblock->phys_width) {
229 props->phys_state = rblock->phys_pstate;
230 props->state = rblock->phys_state;
231 props->active_width = rblock->phys_width;
232 props->active_speed = rblock->phys_speed;
233 } else {
234 /* old firmware releases don't report physical
235 * port info, so use default values
236 */
237 props->phys_state = 5;
238 props->state = rblock->state;
239 props->active_width = IB_WIDTH_12X;
240 props->active_speed = IB_SPEED_SDR;
241 }
242
243query_port1:
244 ehca_free_fw_ctrlblock(rblock);
245
246 return ret;
247}
248
249int ehca_query_sma_attr(struct ehca_shca *shca,
250 u8 port, struct ehca_sma_attr *attr)
251{
252 int ret = 0;
253 u64 h_ret;
254 struct hipz_query_port *rblock;
255
256 rblock = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
257 if (!rblock) {
258 ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
259 return -ENOMEM;
260 }
261
262 h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
263 if (h_ret != H_SUCCESS) {
264 ehca_err(&shca->ib_device, "Can't query port properties");
265 ret = -EINVAL;
266 goto query_sma_attr1;
267 }
268
269 memset(attr, 0, sizeof(struct ehca_sma_attr));
270
271 attr->lid = rblock->lid;
272 attr->lmc = rblock->lmc;
273 attr->sm_sl = rblock->sm_sl;
274 attr->sm_lid = rblock->sm_lid;
275
276 attr->pkey_tbl_len = rblock->pkey_tbl_len;
277 memcpy(attr->pkeys, rblock->pkey_entries, sizeof(attr->pkeys));
278
279query_sma_attr1:
280 ehca_free_fw_ctrlblock(rblock);
281
282 return ret;
283}
284
285int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
286{
287 int ret = 0;
288 u64 h_ret;
289 struct ehca_shca *shca;
290 struct hipz_query_port *rblock;
291
292 shca = container_of(ibdev, struct ehca_shca, ib_device);
293 if (index > 16) {
294 ehca_err(&shca->ib_device, "Invalid index: %x.", index);
295 return -EINVAL;
296 }
297
298 rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
299 if (!rblock) {
300 ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
301 return -ENOMEM;
302 }
303
304 h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
305 if (h_ret != H_SUCCESS) {
306 ehca_err(&shca->ib_device, "Can't query port properties");
307 ret = -EINVAL;
308 goto query_pkey1;
309 }
310
311 memcpy(pkey, &rblock->pkey_entries + index, sizeof(u16));
312
313query_pkey1:
314 ehca_free_fw_ctrlblock(rblock);
315
316 return ret;
317}
318
319int ehca_query_gid(struct ib_device *ibdev, u8 port,
320 int index, union ib_gid *gid)
321{
322 int ret = 0;
323 u64 h_ret;
324 struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
325 ib_device);
326 struct hipz_query_port *rblock;
327
328 if (index < 0 || index > 255) {
329 ehca_err(&shca->ib_device, "Invalid index: %x.", index);
330 return -EINVAL;
331 }
332
333 rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
334 if (!rblock) {
335 ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
336 return -ENOMEM;
337 }
338
339 h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
340 if (h_ret != H_SUCCESS) {
341 ehca_err(&shca->ib_device, "Can't query port properties");
342 ret = -EINVAL;
343 goto query_gid1;
344 }
345
346 memcpy(&gid->raw[0], &rblock->gid_prefix, sizeof(u64));
347 memcpy(&gid->raw[8], &rblock->guid_entries[index], sizeof(u64));
348
349query_gid1:
350 ehca_free_fw_ctrlblock(rblock);
351
352 return ret;
353}
354
355static const u32 allowed_port_caps = (
356 IB_PORT_SM | IB_PORT_LED_INFO_SUP | IB_PORT_CM_SUP |
357 IB_PORT_SNMP_TUNNEL_SUP | IB_PORT_DEVICE_MGMT_SUP |
358 IB_PORT_VENDOR_CLASS_SUP);
359
360int ehca_modify_port(struct ib_device *ibdev,
361 u8 port, int port_modify_mask,
362 struct ib_port_modify *props)
363{
364 int ret = 0;
365 struct ehca_shca *shca;
366 struct hipz_query_port *rblock;
367 u32 cap;
368 u64 hret;
369
370 shca = container_of(ibdev, struct ehca_shca, ib_device);
371 if ((props->set_port_cap_mask | props->clr_port_cap_mask)
372 & ~allowed_port_caps) {
373 ehca_err(&shca->ib_device, "Non-changeable bits set in masks "
374 "set=%x clr=%x allowed=%x", props->set_port_cap_mask,
375 props->clr_port_cap_mask, allowed_port_caps);
376 return -EINVAL;
377 }
378
379 if (mutex_lock_interruptible(&shca->modify_mutex))
380 return -ERESTARTSYS;
381
382 rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
383 if (!rblock) {
384 ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
385 ret = -ENOMEM;
386 goto modify_port1;
387 }
388
389 hret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
390 if (hret != H_SUCCESS) {
391 ehca_err(&shca->ib_device, "Can't query port properties");
392 ret = -EINVAL;
393 goto modify_port2;
394 }
395
396 cap = (rblock->capability_mask | props->set_port_cap_mask)
397 & ~props->clr_port_cap_mask;
398
399 hret = hipz_h_modify_port(shca->ipz_hca_handle, port,
400 cap, props->init_type, port_modify_mask);
401 if (hret != H_SUCCESS) {
402 ehca_err(&shca->ib_device, "Modify port failed h_ret=%lli",
403 hret);
404 ret = -EINVAL;
405 }
406
407modify_port2:
408 ehca_free_fw_ctrlblock(rblock);
409
410modify_port1:
411 mutex_unlock(&shca->modify_mutex);
412
413 return ret;
414}
diff --git a/drivers/staging/rdma/ehca/ehca_irq.c b/drivers/staging/rdma/ehca/ehca_irq.c
deleted file mode 100644
index 8615d7cf7e01..000000000000
--- a/drivers/staging/rdma/ehca/ehca_irq.c
+++ /dev/null
@@ -1,870 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * Functions for EQs, NEQs and interrupts
5 *
6 * Authors: Heiko J Schick <schickhj@de.ibm.com>
7 * Khadija Souissi <souissi@de.ibm.com>
8 * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
9 * Joachim Fenkes <fenkes@de.ibm.com>
10 *
11 * Copyright (c) 2005 IBM Corporation
12 *
13 * All rights reserved.
14 *
15 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
16 * BSD.
17 *
18 * OpenIB BSD License
19 *
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions are met:
22 *
23 * Redistributions of source code must retain the above copyright notice, this
24 * list of conditions and the following disclaimer.
25 *
26 * Redistributions in binary form must reproduce the above copyright notice,
27 * this list of conditions and the following disclaimer in the documentation
28 * and/or other materials
29 * provided with the distribution.
30 *
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
32 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
33 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
34 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
35 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
36 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
37 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
38 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
39 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
40 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGE.
42 */
43
44#include <linux/slab.h>
45#include <linux/smpboot.h>
46
47#include "ehca_classes.h"
48#include "ehca_irq.h"
49#include "ehca_iverbs.h"
50#include "ehca_tools.h"
51#include "hcp_if.h"
52#include "hipz_fns.h"
53#include "ipz_pt_fn.h"
54
55#define EQE_COMPLETION_EVENT EHCA_BMASK_IBM( 1, 1)
56#define EQE_CQ_QP_NUMBER EHCA_BMASK_IBM( 8, 31)
57#define EQE_EE_IDENTIFIER EHCA_BMASK_IBM( 2, 7)
58#define EQE_CQ_NUMBER EHCA_BMASK_IBM( 8, 31)
59#define EQE_QP_NUMBER EHCA_BMASK_IBM( 8, 31)
60#define EQE_QP_TOKEN EHCA_BMASK_IBM(32, 63)
61#define EQE_CQ_TOKEN EHCA_BMASK_IBM(32, 63)
62
63#define NEQE_COMPLETION_EVENT EHCA_BMASK_IBM( 1, 1)
64#define NEQE_EVENT_CODE EHCA_BMASK_IBM( 2, 7)
65#define NEQE_PORT_NUMBER EHCA_BMASK_IBM( 8, 15)
66#define NEQE_PORT_AVAILABILITY EHCA_BMASK_IBM(16, 16)
67#define NEQE_DISRUPTIVE EHCA_BMASK_IBM(16, 16)
68#define NEQE_SPECIFIC_EVENT EHCA_BMASK_IBM(16, 23)
69
70#define ERROR_DATA_LENGTH EHCA_BMASK_IBM(52, 63)
71#define ERROR_DATA_TYPE EHCA_BMASK_IBM( 0, 7)
72
73static void queue_comp_task(struct ehca_cq *__cq);
74
75static struct ehca_comp_pool *pool;
76
77static inline void comp_event_callback(struct ehca_cq *cq)
78{
79 if (!cq->ib_cq.comp_handler)
80 return;
81
82 spin_lock(&cq->cb_lock);
83 cq->ib_cq.comp_handler(&cq->ib_cq, cq->ib_cq.cq_context);
84 spin_unlock(&cq->cb_lock);
85
86 return;
87}
88
89static void print_error_data(struct ehca_shca *shca, void *data,
90 u64 *rblock, int length)
91{
92 u64 type = EHCA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]);
93 u64 resource = rblock[1];
94
95 switch (type) {
96 case 0x1: /* Queue Pair */
97 {
98 struct ehca_qp *qp = (struct ehca_qp *)data;
99
100 /* only print error data if AER is set */
101 if (rblock[6] == 0)
102 return;
103
104 ehca_err(&shca->ib_device,
105 "QP 0x%x (resource=%llx) has errors.",
106 qp->ib_qp.qp_num, resource);
107 break;
108 }
109 case 0x4: /* Completion Queue */
110 {
111 struct ehca_cq *cq = (struct ehca_cq *)data;
112
113 ehca_err(&shca->ib_device,
114 "CQ 0x%x (resource=%llx) has errors.",
115 cq->cq_number, resource);
116 break;
117 }
118 default:
119 ehca_err(&shca->ib_device,
120 "Unknown error type: %llx on %s.",
121 type, shca->ib_device.name);
122 break;
123 }
124
125 ehca_err(&shca->ib_device, "Error data is available: %llx.", resource);
126 ehca_err(&shca->ib_device, "EHCA ----- error data begin "
127 "---------------------------------------------------");
128 ehca_dmp(rblock, length, "resource=%llx", resource);
129 ehca_err(&shca->ib_device, "EHCA ----- error data end "
130 "----------------------------------------------------");
131
132 return;
133}
134
135int ehca_error_data(struct ehca_shca *shca, void *data,
136 u64 resource)
137{
138
139 unsigned long ret;
140 u64 *rblock;
141 unsigned long block_count;
142
143 rblock = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
144 if (!rblock) {
145 ehca_err(&shca->ib_device, "Cannot allocate rblock memory.");
146 ret = -ENOMEM;
147 goto error_data1;
148 }
149
150 /* rblock must be 4K aligned and should be 4K large */
151 ret = hipz_h_error_data(shca->ipz_hca_handle,
152 resource,
153 rblock,
154 &block_count);
155
156 if (ret == H_R_STATE)
157 ehca_err(&shca->ib_device,
158 "No error data is available: %llx.", resource);
159 else if (ret == H_SUCCESS) {
160 int length;
161
162 length = EHCA_BMASK_GET(ERROR_DATA_LENGTH, rblock[0]);
163
164 if (length > EHCA_PAGESIZE)
165 length = EHCA_PAGESIZE;
166
167 print_error_data(shca, data, rblock, length);
168 } else
169 ehca_err(&shca->ib_device,
170 "Error data could not be fetched: %llx", resource);
171
172 ehca_free_fw_ctrlblock(rblock);
173
174error_data1:
175 return ret;
176
177}
178
179static void dispatch_qp_event(struct ehca_shca *shca, struct ehca_qp *qp,
180 enum ib_event_type event_type)
181{
182 struct ib_event event;
183
184 /* PATH_MIG without the QP ever having been armed is false alarm */
185 if (event_type == IB_EVENT_PATH_MIG && !qp->mig_armed)
186 return;
187
188 event.device = &shca->ib_device;
189 event.event = event_type;
190
191 if (qp->ext_type == EQPT_SRQ) {
192 if (!qp->ib_srq.event_handler)
193 return;
194
195 event.element.srq = &qp->ib_srq;
196 qp->ib_srq.event_handler(&event, qp->ib_srq.srq_context);
197 } else {
198 if (!qp->ib_qp.event_handler)
199 return;
200
201 event.element.qp = &qp->ib_qp;
202 qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context);
203 }
204}
205
206static void qp_event_callback(struct ehca_shca *shca, u64 eqe,
207 enum ib_event_type event_type, int fatal)
208{
209 struct ehca_qp *qp;
210 u32 token = EHCA_BMASK_GET(EQE_QP_TOKEN, eqe);
211
212 read_lock(&ehca_qp_idr_lock);
213 qp = idr_find(&ehca_qp_idr, token);
214 if (qp)
215 atomic_inc(&qp->nr_events);
216 read_unlock(&ehca_qp_idr_lock);
217
218 if (!qp)
219 return;
220
221 if (fatal)
222 ehca_error_data(shca, qp, qp->ipz_qp_handle.handle);
223
224 dispatch_qp_event(shca, qp, fatal && qp->ext_type == EQPT_SRQ ?
225 IB_EVENT_SRQ_ERR : event_type);
226
227 /*
228 * eHCA only processes one WQE at a time for SRQ base QPs,
229 * so the last WQE has been processed as soon as the QP enters
230 * error state.
231 */
232 if (fatal && qp->ext_type == EQPT_SRQBASE)
233 dispatch_qp_event(shca, qp, IB_EVENT_QP_LAST_WQE_REACHED);
234
235 if (atomic_dec_and_test(&qp->nr_events))
236 wake_up(&qp->wait_completion);
237 return;
238}
239
240static void cq_event_callback(struct ehca_shca *shca,
241 u64 eqe)
242{
243 struct ehca_cq *cq;
244 u32 token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe);
245
246 read_lock(&ehca_cq_idr_lock);
247 cq = idr_find(&ehca_cq_idr, token);
248 if (cq)
249 atomic_inc(&cq->nr_events);
250 read_unlock(&ehca_cq_idr_lock);
251
252 if (!cq)
253 return;
254
255 ehca_error_data(shca, cq, cq->ipz_cq_handle.handle);
256
257 if (atomic_dec_and_test(&cq->nr_events))
258 wake_up(&cq->wait_completion);
259
260 return;
261}
262
263static void parse_identifier(struct ehca_shca *shca, u64 eqe)
264{
265 u8 identifier = EHCA_BMASK_GET(EQE_EE_IDENTIFIER, eqe);
266
267 switch (identifier) {
268 case 0x02: /* path migrated */
269 qp_event_callback(shca, eqe, IB_EVENT_PATH_MIG, 0);
270 break;
271 case 0x03: /* communication established */
272 qp_event_callback(shca, eqe, IB_EVENT_COMM_EST, 0);
273 break;
274 case 0x04: /* send queue drained */
275 qp_event_callback(shca, eqe, IB_EVENT_SQ_DRAINED, 0);
276 break;
277 case 0x05: /* QP error */
278 case 0x06: /* QP error */
279 qp_event_callback(shca, eqe, IB_EVENT_QP_FATAL, 1);
280 break;
281 case 0x07: /* CQ error */
282 case 0x08: /* CQ error */
283 cq_event_callback(shca, eqe);
284 break;
285 case 0x09: /* MRMWPTE error */
286 ehca_err(&shca->ib_device, "MRMWPTE error.");
287 break;
288 case 0x0A: /* port event */
289 ehca_err(&shca->ib_device, "Port event.");
290 break;
291 case 0x0B: /* MR access error */
292 ehca_err(&shca->ib_device, "MR access error.");
293 break;
294 case 0x0C: /* EQ error */
295 ehca_err(&shca->ib_device, "EQ error.");
296 break;
297 case 0x0D: /* P/Q_Key mismatch */
298 ehca_err(&shca->ib_device, "P/Q_Key mismatch.");
299 break;
300 case 0x10: /* sampling complete */
301 ehca_err(&shca->ib_device, "Sampling complete.");
302 break;
303 case 0x11: /* unaffiliated access error */
304 ehca_err(&shca->ib_device, "Unaffiliated access error.");
305 break;
306 case 0x12: /* path migrating */
307 ehca_err(&shca->ib_device, "Path migrating.");
308 break;
309 case 0x13: /* interface trace stopped */
310 ehca_err(&shca->ib_device, "Interface trace stopped.");
311 break;
312 case 0x14: /* first error capture info available */
313 ehca_info(&shca->ib_device, "First error capture available");
314 break;
315 case 0x15: /* SRQ limit reached */
316 qp_event_callback(shca, eqe, IB_EVENT_SRQ_LIMIT_REACHED, 0);
317 break;
318 default:
319 ehca_err(&shca->ib_device, "Unknown identifier: %x on %s.",
320 identifier, shca->ib_device.name);
321 break;
322 }
323
324 return;
325}
326
327static void dispatch_port_event(struct ehca_shca *shca, int port_num,
328 enum ib_event_type type, const char *msg)
329{
330 struct ib_event event;
331
332 ehca_info(&shca->ib_device, "port %d %s.", port_num, msg);
333 event.device = &shca->ib_device;
334 event.event = type;
335 event.element.port_num = port_num;
336 ib_dispatch_event(&event);
337}
338
339static void notify_port_conf_change(struct ehca_shca *shca, int port_num)
340{
341 struct ehca_sma_attr new_attr;
342 struct ehca_sma_attr *old_attr = &shca->sport[port_num - 1].saved_attr;
343
344 ehca_query_sma_attr(shca, port_num, &new_attr);
345
346 if (new_attr.sm_sl != old_attr->sm_sl ||
347 new_attr.sm_lid != old_attr->sm_lid)
348 dispatch_port_event(shca, port_num, IB_EVENT_SM_CHANGE,
349 "SM changed");
350
351 if (new_attr.lid != old_attr->lid ||
352 new_attr.lmc != old_attr->lmc)
353 dispatch_port_event(shca, port_num, IB_EVENT_LID_CHANGE,
354 "LID changed");
355
356 if (new_attr.pkey_tbl_len != old_attr->pkey_tbl_len ||
357 memcmp(new_attr.pkeys, old_attr->pkeys,
358 sizeof(u16) * new_attr.pkey_tbl_len))
359 dispatch_port_event(shca, port_num, IB_EVENT_PKEY_CHANGE,
360 "P_Key changed");
361
362 *old_attr = new_attr;
363}
364
365/* replay modify_qp for sqps -- return 0 if all is well, 1 if AQP1 destroyed */
366static int replay_modify_qp(struct ehca_sport *sport)
367{
368 int aqp1_destroyed;
369 unsigned long flags;
370
371 spin_lock_irqsave(&sport->mod_sqp_lock, flags);
372
373 aqp1_destroyed = !sport->ibqp_sqp[IB_QPT_GSI];
374
375 if (sport->ibqp_sqp[IB_QPT_SMI])
376 ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_SMI]);
377 if (!aqp1_destroyed)
378 ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_GSI]);
379
380 spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
381
382 return aqp1_destroyed;
383}
384
385static void parse_ec(struct ehca_shca *shca, u64 eqe)
386{
387 u8 ec = EHCA_BMASK_GET(NEQE_EVENT_CODE, eqe);
388 u8 port = EHCA_BMASK_GET(NEQE_PORT_NUMBER, eqe);
389 u8 spec_event;
390 struct ehca_sport *sport = &shca->sport[port - 1];
391
392 switch (ec) {
393 case 0x30: /* port availability change */
394 if (EHCA_BMASK_GET(NEQE_PORT_AVAILABILITY, eqe)) {
395 /* only replay modify_qp calls in autodetect mode;
396 * if AQP1 was destroyed, the port is already down
397 * again and we can drop the event.
398 */
399 if (ehca_nr_ports < 0)
400 if (replay_modify_qp(sport))
401 break;
402
403 sport->port_state = IB_PORT_ACTIVE;
404 dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
405 "is active");
406 ehca_query_sma_attr(shca, port, &sport->saved_attr);
407 } else {
408 sport->port_state = IB_PORT_DOWN;
409 dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
410 "is inactive");
411 }
412 break;
413 case 0x31:
414 /* port configuration change
415 * disruptive change is caused by
416 * LID, PKEY or SM change
417 */
418 if (EHCA_BMASK_GET(NEQE_DISRUPTIVE, eqe)) {
419 ehca_warn(&shca->ib_device, "disruptive port "
420 "%d configuration change", port);
421
422 sport->port_state = IB_PORT_DOWN;
423 dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
424 "is inactive");
425
426 sport->port_state = IB_PORT_ACTIVE;
427 dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
428 "is active");
429 ehca_query_sma_attr(shca, port,
430 &sport->saved_attr);
431 } else
432 notify_port_conf_change(shca, port);
433 break;
434 case 0x32: /* adapter malfunction */
435 ehca_err(&shca->ib_device, "Adapter malfunction.");
436 break;
437 case 0x33: /* trace stopped */
438		ehca_err(&shca->ib_device, "Trace stopped.");
439 break;
440 case 0x34: /* util async event */
441 spec_event = EHCA_BMASK_GET(NEQE_SPECIFIC_EVENT, eqe);
442 if (spec_event == 0x80) /* client reregister required */
443 dispatch_port_event(shca, port,
444 IB_EVENT_CLIENT_REREGISTER,
445 "client reregister req.");
446 else
447 ehca_warn(&shca->ib_device, "Unknown util async "
448 "event %x on port %x", spec_event, port);
449 break;
450 default:
451 ehca_err(&shca->ib_device, "Unknown event code: %x on %s.",
452 ec, shca->ib_device.name);
453 break;
454 }
455
456 return;
457}
458
459static inline void reset_eq_pending(struct ehca_cq *cq)
460{
461 u64 CQx_EP;
462 struct h_galpa gal = cq->galpas.kernel;
463
464 hipz_galpa_store_cq(gal, cqx_ep, 0x0);
465 CQx_EP = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_ep));
466
467 return;
468}
469
470irqreturn_t ehca_interrupt_neq(int irq, void *dev_id)
471{
472 struct ehca_shca *shca = (struct ehca_shca*)dev_id;
473
474 tasklet_hi_schedule(&shca->neq.interrupt_task);
475
476 return IRQ_HANDLED;
477}
478
479void ehca_tasklet_neq(unsigned long data)
480{
481 struct ehca_shca *shca = (struct ehca_shca*)data;
482 struct ehca_eqe *eqe;
483 u64 ret;
484
485 eqe = ehca_poll_eq(shca, &shca->neq);
486
487 while (eqe) {
488 if (!EHCA_BMASK_GET(NEQE_COMPLETION_EVENT, eqe->entry))
489 parse_ec(shca, eqe->entry);
490
491 eqe = ehca_poll_eq(shca, &shca->neq);
492 }
493
494 ret = hipz_h_reset_event(shca->ipz_hca_handle,
495 shca->neq.ipz_eq_handle, 0xFFFFFFFFFFFFFFFFL);
496
497 if (ret != H_SUCCESS)
498 ehca_err(&shca->ib_device, "Can't clear notification events.");
499
500 return;
501}
502
503irqreturn_t ehca_interrupt_eq(int irq, void *dev_id)
504{
505 struct ehca_shca *shca = (struct ehca_shca*)dev_id;
506
507 tasklet_hi_schedule(&shca->eq.interrupt_task);
508
509 return IRQ_HANDLED;
510}
511
512
513static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
514{
515 u64 eqe_value;
516 u32 token;
517 struct ehca_cq *cq;
518
519 eqe_value = eqe->entry;
520 ehca_dbg(&shca->ib_device, "eqe_value=%llx", eqe_value);
521 if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
522 ehca_dbg(&shca->ib_device, "Got completion event");
523 token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
524 read_lock(&ehca_cq_idr_lock);
525 cq = idr_find(&ehca_cq_idr, token);
526 if (cq)
527 atomic_inc(&cq->nr_events);
528 read_unlock(&ehca_cq_idr_lock);
529 if (cq == NULL) {
530 ehca_err(&shca->ib_device,
531 "Invalid eqe for non-existing cq token=%x",
532 token);
533 return;
534 }
535 reset_eq_pending(cq);
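		/* With scaling enabled, hand the completion off to a per-CPU
		 * comp thread; otherwise run the callback inline and drop
		 * the reference taken above. */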
536 if (ehca_scaling_code)
537 queue_comp_task(cq);
538 else {
539 comp_event_callback(cq);
540 if (atomic_dec_and_test(&cq->nr_events))
541 wake_up(&cq->wait_completion);
542 }
543 } else {
544 ehca_dbg(&shca->ib_device, "Got non completion event");
545 parse_identifier(shca, eqe_value);
546 }
547}
548
549void ehca_process_eq(struct ehca_shca *shca, int is_irq)
550{
551 struct ehca_eq *eq = &shca->eq;
552 struct ehca_eqe_cache_entry *eqe_cache = eq->eqe_cache;
553 u64 eqe_value, ret;
554 int eqe_cnt, i;
555 int eq_empty = 0;
556
557 spin_lock(&eq->irq_spinlock);
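	/* When called from the tasklet (is_irq), poll the interrupt state
	 * until the firmware reports the EQ interrupt source as idle,
	 * giving up after max_query_cnt attempts. */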
558 if (is_irq) {
559 const int max_query_cnt = 100;
560 int query_cnt = 0;
561 int int_state = 1;
562 do {
563 int_state = hipz_h_query_int_state(
564 shca->ipz_hca_handle, eq->ist);
565 query_cnt++;
566 iosync();
567 } while (int_state && query_cnt < max_query_cnt);
568		if (unlikely(query_cnt == max_query_cnt))
569 ehca_dbg(&shca->ib_device, "int_state=%x query_cnt=%x",
570 int_state, query_cnt);
571 }
572
573 /* read out all eqes */
574 eqe_cnt = 0;
575 do {
576 u32 token;
577 eqe_cache[eqe_cnt].eqe = ehca_poll_eq(shca, eq);
578 if (!eqe_cache[eqe_cnt].eqe)
579 break;
580 eqe_value = eqe_cache[eqe_cnt].eqe->entry;
581 if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
582 token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
583 read_lock(&ehca_cq_idr_lock);
584 eqe_cache[eqe_cnt].cq = idr_find(&ehca_cq_idr, token);
585 if (eqe_cache[eqe_cnt].cq)
586 atomic_inc(&eqe_cache[eqe_cnt].cq->nr_events);
587 read_unlock(&ehca_cq_idr_lock);
588 if (!eqe_cache[eqe_cnt].cq) {
589 ehca_err(&shca->ib_device,
590 "Invalid eqe for non-existing cq "
591 "token=%x", token);
592 continue;
593 }
594 } else
595 eqe_cache[eqe_cnt].cq = NULL;
596 eqe_cnt++;
597 } while (eqe_cnt < EHCA_EQE_CACHE_SIZE);
598 if (!eqe_cnt) {
599 if (is_irq)
600 ehca_dbg(&shca->ib_device,
601 "No eqe found for irq event");
602 goto unlock_irq_spinlock;
603 } else if (!is_irq) {
604 ret = hipz_h_eoi(eq->ist);
605 if (ret != H_SUCCESS)
606 ehca_err(&shca->ib_device,
607 "bad return code EOI -rc = %lld\n", ret);
608 ehca_dbg(&shca->ib_device, "deadman found %x eqe", eqe_cnt);
609 }
610 if (unlikely(eqe_cnt == EHCA_EQE_CACHE_SIZE))
611 ehca_dbg(&shca->ib_device, "too many eqes for one irq event");
612 /* enable irq for new packets */
613 for (i = 0; i < eqe_cnt; i++) {
614 if (eq->eqe_cache[i].cq)
615 reset_eq_pending(eq->eqe_cache[i].cq);
616 }
617 /* check eq */
618 spin_lock(&eq->spinlock);
619 eq_empty = (!ipz_eqit_eq_peek_valid(&shca->eq.ipz_queue));
620 spin_unlock(&eq->spinlock);
621 /* call completion handler for cached eqes */
622 for (i = 0; i < eqe_cnt; i++)
623 if (eq->eqe_cache[i].cq) {
624 if (ehca_scaling_code)
625 queue_comp_task(eq->eqe_cache[i].cq);
626 else {
627 struct ehca_cq *cq = eq->eqe_cache[i].cq;
628 comp_event_callback(cq);
629 if (atomic_dec_and_test(&cq->nr_events))
630 wake_up(&cq->wait_completion);
631 }
632 } else {
633 ehca_dbg(&shca->ib_device, "Got non completion event");
634 parse_identifier(shca, eq->eqe_cache[i].eqe->entry);
635 }
636 /* poll eq if not empty */
637 if (eq_empty)
638 goto unlock_irq_spinlock;
639 do {
640 struct ehca_eqe *eqe;
641 eqe = ehca_poll_eq(shca, &shca->eq);
642 if (!eqe)
643 break;
644 process_eqe(shca, eqe);
645 } while (1);
646
647unlock_irq_spinlock:
648 spin_unlock(&eq->irq_spinlock);
649}
650
651void ehca_tasklet_eq(unsigned long data)
652{
653 ehca_process_eq((struct ehca_shca*)data, 1);
654}
655
656static int find_next_online_cpu(struct ehca_comp_pool *pool)
657{
658 int cpu;
659 unsigned long flags;
660
661 WARN_ON_ONCE(!in_interrupt());
662 if (ehca_debug_level >= 3)
663 ehca_dmp(cpu_online_mask, cpumask_size(), "");
664
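	/* Round-robin over the online CPUs, starting after the last CPU
	 * used and skipping CPUs whose comp task is inactive (parked). */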
665 spin_lock_irqsave(&pool->last_cpu_lock, flags);
666 do {
667 cpu = cpumask_next(pool->last_cpu, cpu_online_mask);
668 if (cpu >= nr_cpu_ids)
669 cpu = cpumask_first(cpu_online_mask);
670 pool->last_cpu = cpu;
671 } while (!per_cpu_ptr(pool->cpu_comp_tasks, cpu)->active);
672 spin_unlock_irqrestore(&pool->last_cpu_lock, flags);
673
674 return cpu;
675}
676
677static void __queue_comp_task(struct ehca_cq *__cq,
678 struct ehca_cpu_comp_task *cct,
679 struct task_struct *thread)
680{
681 unsigned long flags;
682
683 spin_lock_irqsave(&cct->task_lock, flags);
684 spin_lock(&__cq->task_lock);
685
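	/* Only the first pending callback puts the CQ on the per-CPU list
	 * and wakes the comp thread; later events just bump the counter. */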
686 if (__cq->nr_callbacks == 0) {
687 __cq->nr_callbacks++;
688 list_add_tail(&__cq->entry, &cct->cq_list);
689 cct->cq_jobs++;
690 wake_up_process(thread);
691 } else
692 __cq->nr_callbacks++;
693
694 spin_unlock(&__cq->task_lock);
695 spin_unlock_irqrestore(&cct->task_lock, flags);
696}
697
698static void queue_comp_task(struct ehca_cq *__cq)
699{
700 int cpu_id;
701 struct ehca_cpu_comp_task *cct;
702 struct task_struct *thread;
703 int cq_jobs;
704 unsigned long flags;
705
706 cpu_id = find_next_online_cpu(pool);
707 BUG_ON(!cpu_online(cpu_id));
708
709 cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
710 thread = *per_cpu_ptr(pool->cpu_comp_threads, cpu_id);
711 BUG_ON(!cct || !thread);
712
713 spin_lock_irqsave(&cct->task_lock, flags);
714 cq_jobs = cct->cq_jobs;
715 spin_unlock_irqrestore(&cct->task_lock, flags);
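	/* If the chosen CPU already has work queued, move on to the next
	 * online CPU to spread completions across the pool. */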
716 if (cq_jobs > 0) {
717 cpu_id = find_next_online_cpu(pool);
718 cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
719 thread = *per_cpu_ptr(pool->cpu_comp_threads, cpu_id);
720 BUG_ON(!cct || !thread);
721 }
722 __queue_comp_task(__cq, cct, thread);
723}
724
725static void run_comp_task(struct ehca_cpu_comp_task *cct)
726{
727 struct ehca_cq *cq;
728
729 while (!list_empty(&cct->cq_list)) {
730 cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
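		/* Drop the task lock while the completion callback runs so
		 * new work can be queued; retake it to update bookkeeping. */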
731 spin_unlock_irq(&cct->task_lock);
732
733 comp_event_callback(cq);
734 if (atomic_dec_and_test(&cq->nr_events))
735 wake_up(&cq->wait_completion);
736
737 spin_lock_irq(&cct->task_lock);
738 spin_lock(&cq->task_lock);
739 cq->nr_callbacks--;
740 if (!cq->nr_callbacks) {
741 list_del_init(cct->cq_list.next);
742 cct->cq_jobs--;
743 }
744 spin_unlock(&cq->task_lock);
745 }
746}
747
748static void comp_task_park(unsigned int cpu)
749{
750 struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
751 struct ehca_cpu_comp_task *target;
752 struct task_struct *thread;
753 struct ehca_cq *cq, *tmp;
754 LIST_HEAD(list);
755
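	/* The CPU is going away: mark its comp task inactive, detach its
	 * pending CQ list and requeue the work on the next online CPU. */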
756 spin_lock_irq(&cct->task_lock);
757 cct->cq_jobs = 0;
758 cct->active = 0;
759 list_splice_init(&cct->cq_list, &list);
760 spin_unlock_irq(&cct->task_lock);
761
762 cpu = find_next_online_cpu(pool);
763 target = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
764 thread = *per_cpu_ptr(pool->cpu_comp_threads, cpu);
765 spin_lock_irq(&target->task_lock);
766 list_for_each_entry_safe(cq, tmp, &list, entry) {
767 list_del(&cq->entry);
768 __queue_comp_task(cq, target, thread);
769 }
770 spin_unlock_irq(&target->task_lock);
771}
772
773static void comp_task_stop(unsigned int cpu, bool online)
774{
775 struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
776
777 spin_lock_irq(&cct->task_lock);
778 cct->cq_jobs = 0;
779 cct->active = 0;
780 WARN_ON(!list_empty(&cct->cq_list));
781 spin_unlock_irq(&cct->task_lock);
782}
783
784static int comp_task_should_run(unsigned int cpu)
785{
786 struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
787
788 return cct->cq_jobs;
789}
790
791static void comp_task(unsigned int cpu)
792{
793 struct ehca_cpu_comp_task *cct = this_cpu_ptr(pool->cpu_comp_tasks);
794 int cql_empty;
795
796 spin_lock_irq(&cct->task_lock);
797 cql_empty = list_empty(&cct->cq_list);
798 if (!cql_empty) {
799 __set_current_state(TASK_RUNNING);
800 run_comp_task(cct);
801 }
802 spin_unlock_irq(&cct->task_lock);
803}
804
805static struct smp_hotplug_thread comp_pool_threads = {
806 .thread_should_run = comp_task_should_run,
807 .thread_fn = comp_task,
808 .thread_comm = "ehca_comp/%u",
809 .cleanup = comp_task_stop,
810 .park = comp_task_park,
811};
812
813int ehca_create_comp_pool(void)
814{
815 int cpu, ret = -ENOMEM;
816
817 if (!ehca_scaling_code)
818 return 0;
819
820 pool = kzalloc(sizeof(struct ehca_comp_pool), GFP_KERNEL);
821 if (pool == NULL)
822 return -ENOMEM;
823
824 spin_lock_init(&pool->last_cpu_lock);
825 pool->last_cpu = cpumask_any(cpu_online_mask);
826
827 pool->cpu_comp_tasks = alloc_percpu(struct ehca_cpu_comp_task);
828 if (!pool->cpu_comp_tasks)
829 goto out_pool;
830
831 pool->cpu_comp_threads = alloc_percpu(struct task_struct *);
832 if (!pool->cpu_comp_threads)
833 goto out_tasks;
834
835 for_each_present_cpu(cpu) {
836 struct ehca_cpu_comp_task *cct;
837
838 cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
839 spin_lock_init(&cct->task_lock);
840 INIT_LIST_HEAD(&cct->cq_list);
841 }
842
843 comp_pool_threads.store = pool->cpu_comp_threads;
844 ret = smpboot_register_percpu_thread(&comp_pool_threads);
845 if (ret)
846 goto out_threads;
847
848 pr_info("eHCA scaling code enabled\n");
849 return ret;
850
851out_threads:
852 free_percpu(pool->cpu_comp_threads);
853out_tasks:
854 free_percpu(pool->cpu_comp_tasks);
855out_pool:
856 kfree(pool);
857 return ret;
858}
859
860void ehca_destroy_comp_pool(void)
861{
862 if (!ehca_scaling_code)
863 return;
864
865 smpboot_unregister_percpu_thread(&comp_pool_threads);
866
867 free_percpu(pool->cpu_comp_threads);
868 free_percpu(pool->cpu_comp_tasks);
869 kfree(pool);
870}
diff --git a/drivers/staging/rdma/ehca/ehca_irq.h b/drivers/staging/rdma/ehca/ehca_irq.h
deleted file mode 100644
index 5370199f08c7..000000000000
--- a/drivers/staging/rdma/ehca/ehca_irq.h
+++ /dev/null
@@ -1,77 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * Function definitions and structs for EQs, NEQs and interrupts
5 *
6 * Authors: Heiko J Schick <schickhj@de.ibm.com>
7 * Khadija Souissi <souissi@de.ibm.com>
8 *
9 * Copyright (c) 2005 IBM Corporation
10 *
11 * All rights reserved.
12 *
13 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
14 * BSD.
15 *
16 * OpenIB BSD License
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are met:
20 *
21 * Redistributions of source code must retain the above copyright notice, this
22 * list of conditions and the following disclaimer.
23 *
24 * Redistributions in binary form must reproduce the above copyright notice,
25 * this list of conditions and the following disclaimer in the documentation
26 * and/or other materials
27 * provided with the distribution.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
30 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
33 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
34 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
35 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
36 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
37 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
38 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
39 * POSSIBILITY OF SUCH DAMAGE.
40 */
41
42#ifndef __EHCA_IRQ_H
43#define __EHCA_IRQ_H
44
45
46struct ehca_shca;
47
48#include <linux/interrupt.h>
49#include <linux/types.h>
50
51int ehca_error_data(struct ehca_shca *shca, void *data, u64 resource);
52
53irqreturn_t ehca_interrupt_neq(int irq, void *dev_id);
54void ehca_tasklet_neq(unsigned long data);
55
56irqreturn_t ehca_interrupt_eq(int irq, void *dev_id);
57void ehca_tasklet_eq(unsigned long data);
58void ehca_process_eq(struct ehca_shca *shca, int is_irq);
59
60struct ehca_cpu_comp_task {
61 struct list_head cq_list;
62 spinlock_t task_lock;
63 int cq_jobs;
64 int active;
65};
66
67struct ehca_comp_pool {
68 struct ehca_cpu_comp_task __percpu *cpu_comp_tasks;
69 struct task_struct * __percpu *cpu_comp_threads;
70 int last_cpu;
71 spinlock_t last_cpu_lock;
72};
73
74int ehca_create_comp_pool(void);
75void ehca_destroy_comp_pool(void);
76
77#endif
diff --git a/drivers/staging/rdma/ehca/ehca_iverbs.h b/drivers/staging/rdma/ehca/ehca_iverbs.h
deleted file mode 100644
index cca5933fcda6..000000000000
--- a/drivers/staging/rdma/ehca/ehca_iverbs.h
+++ /dev/null
@@ -1,202 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * Function definitions for internal functions
5 *
6 * Authors: Heiko J Schick <schickhj@de.ibm.com>
7 * Dietmar Decker <ddecker@de.ibm.com>
8 *
9 * Copyright (c) 2005 IBM Corporation
10 *
11 * All rights reserved.
12 *
13 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
14 * BSD.
15 *
16 * OpenIB BSD License
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are met:
20 *
21 * Redistributions of source code must retain the above copyright notice, this
22 * list of conditions and the following disclaimer.
23 *
24 * Redistributions in binary form must reproduce the above copyright notice,
25 * this list of conditions and the following disclaimer in the documentation
26 * and/or other materials
27 * provided with the distribution.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
30 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
33 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
34 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
35 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
36 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
37 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
38 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
39 * POSSIBILITY OF SUCH DAMAGE.
40 */
41
42#ifndef __EHCA_IVERBS_H__
43#define __EHCA_IVERBS_H__
44
45#include "ehca_classes.h"
46
47int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
48 struct ib_udata *uhw);
49
50int ehca_query_port(struct ib_device *ibdev, u8 port,
51 struct ib_port_attr *props);
52
53enum rdma_protocol_type
54ehca_query_protocol(struct ib_device *device, u8 port_num);
55
56int ehca_query_sma_attr(struct ehca_shca *shca, u8 port,
57 struct ehca_sma_attr *attr);
58
59int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 * pkey);
60
61int ehca_query_gid(struct ib_device *ibdev, u8 port, int index,
62 union ib_gid *gid);
63
64int ehca_modify_port(struct ib_device *ibdev, u8 port, int port_modify_mask,
65 struct ib_port_modify *props);
66
67struct ib_pd *ehca_alloc_pd(struct ib_device *device,
68 struct ib_ucontext *context,
69 struct ib_udata *udata);
70
71int ehca_dealloc_pd(struct ib_pd *pd);
72
73struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
74
75int ehca_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
76
77int ehca_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
78
79int ehca_destroy_ah(struct ib_ah *ah);
80
81struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
82
83struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
84 u64 virt, int mr_access_flags,
85 struct ib_udata *udata);
86
87int ehca_dereg_mr(struct ib_mr *mr);
88
89struct ib_mw *ehca_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
90
91int ehca_dealloc_mw(struct ib_mw *mw);
92
93struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
94 int mr_access_flags,
95 struct ib_fmr_attr *fmr_attr);
96
97int ehca_map_phys_fmr(struct ib_fmr *fmr,
98 u64 *page_list, int list_len, u64 iova);
99
100int ehca_unmap_fmr(struct list_head *fmr_list);
101
102int ehca_dealloc_fmr(struct ib_fmr *fmr);
103
104enum ehca_eq_type {
105 EHCA_EQ = 0, /* Event Queue */
106 EHCA_NEQ /* Notification Event Queue */
107};
108
109int ehca_create_eq(struct ehca_shca *shca, struct ehca_eq *eq,
110 enum ehca_eq_type type, const u32 length);
111
112int ehca_destroy_eq(struct ehca_shca *shca, struct ehca_eq *eq);
113
114void *ehca_poll_eq(struct ehca_shca *shca, struct ehca_eq *eq);
115
116
117struct ib_cq *ehca_create_cq(struct ib_device *device,
118 const struct ib_cq_init_attr *attr,
119 struct ib_ucontext *context,
120 struct ib_udata *udata);
121
122int ehca_destroy_cq(struct ib_cq *cq);
123
124int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
125
126int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
127
128int ehca_peek_cq(struct ib_cq *cq, int wc_cnt);
129
130int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags notify_flags);
131
132struct ib_qp *ehca_create_qp(struct ib_pd *pd,
133 struct ib_qp_init_attr *init_attr,
134 struct ib_udata *udata);
135
136int ehca_destroy_qp(struct ib_qp *qp);
137
138int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
139 struct ib_udata *udata);
140
141int ehca_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
142 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
143
144int ehca_post_send(struct ib_qp *qp, struct ib_send_wr *send_wr,
145 struct ib_send_wr **bad_send_wr);
146
147int ehca_post_recv(struct ib_qp *qp, struct ib_recv_wr *recv_wr,
148 struct ib_recv_wr **bad_recv_wr);
149
150int ehca_post_srq_recv(struct ib_srq *srq,
151 struct ib_recv_wr *recv_wr,
152 struct ib_recv_wr **bad_recv_wr);
153
154struct ib_srq *ehca_create_srq(struct ib_pd *pd,
155 struct ib_srq_init_attr *init_attr,
156 struct ib_udata *udata);
157
158int ehca_modify_srq(struct ib_srq *srq, struct ib_srq_attr *attr,
159 enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
160
161int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
162
163int ehca_destroy_srq(struct ib_srq *srq);
164
165u64 ehca_define_sqp(struct ehca_shca *shca, struct ehca_qp *ibqp,
166 struct ib_qp_init_attr *qp_init_attr);
167
168int ehca_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
169
170int ehca_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
171
172struct ib_ucontext *ehca_alloc_ucontext(struct ib_device *device,
173 struct ib_udata *udata);
174
175int ehca_dealloc_ucontext(struct ib_ucontext *context);
176
177int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
178
179int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
180 const struct ib_wc *in_wc, const struct ib_grh *in_grh,
181 const struct ib_mad_hdr *in, size_t in_mad_size,
182 struct ib_mad_hdr *out, size_t *out_mad_size,
183 u16 *out_mad_pkey_index);
184
185void ehca_poll_eqs(unsigned long data);
186
187int ehca_calc_ipd(struct ehca_shca *shca, int port,
188 enum ib_rate path_rate, u32 *ipd);
189
190void ehca_add_to_err_list(struct ehca_qp *qp, int on_sq);
191
192#ifdef CONFIG_PPC_64K_PAGES
193void *ehca_alloc_fw_ctrlblock(gfp_t flags);
194void ehca_free_fw_ctrlblock(void *ptr);
195#else
196#define ehca_alloc_fw_ctrlblock(flags) ((void *)get_zeroed_page(flags))
197#define ehca_free_fw_ctrlblock(ptr) free_page((unsigned long)(ptr))
198#endif
199
200void ehca_recover_sqp(struct ib_qp *sqp);
201
202#endif
diff --git a/drivers/staging/rdma/ehca/ehca_main.c b/drivers/staging/rdma/ehca/ehca_main.c
deleted file mode 100644
index 832f22f40862..000000000000
--- a/drivers/staging/rdma/ehca/ehca_main.c
+++ /dev/null
@@ -1,1118 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * module start stop, hca detection
5 *
6 * Authors: Heiko J Schick <schickhj@de.ibm.com>
7 * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
8 * Joachim Fenkes <fenkes@de.ibm.com>
9 *
10 * Copyright (c) 2005 IBM Corporation
11 *
12 * All rights reserved.
13 *
14 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
15 * BSD.
16 *
17 * OpenIB BSD License
18 *
19 * Redistribution and use in source and binary forms, with or without
20 * modification, are permitted provided that the following conditions are met:
21 *
22 * Redistributions of source code must retain the above copyright notice, this
23 * list of conditions and the following disclaimer.
24 *
25 * Redistributions in binary form must reproduce the above copyright notice,
26 * this list of conditions and the following disclaimer in the documentation
27 * and/or other materials
28 * provided with the distribution.
29 *
30 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
31 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
32 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
33 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
34 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
35 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
36 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
37 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
38 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
39 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
40 * POSSIBILITY OF SUCH DAMAGE.
41 */
42
43#ifdef CONFIG_PPC_64K_PAGES
44#include <linux/slab.h>
45#endif
46
47#include <linux/notifier.h>
48#include <linux/memory.h>
49#include <rdma/ib_mad.h>
50#include "ehca_classes.h"
51#include "ehca_iverbs.h"
52#include "ehca_mrmw.h"
53#include "ehca_tools.h"
54#include "hcp_if.h"
55
56#define HCAD_VERSION "0029"
57
58MODULE_LICENSE("Dual BSD/GPL");
59MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
60MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver");
61MODULE_VERSION(HCAD_VERSION);
62
63static bool ehca_open_aqp1 = 0;
64static int ehca_hw_level = 0;
65static bool ehca_poll_all_eqs = 1;
66
67int ehca_debug_level = 0;
68int ehca_nr_ports = -1;
69bool ehca_use_hp_mr = 0;
70int ehca_port_act_time = 30;
71int ehca_static_rate = -1;
72bool ehca_scaling_code = 0;
73int ehca_lock_hcalls = -1;
74int ehca_max_cq = -1;
75int ehca_max_qp = -1;
76
77module_param_named(open_aqp1, ehca_open_aqp1, bool, S_IRUGO);
78module_param_named(debug_level, ehca_debug_level, int, S_IRUGO);
79module_param_named(hw_level, ehca_hw_level, int, S_IRUGO);
80module_param_named(nr_ports, ehca_nr_ports, int, S_IRUGO);
81module_param_named(use_hp_mr, ehca_use_hp_mr, bool, S_IRUGO);
82module_param_named(port_act_time, ehca_port_act_time, int, S_IRUGO);
83module_param_named(poll_all_eqs, ehca_poll_all_eqs, bool, S_IRUGO);
84module_param_named(static_rate, ehca_static_rate, int, S_IRUGO);
85module_param_named(scaling_code, ehca_scaling_code, bool, S_IRUGO);
86module_param_named(lock_hcalls, ehca_lock_hcalls, bint, S_IRUGO);
87module_param_named(number_of_cqs, ehca_max_cq, int, S_IRUGO);
88module_param_named(number_of_qps, ehca_max_qp, int, S_IRUGO);
89
90MODULE_PARM_DESC(open_aqp1,
91 "Open AQP1 on startup (default: no)");
92MODULE_PARM_DESC(debug_level,
93 "Amount of debug output (0: none (default), 1: traces, "
94 "2: some dumps, 3: lots)");
95MODULE_PARM_DESC(hw_level,
96 "Hardware level (0: autosensing (default), "
97 "0x10..0x14: eHCA, 0x20..0x23: eHCA2)");
98MODULE_PARM_DESC(nr_ports,
99 "number of connected ports (-1: autodetect (default), "
100 "1: port one only, 2: two ports)");
101MODULE_PARM_DESC(use_hp_mr,
102 "Use high performance MRs (default: no)");
103MODULE_PARM_DESC(port_act_time,
104 "Time to wait for port activation (default: 30 sec)");
105MODULE_PARM_DESC(poll_all_eqs,
106 "Poll all event queues periodically (default: yes)");
107MODULE_PARM_DESC(static_rate,
108 "Set permanent static rate (default: no static rate)");
109MODULE_PARM_DESC(scaling_code,
110 "Enable scaling code (default: no)");
111MODULE_PARM_DESC(lock_hcalls,
112 "Serialize all hCalls made by the driver "
113 "(default: autodetect)");
114MODULE_PARM_DESC(number_of_cqs,
115 "Max number of CQs which can be allocated "
116 "(default: autodetect)");
117MODULE_PARM_DESC(number_of_qps,
118 "Max number of QPs which can be allocated "
119 "(default: autodetect)");
120
121DEFINE_RWLOCK(ehca_qp_idr_lock);
122DEFINE_RWLOCK(ehca_cq_idr_lock);
123DEFINE_IDR(ehca_qp_idr);
124DEFINE_IDR(ehca_cq_idr);
125
126static LIST_HEAD(shca_list); /* list of all registered ehcas */
127DEFINE_SPINLOCK(shca_list_lock);
128
129static struct timer_list poll_eqs_timer;
130
131#ifdef CONFIG_PPC_64K_PAGES
132static struct kmem_cache *ctblk_cache;
133
134void *ehca_alloc_fw_ctrlblock(gfp_t flags)
135{
136 void *ret = kmem_cache_zalloc(ctblk_cache, flags);
137 if (!ret)
138 ehca_gen_err("Out of memory for ctblk");
139 return ret;
140}
141
142void ehca_free_fw_ctrlblock(void *ptr)
143{
144 if (ptr)
145 kmem_cache_free(ctblk_cache, ptr);
146
147}
148#endif
149
150int ehca2ib_return_code(u64 ehca_rc)
151{
152 switch (ehca_rc) {
153 case H_SUCCESS:
154 return 0;
155 case H_RESOURCE: /* Resource in use */
156 case H_BUSY:
157 return -EBUSY;
158 case H_NOT_ENOUGH_RESOURCES: /* insufficient resources */
159 case H_CONSTRAINED: /* resource constraint */
160 case H_NO_MEM:
161 return -ENOMEM;
162 default:
163 return -EINVAL;
164 }
165}
166
167static int ehca_create_slab_caches(void)
168{
169 int ret;
170
171 ret = ehca_init_pd_cache();
172 if (ret) {
173 ehca_gen_err("Cannot create PD SLAB cache.");
174 return ret;
175 }
176
177 ret = ehca_init_cq_cache();
178 if (ret) {
179 ehca_gen_err("Cannot create CQ SLAB cache.");
180 goto create_slab_caches2;
181 }
182
183 ret = ehca_init_qp_cache();
184 if (ret) {
185 ehca_gen_err("Cannot create QP SLAB cache.");
186 goto create_slab_caches3;
187 }
188
189 ret = ehca_init_av_cache();
190 if (ret) {
191 ehca_gen_err("Cannot create AV SLAB cache.");
192 goto create_slab_caches4;
193 }
194
195 ret = ehca_init_mrmw_cache();
196 if (ret) {
197 ehca_gen_err("Cannot create MR&MW SLAB cache.");
198 goto create_slab_caches5;
199 }
200
201 ret = ehca_init_small_qp_cache();
202 if (ret) {
203 ehca_gen_err("Cannot create small queue SLAB cache.");
204 goto create_slab_caches6;
205 }
206
207#ifdef CONFIG_PPC_64K_PAGES
208 ctblk_cache = kmem_cache_create("ehca_cache_ctblk",
209 EHCA_PAGESIZE, H_CB_ALIGNMENT,
210 SLAB_HWCACHE_ALIGN,
211 NULL);
212 if (!ctblk_cache) {
213 ehca_gen_err("Cannot create ctblk SLAB cache.");
214 ehca_cleanup_small_qp_cache();
215 ret = -ENOMEM;
216 goto create_slab_caches6;
217 }
218#endif
219 return 0;
220
221create_slab_caches6:
222 ehca_cleanup_mrmw_cache();
223
224create_slab_caches5:
225 ehca_cleanup_av_cache();
226
227create_slab_caches4:
228 ehca_cleanup_qp_cache();
229
230create_slab_caches3:
231 ehca_cleanup_cq_cache();
232
233create_slab_caches2:
234 ehca_cleanup_pd_cache();
235
236 return ret;
237}
238
239static void ehca_destroy_slab_caches(void)
240{
241 ehca_cleanup_small_qp_cache();
242 ehca_cleanup_mrmw_cache();
243 ehca_cleanup_av_cache();
244 ehca_cleanup_qp_cache();
245 ehca_cleanup_cq_cache();
246 ehca_cleanup_pd_cache();
247#ifdef CONFIG_PPC_64K_PAGES
248 kmem_cache_destroy(ctblk_cache);
249#endif
250}
251
252#define EHCA_HCAAVER EHCA_BMASK_IBM(32, 39)
253#define EHCA_REVID EHCA_BMASK_IBM(40, 63)
254
255static struct cap_descr {
256 u64 mask;
257 char *descr;
258} hca_cap_descr[] = {
259 { HCA_CAP_AH_PORT_NR_CHECK, "HCA_CAP_AH_PORT_NR_CHECK" },
260 { HCA_CAP_ATOMIC, "HCA_CAP_ATOMIC" },
261 { HCA_CAP_AUTO_PATH_MIG, "HCA_CAP_AUTO_PATH_MIG" },
262 { HCA_CAP_BAD_P_KEY_CTR, "HCA_CAP_BAD_P_KEY_CTR" },
263 { HCA_CAP_SQD_RTS_PORT_CHANGE, "HCA_CAP_SQD_RTS_PORT_CHANGE" },
264 { HCA_CAP_CUR_QP_STATE_MOD, "HCA_CAP_CUR_QP_STATE_MOD" },
265 { HCA_CAP_INIT_TYPE, "HCA_CAP_INIT_TYPE" },
266 { HCA_CAP_PORT_ACTIVE_EVENT, "HCA_CAP_PORT_ACTIVE_EVENT" },
267 { HCA_CAP_Q_KEY_VIOL_CTR, "HCA_CAP_Q_KEY_VIOL_CTR" },
268 { HCA_CAP_WQE_RESIZE, "HCA_CAP_WQE_RESIZE" },
269 { HCA_CAP_RAW_PACKET_MCAST, "HCA_CAP_RAW_PACKET_MCAST" },
270 { HCA_CAP_SHUTDOWN_PORT, "HCA_CAP_SHUTDOWN_PORT" },
271 { HCA_CAP_RC_LL_QP, "HCA_CAP_RC_LL_QP" },
272 { HCA_CAP_SRQ, "HCA_CAP_SRQ" },
273 { HCA_CAP_UD_LL_QP, "HCA_CAP_UD_LL_QP" },
274 { HCA_CAP_RESIZE_MR, "HCA_CAP_RESIZE_MR" },
275 { HCA_CAP_MINI_QP, "HCA_CAP_MINI_QP" },
276 { HCA_CAP_H_ALLOC_RES_SYNC, "HCA_CAP_H_ALLOC_RES_SYNC" },
277};
278
279static int ehca_sense_attributes(struct ehca_shca *shca)
280{
281 int i, ret = 0;
282 u64 h_ret;
283 struct hipz_query_hca *rblock;
284 struct hipz_query_port *port;
285 const char *loc_code;
286
287 static const u32 pgsize_map[] = {
288 HCA_CAP_MR_PGSIZE_4K, 0x1000,
289 HCA_CAP_MR_PGSIZE_64K, 0x10000,
290 HCA_CAP_MR_PGSIZE_1M, 0x100000,
291 HCA_CAP_MR_PGSIZE_16M, 0x1000000,
292 };
293
294 ehca_gen_dbg("Probing adapter %s...",
295 shca->ofdev->dev.of_node->full_name);
296 loc_code = of_get_property(shca->ofdev->dev.of_node, "ibm,loc-code",
297 NULL);
298 if (loc_code)
299		ehca_gen_dbg(" ... location code=%s", loc_code);
300
301 rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
302 if (!rblock) {
303 ehca_gen_err("Cannot allocate rblock memory.");
304 return -ENOMEM;
305 }
306
307 h_ret = hipz_h_query_hca(shca->ipz_hca_handle, rblock);
308 if (h_ret != H_SUCCESS) {
309 ehca_gen_err("Cannot query device properties. h_ret=%lli",
310 h_ret);
311 ret = -EPERM;
312 goto sense_attributes1;
313 }
314
315 if (ehca_nr_ports == 1)
316 shca->num_ports = 1;
317 else
318 shca->num_ports = (u8)rblock->num_ports;
319
320 ehca_gen_dbg(" ... found %x ports", rblock->num_ports);
321
322 if (ehca_hw_level == 0) {
323 u32 hcaaver;
324 u32 revid;
325
326 hcaaver = EHCA_BMASK_GET(EHCA_HCAAVER, rblock->hw_ver);
327 revid = EHCA_BMASK_GET(EHCA_REVID, rblock->hw_ver);
328
329 ehca_gen_dbg(" ... hardware version=%x:%x", hcaaver, revid);
330
331 if (hcaaver == 1) {
332 if (revid <= 3)
333 shca->hw_level = 0x10 | (revid + 1);
334 else
335 shca->hw_level = 0x14;
336 } else if (hcaaver == 2) {
337 if (revid == 0)
338 shca->hw_level = 0x21;
339 else if (revid == 0x10)
340 shca->hw_level = 0x22;
341 else if (revid == 0x20 || revid == 0x21)
342 shca->hw_level = 0x23;
343 }
344
345 if (!shca->hw_level) {
346 ehca_gen_warn("unknown hardware version"
347 " - assuming default level");
348 shca->hw_level = 0x22;
349 }
350 } else
351 shca->hw_level = ehca_hw_level;
352 ehca_gen_dbg(" ... hardware level=%x", shca->hw_level);
353
354 shca->hca_cap = rblock->hca_cap_indicators;
355 ehca_gen_dbg(" ... HCA capabilities:");
356 for (i = 0; i < ARRAY_SIZE(hca_cap_descr); i++)
357 if (EHCA_BMASK_GET(hca_cap_descr[i].mask, shca->hca_cap))
358 ehca_gen_dbg(" %s", hca_cap_descr[i].descr);
359
360 /* Autodetect hCall locking -- the "H_ALLOC_RESOURCE synced" flag is
361 * a firmware property, so it's valid across all adapters
362 */
363 if (ehca_lock_hcalls == -1)
364 ehca_lock_hcalls = !EHCA_BMASK_GET(HCA_CAP_H_ALLOC_RES_SYNC,
365 shca->hca_cap);
366
367 /* translate supported MR page sizes; always support 4K */
368 shca->hca_cap_mr_pgsize = EHCA_PAGESIZE;
369 for (i = 0; i < ARRAY_SIZE(pgsize_map); i += 2)
370 if (rblock->memory_page_size_supported & pgsize_map[i])
371 shca->hca_cap_mr_pgsize |= pgsize_map[i + 1];
372
373 /* Set maximum number of CQs and QPs to calculate EQ size */
374 if (shca->max_num_qps == -1)
375 shca->max_num_qps = min_t(int, rblock->max_qp,
376 EHCA_MAX_NUM_QUEUES);
377 else if (shca->max_num_qps < 1 || shca->max_num_qps > rblock->max_qp) {
378 ehca_gen_warn("The requested number of QPs is out of range "
379 "(1 - %i) specified by HW. Value is set to %i",
380 rblock->max_qp, rblock->max_qp);
381 shca->max_num_qps = rblock->max_qp;
382 }
383
384 if (shca->max_num_cqs == -1)
385 shca->max_num_cqs = min_t(int, rblock->max_cq,
386 EHCA_MAX_NUM_QUEUES);
387 else if (shca->max_num_cqs < 1 || shca->max_num_cqs > rblock->max_cq) {
388 ehca_gen_warn("The requested number of CQs is out of range "
389 "(1 - %i) specified by HW. Value is set to %i",
390			rblock->max_cq, rblock->max_cq);
		shca->max_num_cqs = rblock->max_cq;
391	}
392
393 /* query max MTU from first port -- it's the same for all ports */
394 port = (struct hipz_query_port *)rblock;
395 h_ret = hipz_h_query_port(shca->ipz_hca_handle, 1, port);
396 if (h_ret != H_SUCCESS) {
397 ehca_gen_err("Cannot query port properties. h_ret=%lli",
398 h_ret);
399 ret = -EPERM;
400 goto sense_attributes1;
401 }
402
403 shca->max_mtu = port->max_mtu;
404
405sense_attributes1:
406 ehca_free_fw_ctrlblock(rblock);
407 return ret;
408}
409
410static int init_node_guid(struct ehca_shca *shca)
411{
412 int ret = 0;
413 struct hipz_query_hca *rblock;
414
415 rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
416 if (!rblock) {
417 ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
418 return -ENOMEM;
419 }
420
421 if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) {
422 ehca_err(&shca->ib_device, "Can't query device properties");
423 ret = -EINVAL;
424 goto init_node_guid1;
425 }
426
427 memcpy(&shca->ib_device.node_guid, &rblock->node_guid, sizeof(u64));
428
429init_node_guid1:
430 ehca_free_fw_ctrlblock(rblock);
431 return ret;
432}
433
434static int ehca_port_immutable(struct ib_device *ibdev, u8 port_num,
435 struct ib_port_immutable *immutable)
436{
437 struct ib_port_attr attr;
438 int err;
439
440 err = ehca_query_port(ibdev, port_num, &attr);
441 if (err)
442 return err;
443
444 immutable->pkey_tbl_len = attr.pkey_tbl_len;
445 immutable->gid_tbl_len = attr.gid_tbl_len;
446 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
447 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
448
449 return 0;
450}
451
452static int ehca_init_device(struct ehca_shca *shca)
453{
454 int ret;
455
456 ret = init_node_guid(shca);
457 if (ret)
458 return ret;
459
460 strlcpy(shca->ib_device.name, "ehca%d", IB_DEVICE_NAME_MAX);
461 shca->ib_device.owner = THIS_MODULE;
462
463 shca->ib_device.uverbs_abi_ver = 8;
464 shca->ib_device.uverbs_cmd_mask =
465 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
466 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
467 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
468 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
469 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
470 (1ull << IB_USER_VERBS_CMD_REG_MR) |
471 (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
472 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
473 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
474 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
475 (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
476 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
477 (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
478 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
479 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
480 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST);
481
482 shca->ib_device.node_type = RDMA_NODE_IB_CA;
483 shca->ib_device.phys_port_cnt = shca->num_ports;
484 shca->ib_device.num_comp_vectors = 1;
485 shca->ib_device.dma_device = &shca->ofdev->dev;
486 shca->ib_device.query_device = ehca_query_device;
487 shca->ib_device.query_port = ehca_query_port;
488 shca->ib_device.query_gid = ehca_query_gid;
489 shca->ib_device.query_pkey = ehca_query_pkey;
490	/* shca->ib_device.modify_device = ehca_modify_device */
491 shca->ib_device.modify_port = ehca_modify_port;
492 shca->ib_device.alloc_ucontext = ehca_alloc_ucontext;
493 shca->ib_device.dealloc_ucontext = ehca_dealloc_ucontext;
494 shca->ib_device.alloc_pd = ehca_alloc_pd;
495 shca->ib_device.dealloc_pd = ehca_dealloc_pd;
496 shca->ib_device.create_ah = ehca_create_ah;
497 /* shca->ib_device.modify_ah = ehca_modify_ah; */
498 shca->ib_device.query_ah = ehca_query_ah;
499 shca->ib_device.destroy_ah = ehca_destroy_ah;
500 shca->ib_device.create_qp = ehca_create_qp;
501 shca->ib_device.modify_qp = ehca_modify_qp;
502 shca->ib_device.query_qp = ehca_query_qp;
503 shca->ib_device.destroy_qp = ehca_destroy_qp;
504 shca->ib_device.post_send = ehca_post_send;
505 shca->ib_device.post_recv = ehca_post_recv;
506 shca->ib_device.create_cq = ehca_create_cq;
507 shca->ib_device.destroy_cq = ehca_destroy_cq;
508 shca->ib_device.resize_cq = ehca_resize_cq;
509 shca->ib_device.poll_cq = ehca_poll_cq;
510 /* shca->ib_device.peek_cq = ehca_peek_cq; */
511 shca->ib_device.req_notify_cq = ehca_req_notify_cq;
512 /* shca->ib_device.req_ncomp_notif = ehca_req_ncomp_notif; */
513 shca->ib_device.get_dma_mr = ehca_get_dma_mr;
514 shca->ib_device.reg_user_mr = ehca_reg_user_mr;
515 shca->ib_device.dereg_mr = ehca_dereg_mr;
516 shca->ib_device.alloc_mw = ehca_alloc_mw;
517 shca->ib_device.dealloc_mw = ehca_dealloc_mw;
518 shca->ib_device.alloc_fmr = ehca_alloc_fmr;
519 shca->ib_device.map_phys_fmr = ehca_map_phys_fmr;
520 shca->ib_device.unmap_fmr = ehca_unmap_fmr;
521 shca->ib_device.dealloc_fmr = ehca_dealloc_fmr;
522 shca->ib_device.attach_mcast = ehca_attach_mcast;
523 shca->ib_device.detach_mcast = ehca_detach_mcast;
524 shca->ib_device.process_mad = ehca_process_mad;
525 shca->ib_device.mmap = ehca_mmap;
526 shca->ib_device.dma_ops = &ehca_dma_mapping_ops;
527 shca->ib_device.get_port_immutable = ehca_port_immutable;
528
529 if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
530 shca->ib_device.uverbs_cmd_mask |=
531 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
532 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
533 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
534 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);
535
536 shca->ib_device.create_srq = ehca_create_srq;
537 shca->ib_device.modify_srq = ehca_modify_srq;
538 shca->ib_device.query_srq = ehca_query_srq;
539 shca->ib_device.destroy_srq = ehca_destroy_srq;
540 shca->ib_device.post_srq_recv = ehca_post_srq_recv;
541 }
542
543 return ret;
544}
545
546static int ehca_create_aqp1(struct ehca_shca *shca, u32 port)
547{
548 struct ehca_sport *sport = &shca->sport[port - 1];
549 struct ib_cq *ibcq;
550 struct ib_qp *ibqp;
551 struct ib_qp_init_attr qp_init_attr;
552 struct ib_cq_init_attr cq_attr = {};
553 int ret;
554
555 if (sport->ibcq_aqp1) {
556 ehca_err(&shca->ib_device, "AQP1 CQ is already created.");
557 return -EPERM;
558 }
559
560 cq_attr.cqe = 10;
561 ibcq = ib_create_cq(&shca->ib_device, NULL, NULL, (void *)(-1),
562 &cq_attr);
563 if (IS_ERR(ibcq)) {
564 ehca_err(&shca->ib_device, "Cannot create AQP1 CQ.");
565 return PTR_ERR(ibcq);
566 }
567 sport->ibcq_aqp1 = ibcq;
568
569 if (sport->ibqp_sqp[IB_QPT_GSI]) {
570 ehca_err(&shca->ib_device, "AQP1 QP is already created.");
571 ret = -EPERM;
572 goto create_aqp1;
573 }
574
575 memset(&qp_init_attr, 0, sizeof(struct ib_qp_init_attr));
576 qp_init_attr.send_cq = ibcq;
577 qp_init_attr.recv_cq = ibcq;
578 qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
579 qp_init_attr.cap.max_send_wr = 100;
580 qp_init_attr.cap.max_recv_wr = 100;
581 qp_init_attr.cap.max_send_sge = 2;
582 qp_init_attr.cap.max_recv_sge = 1;
583 qp_init_attr.qp_type = IB_QPT_GSI;
584 qp_init_attr.port_num = port;
585 qp_init_attr.qp_context = NULL;
586 qp_init_attr.event_handler = NULL;
587 qp_init_attr.srq = NULL;
588
589 ibqp = ib_create_qp(&shca->pd->ib_pd, &qp_init_attr);
590 if (IS_ERR(ibqp)) {
591 ehca_err(&shca->ib_device, "Cannot create AQP1 QP.");
592 ret = PTR_ERR(ibqp);
593 goto create_aqp1;
594 }
595 sport->ibqp_sqp[IB_QPT_GSI] = ibqp;
596
597 return 0;
598
599create_aqp1:
600 ib_destroy_cq(sport->ibcq_aqp1);
601 return ret;
602}
603
604static int ehca_destroy_aqp1(struct ehca_sport *sport)
605{
606 int ret;
607
608 ret = ib_destroy_qp(sport->ibqp_sqp[IB_QPT_GSI]);
609 if (ret) {
610 ehca_gen_err("Cannot destroy AQP1 QP. ret=%i", ret);
611 return ret;
612 }
613
614 ret = ib_destroy_cq(sport->ibcq_aqp1);
615 if (ret)
616 ehca_gen_err("Cannot destroy AQP1 CQ. ret=%i", ret);
617
618 return ret;
619}
620
621static ssize_t ehca_show_debug_level(struct device_driver *ddp, char *buf)
622{
623 return snprintf(buf, PAGE_SIZE, "%d\n", ehca_debug_level);
624}
625
626static ssize_t ehca_store_debug_level(struct device_driver *ddp,
627 const char *buf, size_t count)
628{
629 int value = (*buf) - '0';
630 if (value >= 0 && value <= 9)
631 ehca_debug_level = value;
632 return 1;
633}
634
635static DRIVER_ATTR(debug_level, S_IRUSR | S_IWUSR,
636 ehca_show_debug_level, ehca_store_debug_level);
637
638static struct attribute *ehca_drv_attrs[] = {
639 &driver_attr_debug_level.attr,
640 NULL
641};
642
643static struct attribute_group ehca_drv_attr_grp = {
644 .attrs = ehca_drv_attrs
645};
646
647static const struct attribute_group *ehca_drv_attr_groups[] = {
648 &ehca_drv_attr_grp,
649 NULL,
650};
651
652#define EHCA_RESOURCE_ATTR(name) \
653static ssize_t ehca_show_##name(struct device *dev, \
654 struct device_attribute *attr, \
655 char *buf) \
656{ \
657 struct ehca_shca *shca; \
658 struct hipz_query_hca *rblock; \
659 int data; \
660 \
661 shca = dev_get_drvdata(dev); \
662 \
663 rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL); \
664 if (!rblock) { \
665 dev_err(dev, "Can't allocate rblock memory.\n"); \
666 return 0; \
667 } \
668 \
669 if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) { \
670 dev_err(dev, "Can't query device properties\n"); \
671 ehca_free_fw_ctrlblock(rblock); \
672 return 0; \
673 } \
674 \
675 data = rblock->name; \
676 ehca_free_fw_ctrlblock(rblock); \
677 \
678 if ((strcmp(#name, "num_ports") == 0) && (ehca_nr_ports == 1)) \
679 return snprintf(buf, 256, "1\n"); \
680 else \
681 return snprintf(buf, 256, "%d\n", data); \
682 \
683} \
684static DEVICE_ATTR(name, S_IRUGO, ehca_show_##name, NULL);
685
686EHCA_RESOURCE_ATTR(num_ports);
687EHCA_RESOURCE_ATTR(hw_ver);
688EHCA_RESOURCE_ATTR(max_eq);
689EHCA_RESOURCE_ATTR(cur_eq);
690EHCA_RESOURCE_ATTR(max_cq);
691EHCA_RESOURCE_ATTR(cur_cq);
692EHCA_RESOURCE_ATTR(max_qp);
693EHCA_RESOURCE_ATTR(cur_qp);
694EHCA_RESOURCE_ATTR(max_mr);
695EHCA_RESOURCE_ATTR(cur_mr);
696EHCA_RESOURCE_ATTR(max_mw);
697EHCA_RESOURCE_ATTR(cur_mw);
698EHCA_RESOURCE_ATTR(max_pd);
699EHCA_RESOURCE_ATTR(max_ah);
700
701static ssize_t ehca_show_adapter_handle(struct device *dev,
702 struct device_attribute *attr,
703 char *buf)
704{
705 struct ehca_shca *shca = dev_get_drvdata(dev);
706
707 return sprintf(buf, "%llx\n", shca->ipz_hca_handle.handle);
708
709}
710static DEVICE_ATTR(adapter_handle, S_IRUGO, ehca_show_adapter_handle, NULL);
711
712static struct attribute *ehca_dev_attrs[] = {
713 &dev_attr_adapter_handle.attr,
714 &dev_attr_num_ports.attr,
715 &dev_attr_hw_ver.attr,
716 &dev_attr_max_eq.attr,
717 &dev_attr_cur_eq.attr,
718 &dev_attr_max_cq.attr,
719 &dev_attr_cur_cq.attr,
720 &dev_attr_max_qp.attr,
721 &dev_attr_cur_qp.attr,
722 &dev_attr_max_mr.attr,
723 &dev_attr_cur_mr.attr,
724 &dev_attr_max_mw.attr,
725 &dev_attr_cur_mw.attr,
726 &dev_attr_max_pd.attr,
727 &dev_attr_max_ah.attr,
728 NULL
729};
730
731static struct attribute_group ehca_dev_attr_grp = {
732 .attrs = ehca_dev_attrs
733};
734
735static int ehca_probe(struct platform_device *dev)
736{
737 struct ehca_shca *shca;
738 const u64 *handle;
739 struct ib_pd *ibpd;
740 int ret, i, eq_size;
741 unsigned long flags;
742
743 handle = of_get_property(dev->dev.of_node, "ibm,hca-handle", NULL);
744 if (!handle) {
745 ehca_gen_err("Cannot get eHCA handle for adapter: %s.",
746 dev->dev.of_node->full_name);
747 return -ENODEV;
748 }
749
750 if (!(*handle)) {
751 ehca_gen_err("Wrong eHCA handle for adapter: %s.",
752 dev->dev.of_node->full_name);
753 return -ENODEV;
754 }
755
756 shca = (struct ehca_shca *)ib_alloc_device(sizeof(*shca));
757 if (!shca) {
758 ehca_gen_err("Cannot allocate shca memory.");
759 return -ENOMEM;
760 }
761
762 mutex_init(&shca->modify_mutex);
763 atomic_set(&shca->num_cqs, 0);
764 atomic_set(&shca->num_qps, 0);
765 shca->max_num_qps = ehca_max_qp;
766 shca->max_num_cqs = ehca_max_cq;
767
768 for (i = 0; i < ARRAY_SIZE(shca->sport); i++)
769 spin_lock_init(&shca->sport[i].mod_sqp_lock);
770
771 shca->ofdev = dev;
772 shca->ipz_hca_handle.handle = *handle;
773 dev_set_drvdata(&dev->dev, shca);
774
775 ret = ehca_sense_attributes(shca);
776 if (ret < 0) {
777 ehca_gen_err("Cannot sense eHCA attributes.");
778 goto probe1;
779 }
780
781 ret = ehca_init_device(shca);
782 if (ret) {
783 ehca_gen_err("Cannot init ehca device struct");
784 goto probe1;
785 }
786
787 eq_size = 2 * shca->max_num_cqs + 4 * shca->max_num_qps;
788 /* create event queues */
789 ret = ehca_create_eq(shca, &shca->eq, EHCA_EQ, eq_size);
790 if (ret) {
791 ehca_err(&shca->ib_device, "Cannot create EQ.");
792 goto probe1;
793 }
794
795 ret = ehca_create_eq(shca, &shca->neq, EHCA_NEQ, 513);
796 if (ret) {
797 ehca_err(&shca->ib_device, "Cannot create NEQ.");
798 goto probe3;
799 }
800
801 /* create internal protection domain */
802 ibpd = ehca_alloc_pd(&shca->ib_device, (void *)(-1), NULL);
803 if (IS_ERR(ibpd)) {
804 ehca_err(&shca->ib_device, "Cannot create internal PD.");
805 ret = PTR_ERR(ibpd);
806 goto probe4;
807 }
808
809 shca->pd = container_of(ibpd, struct ehca_pd, ib_pd);
810 shca->pd->ib_pd.device = &shca->ib_device;
811
812 /* create internal max MR */
813 ret = ehca_reg_internal_maxmr(shca, shca->pd, &shca->maxmr);
814
815 if (ret) {
816 ehca_err(&shca->ib_device, "Cannot create internal MR ret=%i",
817 ret);
818 goto probe5;
819 }
820
821 ret = ib_register_device(&shca->ib_device, NULL);
822 if (ret) {
823 ehca_err(&shca->ib_device,
824 "ib_register_device() failed ret=%i", ret);
825 goto probe6;
826 }
827
828 /* create AQP1 for port 1 */
829 if (ehca_open_aqp1 == 1) {
830 shca->sport[0].port_state = IB_PORT_DOWN;
831 ret = ehca_create_aqp1(shca, 1);
832 if (ret) {
833 ehca_err(&shca->ib_device,
834 "Cannot create AQP1 for port 1.");
835 goto probe7;
836 }
837 }
838
839 /* create AQP1 for port 2 */
840 if ((ehca_open_aqp1 == 1) && (shca->num_ports == 2)) {
841 shca->sport[1].port_state = IB_PORT_DOWN;
842 ret = ehca_create_aqp1(shca, 2);
843 if (ret) {
844 ehca_err(&shca->ib_device,
845 "Cannot create AQP1 for port 2.");
846 goto probe8;
847 }
848 }
849
850 ret = sysfs_create_group(&dev->dev.kobj, &ehca_dev_attr_grp);
851 if (ret) /* only complain; we can live without attributes */
852 ehca_err(&shca->ib_device,
853 "Cannot create device attributes ret=%d", ret);
854
855 spin_lock_irqsave(&shca_list_lock, flags);
856 list_add(&shca->shca_list, &shca_list);
857 spin_unlock_irqrestore(&shca_list_lock, flags);
858
859 return 0;
860
861probe8:
862 ret = ehca_destroy_aqp1(&shca->sport[0]);
863 if (ret)
864 ehca_err(&shca->ib_device,
865 "Cannot destroy AQP1 for port 1. ret=%i", ret);
866
867probe7:
868 ib_unregister_device(&shca->ib_device);
869
870probe6:
871 ret = ehca_dereg_internal_maxmr(shca);
872 if (ret)
873 ehca_err(&shca->ib_device,
874 "Cannot destroy internal MR. ret=%x", ret);
875
876probe5:
877 ret = ehca_dealloc_pd(&shca->pd->ib_pd);
878 if (ret)
879 ehca_err(&shca->ib_device,
880 "Cannot destroy internal PD. ret=%x", ret);
881
882probe4:
883 ret = ehca_destroy_eq(shca, &shca->neq);
884 if (ret)
885 ehca_err(&shca->ib_device,
886 "Cannot destroy NEQ. ret=%x", ret);
887
888probe3:
889 ret = ehca_destroy_eq(shca, &shca->eq);
890 if (ret)
891 ehca_err(&shca->ib_device,
892 "Cannot destroy EQ. ret=%x", ret);
893
894probe1:
895 ib_dealloc_device(&shca->ib_device);
896
897 return -EINVAL;
898}
899
900static int ehca_remove(struct platform_device *dev)
901{
902 struct ehca_shca *shca = dev_get_drvdata(&dev->dev);
903 unsigned long flags;
904 int ret;
905
906 sysfs_remove_group(&dev->dev.kobj, &ehca_dev_attr_grp);
907
908 if (ehca_open_aqp1 == 1) {
909 int i;
910 for (i = 0; i < shca->num_ports; i++) {
911 ret = ehca_destroy_aqp1(&shca->sport[i]);
912 if (ret)
913 ehca_err(&shca->ib_device,
914 "Cannot destroy AQP1 for port %x "
915					 "ret=%i", i, ret);
916 }
917 }
918
919 ib_unregister_device(&shca->ib_device);
920
921 ret = ehca_dereg_internal_maxmr(shca);
922 if (ret)
923 ehca_err(&shca->ib_device,
924 "Cannot destroy internal MR. ret=%i", ret);
925
926 ret = ehca_dealloc_pd(&shca->pd->ib_pd);
927 if (ret)
928 ehca_err(&shca->ib_device,
929 "Cannot destroy internal PD. ret=%i", ret);
930
931 ret = ehca_destroy_eq(shca, &shca->eq);
932 if (ret)
933 ehca_err(&shca->ib_device, "Cannot destroy EQ. ret=%i", ret);
934
935 ret = ehca_destroy_eq(shca, &shca->neq);
936 if (ret)
937		ehca_err(&shca->ib_device, "Cannot destroy NEQ. ret=%i", ret);
938
939 ib_dealloc_device(&shca->ib_device);
940
941 spin_lock_irqsave(&shca_list_lock, flags);
942 list_del(&shca->shca_list);
943 spin_unlock_irqrestore(&shca_list_lock, flags);
944
945 return ret;
946}
947
948static struct of_device_id ehca_device_table[] =
949{
950 {
951 .name = "lhca",
952 .compatible = "IBM,lhca",
953 },
954 {},
955};
956MODULE_DEVICE_TABLE(of, ehca_device_table);
957
958static struct platform_driver ehca_driver = {
959 .probe = ehca_probe,
960 .remove = ehca_remove,
961 .driver = {
962 .name = "ehca",
963 .owner = THIS_MODULE,
964 .groups = ehca_drv_attr_groups,
965 .of_match_table = ehca_device_table,
966 },
967};
968
969void ehca_poll_eqs(unsigned long data)
970{
971 struct ehca_shca *shca;
972
973 spin_lock(&shca_list_lock);
974 list_for_each_entry(shca, &shca_list, shca_list) {
975 if (shca->eq.is_initialized) {
976 /* call deadman proc only if eq ptr does not change */
977 struct ehca_eq *eq = &shca->eq;
978 int max = 3;
979 volatile u64 q_ofs, q_ofs2;
980 unsigned long flags;
981 spin_lock_irqsave(&eq->spinlock, flags);
982 q_ofs = eq->ipz_queue.current_q_offset;
983 spin_unlock_irqrestore(&eq->spinlock, flags);
984 do {
985 spin_lock_irqsave(&eq->spinlock, flags);
986 q_ofs2 = eq->ipz_queue.current_q_offset;
987 spin_unlock_irqrestore(&eq->spinlock, flags);
988 max--;
989 } while (q_ofs == q_ofs2 && max > 0);
990 if (q_ofs == q_ofs2)
991 ehca_process_eq(shca, 0);
992 }
993 }
994 mod_timer(&poll_eqs_timer, round_jiffies(jiffies + HZ));
995 spin_unlock(&shca_list_lock);
996}
997
998static int ehca_mem_notifier(struct notifier_block *nb,
999 unsigned long action, void *data)
1000{
1001 static unsigned long ehca_dmem_warn_time;
1002 unsigned long flags;
1003
1004 switch (action) {
1005 case MEM_CANCEL_OFFLINE:
1006 case MEM_CANCEL_ONLINE:
1007 case MEM_ONLINE:
1008 case MEM_OFFLINE:
1009 return NOTIFY_OK;
1010 case MEM_GOING_ONLINE:
1011 case MEM_GOING_OFFLINE:
1012 /* only ok if no hca is attached to the lpar */
1013 spin_lock_irqsave(&shca_list_lock, flags);
1014 if (list_empty(&shca_list)) {
1015 spin_unlock_irqrestore(&shca_list_lock, flags);
1016 return NOTIFY_OK;
1017 } else {
1018 spin_unlock_irqrestore(&shca_list_lock, flags);
1019 if (printk_timed_ratelimit(&ehca_dmem_warn_time,
1020 30 * 1000))
1021				ehca_gen_err("DMEM operations are not allowed "
1022					     "in conjunction with eHCA");
1023 return NOTIFY_BAD;
1024 }
1025 }
1026 return NOTIFY_OK;
1027}
1028
1029static struct notifier_block ehca_mem_nb = {
1030 .notifier_call = ehca_mem_notifier,
1031};
1032
1033static int __init ehca_module_init(void)
1034{
1035 int ret;
1036
1037 printk(KERN_INFO "eHCA Infiniband Device Driver "
1038 "(Version " HCAD_VERSION ")\n");
1039
1040 ret = ehca_create_comp_pool();
1041 if (ret) {
1042 ehca_gen_err("Cannot create comp pool.");
1043 return ret;
1044 }
1045
1046 ret = ehca_create_slab_caches();
1047 if (ret) {
1048 ehca_gen_err("Cannot create SLAB caches");
1049 ret = -ENOMEM;
1050 goto module_init1;
1051 }
1052
1053 ret = ehca_create_busmap();
1054 if (ret) {
1055 ehca_gen_err("Cannot create busmap.");
1056 goto module_init2;
1057 }
1058
1059 ret = ibmebus_register_driver(&ehca_driver);
1060 if (ret) {
1061 ehca_gen_err("Cannot register eHCA device driver");
1062 ret = -EINVAL;
1063 goto module_init3;
1064 }
1065
1066 ret = register_memory_notifier(&ehca_mem_nb);
1067 if (ret) {
1068 ehca_gen_err("Failed registering memory add/remove notifier");
1069 goto module_init4;
1070 }
1071
1072 if (ehca_poll_all_eqs != 1) {
1073		ehca_gen_err("WARNING: EQ polling is disabled.");
1074		ehca_gen_err("It is possible to lose interrupts.");
1075 } else {
1076 init_timer(&poll_eqs_timer);
1077 poll_eqs_timer.function = ehca_poll_eqs;
1078 poll_eqs_timer.expires = jiffies + HZ;
1079 add_timer(&poll_eqs_timer);
1080 }
1081
1082 return 0;
1083
1084module_init4:
1085 ibmebus_unregister_driver(&ehca_driver);
1086
1087module_init3:
1088 ehca_destroy_busmap();
1089
1090module_init2:
1091 ehca_destroy_slab_caches();
1092
1093module_init1:
1094 ehca_destroy_comp_pool();
1095 return ret;
1096}
1097
1098static void __exit ehca_module_exit(void)
1099{
1100 if (ehca_poll_all_eqs == 1)
1101 del_timer_sync(&poll_eqs_timer);
1102
1103 ibmebus_unregister_driver(&ehca_driver);
1104
1105 unregister_memory_notifier(&ehca_mem_nb);
1106
1107 ehca_destroy_busmap();
1108
1109 ehca_destroy_slab_caches();
1110
1111 ehca_destroy_comp_pool();
1112
1113 idr_destroy(&ehca_cq_idr);
1114 idr_destroy(&ehca_qp_idr);
1115}
1116
1117module_init(ehca_module_init);
1118module_exit(ehca_module_exit);
diff --git a/drivers/staging/rdma/ehca/ehca_mcast.c b/drivers/staging/rdma/ehca/ehca_mcast.c
deleted file mode 100644
index cec181532924..000000000000
--- a/drivers/staging/rdma/ehca/ehca_mcast.c
+++ /dev/null
@@ -1,131 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * mcast functions
5 *
6 * Authors: Khadija Souissi <souissik@de.ibm.com>
7 * Waleri Fomin <fomin@de.ibm.com>
8 * Reinhard Ernst <rernst@de.ibm.com>
9 * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
10 * Heiko J Schick <schickhj@de.ibm.com>
11 *
12 * Copyright (c) 2005 IBM Corporation
13 *
14 * All rights reserved.
15 *
16 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
17 * BSD.
18 *
19 * OpenIB BSD License
20 *
21 * Redistribution and use in source and binary forms, with or without
22 * modification, are permitted provided that the following conditions are met:
23 *
24 * Redistributions of source code must retain the above copyright notice, this
25 * list of conditions and the following disclaimer.
26 *
27 * Redistributions in binary form must reproduce the above copyright notice,
28 * this list of conditions and the following disclaimer in the documentation
29 * and/or other materials
30 * provided with the distribution.
31 *
32 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
33 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
34 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
35 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
36 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
37 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
38 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
39 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
40 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
41 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
42 * POSSIBILITY OF SUCH DAMAGE.
43 */
44
45#include <linux/module.h>
46#include <linux/err.h>
47#include "ehca_classes.h"
48#include "ehca_tools.h"
49#include "ehca_qes.h"
50#include "ehca_iverbs.h"
51#include "hcp_if.h"
52
53#define MAX_MC_LID 0xFFFE
54#define MIN_MC_LID 0xC000 /* Multicast limits */
55#define EHCA_VALID_MULTICAST_GID(gid) ((gid)[0] == 0xFF)
56#define EHCA_VALID_MULTICAST_LID(lid) \
57 (((lid) >= MIN_MC_LID) && ((lid) <= MAX_MC_LID))
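/* 0xC000-0xFFFE is the InfiniBand multicast LID range; 0xFFFF is the permissive LID */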
58
59int ehca_attach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
60{
61 struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
62 struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,
63 ib_device);
64 union ib_gid my_gid;
65 u64 subnet_prefix, interface_id, h_ret;
66
67 if (ibqp->qp_type != IB_QPT_UD) {
68 ehca_err(ibqp->device, "invalid qp_type=%x", ibqp->qp_type);
69 return -EINVAL;
70 }
71
72 if (!(EHCA_VALID_MULTICAST_GID(gid->raw))) {
 73		ehca_err(ibqp->device, "invalid multicast gid");
74 return -EINVAL;
75 } else if ((lid < MIN_MC_LID) || (lid > MAX_MC_LID)) {
 76		ehca_err(ibqp->device, "invalid multicast lid=%x", lid);
77 return -EINVAL;
78 }
79
80 memcpy(&my_gid, gid->raw, sizeof(union ib_gid));
81
82 subnet_prefix = be64_to_cpu(my_gid.global.subnet_prefix);
83 interface_id = be64_to_cpu(my_gid.global.interface_id);
84 h_ret = hipz_h_attach_mcqp(shca->ipz_hca_handle,
85 my_qp->ipz_qp_handle,
86 my_qp->galpas.kernel,
87 lid, subnet_prefix, interface_id);
88 if (h_ret != H_SUCCESS)
89 ehca_err(ibqp->device,
90 "ehca_qp=%p qp_num=%x hipz_h_attach_mcqp() failed "
91 "h_ret=%lli", my_qp, ibqp->qp_num, h_ret);
92
93 return ehca2ib_return_code(h_ret);
94}
95
96int ehca_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
97{
98 struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
99 struct ehca_shca *shca = container_of(ibqp->pd->device,
100 struct ehca_shca, ib_device);
101 union ib_gid my_gid;
102 u64 subnet_prefix, interface_id, h_ret;
103
104 if (ibqp->qp_type != IB_QPT_UD) {
105 ehca_err(ibqp->device, "invalid qp_type %x", ibqp->qp_type);
106 return -EINVAL;
107 }
108
109 if (!(EHCA_VALID_MULTICAST_GID(gid->raw))) {
 110		ehca_err(ibqp->device, "invalid multicast gid");
111 return -EINVAL;
112 } else if ((lid < MIN_MC_LID) || (lid > MAX_MC_LID)) {
 113		ehca_err(ibqp->device, "invalid multicast lid=%x", lid);
114 return -EINVAL;
115 }
116
117 memcpy(&my_gid, gid->raw, sizeof(union ib_gid));
118
119 subnet_prefix = be64_to_cpu(my_gid.global.subnet_prefix);
120 interface_id = be64_to_cpu(my_gid.global.interface_id);
121 h_ret = hipz_h_detach_mcqp(shca->ipz_hca_handle,
122 my_qp->ipz_qp_handle,
123 my_qp->galpas.kernel,
124 lid, subnet_prefix, interface_id);
125 if (h_ret != H_SUCCESS)
126 ehca_err(ibqp->device,
127 "ehca_qp=%p qp_num=%x hipz_h_detach_mcqp() failed "
128 "h_ret=%lli", my_qp, ibqp->qp_num, h_ret);
129
130 return ehca2ib_return_code(h_ret);
131}
diff --git a/drivers/staging/rdma/ehca/ehca_mrmw.c b/drivers/staging/rdma/ehca/ehca_mrmw.c
deleted file mode 100644
index 3367205e3160..000000000000
--- a/drivers/staging/rdma/ehca/ehca_mrmw.c
+++ /dev/null
@@ -1,2202 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * MR/MW functions
5 *
6 * Authors: Dietmar Decker <ddecker@de.ibm.com>
7 * Christoph Raisch <raisch@de.ibm.com>
8 * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
9 *
10 * Copyright (c) 2005 IBM Corporation
11 *
12 * All rights reserved.
13 *
14 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
15 * BSD.
16 *
17 * OpenIB BSD License
18 *
19 * Redistribution and use in source and binary forms, with or without
20 * modification, are permitted provided that the following conditions are met:
21 *
22 * Redistributions of source code must retain the above copyright notice, this
23 * list of conditions and the following disclaimer.
24 *
25 * Redistributions in binary form must reproduce the above copyright notice,
26 * this list of conditions and the following disclaimer in the documentation
27 * and/or other materials
28 * provided with the distribution.
29 *
30 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
31 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
32 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
33 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
34 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
35 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
36 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
37 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
38 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
39 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
40 * POSSIBILITY OF SUCH DAMAGE.
41 */
42
43#include <linux/slab.h>
44#include <rdma/ib_umem.h>
45
46#include "ehca_iverbs.h"
47#include "ehca_mrmw.h"
48#include "hcp_if.h"
49#include "hipz_hw.h"
50
51#define NUM_CHUNKS(length, chunk_size) \
52 (((length) + (chunk_size - 1)) / (chunk_size))
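/* NUM_CHUNKS() rounds up, e.g. NUM_CHUNKS(0x2100, 0x1000) == 3 */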
53
54/* max number of rpages (per hcall register_rpages) */
55#define MAX_RPAGES 512
56
57/* DMEM toleration management */
58#define EHCA_SECTSHIFT SECTION_SIZE_BITS
59#define EHCA_SECTSIZE (1UL << EHCA_SECTSHIFT)
60#define EHCA_HUGEPAGESHIFT 34
61#define EHCA_HUGEPAGE_SIZE (1UL << EHCA_HUGEPAGESHIFT)
62#define EHCA_HUGEPAGE_PFN_MASK ((EHCA_HUGEPAGE_SIZE - 1) >> PAGE_SHIFT)
63#define EHCA_INVAL_ADDR 0xFFFFFFFFFFFFFFFFULL
64#define EHCA_DIR_INDEX_SHIFT 13 /* 8k Entries in 64k block */
65#define EHCA_TOP_INDEX_SHIFT (EHCA_DIR_INDEX_SHIFT * 2)
66#define EHCA_MAP_ENTRIES (1 << EHCA_DIR_INDEX_SHIFT)
67#define EHCA_TOP_MAP_SIZE (0x10000) /* currently fixed map size */
68#define EHCA_DIR_MAP_SIZE (0x10000)
69#define EHCA_ENT_MAP_SIZE (0x10000)
70#define EHCA_INDEX_MASK (EHCA_MAP_ENTRIES - 1)
71
72static unsigned long ehca_mr_len;
73
74/*
75 * Memory map data structures
76 */
77struct ehca_dir_bmap {
78 u64 ent[EHCA_MAP_ENTRIES];
79};
80struct ehca_top_bmap {
81 struct ehca_dir_bmap *dir[EHCA_MAP_ENTRIES];
82};
83struct ehca_bmap {
84 struct ehca_top_bmap *top[EHCA_MAP_ENTRIES];
85};
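/*
 * Three-level busmap: a memory section number is split into top, dir and
 * idx fields of EHCA_DIR_INDEX_SHIFT (13) bits each, indexing
 * ehca_bmap->top[top]->dir[dir]->ent[idx] (see ehca_calc_sectbase()).
 */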
86
87static struct ehca_bmap *ehca_bmap;
88
89static struct kmem_cache *mr_cache;
90static struct kmem_cache *mw_cache;
91
92enum ehca_mr_pgsize {
93 EHCA_MR_PGSIZE4K = 0x1000L,
94 EHCA_MR_PGSIZE64K = 0x10000L,
95 EHCA_MR_PGSIZE1M = 0x100000L,
96 EHCA_MR_PGSIZE16M = 0x1000000L
97};
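/* shca->hca_cap_mr_pgsize is a bitmask of the page sizes the HCA supports */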
98
99#define EHCA_MR_PGSHIFT4K 12
100#define EHCA_MR_PGSHIFT64K 16
101#define EHCA_MR_PGSHIFT1M 20
102#define EHCA_MR_PGSHIFT16M 24
103
104static u64 ehca_map_vaddr(void *caddr);
105
106static u32 ehca_encode_hwpage_size(u32 pgsize)
107{
108 int log = ilog2(pgsize);
109 WARN_ON(log < 12 || log > 24 || log & 3);
110 return (log - 12) / 4;
111}
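/* encodes the hw page size for the hcall interface: 4K->0, 64K->1, 1M->2, 16M->3 */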
112
113static u64 ehca_get_max_hwpage_size(struct ehca_shca *shca)
114{
115 return rounddown_pow_of_two(shca->hca_cap_mr_pgsize);
116}
117
118static struct ehca_mr *ehca_mr_new(void)
119{
120 struct ehca_mr *me;
121
122 me = kmem_cache_zalloc(mr_cache, GFP_KERNEL);
123 if (me)
124 spin_lock_init(&me->mrlock);
125 else
126 ehca_gen_err("alloc failed");
127
128 return me;
129}
130
131static void ehca_mr_delete(struct ehca_mr *me)
132{
133 kmem_cache_free(mr_cache, me);
134}
135
136static struct ehca_mw *ehca_mw_new(void)
137{
138 struct ehca_mw *me;
139
140 me = kmem_cache_zalloc(mw_cache, GFP_KERNEL);
141 if (me)
142 spin_lock_init(&me->mwlock);
143 else
144 ehca_gen_err("alloc failed");
145
146 return me;
147}
148
149static void ehca_mw_delete(struct ehca_mw *me)
150{
151 kmem_cache_free(mw_cache, me);
152}
153
154/*----------------------------------------------------------------------*/
155
156struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
157{
158 struct ib_mr *ib_mr;
159 int ret;
160 struct ehca_mr *e_maxmr;
161 struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
162 struct ehca_shca *shca =
163 container_of(pd->device, struct ehca_shca, ib_device);
164
165 if (shca->maxmr) {
166 e_maxmr = ehca_mr_new();
167 if (!e_maxmr) {
168 ehca_err(&shca->ib_device, "out of memory");
169 ib_mr = ERR_PTR(-ENOMEM);
170 goto get_dma_mr_exit0;
171 }
172
173 ret = ehca_reg_maxmr(shca, e_maxmr,
174 (void *)ehca_map_vaddr((void *)(KERNELBASE + PHYSICAL_START)),
175 mr_access_flags, e_pd,
176 &e_maxmr->ib.ib_mr.lkey,
177 &e_maxmr->ib.ib_mr.rkey);
178 if (ret) {
179 ehca_mr_delete(e_maxmr);
180 ib_mr = ERR_PTR(ret);
181 goto get_dma_mr_exit0;
182 }
183 ib_mr = &e_maxmr->ib.ib_mr;
184 } else {
 185		ehca_err(&shca->ib_device, "no internal max-MR exists!");
186 ib_mr = ERR_PTR(-EINVAL);
187 goto get_dma_mr_exit0;
188 }
189
190get_dma_mr_exit0:
191 if (IS_ERR(ib_mr))
192 ehca_err(&shca->ib_device, "h_ret=%li pd=%p mr_access_flags=%x",
193 PTR_ERR(ib_mr), pd, mr_access_flags);
194 return ib_mr;
195} /* end ehca_get_dma_mr() */
196
197/*----------------------------------------------------------------------*/
198
199struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
200 u64 virt, int mr_access_flags,
201 struct ib_udata *udata)
202{
203 struct ib_mr *ib_mr;
204 struct ehca_mr *e_mr;
205 struct ehca_shca *shca =
206 container_of(pd->device, struct ehca_shca, ib_device);
207 struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
208 struct ehca_mr_pginfo pginfo;
209 int ret, page_shift;
210 u32 num_kpages;
211 u32 num_hwpages;
212 u64 hwpage_size;
213
214 if (!pd) {
215 ehca_gen_err("bad pd=%p", pd);
216 return ERR_PTR(-EFAULT);
217 }
218
219 if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
220 !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
221 ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
222 !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
223 /*
224 * Remote Write Access requires Local Write Access
225 * Remote Atomic Access requires Local Write Access
226 */
227 ehca_err(pd->device, "bad input values: mr_access_flags=%x",
228 mr_access_flags);
229 ib_mr = ERR_PTR(-EINVAL);
230 goto reg_user_mr_exit0;
231 }
232
233 if (length == 0 || virt + length < virt) {
234 ehca_err(pd->device, "bad input values: length=%llx "
235 "virt_base=%llx", length, virt);
236 ib_mr = ERR_PTR(-EINVAL);
237 goto reg_user_mr_exit0;
238 }
239
240 e_mr = ehca_mr_new();
241 if (!e_mr) {
242 ehca_err(pd->device, "out of memory");
243 ib_mr = ERR_PTR(-ENOMEM);
244 goto reg_user_mr_exit0;
245 }
246
247 e_mr->umem = ib_umem_get(pd->uobject->context, start, length,
248 mr_access_flags, 0);
249 if (IS_ERR(e_mr->umem)) {
250 ib_mr = (void *)e_mr->umem;
251 goto reg_user_mr_exit1;
252 }
253
254 if (e_mr->umem->page_size != PAGE_SIZE) {
255 ehca_err(pd->device, "page size not supported, "
256 "e_mr->umem->page_size=%x", e_mr->umem->page_size);
257 ib_mr = ERR_PTR(-EINVAL);
258 goto reg_user_mr_exit2;
259 }
260
261 /* determine number of MR pages */
262 num_kpages = NUM_CHUNKS((virt % PAGE_SIZE) + length, PAGE_SIZE);
263 /* select proper hw_pgsize */
264 page_shift = PAGE_SHIFT;
265 if (e_mr->umem->hugetlb) {
266 /* determine page_shift, clamp between 4K and 16M */
267 page_shift = (fls64(length - 1) + 3) & ~3;
268 page_shift = min(max(page_shift, EHCA_MR_PGSHIFT4K),
269 EHCA_MR_PGSHIFT16M);
270 }
271 hwpage_size = 1UL << page_shift;
272
273 /* now that we have the desired page size, shift until it's
274 * supported, too. 4K is always supported, so this terminates.
275 */
276 while (!(hwpage_size & shca->hca_cap_mr_pgsize))
277 hwpage_size >>= 4;
278
279reg_user_mr_fallback:
280 num_hwpages = NUM_CHUNKS((virt % hwpage_size) + length, hwpage_size);
281 /* register MR on HCA */
282 memset(&pginfo, 0, sizeof(pginfo));
283 pginfo.type = EHCA_MR_PGI_USER;
284 pginfo.hwpage_size = hwpage_size;
285 pginfo.num_kpages = num_kpages;
286 pginfo.num_hwpages = num_hwpages;
287 pginfo.u.usr.region = e_mr->umem;
288 pginfo.next_hwpage = ib_umem_offset(e_mr->umem) / hwpage_size;
289 pginfo.u.usr.next_sg = pginfo.u.usr.region->sg_head.sgl;
290 ret = ehca_reg_mr(shca, e_mr, (u64 *)virt, length, mr_access_flags,
291 e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
292 &e_mr->ib.ib_mr.rkey, EHCA_REG_MR);
293 if (ret == -EINVAL && pginfo.hwpage_size > PAGE_SIZE) {
294 ehca_warn(pd->device, "failed to register mr "
295 "with hwpage_size=%llx", hwpage_size);
296 ehca_info(pd->device, "try to register mr with "
297 "kpage_size=%lx", PAGE_SIZE);
298 /*
299 * this means kpages are not contiguous for a hw page
300 * try kernel page size as fallback solution
301 */
302 hwpage_size = PAGE_SIZE;
303 goto reg_user_mr_fallback;
304 }
305 if (ret) {
306 ib_mr = ERR_PTR(ret);
307 goto reg_user_mr_exit2;
308 }
309
310 /* successful registration of all pages */
311 return &e_mr->ib.ib_mr;
312
313reg_user_mr_exit2:
314 ib_umem_release(e_mr->umem);
315reg_user_mr_exit1:
316 ehca_mr_delete(e_mr);
317reg_user_mr_exit0:
318 if (IS_ERR(ib_mr))
319 ehca_err(pd->device, "rc=%li pd=%p mr_access_flags=%x udata=%p",
320 PTR_ERR(ib_mr), pd, mr_access_flags, udata);
321 return ib_mr;
322} /* end ehca_reg_user_mr() */
323
324/*----------------------------------------------------------------------*/
325
326int ehca_dereg_mr(struct ib_mr *mr)
327{
328 int ret = 0;
329 u64 h_ret;
330 struct ehca_shca *shca =
331 container_of(mr->device, struct ehca_shca, ib_device);
332 struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
333
334 if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
335 ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
336 "e_mr->flags=%x", mr, e_mr, e_mr->flags);
337 ret = -EINVAL;
338 goto dereg_mr_exit0;
339 } else if (e_mr == shca->maxmr) {
 340		/* should be impossible, but reject to be sure */
341 ehca_err(mr->device, "dereg internal max-MR impossible, mr=%p "
342 "shca->maxmr=%p mr->lkey=%x",
343 mr, shca->maxmr, mr->lkey);
344 ret = -EINVAL;
345 goto dereg_mr_exit0;
346 }
347
348 /* TODO: BUSY: MR still has bound window(s) */
349 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
350 if (h_ret != H_SUCCESS) {
351 ehca_err(mr->device, "hipz_free_mr failed, h_ret=%lli shca=%p "
352 "e_mr=%p hca_hndl=%llx mr_hndl=%llx mr->lkey=%x",
353 h_ret, shca, e_mr, shca->ipz_hca_handle.handle,
354 e_mr->ipz_mr_handle.handle, mr->lkey);
355 ret = ehca2ib_return_code(h_ret);
356 goto dereg_mr_exit0;
357 }
358
359 if (e_mr->umem)
360 ib_umem_release(e_mr->umem);
361
362 /* successful deregistration */
363 ehca_mr_delete(e_mr);
364
365dereg_mr_exit0:
366 if (ret)
367 ehca_err(mr->device, "ret=%i mr=%p", ret, mr);
368 return ret;
369} /* end ehca_dereg_mr() */
370
371/*----------------------------------------------------------------------*/
372
373struct ib_mw *ehca_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
374{
375 struct ib_mw *ib_mw;
376 u64 h_ret;
377 struct ehca_mw *e_mw;
378 struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
379 struct ehca_shca *shca =
380 container_of(pd->device, struct ehca_shca, ib_device);
381 struct ehca_mw_hipzout_parms hipzout;
382
383 if (type != IB_MW_TYPE_1)
384 return ERR_PTR(-EINVAL);
385
386 e_mw = ehca_mw_new();
387 if (!e_mw) {
388 ib_mw = ERR_PTR(-ENOMEM);
389 goto alloc_mw_exit0;
390 }
391
392 h_ret = hipz_h_alloc_resource_mw(shca->ipz_hca_handle, e_mw,
393 e_pd->fw_pd, &hipzout);
394 if (h_ret != H_SUCCESS) {
395 ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%lli "
396 "shca=%p hca_hndl=%llx mw=%p",
397 h_ret, shca, shca->ipz_hca_handle.handle, e_mw);
398 ib_mw = ERR_PTR(ehca2ib_return_code(h_ret));
399 goto alloc_mw_exit1;
400 }
401 /* successful MW allocation */
402 e_mw->ipz_mw_handle = hipzout.handle;
403 e_mw->ib_mw.rkey = hipzout.rkey;
404 return &e_mw->ib_mw;
405
406alloc_mw_exit1:
407 ehca_mw_delete(e_mw);
408alloc_mw_exit0:
409 if (IS_ERR(ib_mw))
410 ehca_err(pd->device, "h_ret=%li pd=%p", PTR_ERR(ib_mw), pd);
411 return ib_mw;
412} /* end ehca_alloc_mw() */
413
414/*----------------------------------------------------------------------*/
415
416int ehca_dealloc_mw(struct ib_mw *mw)
417{
418 u64 h_ret;
419 struct ehca_shca *shca =
420 container_of(mw->device, struct ehca_shca, ib_device);
421 struct ehca_mw *e_mw = container_of(mw, struct ehca_mw, ib_mw);
422
423 h_ret = hipz_h_free_resource_mw(shca->ipz_hca_handle, e_mw);
424 if (h_ret != H_SUCCESS) {
425 ehca_err(mw->device, "hipz_free_mw failed, h_ret=%lli shca=%p "
426 "mw=%p rkey=%x hca_hndl=%llx mw_hndl=%llx",
427 h_ret, shca, mw, mw->rkey, shca->ipz_hca_handle.handle,
428 e_mw->ipz_mw_handle.handle);
429 return ehca2ib_return_code(h_ret);
430 }
431 /* successful deallocation */
432 ehca_mw_delete(e_mw);
433 return 0;
434} /* end ehca_dealloc_mw() */
435
436/*----------------------------------------------------------------------*/
437
438struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
439 int mr_access_flags,
440 struct ib_fmr_attr *fmr_attr)
441{
442 struct ib_fmr *ib_fmr;
443 struct ehca_shca *shca =
444 container_of(pd->device, struct ehca_shca, ib_device);
445 struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
446 struct ehca_mr *e_fmr;
447 int ret;
448 u32 tmp_lkey, tmp_rkey;
449 struct ehca_mr_pginfo pginfo;
450 u64 hw_pgsize;
451
452 /* check other parameters */
453 if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
454 !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
455 ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
456 !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
457 /*
458 * Remote Write Access requires Local Write Access
459 * Remote Atomic Access requires Local Write Access
460 */
461 ehca_err(pd->device, "bad input values: mr_access_flags=%x",
462 mr_access_flags);
463 ib_fmr = ERR_PTR(-EINVAL);
464 goto alloc_fmr_exit0;
465 }
466 if (mr_access_flags & IB_ACCESS_MW_BIND) {
467 ehca_err(pd->device, "bad input values: mr_access_flags=%x",
468 mr_access_flags);
469 ib_fmr = ERR_PTR(-EINVAL);
470 goto alloc_fmr_exit0;
471 }
472 if ((fmr_attr->max_pages == 0) || (fmr_attr->max_maps == 0)) {
473 ehca_err(pd->device, "bad input values: fmr_attr->max_pages=%x "
474 "fmr_attr->max_maps=%x fmr_attr->page_shift=%x",
475 fmr_attr->max_pages, fmr_attr->max_maps,
476 fmr_attr->page_shift);
477 ib_fmr = ERR_PTR(-EINVAL);
478 goto alloc_fmr_exit0;
479 }
480
481 hw_pgsize = 1 << fmr_attr->page_shift;
482 if (!(hw_pgsize & shca->hca_cap_mr_pgsize)) {
483 ehca_err(pd->device, "unsupported fmr_attr->page_shift=%x",
484 fmr_attr->page_shift);
485 ib_fmr = ERR_PTR(-EINVAL);
486 goto alloc_fmr_exit0;
487 }
488
489 e_fmr = ehca_mr_new();
490 if (!e_fmr) {
491 ib_fmr = ERR_PTR(-ENOMEM);
492 goto alloc_fmr_exit0;
493 }
494 e_fmr->flags |= EHCA_MR_FLAG_FMR;
495
496 /* register MR on HCA */
497 memset(&pginfo, 0, sizeof(pginfo));
498 pginfo.hwpage_size = hw_pgsize;
499 /*
 500	 * pginfo.num_hwpages == 0, i.e. register_rpages() will not be called
501 * but deferred to map_phys_fmr()
502 */
503 ret = ehca_reg_mr(shca, e_fmr, NULL,
504 fmr_attr->max_pages * (1 << fmr_attr->page_shift),
505 mr_access_flags, e_pd, &pginfo,
506 &tmp_lkey, &tmp_rkey, EHCA_REG_MR);
507 if (ret) {
508 ib_fmr = ERR_PTR(ret);
509 goto alloc_fmr_exit1;
510 }
511
512 /* successful */
513 e_fmr->hwpage_size = hw_pgsize;
514 e_fmr->fmr_page_size = 1 << fmr_attr->page_shift;
515 e_fmr->fmr_max_pages = fmr_attr->max_pages;
516 e_fmr->fmr_max_maps = fmr_attr->max_maps;
517 e_fmr->fmr_map_cnt = 0;
518 return &e_fmr->ib.ib_fmr;
519
520alloc_fmr_exit1:
521 ehca_mr_delete(e_fmr);
522alloc_fmr_exit0:
523 return ib_fmr;
524} /* end ehca_alloc_fmr() */
525
526/*----------------------------------------------------------------------*/
527
528int ehca_map_phys_fmr(struct ib_fmr *fmr,
529 u64 *page_list,
530 int list_len,
531 u64 iova)
532{
533 int ret;
534 struct ehca_shca *shca =
535 container_of(fmr->device, struct ehca_shca, ib_device);
536 struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
537 struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd);
538 struct ehca_mr_pginfo pginfo;
539 u32 tmp_lkey, tmp_rkey;
540
541 if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
542 ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
543 e_fmr, e_fmr->flags);
544 ret = -EINVAL;
545 goto map_phys_fmr_exit0;
546 }
547 ret = ehca_fmr_check_page_list(e_fmr, page_list, list_len);
548 if (ret)
549 goto map_phys_fmr_exit0;
550 if (iova % e_fmr->fmr_page_size) {
 551		/* iova must be aligned to the FMR page size */
552 ehca_err(fmr->device, "bad iova, iova=%llx fmr_page_size=%x",
553 iova, e_fmr->fmr_page_size);
554 ret = -EINVAL;
555 goto map_phys_fmr_exit0;
556 }
557 if (e_fmr->fmr_map_cnt >= e_fmr->fmr_max_maps) {
 558		/* HCAD does not limit the maps; trace this anyway */
559 ehca_info(fmr->device, "map limit exceeded, fmr=%p "
560 "e_fmr->fmr_map_cnt=%x e_fmr->fmr_max_maps=%x",
561 fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps);
562 }
563
564 memset(&pginfo, 0, sizeof(pginfo));
565 pginfo.type = EHCA_MR_PGI_FMR;
566 pginfo.num_kpages = list_len;
567 pginfo.hwpage_size = e_fmr->hwpage_size;
568 pginfo.num_hwpages =
569 list_len * e_fmr->fmr_page_size / pginfo.hwpage_size;
570 pginfo.u.fmr.page_list = page_list;
571 pginfo.next_hwpage =
572 (iova & (e_fmr->fmr_page_size-1)) / pginfo.hwpage_size;
573 pginfo.u.fmr.fmr_pgsize = e_fmr->fmr_page_size;
574
575 ret = ehca_rereg_mr(shca, e_fmr, (u64 *)iova,
576 list_len * e_fmr->fmr_page_size,
577 e_fmr->acl, e_pd, &pginfo, &tmp_lkey, &tmp_rkey);
578 if (ret)
579 goto map_phys_fmr_exit0;
580
581 /* successful reregistration */
582 e_fmr->fmr_map_cnt++;
583 e_fmr->ib.ib_fmr.lkey = tmp_lkey;
584 e_fmr->ib.ib_fmr.rkey = tmp_rkey;
585 return 0;
586
587map_phys_fmr_exit0:
588 if (ret)
589 ehca_err(fmr->device, "ret=%i fmr=%p page_list=%p list_len=%x "
590 "iova=%llx", ret, fmr, page_list, list_len, iova);
591 return ret;
592} /* end ehca_map_phys_fmr() */
593
594/*----------------------------------------------------------------------*/
595
596int ehca_unmap_fmr(struct list_head *fmr_list)
597{
598 int ret = 0;
599 struct ib_fmr *ib_fmr;
600 struct ehca_shca *shca = NULL;
601 struct ehca_shca *prev_shca;
602 struct ehca_mr *e_fmr;
603 u32 num_fmr = 0;
604 u32 unmap_fmr_cnt = 0;
605
 606	/* check that all FMRs belong to the same SHCA, and check the internal flag */
607 list_for_each_entry(ib_fmr, fmr_list, list) {
608 prev_shca = shca;
609 shca = container_of(ib_fmr->device, struct ehca_shca,
610 ib_device);
611 e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
612 if ((shca != prev_shca) && prev_shca) {
613 ehca_err(&shca->ib_device, "SHCA mismatch, shca=%p "
614 "prev_shca=%p e_fmr=%p",
615 shca, prev_shca, e_fmr);
616 ret = -EINVAL;
617 goto unmap_fmr_exit0;
618 }
619 if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
620 ehca_err(&shca->ib_device, "not a FMR, e_fmr=%p "
621 "e_fmr->flags=%x", e_fmr, e_fmr->flags);
622 ret = -EINVAL;
623 goto unmap_fmr_exit0;
624 }
625 num_fmr++;
626 }
627
628 /* loop over all FMRs to unmap */
629 list_for_each_entry(ib_fmr, fmr_list, list) {
630 unmap_fmr_cnt++;
631 e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
632 shca = container_of(ib_fmr->device, struct ehca_shca,
633 ib_device);
634 ret = ehca_unmap_one_fmr(shca, e_fmr);
635 if (ret) {
636 /* unmap failed, stop unmapping of rest of FMRs */
637 ehca_err(&shca->ib_device, "unmap of one FMR failed, "
638 "stop rest, e_fmr=%p num_fmr=%x "
639 "unmap_fmr_cnt=%x lkey=%x", e_fmr, num_fmr,
640 unmap_fmr_cnt, e_fmr->ib.ib_fmr.lkey);
641 goto unmap_fmr_exit0;
642 }
643 }
644
645unmap_fmr_exit0:
646 if (ret)
647 ehca_gen_err("ret=%i fmr_list=%p num_fmr=%x unmap_fmr_cnt=%x",
648 ret, fmr_list, num_fmr, unmap_fmr_cnt);
649 return ret;
650} /* end ehca_unmap_fmr() */
651
652/*----------------------------------------------------------------------*/
653
654int ehca_dealloc_fmr(struct ib_fmr *fmr)
655{
656 int ret;
657 u64 h_ret;
658 struct ehca_shca *shca =
659 container_of(fmr->device, struct ehca_shca, ib_device);
660 struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
661
662 if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
663 ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
664 e_fmr, e_fmr->flags);
665 ret = -EINVAL;
666 goto free_fmr_exit0;
667 }
668
669 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
670 if (h_ret != H_SUCCESS) {
671 ehca_err(fmr->device, "hipz_free_mr failed, h_ret=%lli e_fmr=%p "
672 "hca_hndl=%llx fmr_hndl=%llx fmr->lkey=%x",
673 h_ret, e_fmr, shca->ipz_hca_handle.handle,
674 e_fmr->ipz_mr_handle.handle, fmr->lkey);
675 ret = ehca2ib_return_code(h_ret);
676 goto free_fmr_exit0;
677 }
678 /* successful deregistration */
679 ehca_mr_delete(e_fmr);
680 return 0;
681
682free_fmr_exit0:
683 if (ret)
684 ehca_err(&shca->ib_device, "ret=%i fmr=%p", ret, fmr);
685 return ret;
686} /* end ehca_dealloc_fmr() */
687
688/*----------------------------------------------------------------------*/
689
690static int ehca_reg_bmap_mr_rpages(struct ehca_shca *shca,
691 struct ehca_mr *e_mr,
692 struct ehca_mr_pginfo *pginfo);
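/*
 * ehca_reg_mr() allocates the MR resource and then registers either the
 * pages described by pginfo (EHCA_REG_MR) or the sections recorded in the
 * global busmap (EHCA_REG_BUSMAP_MR, used for the internal max-MR).
 */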
693
694int ehca_reg_mr(struct ehca_shca *shca,
695 struct ehca_mr *e_mr,
696 u64 *iova_start,
697 u64 size,
698 int acl,
699 struct ehca_pd *e_pd,
700 struct ehca_mr_pginfo *pginfo,
701 u32 *lkey, /*OUT*/
702 u32 *rkey, /*OUT*/
703 enum ehca_reg_type reg_type)
704{
705 int ret;
706 u64 h_ret;
707 u32 hipz_acl;
708 struct ehca_mr_hipzout_parms hipzout;
709
710 ehca_mrmw_map_acl(acl, &hipz_acl);
711 ehca_mrmw_set_pgsize_hipz_acl(pginfo->hwpage_size, &hipz_acl);
712 if (ehca_use_hp_mr == 1)
713 hipz_acl |= 0x00000001;
714
715 h_ret = hipz_h_alloc_resource_mr(shca->ipz_hca_handle, e_mr,
716 (u64)iova_start, size, hipz_acl,
717 e_pd->fw_pd, &hipzout);
718 if (h_ret != H_SUCCESS) {
719 ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%lli "
720 "hca_hndl=%llx", h_ret, shca->ipz_hca_handle.handle);
721 ret = ehca2ib_return_code(h_ret);
722 goto ehca_reg_mr_exit0;
723 }
724
725 e_mr->ipz_mr_handle = hipzout.handle;
726
727 if (reg_type == EHCA_REG_BUSMAP_MR)
728 ret = ehca_reg_bmap_mr_rpages(shca, e_mr, pginfo);
729 else if (reg_type == EHCA_REG_MR)
730 ret = ehca_reg_mr_rpages(shca, e_mr, pginfo);
731 else
732 ret = -EINVAL;
733
734 if (ret)
735 goto ehca_reg_mr_exit1;
736
737 /* successful registration */
738 e_mr->num_kpages = pginfo->num_kpages;
739 e_mr->num_hwpages = pginfo->num_hwpages;
740 e_mr->hwpage_size = pginfo->hwpage_size;
741 e_mr->start = iova_start;
742 e_mr->size = size;
743 e_mr->acl = acl;
744 *lkey = hipzout.lkey;
745 *rkey = hipzout.rkey;
746 return 0;
747
748ehca_reg_mr_exit1:
749 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
750 if (h_ret != H_SUCCESS) {
751 ehca_err(&shca->ib_device, "h_ret=%lli shca=%p e_mr=%p "
752 "iova_start=%p size=%llx acl=%x e_pd=%p lkey=%x "
753 "pginfo=%p num_kpages=%llx num_hwpages=%llx ret=%i",
754 h_ret, shca, e_mr, iova_start, size, acl, e_pd,
755 hipzout.lkey, pginfo, pginfo->num_kpages,
756 pginfo->num_hwpages, ret);
757 ehca_err(&shca->ib_device, "internal error in ehca_reg_mr, "
758 "not recoverable");
759 }
760ehca_reg_mr_exit0:
761 if (ret)
762 ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p "
763 "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p "
764 "num_kpages=%llx num_hwpages=%llx",
765 ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo,
766 pginfo->num_kpages, pginfo->num_hwpages);
767 return ret;
768} /* end ehca_reg_mr() */
769
770/*----------------------------------------------------------------------*/
771
772int ehca_reg_mr_rpages(struct ehca_shca *shca,
773 struct ehca_mr *e_mr,
774 struct ehca_mr_pginfo *pginfo)
775{
776 int ret = 0;
777 u64 h_ret;
778 u32 rnum;
779 u64 rpage;
780 u32 i;
781 u64 *kpage;
782
783 if (!pginfo->num_hwpages) /* in case of fmr */
784 return 0;
785
786 kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
787 if (!kpage) {
788 ehca_err(&shca->ib_device, "kpage alloc failed");
789 ret = -ENOMEM;
790 goto ehca_reg_mr_rpages_exit0;
791 }
792
793 /* max MAX_RPAGES ehca mr pages per register call */
794 for (i = 0; i < NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES); i++) {
795
796 if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
797 rnum = pginfo->num_hwpages % MAX_RPAGES; /* last shot */
798 if (rnum == 0)
799 rnum = MAX_RPAGES; /* last shot is full */
800 } else
801 rnum = MAX_RPAGES;
802
803 ret = ehca_set_pagebuf(pginfo, rnum, kpage);
804 if (ret) {
805 ehca_err(&shca->ib_device, "ehca_set_pagebuf "
806 "bad rc, ret=%i rnum=%x kpage=%p",
807 ret, rnum, kpage);
808 goto ehca_reg_mr_rpages_exit1;
809 }
810
811 if (rnum > 1) {
812 rpage = __pa(kpage);
813 if (!rpage) {
814 ehca_err(&shca->ib_device, "kpage=%p i=%x",
815 kpage, i);
816 ret = -EFAULT;
817 goto ehca_reg_mr_rpages_exit1;
818 }
819 } else
820 rpage = *kpage;
821
822 h_ret = hipz_h_register_rpage_mr(
823 shca->ipz_hca_handle, e_mr,
824 ehca_encode_hwpage_size(pginfo->hwpage_size),
825 0, rpage, rnum);
826
827 if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
828 /*
829 * check for 'registration complete'==H_SUCCESS
830 * and for 'page registered'==H_PAGE_REGISTERED
831 */
832 if (h_ret != H_SUCCESS) {
833 ehca_err(&shca->ib_device, "last "
834 "hipz_reg_rpage_mr failed, h_ret=%lli "
835 "e_mr=%p i=%x hca_hndl=%llx mr_hndl=%llx"
836 " lkey=%x", h_ret, e_mr, i,
837 shca->ipz_hca_handle.handle,
838 e_mr->ipz_mr_handle.handle,
839 e_mr->ib.ib_mr.lkey);
840 ret = ehca2ib_return_code(h_ret);
841 break;
842 } else
843 ret = 0;
844 } else if (h_ret != H_PAGE_REGISTERED) {
845 ehca_err(&shca->ib_device, "hipz_reg_rpage_mr failed, "
846 "h_ret=%lli e_mr=%p i=%x lkey=%x hca_hndl=%llx "
847 "mr_hndl=%llx", h_ret, e_mr, i,
848 e_mr->ib.ib_mr.lkey,
849 shca->ipz_hca_handle.handle,
850 e_mr->ipz_mr_handle.handle);
851 ret = ehca2ib_return_code(h_ret);
852 break;
853 } else
854 ret = 0;
855 } /* end for(i) */
856
857
858ehca_reg_mr_rpages_exit1:
859 ehca_free_fw_ctrlblock(kpage);
860ehca_reg_mr_rpages_exit0:
861 if (ret)
862 ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p pginfo=%p "
863 "num_kpages=%llx num_hwpages=%llx", ret, shca, e_mr,
864 pginfo, pginfo->num_kpages, pginfo->num_hwpages);
865 return ret;
866} /* end ehca_reg_mr_rpages() */
867
868/*----------------------------------------------------------------------*/
869
870inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
871 struct ehca_mr *e_mr,
872 u64 *iova_start,
873 u64 size,
874 u32 acl,
875 struct ehca_pd *e_pd,
876 struct ehca_mr_pginfo *pginfo,
877 u32 *lkey, /*OUT*/
878 u32 *rkey) /*OUT*/
879{
880 int ret;
881 u64 h_ret;
882 u32 hipz_acl;
883 u64 *kpage;
884 u64 rpage;
885 struct ehca_mr_pginfo pginfo_save;
886 struct ehca_mr_hipzout_parms hipzout;
887
888 ehca_mrmw_map_acl(acl, &hipz_acl);
889 ehca_mrmw_set_pgsize_hipz_acl(pginfo->hwpage_size, &hipz_acl);
890
891 kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
892 if (!kpage) {
893 ehca_err(&shca->ib_device, "kpage alloc failed");
894 ret = -ENOMEM;
895 goto ehca_rereg_mr_rereg1_exit0;
896 }
897
898 pginfo_save = *pginfo;
899 ret = ehca_set_pagebuf(pginfo, pginfo->num_hwpages, kpage);
900 if (ret) {
901 ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p "
902 "pginfo=%p type=%x num_kpages=%llx num_hwpages=%llx "
903 "kpage=%p", e_mr, pginfo, pginfo->type,
904 pginfo->num_kpages, pginfo->num_hwpages, kpage);
905 goto ehca_rereg_mr_rereg1_exit1;
906 }
907 rpage = __pa(kpage);
908 if (!rpage) {
909 ehca_err(&shca->ib_device, "kpage=%p", kpage);
910 ret = -EFAULT;
911 goto ehca_rereg_mr_rereg1_exit1;
912 }
913 h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_mr,
914 (u64)iova_start, size, hipz_acl,
915 e_pd->fw_pd, rpage, &hipzout);
916 if (h_ret != H_SUCCESS) {
917 /*
918 * reregistration unsuccessful, try it again with the 3 hCalls,
919 * e.g. this is required in case H_MR_CONDITION
920 * (MW bound or MR is shared)
921 */
922 ehca_warn(&shca->ib_device, "hipz_h_reregister_pmr failed "
923 "(Rereg1), h_ret=%lli e_mr=%p", h_ret, e_mr);
924 *pginfo = pginfo_save;
925 ret = -EAGAIN;
926 } else if ((u64 *)hipzout.vaddr != iova_start) {
927 ehca_err(&shca->ib_device, "PHYP changed iova_start in "
928 "rereg_pmr, iova_start=%p iova_start_out=%llx e_mr=%p "
929 "mr_handle=%llx lkey=%x lkey_out=%x", iova_start,
930 hipzout.vaddr, e_mr, e_mr->ipz_mr_handle.handle,
931 e_mr->ib.ib_mr.lkey, hipzout.lkey);
932 ret = -EFAULT;
933 } else {
934 /*
935 * successful reregistration
936 * note: start and start_out are identical for eServer HCAs
937 */
938 e_mr->num_kpages = pginfo->num_kpages;
939 e_mr->num_hwpages = pginfo->num_hwpages;
940 e_mr->hwpage_size = pginfo->hwpage_size;
941 e_mr->start = iova_start;
942 e_mr->size = size;
943 e_mr->acl = acl;
944 *lkey = hipzout.lkey;
945 *rkey = hipzout.rkey;
946 }
947
948ehca_rereg_mr_rereg1_exit1:
949 ehca_free_fw_ctrlblock(kpage);
950ehca_rereg_mr_rereg1_exit0:
 951	if (ret && (ret != -EAGAIN))
952 ehca_err(&shca->ib_device, "ret=%i lkey=%x rkey=%x "
953 "pginfo=%p num_kpages=%llx num_hwpages=%llx",
954 ret, *lkey, *rkey, pginfo, pginfo->num_kpages,
955 pginfo->num_hwpages);
956 return ret;
957} /* end ehca_rereg_mr_rereg1() */
958
959/*----------------------------------------------------------------------*/
960
961int ehca_rereg_mr(struct ehca_shca *shca,
962 struct ehca_mr *e_mr,
963 u64 *iova_start,
964 u64 size,
965 int acl,
966 struct ehca_pd *e_pd,
967 struct ehca_mr_pginfo *pginfo,
968 u32 *lkey,
969 u32 *rkey)
970{
971 int ret = 0;
972 u64 h_ret;
973 int rereg_1_hcall = 1; /* 1: use hipz_h_reregister_pmr directly */
974 int rereg_3_hcall = 0; /* 1: use 3 hipz calls for reregistration */
975
976 /* first determine reregistration hCall(s) */
977 if ((pginfo->num_hwpages > MAX_RPAGES) ||
978 (e_mr->num_hwpages > MAX_RPAGES) ||
979 (pginfo->num_hwpages > e_mr->num_hwpages)) {
980 ehca_dbg(&shca->ib_device, "Rereg3 case, "
981 "pginfo->num_hwpages=%llx e_mr->num_hwpages=%x",
982 pginfo->num_hwpages, e_mr->num_hwpages);
983 rereg_1_hcall = 0;
984 rereg_3_hcall = 1;
985 }
986
987 if (e_mr->flags & EHCA_MR_FLAG_MAXMR) { /* check for max-MR */
988 rereg_1_hcall = 0;
989 rereg_3_hcall = 1;
990 e_mr->flags &= ~EHCA_MR_FLAG_MAXMR;
991 ehca_err(&shca->ib_device, "Rereg MR for max-MR! e_mr=%p",
992 e_mr);
993 }
994
995 if (rereg_1_hcall) {
996 ret = ehca_rereg_mr_rereg1(shca, e_mr, iova_start, size,
997 acl, e_pd, pginfo, lkey, rkey);
998 if (ret) {
999 if (ret == -EAGAIN)
1000 rereg_3_hcall = 1;
1001 else
1002 goto ehca_rereg_mr_exit0;
1003 }
1004 }
1005
1006 if (rereg_3_hcall) {
1007 struct ehca_mr save_mr;
1008
1009 /* first deregister old MR */
1010 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
1011 if (h_ret != H_SUCCESS) {
1012 ehca_err(&shca->ib_device, "hipz_free_mr failed, "
1013 "h_ret=%lli e_mr=%p hca_hndl=%llx mr_hndl=%llx "
1014 "mr->lkey=%x",
1015 h_ret, e_mr, shca->ipz_hca_handle.handle,
1016 e_mr->ipz_mr_handle.handle,
1017 e_mr->ib.ib_mr.lkey);
1018 ret = ehca2ib_return_code(h_ret);
1019 goto ehca_rereg_mr_exit0;
1020 }
1021 /* clean ehca_mr_t, without changing struct ib_mr and lock */
1022 save_mr = *e_mr;
1023 ehca_mr_deletenew(e_mr);
1024
1025 /* set some MR values */
1026 e_mr->flags = save_mr.flags;
1027 e_mr->hwpage_size = save_mr.hwpage_size;
1028 e_mr->fmr_page_size = save_mr.fmr_page_size;
1029 e_mr->fmr_max_pages = save_mr.fmr_max_pages;
1030 e_mr->fmr_max_maps = save_mr.fmr_max_maps;
1031 e_mr->fmr_map_cnt = save_mr.fmr_map_cnt;
1032
1033 ret = ehca_reg_mr(shca, e_mr, iova_start, size, acl,
1034 e_pd, pginfo, lkey, rkey, EHCA_REG_MR);
1035 if (ret) {
1036 u32 offset = (u64)(&e_mr->flags) - (u64)e_mr;
1037 memcpy(&e_mr->flags, &(save_mr.flags),
1038 sizeof(struct ehca_mr) - offset);
1039 goto ehca_rereg_mr_exit0;
1040 }
1041 }
1042
1043ehca_rereg_mr_exit0:
1044 if (ret)
1045 ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p "
1046 "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p "
1047 "num_kpages=%llx lkey=%x rkey=%x rereg_1_hcall=%x "
1048 "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size,
1049 acl, e_pd, pginfo, pginfo->num_kpages, *lkey, *rkey,
1050 rereg_1_hcall, rereg_3_hcall);
1051 return ret;
1052} /* end ehca_rereg_mr() */
1053
1054/*----------------------------------------------------------------------*/
1055
1056int ehca_unmap_one_fmr(struct ehca_shca *shca,
1057 struct ehca_mr *e_fmr)
1058{
1059 int ret = 0;
1060 u64 h_ret;
1061 struct ehca_pd *e_pd =
1062 container_of(e_fmr->ib.ib_fmr.pd, struct ehca_pd, ib_pd);
1063 struct ehca_mr save_fmr;
1064 u32 tmp_lkey, tmp_rkey;
1065 struct ehca_mr_pginfo pginfo;
1066 struct ehca_mr_hipzout_parms hipzout;
1068
1069 if (e_fmr->fmr_max_pages <= MAX_RPAGES) {
1070 /*
1071 * note: after using rereg hcall with len=0,
1072 * rereg hcall must be used again for registering pages
1073 */
1074 h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_fmr, 0,
1075 0, 0, e_pd->fw_pd, 0, &hipzout);
1076 if (h_ret == H_SUCCESS) {
1077 /* successful reregistration */
1078 e_fmr->start = NULL;
1079 e_fmr->size = 0;
1080 tmp_lkey = hipzout.lkey;
1081 tmp_rkey = hipzout.rkey;
1082 return 0;
1083 }
1084 /*
1085 * should not happen, because length checked above,
1086 * FMRs are not shared and no MW bound to FMRs
1087 */
1088 ehca_err(&shca->ib_device, "hipz_reregister_pmr failed "
1089 "(Rereg1), h_ret=%lli e_fmr=%p hca_hndl=%llx "
1090 "mr_hndl=%llx lkey=%x lkey_out=%x",
1091 h_ret, e_fmr, shca->ipz_hca_handle.handle,
1092 e_fmr->ipz_mr_handle.handle,
1093 e_fmr->ib.ib_fmr.lkey, hipzout.lkey);
1094 /* try free and rereg */
1095 }
1096
1097 /* first free old FMR */
1098 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
1099 if (h_ret != H_SUCCESS) {
1100 ehca_err(&shca->ib_device, "hipz_free_mr failed, "
1101 "h_ret=%lli e_fmr=%p hca_hndl=%llx mr_hndl=%llx "
1102 "lkey=%x",
1103 h_ret, e_fmr, shca->ipz_hca_handle.handle,
1104 e_fmr->ipz_mr_handle.handle,
1105 e_fmr->ib.ib_fmr.lkey);
1106 ret = ehca2ib_return_code(h_ret);
1107 goto ehca_unmap_one_fmr_exit0;
1108 }
1109 /* clean ehca_mr_t, without changing lock */
1110 save_fmr = *e_fmr;
1111 ehca_mr_deletenew(e_fmr);
1112
1113 /* set some MR values */
1114 e_fmr->flags = save_fmr.flags;
1115 e_fmr->hwpage_size = save_fmr.hwpage_size;
1116 e_fmr->fmr_page_size = save_fmr.fmr_page_size;
1117 e_fmr->fmr_max_pages = save_fmr.fmr_max_pages;
1118 e_fmr->fmr_max_maps = save_fmr.fmr_max_maps;
1119 e_fmr->fmr_map_cnt = save_fmr.fmr_map_cnt;
1120 e_fmr->acl = save_fmr.acl;
1121
1122 memset(&pginfo, 0, sizeof(pginfo));
1123 pginfo.type = EHCA_MR_PGI_FMR;
1124 ret = ehca_reg_mr(shca, e_fmr, NULL,
1125 (e_fmr->fmr_max_pages * e_fmr->fmr_page_size),
1126 e_fmr->acl, e_pd, &pginfo, &tmp_lkey,
1127 &tmp_rkey, EHCA_REG_MR);
1128 if (ret) {
1129 u32 offset = (u64)(&e_fmr->flags) - (u64)e_fmr;
1130		memcpy(&e_fmr->flags, &(save_fmr.flags),
1131 sizeof(struct ehca_mr) - offset);
1132 }
1133
1134ehca_unmap_one_fmr_exit0:
1135 if (ret)
1136 ehca_err(&shca->ib_device, "ret=%i tmp_lkey=%x tmp_rkey=%x "
1137 "fmr_max_pages=%x",
1138 ret, tmp_lkey, tmp_rkey, e_fmr->fmr_max_pages);
1139 return ret;
1140} /* end ehca_unmap_one_fmr() */
1141
1142/*----------------------------------------------------------------------*/
1143
1144int ehca_reg_smr(struct ehca_shca *shca,
1145 struct ehca_mr *e_origmr,
1146 struct ehca_mr *e_newmr,
1147 u64 *iova_start,
1148 int acl,
1149 struct ehca_pd *e_pd,
1150 u32 *lkey, /*OUT*/
1151 u32 *rkey) /*OUT*/
1152{
1153 int ret = 0;
1154 u64 h_ret;
1155 u32 hipz_acl;
1156 struct ehca_mr_hipzout_parms hipzout;
1157
1158 ehca_mrmw_map_acl(acl, &hipz_acl);
1159 ehca_mrmw_set_pgsize_hipz_acl(e_origmr->hwpage_size, &hipz_acl);
1160
1161 h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
1162 (u64)iova_start, hipz_acl, e_pd->fw_pd,
1163 &hipzout);
1164 if (h_ret != H_SUCCESS) {
1165 ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lli "
1166 "shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x "
1167 "e_pd=%p hca_hndl=%llx mr_hndl=%llx lkey=%x",
1168 h_ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd,
1169 shca->ipz_hca_handle.handle,
1170 e_origmr->ipz_mr_handle.handle,
1171 e_origmr->ib.ib_mr.lkey);
1172 ret = ehca2ib_return_code(h_ret);
1173 goto ehca_reg_smr_exit0;
1174 }
1175 /* successful registration */
1176 e_newmr->num_kpages = e_origmr->num_kpages;
1177 e_newmr->num_hwpages = e_origmr->num_hwpages;
1178 e_newmr->hwpage_size = e_origmr->hwpage_size;
1179 e_newmr->start = iova_start;
1180 e_newmr->size = e_origmr->size;
1181 e_newmr->acl = acl;
1182 e_newmr->ipz_mr_handle = hipzout.handle;
1183 *lkey = hipzout.lkey;
1184 *rkey = hipzout.rkey;
1185 return 0;
1186
1187ehca_reg_smr_exit0:
1188 if (ret)
1189 ehca_err(&shca->ib_device, "ret=%i shca=%p e_origmr=%p "
1190 "e_newmr=%p iova_start=%p acl=%x e_pd=%p",
1191 ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd);
1192 return ret;
1193} /* end ehca_reg_smr() */
1194
1195/*----------------------------------------------------------------------*/
1196static inline void *ehca_calc_sectbase(int top, int dir, int idx)
1197{
1198 unsigned long ret = idx;
1199 ret |= dir << EHCA_DIR_INDEX_SHIFT;
1200 ret |= top << EHCA_TOP_INDEX_SHIFT;
1201 return __va(ret << SECTION_SIZE_BITS);
1202}
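/*
 * Example: top=1, dir=2, idx=3 gives section number
 * (1 << 26) | (2 << 13) | 3; shifted by SECTION_SIZE_BITS this is the
 * section's physical address, returned as a kernel virtual address.
 */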
1203
1204#define ehca_bmap_valid(entry) \
1205 ((u64)entry != (u64)EHCA_INVAL_ADDR)
1206
1207static u64 ehca_reg_mr_section(int top, int dir, int idx, u64 *kpage,
1208 struct ehca_shca *shca, struct ehca_mr *mr,
1209 struct ehca_mr_pginfo *pginfo)
1210{
1211 u64 h_ret = 0;
1212 unsigned long page = 0;
1213 u64 rpage = __pa(kpage);
1214 int page_count;
1215
1216 void *sectbase = ehca_calc_sectbase(top, dir, idx);
1217 if ((unsigned long)sectbase & (pginfo->hwpage_size - 1)) {
1218		ehca_err(&shca->ib_device, "reg_mr_section will probably fail: "
1219			 "section start address is not aligned "
1220			 "to hwpage_size");
1221 }
1222 page_count = EHCA_SECTSIZE / pginfo->hwpage_size;
1223
1224 while (page < page_count) {
1225 u64 rnum;
1226 for (rnum = 0; (rnum < MAX_RPAGES) && (page < page_count);
1227 rnum++) {
1228 void *pg = sectbase + ((page++) * pginfo->hwpage_size);
1229 kpage[rnum] = __pa(pg);
1230 }
1231
1232 h_ret = hipz_h_register_rpage_mr(shca->ipz_hca_handle, mr,
1233 ehca_encode_hwpage_size(pginfo->hwpage_size),
1234 0, rpage, rnum);
1235
1236 if ((h_ret != H_SUCCESS) && (h_ret != H_PAGE_REGISTERED)) {
1237 ehca_err(&shca->ib_device, "register_rpage_mr failed");
1238 return h_ret;
1239 }
1240 }
1241 return h_ret;
1242}
1243
1244static u64 ehca_reg_mr_sections(int top, int dir, u64 *kpage,
1245 struct ehca_shca *shca, struct ehca_mr *mr,
1246 struct ehca_mr_pginfo *pginfo)
1247{
1248 u64 hret = H_SUCCESS;
1249 int idx;
1250
1251 for (idx = 0; idx < EHCA_MAP_ENTRIES; idx++) {
1252 if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]->ent[idx]))
1253 continue;
1254
1255 hret = ehca_reg_mr_section(top, dir, idx, kpage, shca, mr,
1256 pginfo);
1257 if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
1258 return hret;
1259 }
1260 return hret;
1261}
1262
1263static u64 ehca_reg_mr_dir_sections(int top, u64 *kpage, struct ehca_shca *shca,
1264 struct ehca_mr *mr,
1265 struct ehca_mr_pginfo *pginfo)
1266{
1267 u64 hret = H_SUCCESS;
1268 int dir;
1269
1270 for (dir = 0; dir < EHCA_MAP_ENTRIES; dir++) {
1271 if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
1272 continue;
1273
1274 hret = ehca_reg_mr_sections(top, dir, kpage, shca, mr, pginfo);
1275 if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
1276 return hret;
1277 }
1278 return hret;
1279}
1280
1281/* register internal max-MR to internal SHCA */
1282int ehca_reg_internal_maxmr(
1283 struct ehca_shca *shca,
1284 struct ehca_pd *e_pd,
1285 struct ehca_mr **e_maxmr) /*OUT*/
1286{
1287 int ret;
1288 struct ehca_mr *e_mr;
1289 u64 *iova_start;
1290 u64 size_maxmr;
1291 struct ehca_mr_pginfo pginfo;
1292 u32 num_kpages;
1293 u32 num_hwpages;
1294 u64 hw_pgsize;
1295
1296 if (!ehca_bmap) {
1297 ret = -EFAULT;
1298 goto ehca_reg_internal_maxmr_exit0;
1299 }
1300
1301 e_mr = ehca_mr_new();
1302 if (!e_mr) {
1303 ehca_err(&shca->ib_device, "out of memory");
1304 ret = -ENOMEM;
1305 goto ehca_reg_internal_maxmr_exit0;
1306 }
1307 e_mr->flags |= EHCA_MR_FLAG_MAXMR;
1308
1309 /* register internal max-MR on HCA */
1310 size_maxmr = ehca_mr_len;
1311 iova_start = (u64 *)ehca_map_vaddr((void *)(KERNELBASE + PHYSICAL_START));
1312 num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size_maxmr,
1313 PAGE_SIZE);
1314 hw_pgsize = ehca_get_max_hwpage_size(shca);
1315 num_hwpages = NUM_CHUNKS(((u64)iova_start % hw_pgsize) + size_maxmr,
1316 hw_pgsize);
1317
1318 memset(&pginfo, 0, sizeof(pginfo));
1319 pginfo.type = EHCA_MR_PGI_PHYS;
1320 pginfo.num_kpages = num_kpages;
1321 pginfo.num_hwpages = num_hwpages;
1322 pginfo.hwpage_size = hw_pgsize;
1323 pginfo.u.phy.addr = 0;
1324 pginfo.u.phy.size = size_maxmr;
1325
1326 ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd,
1327 &pginfo, &e_mr->ib.ib_mr.lkey,
1328 &e_mr->ib.ib_mr.rkey, EHCA_REG_BUSMAP_MR);
1329 if (ret) {
1330 ehca_err(&shca->ib_device, "reg of internal max MR failed, "
1331 "e_mr=%p iova_start=%p size_maxmr=%llx num_kpages=%x "
1332 "num_hwpages=%x", e_mr, iova_start, size_maxmr,
1333 num_kpages, num_hwpages);
1334 goto ehca_reg_internal_maxmr_exit1;
1335 }
1336
1337 /* successful registration of all pages */
1338 e_mr->ib.ib_mr.device = e_pd->ib_pd.device;
1339 e_mr->ib.ib_mr.pd = &e_pd->ib_pd;
1340 e_mr->ib.ib_mr.uobject = NULL;
1341 atomic_inc(&(e_pd->ib_pd.usecnt));
1342 *e_maxmr = e_mr;
1343 return 0;
1344
1345ehca_reg_internal_maxmr_exit1:
1346 ehca_mr_delete(e_mr);
1347ehca_reg_internal_maxmr_exit0:
1348 if (ret)
1349 ehca_err(&shca->ib_device, "ret=%i shca=%p e_pd=%p e_maxmr=%p",
1350 ret, shca, e_pd, e_maxmr);
1351 return ret;
1352} /* end ehca_reg_internal_maxmr() */
1353
1354/*----------------------------------------------------------------------*/
1355
1356int ehca_reg_maxmr(struct ehca_shca *shca,
1357 struct ehca_mr *e_newmr,
1358 u64 *iova_start,
1359 int acl,
1360 struct ehca_pd *e_pd,
1361 u32 *lkey,
1362 u32 *rkey)
1363{
1364 u64 h_ret;
1365 struct ehca_mr *e_origmr = shca->maxmr;
1366 u32 hipz_acl;
1367 struct ehca_mr_hipzout_parms hipzout;
1368
1369 ehca_mrmw_map_acl(acl, &hipz_acl);
1370 ehca_mrmw_set_pgsize_hipz_acl(e_origmr->hwpage_size, &hipz_acl);
1371
1372 h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
1373 (u64)iova_start, hipz_acl, e_pd->fw_pd,
1374 &hipzout);
1375 if (h_ret != H_SUCCESS) {
1376 ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lli "
1377 "e_origmr=%p hca_hndl=%llx mr_hndl=%llx lkey=%x",
1378 h_ret, e_origmr, shca->ipz_hca_handle.handle,
1379 e_origmr->ipz_mr_handle.handle,
1380 e_origmr->ib.ib_mr.lkey);
1381 return ehca2ib_return_code(h_ret);
1382 }
1383 /* successful registration */
1384 e_newmr->num_kpages = e_origmr->num_kpages;
1385 e_newmr->num_hwpages = e_origmr->num_hwpages;
1386 e_newmr->hwpage_size = e_origmr->hwpage_size;
1387 e_newmr->start = iova_start;
1388 e_newmr->size = e_origmr->size;
1389 e_newmr->acl = acl;
1390 e_newmr->ipz_mr_handle = hipzout.handle;
1391 *lkey = hipzout.lkey;
1392 *rkey = hipzout.rkey;
1393 return 0;
1394} /* end ehca_reg_maxmr() */
1395
1396/*----------------------------------------------------------------------*/
1397
1398int ehca_dereg_internal_maxmr(struct ehca_shca *shca)
1399{
1400 int ret;
1401 struct ehca_mr *e_maxmr;
1402 struct ib_pd *ib_pd;
1403
1404 if (!shca->maxmr) {
1405 ehca_err(&shca->ib_device, "bad call, shca=%p", shca);
1406 ret = -EINVAL;
1407 goto ehca_dereg_internal_maxmr_exit0;
1408 }
1409
1410 e_maxmr = shca->maxmr;
1411 ib_pd = e_maxmr->ib.ib_mr.pd;
1412 shca->maxmr = NULL; /* remove internal max-MR indication from SHCA */
1413
1414 ret = ehca_dereg_mr(&e_maxmr->ib.ib_mr);
1415 if (ret) {
1416 ehca_err(&shca->ib_device, "dereg internal max-MR failed, "
1417 "ret=%i e_maxmr=%p shca=%p lkey=%x",
1418 ret, e_maxmr, shca, e_maxmr->ib.ib_mr.lkey);
1419 shca->maxmr = e_maxmr;
1420 goto ehca_dereg_internal_maxmr_exit0;
1421 }
1422
1423 atomic_dec(&ib_pd->usecnt);
1424
1425ehca_dereg_internal_maxmr_exit0:
1426 if (ret)
1427 ehca_err(&shca->ib_device, "ret=%i shca=%p shca->maxmr=%p",
1428 ret, shca, shca->maxmr);
1429 return ret;
1430} /* end ehca_dereg_internal_maxmr() */
1431
1432/*----------------------------------------------------------------------*/
1433
1434/* check the page list of the map FMR verb for validity */
1435int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
1436 u64 *page_list,
1437 int list_len)
1438{
1439 u32 i;
1440 u64 *page;
1441
1442 if ((list_len == 0) || (list_len > e_fmr->fmr_max_pages)) {
1443 ehca_gen_err("bad list_len, list_len=%x "
1444 "e_fmr->fmr_max_pages=%x fmr=%p",
1445 list_len, e_fmr->fmr_max_pages, e_fmr);
1446 return -EINVAL;
1447 }
1448
1449 /* each page must be aligned */
1450 page = page_list;
1451 for (i = 0; i < list_len; i++) {
1452 if (*page % e_fmr->fmr_page_size) {
1453 ehca_gen_err("bad page, i=%x *page=%llx page=%p fmr=%p "
1454 "fmr_page_size=%x", i, *page, page, e_fmr,
1455 e_fmr->fmr_page_size);
1456 return -EINVAL;
1457 }
1458 page++;
1459 }
1460
1461 return 0;
1462} /* end ehca_fmr_check_page_list() */
1463
1464/*----------------------------------------------------------------------*/
1465
1466/* PAGE_SIZE >= pginfo->hwpage_size */
1467static int ehca_set_pagebuf_user1(struct ehca_mr_pginfo *pginfo,
1468 u32 number,
1469 u64 *kpage)
1470{
1471 int ret = 0;
1472 u64 pgaddr;
1473 u32 j = 0;
1474 int hwpages_per_kpage = PAGE_SIZE / pginfo->hwpage_size;
1475 struct scatterlist **sg = &pginfo->u.usr.next_sg;
1476
1477 while (*sg != NULL) {
1478 pgaddr = page_to_pfn(sg_page(*sg))
1479 << PAGE_SHIFT;
1480 *kpage = pgaddr + (pginfo->next_hwpage *
1481 pginfo->hwpage_size);
1482 if (!(*kpage)) {
1483 ehca_gen_err("pgaddr=%llx "
1484 "sg_dma_address=%llx "
1485 "entry=%llx next_hwpage=%llx",
1486 pgaddr, (u64)sg_dma_address(*sg),
1487 pginfo->u.usr.next_nmap,
1488 pginfo->next_hwpage);
1489 return -EFAULT;
1490 }
1491 (pginfo->hwpage_cnt)++;
1492 (pginfo->next_hwpage)++;
1493 kpage++;
1494 if (pginfo->next_hwpage % hwpages_per_kpage == 0) {
1495 (pginfo->kpage_cnt)++;
1496 (pginfo->u.usr.next_nmap)++;
1497 pginfo->next_hwpage = 0;
1498 *sg = sg_next(*sg);
1499 }
1500 j++;
1501 if (j >= number)
1502 break;
1503 }
1504
1505 return ret;
1506}
1507
1508/*
1509 * check given pages for contiguous layout
1510 * last page addr is returned in prev_pgaddr for further check
1511 */
1512static int ehca_check_kpages_per_ate(struct scatterlist **sg,
1513 int num_pages,
1514 u64 *prev_pgaddr)
1515{
1516 for (; *sg && num_pages > 0; *sg = sg_next(*sg), num_pages--) {
1517 u64 pgaddr = page_to_pfn(sg_page(*sg)) << PAGE_SHIFT;
1518 if (ehca_debug_level >= 3)
1519 ehca_gen_dbg("chunk_page=%llx value=%016llx", pgaddr,
1520 *(u64 *)__va(pgaddr));
1521 if (pgaddr - PAGE_SIZE != *prev_pgaddr) {
1522			ehca_gen_err("non-contiguous page found pgaddr=%llx "
1523 "prev_pgaddr=%llx entries_left_in_hwpage=%x",
1524 pgaddr, *prev_pgaddr, num_pages);
1525 return -EINVAL;
1526 }
1527 *prev_pgaddr = pgaddr;
1528 }
1529 return 0;
1530}
1531
1532/* PAGE_SIZE < pginfo->hwpage_size */
1533static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo,
1534 u32 number,
1535 u64 *kpage)
1536{
1537 int ret = 0;
1538 u64 pgaddr, prev_pgaddr;
1539 u32 j = 0;
1540 int kpages_per_hwpage = pginfo->hwpage_size / PAGE_SIZE;
1541 int nr_kpages = kpages_per_hwpage;
1542 struct scatterlist **sg = &pginfo->u.usr.next_sg;
1543
1544 while (*sg != NULL) {
1545
1546 if (nr_kpages == kpages_per_hwpage) {
1547 pgaddr = (page_to_pfn(sg_page(*sg))
1548 << PAGE_SHIFT);
1549 *kpage = pgaddr;
1550 if (!(*kpage)) {
1551 ehca_gen_err("pgaddr=%llx entry=%llx",
1552 pgaddr, pginfo->u.usr.next_nmap);
1553 ret = -EFAULT;
1554 return ret;
1555 }
1556 /*
1557 * The first page in a hwpage must be aligned;
1558 * the first MR page is exempt from this rule.
1559 */
1560 if (pgaddr & (pginfo->hwpage_size - 1)) {
1561 if (pginfo->hwpage_cnt) {
1562 ehca_gen_err(
1563 "invalid alignment "
1564 "pgaddr=%llx entry=%llx "
1565 "mr_pgsize=%llx",
1566 pgaddr, pginfo->u.usr.next_nmap,
1567 pginfo->hwpage_size);
1568 ret = -EFAULT;
1569 return ret;
1570 }
1571 /* first MR page */
1572 pginfo->kpage_cnt =
1573 (pgaddr &
1574 (pginfo->hwpage_size - 1)) >>
1575 PAGE_SHIFT;
1576 nr_kpages -= pginfo->kpage_cnt;
1577 *kpage = pgaddr &
1578 ~(pginfo->hwpage_size - 1);
1579 }
1580 if (ehca_debug_level >= 3) {
1581 u64 val = *(u64 *)__va(pgaddr);
1582 ehca_gen_dbg("kpage=%llx page=%llx "
1583 "value=%016llx",
1584 *kpage, pgaddr, val);
1585 }
1586 prev_pgaddr = pgaddr;
1587 *sg = sg_next(*sg);
1588 pginfo->kpage_cnt++;
1589 pginfo->u.usr.next_nmap++;
1590 nr_kpages--;
1591 if (!nr_kpages)
1592 goto next_kpage;
1593 continue;
1594 }
1595
1596 ret = ehca_check_kpages_per_ate(sg, nr_kpages,
1597 &prev_pgaddr);
1598 if (ret)
1599 return ret;
1600 pginfo->kpage_cnt += nr_kpages;
1601 pginfo->u.usr.next_nmap += nr_kpages;
1602
1603next_kpage:
1604 nr_kpages = kpages_per_hwpage;
1605 (pginfo->hwpage_cnt)++;
1606 kpage++;
1607 j++;
1608 if (j >= number)
1609 break;
1610 }
1611
1612 return ret;
1613}
1614
1615static int ehca_set_pagebuf_phys(struct ehca_mr_pginfo *pginfo,
1616 u32 number, u64 *kpage)
1617{
1618 int ret = 0;
1619 u64 addr = pginfo->u.phy.addr;
1620 u64 size = pginfo->u.phy.size;
1621 u64 num_hw, offs_hw;
1622 u32 i = 0;
1623
1624 num_hw = NUM_CHUNKS((addr % pginfo->hwpage_size) + size,
1625 pginfo->hwpage_size);
1626 offs_hw = (addr & ~(pginfo->hwpage_size - 1)) / pginfo->hwpage_size;
1627
1628 while (pginfo->next_hwpage < offs_hw + num_hw) {
1629 /* sanity check */
1630 if ((pginfo->kpage_cnt >= pginfo->num_kpages) ||
1631 (pginfo->hwpage_cnt >= pginfo->num_hwpages)) {
1632 ehca_gen_err("kpage_cnt >= num_kpages, "
1633 "kpage_cnt=%llx num_kpages=%llx "
1634 "hwpage_cnt=%llx "
1635 "num_hwpages=%llx i=%x",
1636 pginfo->kpage_cnt,
1637 pginfo->num_kpages,
1638 pginfo->hwpage_cnt,
1639 pginfo->num_hwpages, i);
1640 return -EFAULT;
1641 }
1642 *kpage = (addr & ~(pginfo->hwpage_size - 1)) +
1643 (pginfo->next_hwpage * pginfo->hwpage_size);
1644 if ( !(*kpage) && addr ) {
1645 ehca_gen_err("addr=%llx size=%llx "
1646 "next_hwpage=%llx", addr,
1647 size, pginfo->next_hwpage);
1648 return -EFAULT;
1649 }
1650 (pginfo->hwpage_cnt)++;
1651 (pginfo->next_hwpage)++;
1652 if (PAGE_SIZE >= pginfo->hwpage_size) {
1653 if (pginfo->next_hwpage %
1654 (PAGE_SIZE / pginfo->hwpage_size) == 0)
1655 (pginfo->kpage_cnt)++;
1656 } else
1657 pginfo->kpage_cnt += pginfo->hwpage_size /
1658 PAGE_SIZE;
1659 kpage++;
1660 i++;
1661 if (i >= number) break;
1662 }
1663 if (pginfo->next_hwpage >= offs_hw + num_hw) {
1664 pginfo->next_hwpage = 0;
1665 }
1666
1667 return ret;
1668}
1669
1670static int ehca_set_pagebuf_fmr(struct ehca_mr_pginfo *pginfo,
1671 u32 number, u64 *kpage)
1672{
1673 int ret = 0;
1674 u64 *fmrlist;
1675 u32 i;
1676
1677 /* loop over desired page_list entries */
1678 fmrlist = pginfo->u.fmr.page_list + pginfo->u.fmr.next_listelem;
1679 for (i = 0; i < number; i++) {
1680 *kpage = (*fmrlist & ~(pginfo->hwpage_size - 1)) +
1681 pginfo->next_hwpage * pginfo->hwpage_size;
1682 if ( !(*kpage) ) {
1683 ehca_gen_err("*fmrlist=%llx fmrlist=%p "
1684 "next_listelem=%llx next_hwpage=%llx",
1685 *fmrlist, fmrlist,
1686 pginfo->u.fmr.next_listelem,
1687 pginfo->next_hwpage);
1688 return -EFAULT;
1689 }
1690 (pginfo->hwpage_cnt)++;
1691 if (pginfo->u.fmr.fmr_pgsize >= pginfo->hwpage_size) {
1692 if (pginfo->next_hwpage %
1693 (pginfo->u.fmr.fmr_pgsize /
1694 pginfo->hwpage_size) == 0) {
1695 (pginfo->kpage_cnt)++;
1696 (pginfo->u.fmr.next_listelem)++;
1697 fmrlist++;
1698 pginfo->next_hwpage = 0;
1699 } else
1700 (pginfo->next_hwpage)++;
1701 } else {
1702 unsigned int cnt_per_hwpage = pginfo->hwpage_size /
1703 pginfo->u.fmr.fmr_pgsize;
1704 unsigned int j;
1705 u64 prev = *kpage;
1706 /* check if adrs are contiguous */
1707 for (j = 1; j < cnt_per_hwpage; j++) {
1708 u64 p = fmrlist[j] & ~(pginfo->hwpage_size - 1);
1709 if (prev + pginfo->u.fmr.fmr_pgsize != p) {
1710 ehca_gen_err("uncontiguous fmr pages "
1711 "found prev=%llx p=%llx "
1712 "idx=%x", prev, p, i + j);
1713 return -EINVAL;
1714 }
1715 prev = p;
1716 }
1717 pginfo->kpage_cnt += cnt_per_hwpage;
1718 pginfo->u.fmr.next_listelem += cnt_per_hwpage;
1719 fmrlist += cnt_per_hwpage;
1720 }
1721 kpage++;
1722 }
1723 return ret;
1724}
1725
1726/* setup page buffer from page info */
1727int ehca_set_pagebuf(struct ehca_mr_pginfo *pginfo,
1728 u32 number,
1729 u64 *kpage)
1730{
1731 int ret;
1732
1733 switch (pginfo->type) {
1734 case EHCA_MR_PGI_PHYS:
1735 ret = ehca_set_pagebuf_phys(pginfo, number, kpage);
1736 break;
1737 case EHCA_MR_PGI_USER:
1738 ret = PAGE_SIZE >= pginfo->hwpage_size ?
1739 ehca_set_pagebuf_user1(pginfo, number, kpage) :
1740 ehca_set_pagebuf_user2(pginfo, number, kpage);
1741 break;
1742 case EHCA_MR_PGI_FMR:
1743 ret = ehca_set_pagebuf_fmr(pginfo, number, kpage);
1744 break;
1745 default:
1746 ehca_gen_err("bad pginfo->type=%x", pginfo->type);
1747 ret = -EFAULT;
1748 break;
1749 }
1750 return ret;
1751} /* end ehca_set_pagebuf() */
1752
1753/*----------------------------------------------------------------------*/
1754
1755/*
1756 * check whether the MR is a max-MR, i.e. covers all of memory;
1757 * returns 1 if it is a max-MR, else 0
1758 */
1759int ehca_mr_is_maxmr(u64 size,
1760 u64 *iova_start)
1761{
1762 /* an MR is treated as a max-MR only if it meets the following: */
1763 if ((size == ehca_mr_len) &&
1764 (iova_start == (void *)ehca_map_vaddr((void *)(KERNELBASE + PHYSICAL_START)))) {
1765 ehca_gen_dbg("this is a max-MR");
1766 return 1;
1767 } else
1768 return 0;
1769} /* end ehca_mr_is_maxmr() */
1770
1771/*----------------------------------------------------------------------*/
1772
1773/* map access control for MR/MW. This routine is used for MR and MW. */
1774void ehca_mrmw_map_acl(int ib_acl,
1775 u32 *hipz_acl)
1776{
1777 *hipz_acl = 0;
1778 if (ib_acl & IB_ACCESS_REMOTE_READ)
1779 *hipz_acl |= HIPZ_ACCESSCTRL_R_READ;
1780 if (ib_acl & IB_ACCESS_REMOTE_WRITE)
1781 *hipz_acl |= HIPZ_ACCESSCTRL_R_WRITE;
1782 if (ib_acl & IB_ACCESS_REMOTE_ATOMIC)
1783 *hipz_acl |= HIPZ_ACCESSCTRL_R_ATOMIC;
1784 if (ib_acl & IB_ACCESS_LOCAL_WRITE)
1785 *hipz_acl |= HIPZ_ACCESSCTRL_L_WRITE;
1786 if (ib_acl & IB_ACCESS_MW_BIND)
1787 *hipz_acl |= HIPZ_ACCESSCTRL_MW_BIND;
1788} /* end ehca_mrmw_map_acl() */
1789
1790/*----------------------------------------------------------------------*/
1791
1792/* sets page size in hipz access control for MR/MW. */
1793void ehca_mrmw_set_pgsize_hipz_acl(u32 pgsize, u32 *hipz_acl) /*INOUT*/
1794{
1795 *hipz_acl |= (ehca_encode_hwpage_size(pgsize) << 24);
1796} /* end ehca_mrmw_set_pgsize_hipz_acl() */
1797
1798/*----------------------------------------------------------------------*/
1799
1800/*
1801 * reverse map access control for MR/MW.
1802 * This routine is used for MR and MW.
1803 */
1804void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
1805 int *ib_acl) /*OUT*/
1806{
1807 *ib_acl = 0;
1808 if (*hipz_acl & HIPZ_ACCESSCTRL_R_READ)
1809 *ib_acl |= IB_ACCESS_REMOTE_READ;
1810 if (*hipz_acl & HIPZ_ACCESSCTRL_R_WRITE)
1811 *ib_acl |= IB_ACCESS_REMOTE_WRITE;
1812 if (*hipz_acl & HIPZ_ACCESSCTRL_R_ATOMIC)
1813 *ib_acl |= IB_ACCESS_REMOTE_ATOMIC;
1814 if (*hipz_acl & HIPZ_ACCESSCTRL_L_WRITE)
1815 *ib_acl |= IB_ACCESS_LOCAL_WRITE;
1816 if (*hipz_acl & HIPZ_ACCESSCTRL_MW_BIND)
1817 *ib_acl |= IB_ACCESS_MW_BIND;
1818} /* end ehca_mrmw_reverse_map_acl() */
1819
1820
1821/*----------------------------------------------------------------------*/
1822
1823/*
1824 * MR destructor and constructor
1825 * used by the Reregister MR verb; sets all fields in struct ehca_mr to 0,
1826 * except struct ib_mr and the spinlock
1827 */
1828void ehca_mr_deletenew(struct ehca_mr *mr)
1829{
1830 mr->flags = 0;
1831 mr->num_kpages = 0;
1832 mr->num_hwpages = 0;
1833 mr->acl = 0;
1834 mr->start = NULL;
1835 mr->fmr_page_size = 0;
1836 mr->fmr_max_pages = 0;
1837 mr->fmr_max_maps = 0;
1838 mr->fmr_map_cnt = 0;
1839 memset(&mr->ipz_mr_handle, 0, sizeof(mr->ipz_mr_handle));
1840 memset(&mr->galpas, 0, sizeof(mr->galpas));
1841} /* end ehca_mr_deletenew() */
1842
1843int ehca_init_mrmw_cache(void)
1844{
1845 mr_cache = kmem_cache_create("ehca_cache_mr",
1846 sizeof(struct ehca_mr), 0,
1847 SLAB_HWCACHE_ALIGN,
1848 NULL);
1849 if (!mr_cache)
1850 return -ENOMEM;
1851 mw_cache = kmem_cache_create("ehca_cache_mw",
1852 sizeof(struct ehca_mw), 0,
1853 SLAB_HWCACHE_ALIGN,
1854 NULL);
1855 if (!mw_cache) {
1856 kmem_cache_destroy(mr_cache);
1857 mr_cache = NULL;
1858 return -ENOMEM;
1859 }
1860 return 0;
1861}
1862
1863void ehca_cleanup_mrmw_cache(void)
1864{
1865 kmem_cache_destroy(mr_cache);
1866 kmem_cache_destroy(mw_cache);
1867}
1868
1869static inline int ehca_init_top_bmap(struct ehca_top_bmap *ehca_top_bmap,
1870 int dir)
1871{
1872 if (!ehca_bmap_valid(ehca_top_bmap->dir[dir])) {
1873 ehca_top_bmap->dir[dir] =
1874 kmalloc(sizeof(struct ehca_dir_bmap), GFP_KERNEL);
1875 if (!ehca_top_bmap->dir[dir])
1876 return -ENOMEM;
1877 /* Set map block to 0xFF according to EHCA_INVAL_ADDR */
1878 memset(ehca_top_bmap->dir[dir], 0xFF, EHCA_ENT_MAP_SIZE);
1879 }
1880 return 0;
1881}
1882
1883static inline int ehca_init_bmap(struct ehca_bmap *ehca_bmap, int top, int dir)
1884{
1885 if (!ehca_bmap_valid(ehca_bmap->top[top])) {
1886 ehca_bmap->top[top] =
1887 kmalloc(sizeof(struct ehca_top_bmap), GFP_KERNEL);
1888 if (!ehca_bmap->top[top])
1889 return -ENOMEM;
1890 /* Set map block to 0xFF according to EHCA_INVAL_ADDR */
1891 memset(ehca_bmap->top[top], 0xFF, EHCA_DIR_MAP_SIZE);
1892 }
1893 return ehca_init_top_bmap(ehca_bmap->top[top], dir);
1894}
1895
1896static inline int ehca_calc_index(unsigned long i, unsigned long s)
1897{
1898 return (i >> s) & EHCA_INDEX_MASK;
1899}
1900
1901void ehca_destroy_busmap(void)
1902{
1903 int top, dir;
1904
1905 if (!ehca_bmap)
1906 return;
1907
1908 for (top = 0; top < EHCA_MAP_ENTRIES; top++) {
1909 if (!ehca_bmap_valid(ehca_bmap->top[top]))
1910 continue;
1911 for (dir = 0; dir < EHCA_MAP_ENTRIES; dir++) {
1912 if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
1913 continue;
1914
1915 kfree(ehca_bmap->top[top]->dir[dir]);
1916 }
1917
1918 kfree(ehca_bmap->top[top]);
1919 }
1920
1921 kfree(ehca_bmap);
1922 ehca_bmap = NULL;
1923}
1924
1925static int ehca_update_busmap(unsigned long pfn, unsigned long nr_pages)
1926{
1927 unsigned long i, start_section, end_section;
1928 int top, dir, idx;
1929
1930 if (!nr_pages)
1931 return 0;
1932
1933 if (!ehca_bmap) {
1934 ehca_bmap = kmalloc(sizeof(struct ehca_bmap), GFP_KERNEL);
1935 if (!ehca_bmap)
1936 return -ENOMEM;
1937 /* Set map block to 0xFF according to EHCA_INVAL_ADDR */
1938 memset(ehca_bmap, 0xFF, EHCA_TOP_MAP_SIZE);
1939 }
1940
1941 start_section = (pfn * PAGE_SIZE) / EHCA_SECTSIZE;
1942 end_section = ((pfn + nr_pages) * PAGE_SIZE) / EHCA_SECTSIZE;
1943 for (i = start_section; i < end_section; i++) {
1944 int ret;
1945 top = ehca_calc_index(i, EHCA_TOP_INDEX_SHIFT);
1946 dir = ehca_calc_index(i, EHCA_DIR_INDEX_SHIFT);
1947 idx = i & EHCA_INDEX_MASK;
1948
1949 ret = ehca_init_bmap(ehca_bmap, top, dir);
1950 if (ret) {
1951 ehca_destroy_busmap();
1952 return ret;
1953 }
1954 ehca_bmap->top[top]->dir[dir]->ent[idx] = ehca_mr_len;
1955 ehca_mr_len += EHCA_SECTSIZE;
1956 }
1957 return 0;
1958}
1959
1960static int ehca_is_hugepage(unsigned long pfn)
1961{
1962 int page_order;
1963
1964 if (pfn & EHCA_HUGEPAGE_PFN_MASK)
1965 return 0;
1966
1967 page_order = compound_order(pfn_to_page(pfn));
1968 if (page_order + PAGE_SHIFT != EHCA_HUGEPAGESHIFT)
1969 return 0;
1970
1971 return 1;
1972}
1973
1974static int ehca_create_busmap_callback(unsigned long initial_pfn,
1975 unsigned long total_nr_pages, void *arg)
1976{
1977 int ret;
1978 unsigned long pfn, start_pfn, end_pfn, nr_pages;
1979
1980 if ((total_nr_pages * PAGE_SIZE) < EHCA_HUGEPAGE_SIZE)
1981 return ehca_update_busmap(initial_pfn, total_nr_pages);
1982
1983 /* Given chunk is >= 16GB -> check for hugepages */
1984 start_pfn = initial_pfn;
1985 end_pfn = initial_pfn + total_nr_pages;
1986 pfn = start_pfn;
1987
1988 while (pfn < end_pfn) {
1989 if (ehca_is_hugepage(pfn)) {
1990 /* Add mem found in front of the hugepage */
1991 nr_pages = pfn - start_pfn;
1992 ret = ehca_update_busmap(start_pfn, nr_pages);
1993 if (ret)
1994 return ret;
1995 /* Skip the hugepage */
1996 pfn += (EHCA_HUGEPAGE_SIZE / PAGE_SIZE);
1997 start_pfn = pfn;
1998 } else
1999 pfn += (EHCA_SECTSIZE / PAGE_SIZE);
2000 }
2001
2002 /* Add mem found behind the hugepage(s) */
2003 nr_pages = pfn - start_pfn;
2004 return ehca_update_busmap(start_pfn, nr_pages);
2005}
2006
2007int ehca_create_busmap(void)
2008{
2009 int ret;
2010
2011 ehca_mr_len = 0;
2012 ret = walk_system_ram_range(0, 1ULL << MAX_PHYSMEM_BITS, NULL,
2013 ehca_create_busmap_callback);
2014 return ret;
2015}
2016
2017static int ehca_reg_bmap_mr_rpages(struct ehca_shca *shca,
2018 struct ehca_mr *e_mr,
2019 struct ehca_mr_pginfo *pginfo)
2020{
2021 int top;
2022 u64 hret, *kpage;
2023
2024 kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
2025 if (!kpage) {
2026 ehca_err(&shca->ib_device, "kpage alloc failed");
2027 return -ENOMEM;
2028 }
2029 for (top = 0; top < EHCA_MAP_ENTRIES; top++) {
2030 if (!ehca_bmap_valid(ehca_bmap->top[top]))
2031 continue;
2032 hret = ehca_reg_mr_dir_sections(top, kpage, shca, e_mr, pginfo);
2033 if ((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS))
2034 break;
2035 }
2036
2037 ehca_free_fw_ctrlblock(kpage);
2038
2039 if (hret == H_SUCCESS)
2040 return 0; /* Everything is fine */
2041 else {
2042 ehca_err(&shca->ib_device, "ehca_reg_bmap_mr_rpages failed, "
2043 "h_ret=%lli e_mr=%p top=%x lkey=%x "
2044 "hca_hndl=%llx mr_hndl=%llx", hret, e_mr, top,
2045 e_mr->ib.ib_mr.lkey,
2046 shca->ipz_hca_handle.handle,
2047 e_mr->ipz_mr_handle.handle);
2048 return ehca2ib_return_code(hret);
2049 }
2050}
2051
2052static u64 ehca_map_vaddr(void *caddr)
2053{
2054 int top, dir, idx;
2055 unsigned long abs_addr, offset;
2056 u64 entry;
2057
2058 if (!ehca_bmap)
2059 return EHCA_INVAL_ADDR;
2060
2061 abs_addr = __pa(caddr);
2062 top = ehca_calc_index(abs_addr, EHCA_TOP_INDEX_SHIFT + EHCA_SECTSHIFT);
2063 if (!ehca_bmap_valid(ehca_bmap->top[top]))
2064 return EHCA_INVAL_ADDR;
2065
2066 dir = ehca_calc_index(abs_addr, EHCA_DIR_INDEX_SHIFT + EHCA_SECTSHIFT);
2067 if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
2068 return EHCA_INVAL_ADDR;
2069
2070 idx = ehca_calc_index(abs_addr, EHCA_SECTSHIFT);
2071
2072 entry = ehca_bmap->top[top]->dir[dir]->ent[idx];
2073 if (ehca_bmap_valid(entry)) {
2074 offset = (unsigned long)caddr & (EHCA_SECTSIZE - 1);
2075 return entry | offset;
2076 } else
2077 return EHCA_INVAL_ADDR;
2078}
2079
2080static int ehca_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
2081{
2082 return dma_addr == EHCA_INVAL_ADDR;
2083}
2084
2085static u64 ehca_dma_map_single(struct ib_device *dev, void *cpu_addr,
2086 size_t size, enum dma_data_direction direction)
2087{
2088 if (cpu_addr)
2089 return ehca_map_vaddr(cpu_addr);
2090 else
2091 return EHCA_INVAL_ADDR;
2092}
2093
2094static void ehca_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size,
2095 enum dma_data_direction direction)
2096{
2097 /* This is only a stub; nothing to be done here */
2098}
2099
2100static u64 ehca_dma_map_page(struct ib_device *dev, struct page *page,
2101 unsigned long offset, size_t size,
2102 enum dma_data_direction direction)
2103{
2104 u64 addr;
2105
2106 if (offset + size > PAGE_SIZE)
2107 return EHCA_INVAL_ADDR;
2108
2109 addr = ehca_map_vaddr(page_address(page));
2110 if (!ehca_dma_mapping_error(dev, addr))
2111 addr += offset;
2112
2113 return addr;
2114}
2115
2116static void ehca_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size,
2117 enum dma_data_direction direction)
2118{
2119 /* This is only a stub; nothing to be done here */
2120}
2121
2122static int ehca_dma_map_sg(struct ib_device *dev, struct scatterlist *sgl,
2123 int nents, enum dma_data_direction direction)
2124{
2125 struct scatterlist *sg;
2126 int i;
2127
2128 for_each_sg(sgl, sg, nents, i) {
2129 u64 addr;
2130 addr = ehca_map_vaddr(sg_virt(sg));
2131 if (ehca_dma_mapping_error(dev, addr))
2132 return 0;
2133
2134 sg->dma_address = addr;
2135 sg->dma_length = sg->length;
2136 }
2137 return nents;
2138}
2139
2140static void ehca_dma_unmap_sg(struct ib_device *dev, struct scatterlist *sg,
2141 int nents, enum dma_data_direction direction)
2142{
2143 /* This is only a stub; nothing to be done here */
2144}
2145
2146static void ehca_dma_sync_single_for_cpu(struct ib_device *dev, u64 addr,
2147 size_t size,
2148 enum dma_data_direction dir)
2149{
2150 dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
2151}
2152
2153static void ehca_dma_sync_single_for_device(struct ib_device *dev, u64 addr,
2154 size_t size,
2155 enum dma_data_direction dir)
2156{
2157 dma_sync_single_for_device(dev->dma_device, addr, size, dir);
2158}
2159
2160static void *ehca_dma_alloc_coherent(struct ib_device *dev, size_t size,
2161 u64 *dma_handle, gfp_t flag)
2162{
2163 struct page *p;
2164 void *addr = NULL;
2165 u64 dma_addr;
2166
2167 p = alloc_pages(flag, get_order(size));
2168 if (p) {
2169 addr = page_address(p);
2170 dma_addr = ehca_map_vaddr(addr);
2171 if (ehca_dma_mapping_error(dev, dma_addr)) {
2172 free_pages((unsigned long)addr, get_order(size));
2173 return NULL;
2174 }
2175 if (dma_handle)
2176 *dma_handle = dma_addr;
2177 return addr;
2178 }
2179 return NULL;
2180}
2181
2182static void ehca_dma_free_coherent(struct ib_device *dev, size_t size,
2183 void *cpu_addr, u64 dma_handle)
2184{
2185 if (cpu_addr && size)
2186 free_pages((unsigned long)cpu_addr, get_order(size));
2187}
2188
2189
2190struct ib_dma_mapping_ops ehca_dma_mapping_ops = {
2191 .mapping_error = ehca_dma_mapping_error,
2192 .map_single = ehca_dma_map_single,
2193 .unmap_single = ehca_dma_unmap_single,
2194 .map_page = ehca_dma_map_page,
2195 .unmap_page = ehca_dma_unmap_page,
2196 .map_sg = ehca_dma_map_sg,
2197 .unmap_sg = ehca_dma_unmap_sg,
2198 .sync_single_for_cpu = ehca_dma_sync_single_for_cpu,
2199 .sync_single_for_device = ehca_dma_sync_single_for_device,
2200 .alloc_coherent = ehca_dma_alloc_coherent,
2201 .free_coherent = ehca_dma_free_coherent,
2202};
diff --git a/drivers/staging/rdma/ehca/ehca_mrmw.h b/drivers/staging/rdma/ehca/ehca_mrmw.h
deleted file mode 100644
index 52bfa95697f7..000000000000
--- a/drivers/staging/rdma/ehca/ehca_mrmw.h
+++ /dev/null
@@ -1,127 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * MR/MW declarations and inline functions
5 *
6 * Authors: Dietmar Decker <ddecker@de.ibm.com>
7 * Christoph Raisch <raisch@de.ibm.com>
8 *
9 * Copyright (c) 2005 IBM Corporation
10 *
11 * All rights reserved.
12 *
13 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
14 * BSD.
15 *
16 * OpenIB BSD License
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are met:
20 *
21 * Redistributions of source code must retain the above copyright notice, this
22 * list of conditions and the following disclaimer.
23 *
24 * Redistributions in binary form must reproduce the above copyright notice,
25 * this list of conditions and the following disclaimer in the documentation
26 * and/or other materials
27 * provided with the distribution.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
30 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
33 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
34 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
35 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
36 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
37 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
38 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
39 * POSSIBILITY OF SUCH DAMAGE.
40 */
41
42#ifndef _EHCA_MRMW_H_
43#define _EHCA_MRMW_H_
44
45enum ehca_reg_type {
46 EHCA_REG_MR,
47 EHCA_REG_BUSMAP_MR
48};
49
50int ehca_reg_mr(struct ehca_shca *shca,
51 struct ehca_mr *e_mr,
52 u64 *iova_start,
53 u64 size,
54 int acl,
55 struct ehca_pd *e_pd,
56 struct ehca_mr_pginfo *pginfo,
57 u32 *lkey,
58 u32 *rkey,
59 enum ehca_reg_type reg_type);
60
61int ehca_reg_mr_rpages(struct ehca_shca *shca,
62 struct ehca_mr *e_mr,
63 struct ehca_mr_pginfo *pginfo);
64
65int ehca_rereg_mr(struct ehca_shca *shca,
66 struct ehca_mr *e_mr,
67 u64 *iova_start,
68 u64 size,
69 int mr_access_flags,
70 struct ehca_pd *e_pd,
71 struct ehca_mr_pginfo *pginfo,
72 u32 *lkey,
73 u32 *rkey);
74
75int ehca_unmap_one_fmr(struct ehca_shca *shca,
76 struct ehca_mr *e_fmr);
77
78int ehca_reg_smr(struct ehca_shca *shca,
79 struct ehca_mr *e_origmr,
80 struct ehca_mr *e_newmr,
81 u64 *iova_start,
82 int acl,
83 struct ehca_pd *e_pd,
84 u32 *lkey,
85 u32 *rkey);
86
87int ehca_reg_internal_maxmr(struct ehca_shca *shca,
88 struct ehca_pd *e_pd,
89 struct ehca_mr **maxmr);
90
91int ehca_reg_maxmr(struct ehca_shca *shca,
92 struct ehca_mr *e_newmr,
93 u64 *iova_start,
94 int acl,
95 struct ehca_pd *e_pd,
96 u32 *lkey,
97 u32 *rkey);
98
99int ehca_dereg_internal_maxmr(struct ehca_shca *shca);
100
101int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
102 u64 *page_list,
103 int list_len);
104
105int ehca_set_pagebuf(struct ehca_mr_pginfo *pginfo,
106 u32 number,
107 u64 *kpage);
108
109int ehca_mr_is_maxmr(u64 size,
110 u64 *iova_start);
111
112void ehca_mrmw_map_acl(int ib_acl,
113 u32 *hipz_acl);
114
115void ehca_mrmw_set_pgsize_hipz_acl(u32 pgsize, u32 *hipz_acl);
116
117void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
118 int *ib_acl);
119
120void ehca_mr_deletenew(struct ehca_mr *mr);
121
122int ehca_create_busmap(void);
123
124void ehca_destroy_busmap(void);
125
126extern struct ib_dma_mapping_ops ehca_dma_mapping_ops;
127#endif /*_EHCA_MRMW_H_*/
diff --git a/drivers/staging/rdma/ehca/ehca_pd.c b/drivers/staging/rdma/ehca/ehca_pd.c
deleted file mode 100644
index 2a8aae411941..000000000000
--- a/drivers/staging/rdma/ehca/ehca_pd.c
+++ /dev/null
@@ -1,123 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * PD functions
5 *
6 * Authors: Christoph Raisch <raisch@de.ibm.com>
7 *
8 * Copyright (c) 2005 IBM Corporation
9 *
10 * All rights reserved.
11 *
12 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
13 * BSD.
14 *
15 * OpenIB BSD License
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are met:
19 *
20 * Redistributions of source code must retain the above copyright notice, this
21 * list of conditions and the following disclaimer.
22 *
23 * Redistributions in binary form must reproduce the above copyright notice,
24 * this list of conditions and the following disclaimer in the documentation
25 * and/or other materials
26 * provided with the distribution.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
29 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
32 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
33 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
34 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
35 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
36 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
37 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
38 * POSSIBILITY OF SUCH DAMAGE.
39 */
40
41#include <linux/slab.h>
42
43#include "ehca_tools.h"
44#include "ehca_iverbs.h"
45
46static struct kmem_cache *pd_cache;
47
48struct ib_pd *ehca_alloc_pd(struct ib_device *device,
49 struct ib_ucontext *context, struct ib_udata *udata)
50{
51 struct ehca_pd *pd;
52 int i;
53
54 pd = kmem_cache_zalloc(pd_cache, GFP_KERNEL);
55 if (!pd) {
56 ehca_err(device, "device=%p context=%p out of memory",
57 device, context);
58 return ERR_PTR(-ENOMEM);
59 }
60
61 for (i = 0; i < 2; i++) {
62 INIT_LIST_HEAD(&pd->free[i]);
63 INIT_LIST_HEAD(&pd->full[i]);
64 }
65 mutex_init(&pd->lock);
66
67 /*
68 * Kernel PD: allocated with context == NULL
69 * User PD: allocated with context != NULL
70 */
71 if (!context) {
72 /*
73 * After init, kernel PDs always reuse
74 * the one created in ehca_shca_reopen()
75 */
76 struct ehca_shca *shca = container_of(device, struct ehca_shca,
77 ib_device);
78 pd->fw_pd.value = shca->pd->fw_pd.value;
79 } else
80 pd->fw_pd.value = (u64)pd;
81
82 return &pd->ib_pd;
83}
84
85int ehca_dealloc_pd(struct ib_pd *pd)
86{
87 struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd);
88 int i, leftovers = 0;
89 struct ipz_small_queue_page *page, *tmp;
90
91 for (i = 0; i < 2; i++) {
92 list_splice(&my_pd->full[i], &my_pd->free[i]);
93 list_for_each_entry_safe(page, tmp, &my_pd->free[i], list) {
94 leftovers = 1;
95 free_page(page->page);
96 kmem_cache_free(small_qp_cache, page);
97 }
98 }
99
100 if (leftovers)
101 ehca_warn(pd->device,
102 "Some small queue pages were not freed");
103
104 kmem_cache_free(pd_cache, my_pd);
105
106 return 0;
107}
108
109int ehca_init_pd_cache(void)
110{
111 pd_cache = kmem_cache_create("ehca_cache_pd",
112 sizeof(struct ehca_pd), 0,
113 SLAB_HWCACHE_ALIGN,
114 NULL);
115 if (!pd_cache)
116 return -ENOMEM;
117 return 0;
118}
119
120void ehca_cleanup_pd_cache(void)
121{
122 kmem_cache_destroy(pd_cache);
123}
diff --git a/drivers/staging/rdma/ehca/ehca_qes.h b/drivers/staging/rdma/ehca/ehca_qes.h
deleted file mode 100644
index 90c4efa67586..000000000000
--- a/drivers/staging/rdma/ehca/ehca_qes.h
+++ /dev/null
@@ -1,260 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * Hardware request structures
5 *
6 * Authors: Waleri Fomin <fomin@de.ibm.com>
7 * Reinhard Ernst <rernst@de.ibm.com>
8 * Christoph Raisch <raisch@de.ibm.com>
9 *
10 * Copyright (c) 2005 IBM Corporation
11 *
12 * All rights reserved.
13 *
14 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
15 * BSD.
16 *
17 * OpenIB BSD License
18 *
19 * Redistribution and use in source and binary forms, with or without
20 * modification, are permitted provided that the following conditions are met:
21 *
22 * Redistributions of source code must retain the above copyright notice, this
23 * list of conditions and the following disclaimer.
24 *
25 * Redistributions in binary form must reproduce the above copyright notice,
26 * this list of conditions and the following disclaimer in the documentation
27 * and/or other materials
28 * provided with the distribution.
29 *
30 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
31 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
32 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
33 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
34 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
35 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
36 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
37 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
38 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
39 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
40 * POSSIBILITY OF SUCH DAMAGE.
41 */
42
43
44#ifndef _EHCA_QES_H_
45#define _EHCA_QES_H_
46
47#include "ehca_tools.h"
48
49/* virtual scatter gather entry to specify remote addresses with length */
50struct ehca_vsgentry {
51 u64 vaddr;
52 u32 lkey;
53 u32 length;
54};
55
56#define GRH_FLAG_MASK EHCA_BMASK_IBM( 7, 7)
57#define GRH_IPVERSION_MASK EHCA_BMASK_IBM( 0, 3)
58#define GRH_TCLASS_MASK EHCA_BMASK_IBM( 4, 12)
59#define GRH_FLOWLABEL_MASK EHCA_BMASK_IBM(13, 31)
60#define GRH_PAYLEN_MASK EHCA_BMASK_IBM(32, 47)
61#define GRH_NEXTHEADER_MASK EHCA_BMASK_IBM(48, 55)
62#define GRH_HOPLIMIT_MASK EHCA_BMASK_IBM(56, 63)
63
64/*
65 * Unreliable Datagram Address Vector Format
66 * see IBTA Vol1 chapter 8.3 Global Routing Header
67 */
68struct ehca_ud_av {
69 u8 sl;
70 u8 lnh;
71 u16 dlid;
72 u8 reserved1;
73 u8 reserved2;
74 u8 reserved3;
75 u8 slid_path_bits;
76 u8 reserved4;
77 u8 ipd;
78 u8 reserved5;
79 u8 pmtu;
80 u32 reserved6;
81 u64 reserved7;
82 union {
83 struct {
84 u64 word_0; /* always set to 6 */
86 /* should be 0x1B for IB transport */
86 u64 word_1;
87 u64 word_2;
88 u64 word_3;
89 u64 word_4;
90 } grh;
91 struct {
92 u32 wd_0;
93 u32 wd_1;
94 /* DWord_1 --> SGID */
95
96 u32 sgid_wd3;
97 u32 sgid_wd2;
98
99 u32 sgid_wd1;
100 u32 sgid_wd0;
101 /* DWord_3 --> DGID */
102
103 u32 dgid_wd3;
104 u32 dgid_wd2;
105
106 u32 dgid_wd1;
107 u32 dgid_wd0;
108 } grh_l;
109 };
110};
111
112/* maximum number of sg entries allowed in a WQE */
113#define MAX_WQE_SG_ENTRIES 252
114
115#define WQE_OPTYPE_SEND 0x80
116#define WQE_OPTYPE_RDMAREAD 0x40
117#define WQE_OPTYPE_RDMAWRITE 0x20
118#define WQE_OPTYPE_CMPSWAP 0x10
119#define WQE_OPTYPE_FETCHADD 0x08
120#define WQE_OPTYPE_BIND 0x04
121
122#define WQE_WRFLAG_REQ_SIGNAL_COM 0x80
123#define WQE_WRFLAG_FENCE 0x40
124#define WQE_WRFLAG_IMM_DATA_PRESENT 0x20
125#define WQE_WRFLAG_SOLIC_EVENT 0x10
126
127#define WQEF_CACHE_HINT 0x80
128#define WQEF_CACHE_HINT_RD_WR 0x40
129#define WQEF_TIMED_WQE 0x20
130#define WQEF_PURGE 0x08
131#define WQEF_HIGH_NIBBLE 0xF0
132
133#define MW_BIND_ACCESSCTRL_R_WRITE 0x40
134#define MW_BIND_ACCESSCTRL_R_READ 0x20
135#define MW_BIND_ACCESSCTRL_R_ATOMIC 0x10
136
137struct ehca_wqe {
138 u64 work_request_id;
139 u8 optype;
140 u8 wr_flag;
141 u16 pkeyi;
142 u8 wqef;
143 u8 nr_of_data_seg;
144 u16 wqe_provided_slid;
145 u32 destination_qp_number;
146 u32 resync_psn_sqp;
147 u32 local_ee_context_qkey;
148 u32 immediate_data;
149 union {
150 struct {
151 u64 remote_virtual_address;
152 u32 rkey;
153 u32 reserved;
154 u64 atomic_1st_op_dma_len;
155 u64 atomic_2nd_op;
156 struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES];
157
158 } nud;
159 struct {
160 u64 ehca_ud_av_ptr;
161 u64 reserved1;
162 u64 reserved2;
163 u64 reserved3;
164 struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES];
165 } ud_avp;
166 struct {
167 struct ehca_ud_av ud_av;
168 struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES -
169 2];
170 } ud_av;
171 struct {
172 u64 reserved0;
173 u64 reserved1;
174 u64 reserved2;
175 u64 reserved3;
176 struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES];
177 } all_rcv;
178
179 struct {
180 u64 reserved;
181 u32 rkey;
182 u32 old_rkey;
183 u64 reserved1;
184 u64 reserved2;
185 u64 virtual_address;
186 u32 reserved3;
187 u32 length;
188 u32 reserved4;
189 u16 reserved5;
190 u8 reserved6;
191 u8 lr_ctl;
192 u32 lkey;
193 u32 reserved7;
194 u64 reserved8;
195 u64 reserved9;
196 u64 reserved10;
197 u64 reserved11;
198 } bind;
199 struct {
200 u64 reserved12;
201 u64 reserved13;
202 u32 size;
203 u32 start;
204 } inline_data;
205 } u;
206
207};
208
209#define WC_SEND_RECEIVE EHCA_BMASK_IBM(0, 0)
210#define WC_IMM_DATA EHCA_BMASK_IBM(1, 1)
211#define WC_GRH_PRESENT EHCA_BMASK_IBM(2, 2)
212#define WC_SE_BIT EHCA_BMASK_IBM(3, 3)
213#define WC_STATUS_ERROR_BIT 0x80000000
214#define WC_STATUS_REMOTE_ERROR_FLAGS 0x0000F800
215#define WC_STATUS_PURGE_BIT 0x10
216#define WC_SEND_RECEIVE_BIT 0x80
217
218struct ehca_cqe {
219 u64 work_request_id;
220 u8 optype;
221 u8 w_completion_flags;
222 u16 reserved1;
223 u32 nr_bytes_transferred;
224 u32 immediate_data;
225 u32 local_qp_number;
226 u8 freed_resource_count;
227 u8 service_level;
228 u16 wqe_count;
229 u32 qp_token;
230 u32 qkey_ee_token;
231 u32 remote_qp_number;
232 u16 dlid;
233 u16 rlid;
234 u16 reserved2;
235 u16 pkey_index;
236 u32 cqe_timestamp;
237 u32 wqe_timestamp;
238 u8 wqe_timestamp_valid;
239 u8 reserved3;
240 u8 reserved4;
241 u8 cqe_flags;
242 u32 status;
243};
244
245struct ehca_eqe {
246 u64 entry;
247};
248
249struct ehca_mrte {
250 u64 starting_va;
251 u64 length; /* length of memory region in bytes*/
252 u32 pd;
253 u8 key_instance;
254 u8 pagesize;
255 u8 mr_control;
256 u8 local_remote_access_ctrl;
257 u8 reserved[0x20 - 0x18];
258 u64 at_pointer[4];
259};
260#endif /*_EHCA_QES_H_*/
diff --git a/drivers/staging/rdma/ehca/ehca_qp.c b/drivers/staging/rdma/ehca/ehca_qp.c
deleted file mode 100644
index 896c01f810f6..000000000000
--- a/drivers/staging/rdma/ehca/ehca_qp.c
+++ /dev/null
@@ -1,2256 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * QP functions
5 *
6 * Authors: Joachim Fenkes <fenkes@de.ibm.com>
7 * Stefan Roscher <stefan.roscher@de.ibm.com>
8 * Waleri Fomin <fomin@de.ibm.com>
9 * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
10 * Reinhard Ernst <rernst@de.ibm.com>
11 * Heiko J Schick <schickhj@de.ibm.com>
12 *
13 * Copyright (c) 2005 IBM Corporation
14 *
15 * All rights reserved.
16 *
17 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
18 * BSD.
19 *
20 * OpenIB BSD License
21 *
22 * Redistribution and use in source and binary forms, with or without
23 * modification, are permitted provided that the following conditions are met:
24 *
25 * Redistributions of source code must retain the above copyright notice, this
26 * list of conditions and the following disclaimer.
27 *
28 * Redistributions in binary form must reproduce the above copyright notice,
29 * this list of conditions and the following disclaimer in the documentation
30 * and/or other materials
31 * provided with the distribution.
32 *
33 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
34 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
35 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
36 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
37 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
38 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
39 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
40 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
41 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
42 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
43 * POSSIBILITY OF SUCH DAMAGE.
44 */
45
46#include <linux/slab.h>
47
48#include "ehca_classes.h"
49#include "ehca_tools.h"
50#include "ehca_qes.h"
51#include "ehca_iverbs.h"
52#include "hcp_if.h"
53#include "hipz_fns.h"
54
55static struct kmem_cache *qp_cache;
56
57/*
58 * attributes not supported by query qp
59 */
60#define QP_ATTR_QUERY_NOT_SUPPORTED (IB_QP_ACCESS_FLAGS | \
61 IB_QP_EN_SQD_ASYNC_NOTIFY)
62
63/*
64 * ehca (internal) qp state values
65 */
66enum ehca_qp_state {
67 EHCA_QPS_RESET = 1,
68 EHCA_QPS_INIT = 2,
69 EHCA_QPS_RTR = 3,
70 EHCA_QPS_RTS = 5,
71 EHCA_QPS_SQD = 6,
72 EHCA_QPS_SQE = 8,
73 EHCA_QPS_ERR = 128
74};
75
76/*
77 * qp state transitions as defined by IB Arch Rel 1.1 page 431
78 */
79enum ib_qp_statetrans {
80 IB_QPST_ANY2RESET,
81 IB_QPST_ANY2ERR,
82 IB_QPST_RESET2INIT,
83 IB_QPST_INIT2RTR,
84 IB_QPST_INIT2INIT,
85 IB_QPST_RTR2RTS,
86 IB_QPST_RTS2SQD,
87 IB_QPST_RTS2RTS,
88 IB_QPST_SQD2RTS,
89 IB_QPST_SQE2RTS,
90 IB_QPST_SQD2SQD,
91 IB_QPST_MAX /* nr of transitions, this must be last!!! */
92};
93
94/*
95 * ib2ehca_qp_state maps IB to ehca qp_state
96 * returns ehca qp state corresponding to given ib qp state
97 */
98static inline enum ehca_qp_state ib2ehca_qp_state(enum ib_qp_state ib_qp_state)
99{
100 switch (ib_qp_state) {
101 case IB_QPS_RESET:
102 return EHCA_QPS_RESET;
103 case IB_QPS_INIT:
104 return EHCA_QPS_INIT;
105 case IB_QPS_RTR:
106 return EHCA_QPS_RTR;
107 case IB_QPS_RTS:
108 return EHCA_QPS_RTS;
109 case IB_QPS_SQD:
110 return EHCA_QPS_SQD;
111 case IB_QPS_SQE:
112 return EHCA_QPS_SQE;
113 case IB_QPS_ERR:
114 return EHCA_QPS_ERR;
115 default:
116 ehca_gen_err("invalid ib_qp_state=%x", ib_qp_state);
117 return -EINVAL;
118 }
119}
120
121/*
122 * ehca2ib_qp_state maps ehca to IB qp_state
123 * returns ib qp state corresponding to given ehca qp state
124 */
125static inline enum ib_qp_state ehca2ib_qp_state(enum ehca_qp_state
126 ehca_qp_state)
127{
128 switch (ehca_qp_state) {
129 case EHCA_QPS_RESET:
130 return IB_QPS_RESET;
131 case EHCA_QPS_INIT:
132 return IB_QPS_INIT;
133 case EHCA_QPS_RTR:
134 return IB_QPS_RTR;
135 case EHCA_QPS_RTS:
136 return IB_QPS_RTS;
137 case EHCA_QPS_SQD:
138 return IB_QPS_SQD;
139 case EHCA_QPS_SQE:
140 return IB_QPS_SQE;
141 case EHCA_QPS_ERR:
142 return IB_QPS_ERR;
143 default:
144 ehca_gen_err("invalid ehca_qp_state=%x", ehca_qp_state);
145 return -EINVAL;
146 }
147}
148
149/*
150 * ehca_qp_type used as index for req_attr and opt_attr of
151 * struct ehca_modqp_statetrans
152 */
153enum ehca_qp_type {
154 QPT_RC = 0,
155 QPT_UC = 1,
156 QPT_UD = 2,
157 QPT_SQP = 3,
158 QPT_MAX
159};
160
161/*
162 * ib2ehcaqptype maps IB to ehca qp_type
163 * returns ehca qp type corresponding to ib qp type
164 */
165static inline enum ehca_qp_type ib2ehcaqptype(enum ib_qp_type ibqptype)
166{
167 switch (ibqptype) {
168 case IB_QPT_SMI:
169 case IB_QPT_GSI:
170 return QPT_SQP;
171 case IB_QPT_RC:
172 return QPT_RC;
173 case IB_QPT_UC:
174 return QPT_UC;
175 case IB_QPT_UD:
176 return QPT_UD;
177 default:
178 ehca_gen_err("Invalid ibqptype=%x", ibqptype);
179 return -EINVAL;
180 }
181}
182
183static inline enum ib_qp_statetrans get_modqp_statetrans(int ib_fromstate,
184 int ib_tostate)
185{
186 int index = -EINVAL;
187 switch (ib_tostate) {
188 case IB_QPS_RESET:
189 index = IB_QPST_ANY2RESET;
190 break;
191 case IB_QPS_INIT:
192 switch (ib_fromstate) {
193 case IB_QPS_RESET:
194 index = IB_QPST_RESET2INIT;
195 break;
196 case IB_QPS_INIT:
197 index = IB_QPST_INIT2INIT;
198 break;
199 }
200 break;
201 case IB_QPS_RTR:
202 if (ib_fromstate == IB_QPS_INIT)
203 index = IB_QPST_INIT2RTR;
204 break;
205 case IB_QPS_RTS:
206 switch (ib_fromstate) {
207 case IB_QPS_RTR:
208 index = IB_QPST_RTR2RTS;
209 break;
210 case IB_QPS_RTS:
211 index = IB_QPST_RTS2RTS;
212 break;
213 case IB_QPS_SQD:
214 index = IB_QPST_SQD2RTS;
215 break;
216 case IB_QPS_SQE:
217 index = IB_QPST_SQE2RTS;
218 break;
219 }
220 break;
221 case IB_QPS_SQD:
222 if (ib_fromstate == IB_QPS_RTS)
223 index = IB_QPST_RTS2SQD;
224 break;
225 case IB_QPS_SQE:
226 break;
227 case IB_QPS_ERR:
228 index = IB_QPST_ANY2ERR;
229 break;
230 default:
231 break;
232 }
233 return index;
234}
235
236/*
237 * ibqptype2servicetype returns the hcp service type corresponding to a
238 * given ib qp type; used by create_qp()
239 */
240static inline int ibqptype2servicetype(enum ib_qp_type ibqptype)
241{
242 switch (ibqptype) {
243 case IB_QPT_SMI:
244 case IB_QPT_GSI:
245 return ST_UD;
246 case IB_QPT_RC:
247 return ST_RC;
248 case IB_QPT_UC:
249 return ST_UC;
250 case IB_QPT_UD:
251 return ST_UD;
252 case IB_QPT_RAW_IPV6:
253 return -EINVAL;
254 case IB_QPT_RAW_ETHERTYPE:
255 return -EINVAL;
256 default:
257 ehca_gen_err("Invalid ibqptype=%x", ibqptype);
258 return -EINVAL;
259 }
260}
261
262/*
263 * init userspace queue info from ipz_queue data
264 */
265static inline void queue2resp(struct ipzu_queue_resp *resp,
266 struct ipz_queue *queue)
267{
268 resp->qe_size = queue->qe_size;
269 resp->act_nr_of_sg = queue->act_nr_of_sg;
270 resp->queue_length = queue->queue_length;
271 resp->pagesize = queue->pagesize;
272 resp->toggle_state = queue->toggle_state;
273 resp->offset = queue->offset;
274}
275
276/*
277 * init_qp_queue initializes/constructs r/squeue and registers queue pages.
278 */
279static inline int init_qp_queue(struct ehca_shca *shca,
280 struct ehca_pd *pd,
281 struct ehca_qp *my_qp,
282 struct ipz_queue *queue,
283 int q_type,
284 u64 expected_hret,
285 struct ehca_alloc_queue_parms *parms,
286 int wqe_size)
287{
288 int ret, cnt, ipz_rc, nr_q_pages;
289 void *vpage;
290 u64 rpage, h_ret;
291 struct ib_device *ib_dev = &shca->ib_device;
292 struct ipz_adapter_handle ipz_hca_handle = shca->ipz_hca_handle;
293
294 if (!parms->queue_size)
295 return 0;
296
297 if (parms->is_small) {
298 nr_q_pages = 1;
299 ipz_rc = ipz_queue_ctor(pd, queue, nr_q_pages,
300 128 << parms->page_size,
301 wqe_size, parms->act_nr_sges, 1);
302 } else {
303 nr_q_pages = parms->queue_size;
304 ipz_rc = ipz_queue_ctor(pd, queue, nr_q_pages,
305 EHCA_PAGESIZE, wqe_size,
306 parms->act_nr_sges, 0);
307 }
308
309 if (!ipz_rc) {
310 ehca_err(ib_dev, "Cannot allocate page for queue. ipz_rc=%i",
311 ipz_rc);
312 return -EBUSY;
313 }
314
315 /* register queue pages */
316 for (cnt = 0; cnt < nr_q_pages; cnt++) {
317 vpage = ipz_qpageit_get_inc(queue);
318 if (!vpage) {
319 ehca_err(ib_dev, "ipz_qpageit_get_inc() "
320 "failed p_vpage= %p", vpage);
321 ret = -EINVAL;
322 goto init_qp_queue1;
323 }
324 rpage = __pa(vpage);
325
326 h_ret = hipz_h_register_rpage_qp(ipz_hca_handle,
327 my_qp->ipz_qp_handle,
328 NULL, 0, q_type,
329 rpage, parms->is_small ? 0 : 1,
330 my_qp->galpas.kernel);
331 if (cnt == (nr_q_pages - 1)) { /* last page! */
332 if (h_ret != expected_hret) {
333 ehca_err(ib_dev, "hipz_qp_register_rpage() "
334 "h_ret=%lli", h_ret);
335 ret = ehca2ib_return_code(h_ret);
336 goto init_qp_queue1;
337 }
338 vpage = ipz_qpageit_get_inc(&my_qp->ipz_rqueue);
339 if (vpage) {
340 ehca_err(ib_dev, "ipz_qpageit_get_inc() "
341 "should not succeed vpage=%p", vpage);
342 ret = -EINVAL;
343 goto init_qp_queue1;
344 }
345 } else {
346 if (h_ret != H_PAGE_REGISTERED) {
347 ehca_err(ib_dev, "hipz_qp_register_rpage() "
348 "h_ret=%lli", h_ret);
349 ret = ehca2ib_return_code(h_ret);
350 goto init_qp_queue1;
351 }
352 }
353 }
354
355 ipz_qeit_reset(queue);
356
357 return 0;
358
359init_qp_queue1:
360 ipz_queue_dtor(pd, queue);
361 return ret;
362}
363
364static inline int ehca_calc_wqe_size(int act_nr_sge, int is_llqp)
365{
366 if (is_llqp)
367 return 128 << act_nr_sge;
368 else
369 return offsetof(struct ehca_wqe,
370 u.nud.sg_list[act_nr_sge]);
371}
372
373static void ehca_determine_small_queue(struct ehca_alloc_queue_parms *queue,
374 int req_nr_sge, int is_llqp)
375{
376 u32 wqe_size, q_size;
377 int act_nr_sge = req_nr_sge;
378
379 if (!is_llqp)
380 /* round up #SGEs so WQE size is a power of 2 */
381 for (act_nr_sge = 4; act_nr_sge <= 252;
382 act_nr_sge = 4 + 2 * act_nr_sge)
383 if (act_nr_sge >= req_nr_sge)
384 break;
385
386 wqe_size = ehca_calc_wqe_size(act_nr_sge, is_llqp);
387 q_size = wqe_size * (queue->max_wr + 1);
388
389 if (q_size <= 512)
390 queue->page_size = 2;
391 else if (q_size <= 1024)
392 queue->page_size = 3;
393 else
394 queue->page_size = 0;
395
396 queue->is_small = (queue->page_size != 0);
397}
398
399/* needs to be called with cq->spinlock held */
400void ehca_add_to_err_list(struct ehca_qp *qp, int on_sq)
401{
402 struct list_head *list, *node;
403
404 /* TODO: support low latency QPs */
405 if (qp->ext_type == EQPT_LLQP)
406 return;
407
408 if (on_sq) {
409 list = &qp->send_cq->sqp_err_list;
410 node = &qp->sq_err_node;
411 } else {
412 list = &qp->recv_cq->rqp_err_list;
413 node = &qp->rq_err_node;
414 }
415
416 if (list_empty(node))
417 list_add_tail(node, list);
418
419 return;
420}
421
422static void del_from_err_list(struct ehca_cq *cq, struct list_head *node)
423{
424 unsigned long flags;
425
426 spin_lock_irqsave(&cq->spinlock, flags);
427
428 if (!list_empty(node))
429 list_del_init(node);
430
431 spin_unlock_irqrestore(&cq->spinlock, flags);
432}
433
434static void reset_queue_map(struct ehca_queue_map *qmap)
435{
436 int i;
437
438 qmap->tail = qmap->entries - 1;
439 qmap->left_to_poll = 0;
440 qmap->next_wqe_idx = 0;
441 for (i = 0; i < qmap->entries; i++) {
442 qmap->map[i].reported = 1;
443 qmap->map[i].cqe_req = 0;
444 }
445}
446
447/*
448 * Create an ib_qp struct that is either a QP or an SRQ, depending on
449 * the value of the is_srq parameter. If init_attr and srq_init_attr share
450 * fields, the field out of init_attr is used.
451 */
452static struct ehca_qp *internal_create_qp(
453 struct ib_pd *pd,
454 struct ib_qp_init_attr *init_attr,
455 struct ib_srq_init_attr *srq_init_attr,
456 struct ib_udata *udata, int is_srq)
457{
458 struct ehca_qp *my_qp, *my_srq = NULL;
459 struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd);
460 struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
461 ib_device);
462 struct ib_ucontext *context = NULL;
463 u64 h_ret;
464 int is_llqp = 0, has_srq = 0, is_user = 0;
465 int qp_type, max_send_sge, max_recv_sge, ret;
466
467 /* h_call's out parameters */
468 struct ehca_alloc_qp_parms parms;
469 u32 swqe_size = 0, rwqe_size = 0, ib_qp_num;
470 unsigned long flags;
471
472 if (!atomic_add_unless(&shca->num_qps, 1, shca->max_num_qps)) {
473 ehca_err(pd->device, "Unable to create QP, max number of %i "
474 "QPs reached.", shca->max_num_qps);
475 ehca_err(pd->device, "To increase the maximum number of QPs "
476 "use the number_of_qps module parameter.\n");
477 return ERR_PTR(-ENOSPC);
478 }
479
480 if (init_attr->create_flags) {
481 atomic_dec(&shca->num_qps);
482 return ERR_PTR(-EINVAL);
483 }
484
485 memset(&parms, 0, sizeof(parms));
486 qp_type = init_attr->qp_type;
487
488 if (init_attr->sq_sig_type != IB_SIGNAL_REQ_WR &&
489 init_attr->sq_sig_type != IB_SIGNAL_ALL_WR) {
490 ehca_err(pd->device, "init_attr->sq_sig_type=%x not allowed",
491 init_attr->sq_sig_type);
492 atomic_dec(&shca->num_qps);
493 return ERR_PTR(-EINVAL);
494 }
495
496 /* save LLQP info */
497 if (qp_type & 0x80) {
498 is_llqp = 1;
499 parms.ext_type = EQPT_LLQP;
500 parms.ll_comp_flags = qp_type & LLQP_COMP_MASK;
501 }
502 qp_type &= 0x1F;
503 init_attr->qp_type &= 0x1F;
504
505 /* handle SRQ base QPs */
506 if (init_attr->srq) {
507 my_srq = container_of(init_attr->srq, struct ehca_qp, ib_srq);
508
509 if (qp_type == IB_QPT_UC) {
510 ehca_err(pd->device, "UC with SRQ not supported");
511 atomic_dec(&shca->num_qps);
512 return ERR_PTR(-EINVAL);
513 }
514
515 has_srq = 1;
516 parms.ext_type = EQPT_SRQBASE;
517 parms.srq_qpn = my_srq->real_qp_num;
518 }
519
520 if (is_llqp && has_srq) {
521 ehca_err(pd->device, "LLQPs can't have an SRQ");
522 atomic_dec(&shca->num_qps);
523 return ERR_PTR(-EINVAL);
524 }
525
526 /* handle SRQs */
527 if (is_srq) {
528 parms.ext_type = EQPT_SRQ;
529 parms.srq_limit = srq_init_attr->attr.srq_limit;
530 if (init_attr->cap.max_recv_sge > 3) {
531 ehca_err(pd->device, "no more than three SGEs "
532 "supported for SRQ pd=%p max_sge=%x",
533 pd, init_attr->cap.max_recv_sge);
534 atomic_dec(&shca->num_qps);
535 return ERR_PTR(-EINVAL);
536 }
537 }
538
539 /* check QP type */
540 if (qp_type != IB_QPT_UD &&
541 qp_type != IB_QPT_UC &&
542 qp_type != IB_QPT_RC &&
543 qp_type != IB_QPT_SMI &&
544 qp_type != IB_QPT_GSI) {
545 ehca_err(pd->device, "wrong QP Type=%x", qp_type);
546 atomic_dec(&shca->num_qps);
547 return ERR_PTR(-EINVAL);
548 }
549
550 if (is_llqp) {
551 switch (qp_type) {
552 case IB_QPT_RC:
553 if ((init_attr->cap.max_send_wr > 255) ||
554 (init_attr->cap.max_recv_wr > 255)) {
555 ehca_err(pd->device,
556 "Invalid Number of max_sq_wr=%x "
557 "or max_rq_wr=%x for RC LLQP",
558 init_attr->cap.max_send_wr,
559 init_attr->cap.max_recv_wr);
560 atomic_dec(&shca->num_qps);
561 return ERR_PTR(-EINVAL);
562 }
563 break;
564 case IB_QPT_UD:
565 if (!EHCA_BMASK_GET(HCA_CAP_UD_LL_QP, shca->hca_cap)) {
566 ehca_err(pd->device, "UD LLQP not supported "
567 "by this adapter");
568 atomic_dec(&shca->num_qps);
569 return ERR_PTR(-ENOSYS);
570 }
571 if (!(init_attr->cap.max_send_sge <= 5
572 && init_attr->cap.max_send_sge >= 1
573 && init_attr->cap.max_recv_sge <= 5
574 && init_attr->cap.max_recv_sge >= 1)) {
575 ehca_err(pd->device,
576 "Invalid Number of max_send_sge=%x "
577 "or max_recv_sge=%x for UD LLQP",
578 init_attr->cap.max_send_sge,
579 init_attr->cap.max_recv_sge);
580 atomic_dec(&shca->num_qps);
581 return ERR_PTR(-EINVAL);
582 } else if (init_attr->cap.max_send_wr > 255) {
583 ehca_err(pd->device,
584 "Invalid Number of "
585 "max_send_wr=%x for UD QP_TYPE=%x",
586 init_attr->cap.max_send_wr, qp_type);
587 atomic_dec(&shca->num_qps);
588 return ERR_PTR(-EINVAL);
589 }
590 break;
591 default:
592 ehca_err(pd->device, "unsupported LL QP Type=%x",
593 qp_type);
594 atomic_dec(&shca->num_qps);
595 return ERR_PTR(-EINVAL);
596 }
597 } else {
598 int max_sge = (qp_type == IB_QPT_UD || qp_type == IB_QPT_SMI
599 || qp_type == IB_QPT_GSI) ? 250 : 252;
600
601 if (init_attr->cap.max_send_sge > max_sge
602 || init_attr->cap.max_recv_sge > max_sge) {
603 ehca_err(pd->device, "Invalid number of SGEs requested "
604 "send_sge=%x recv_sge=%x max_sge=%x",
605 init_attr->cap.max_send_sge,
606 init_attr->cap.max_recv_sge, max_sge);
607 atomic_dec(&shca->num_qps);
608 return ERR_PTR(-EINVAL);
609 }
610 }
611
612 my_qp = kmem_cache_zalloc(qp_cache, GFP_KERNEL);
613 if (!my_qp) {
614 ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd);
615 atomic_dec(&shca->num_qps);
616 return ERR_PTR(-ENOMEM);
617 }
618
619 if (pd->uobject && udata) {
620 is_user = 1;
621 context = pd->uobject->context;
622 }
623
624 atomic_set(&my_qp->nr_events, 0);
625 init_waitqueue_head(&my_qp->wait_completion);
626 spin_lock_init(&my_qp->spinlock_s);
627 spin_lock_init(&my_qp->spinlock_r);
628 my_qp->qp_type = qp_type;
629 my_qp->ext_type = parms.ext_type;
630 my_qp->state = IB_QPS_RESET;
631
632 if (init_attr->recv_cq)
633 my_qp->recv_cq =
634 container_of(init_attr->recv_cq, struct ehca_cq, ib_cq);
635 if (init_attr->send_cq)
636 my_qp->send_cq =
637 container_of(init_attr->send_cq, struct ehca_cq, ib_cq);
638
639 idr_preload(GFP_KERNEL);
640 write_lock_irqsave(&ehca_qp_idr_lock, flags);
641
642 ret = idr_alloc(&ehca_qp_idr, my_qp, 0, 0x2000000, GFP_NOWAIT);
643 if (ret >= 0)
644 my_qp->token = ret;
645
646 write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
647 idr_preload_end();
648 if (ret < 0) {
649 if (ret == -ENOSPC) {
650 ret = -EINVAL;
651 ehca_err(pd->device, "Invalid number of qp");
652 } else {
653 ret = -ENOMEM;
654 ehca_err(pd->device, "Can't allocate new idr entry.");
655 }
656 goto create_qp_exit0;
657 }
658
659 if (has_srq)
660 parms.srq_token = my_qp->token;
661
662 parms.servicetype = ibqptype2servicetype(qp_type);
663 if (parms.servicetype < 0) {
664 ret = -EINVAL;
665 ehca_err(pd->device, "Invalid qp_type=%x", qp_type);
666 goto create_qp_exit1;
667 }
668
669 /* Always signal by WQE so we can hide circ. WQEs */
670 parms.sigtype = HCALL_SIGT_BY_WQE;
671
672 /* UD_AV CIRCUMVENTION */
673 max_send_sge = init_attr->cap.max_send_sge;
674 max_recv_sge = init_attr->cap.max_recv_sge;
675 if (parms.servicetype == ST_UD && !is_llqp) {
676 max_send_sge += 2;
677 max_recv_sge += 2;
678 }
679
680 parms.token = my_qp->token;
681 parms.eq_handle = shca->eq.ipz_eq_handle;
682 parms.pd = my_pd->fw_pd;
683 if (my_qp->send_cq)
684 parms.send_cq_handle = my_qp->send_cq->ipz_cq_handle;
685 if (my_qp->recv_cq)
686 parms.recv_cq_handle = my_qp->recv_cq->ipz_cq_handle;
687
688 parms.squeue.max_wr = init_attr->cap.max_send_wr;
689 parms.rqueue.max_wr = init_attr->cap.max_recv_wr;
690 parms.squeue.max_sge = max_send_sge;
691 parms.rqueue.max_sge = max_recv_sge;
692
693 /* RC QPs need one more SWQE for unsolicited ack circumvention */
694 if (qp_type == IB_QPT_RC)
695 parms.squeue.max_wr++;
696
697 if (EHCA_BMASK_GET(HCA_CAP_MINI_QP, shca->hca_cap)) {
698 if (HAS_SQ(my_qp))
699 ehca_determine_small_queue(
700 &parms.squeue, max_send_sge, is_llqp);
701 if (HAS_RQ(my_qp))
702 ehca_determine_small_queue(
703 &parms.rqueue, max_recv_sge, is_llqp);
704 parms.qp_storage =
705 (parms.squeue.is_small || parms.rqueue.is_small);
706 }
707
708 h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms, is_user);
709 if (h_ret != H_SUCCESS) {
710 ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%lli",
711 h_ret);
712 ret = ehca2ib_return_code(h_ret);
713 goto create_qp_exit1;
714 }
715
716 ib_qp_num = my_qp->real_qp_num = parms.real_qp_num;
717 my_qp->ipz_qp_handle = parms.qp_handle;
718 my_qp->galpas = parms.galpas;
719
720 swqe_size = ehca_calc_wqe_size(parms.squeue.act_nr_sges, is_llqp);
721 rwqe_size = ehca_calc_wqe_size(parms.rqueue.act_nr_sges, is_llqp);
722
723 switch (qp_type) {
724 case IB_QPT_RC:
725 if (is_llqp) {
726 parms.squeue.act_nr_sges = 1;
727 parms.rqueue.act_nr_sges = 1;
728 }
729 /* hide the extra WQE */
730 parms.squeue.act_nr_wqes--;
731 break;
732 case IB_QPT_UD:
733 case IB_QPT_GSI:
734 case IB_QPT_SMI:
735 /* UD circumvention */
736 if (is_llqp) {
737 parms.squeue.act_nr_sges = 1;
738 parms.rqueue.act_nr_sges = 1;
739 } else {
740 parms.squeue.act_nr_sges -= 2;
741 parms.rqueue.act_nr_sges -= 2;
742 }
743
744 if (IB_QPT_GSI == qp_type || IB_QPT_SMI == qp_type) {
745 parms.squeue.act_nr_wqes = init_attr->cap.max_send_wr;
746 parms.rqueue.act_nr_wqes = init_attr->cap.max_recv_wr;
747 parms.squeue.act_nr_sges = init_attr->cap.max_send_sge;
748 parms.rqueue.act_nr_sges = init_attr->cap.max_recv_sge;
749 ib_qp_num = (qp_type == IB_QPT_SMI) ? 0 : 1;
750 }
751
752 break;
753
754 default:
755 break;
756 }
757
758 /* initialize r/squeue and register queue pages */
759 if (HAS_SQ(my_qp)) {
760 ret = init_qp_queue(
761 shca, my_pd, my_qp, &my_qp->ipz_squeue, 0,
762 HAS_RQ(my_qp) ? H_PAGE_REGISTERED : H_SUCCESS,
763 &parms.squeue, swqe_size);
764 if (ret) {
765 ehca_err(pd->device, "Couldn't initialize squeue "
766 "and pages ret=%i", ret);
767 goto create_qp_exit2;
768 }
769
770 if (!is_user) {
771 my_qp->sq_map.entries = my_qp->ipz_squeue.queue_length /
772 my_qp->ipz_squeue.qe_size;
773 my_qp->sq_map.map = vmalloc(my_qp->sq_map.entries *
774 sizeof(struct ehca_qmap_entry));
775 if (!my_qp->sq_map.map) {
776 ehca_err(pd->device, "Couldn't allocate squeue "
777 "map ret=%i", ret);
778 goto create_qp_exit3;
779 }
780 INIT_LIST_HEAD(&my_qp->sq_err_node);
781 /* to avoid the generation of bogus flush CQEs */
782 reset_queue_map(&my_qp->sq_map);
783 }
784 }
785
786 if (HAS_RQ(my_qp)) {
787 ret = init_qp_queue(
788 shca, my_pd, my_qp, &my_qp->ipz_rqueue, 1,
789 H_SUCCESS, &parms.rqueue, rwqe_size);
790 if (ret) {
791 ehca_err(pd->device, "Couldn't initialize rqueue "
792 "and pages ret=%i", ret);
793 goto create_qp_exit4;
794 }
795 if (!is_user) {
796 my_qp->rq_map.entries = my_qp->ipz_rqueue.queue_length /
797 my_qp->ipz_rqueue.qe_size;
798 my_qp->rq_map.map = vmalloc(my_qp->rq_map.entries *
799 sizeof(struct ehca_qmap_entry));
800 if (!my_qp->rq_map.map) {
801 ehca_err(pd->device, "Couldn't allocate rqueue "
802 "map ret=%i", ret);
803 goto create_qp_exit5;
804 }
805 INIT_LIST_HEAD(&my_qp->rq_err_node);
806 /* to avoid the generation of bogus flush CQEs */
807 reset_queue_map(&my_qp->rq_map);
808 }
809 } else if (init_attr->srq && !is_user) {
810 /* this is a base QP, use the queue map of the SRQ */
811 my_qp->rq_map = my_srq->rq_map;
812 INIT_LIST_HEAD(&my_qp->rq_err_node);
813
814 my_qp->ipz_rqueue = my_srq->ipz_rqueue;
815 }
816
817 if (is_srq) {
818 my_qp->ib_srq.pd = &my_pd->ib_pd;
819 my_qp->ib_srq.device = my_pd->ib_pd.device;
820
821 my_qp->ib_srq.srq_context = init_attr->qp_context;
822 my_qp->ib_srq.event_handler = init_attr->event_handler;
823 } else {
824 my_qp->ib_qp.qp_num = ib_qp_num;
825 my_qp->ib_qp.pd = &my_pd->ib_pd;
826 my_qp->ib_qp.device = my_pd->ib_pd.device;
827
828 my_qp->ib_qp.recv_cq = init_attr->recv_cq;
829 my_qp->ib_qp.send_cq = init_attr->send_cq;
830
831 my_qp->ib_qp.qp_type = qp_type;
832 my_qp->ib_qp.srq = init_attr->srq;
833
834 my_qp->ib_qp.qp_context = init_attr->qp_context;
835 my_qp->ib_qp.event_handler = init_attr->event_handler;
836 }
837
838 init_attr->cap.max_inline_data = 0; /* not supported yet */
839 init_attr->cap.max_recv_sge = parms.rqueue.act_nr_sges;
840 init_attr->cap.max_recv_wr = parms.rqueue.act_nr_wqes;
841 init_attr->cap.max_send_sge = parms.squeue.act_nr_sges;
842 init_attr->cap.max_send_wr = parms.squeue.act_nr_wqes;
843 my_qp->init_attr = *init_attr;
844
845 if (qp_type == IB_QPT_SMI || qp_type == IB_QPT_GSI) {
846 shca->sport[init_attr->port_num - 1].ibqp_sqp[qp_type] =
847 &my_qp->ib_qp;
848 if (ehca_nr_ports < 0) {
849 /* alloc array to cache subsequent modify qp parms
850 * for autodetect mode
851 */
852 my_qp->mod_qp_parm =
853 kzalloc(EHCA_MOD_QP_PARM_MAX *
854 sizeof(*my_qp->mod_qp_parm),
855 GFP_KERNEL);
856 if (!my_qp->mod_qp_parm) {
857 ehca_err(pd->device,
858 "Could not alloc mod_qp_parm");
859 goto create_qp_exit5;
860 }
861 }
862 }
863
864 /* NOTE: define_apq0() not supported yet */
865 if (qp_type == IB_QPT_GSI) {
866 h_ret = ehca_define_sqp(shca, my_qp, init_attr);
867 if (h_ret != H_SUCCESS) {
868 kfree(my_qp->mod_qp_parm);
869 my_qp->mod_qp_parm = NULL;
870 /* the QP pointer is no longer valid */
871 shca->sport[init_attr->port_num - 1].ibqp_sqp[qp_type] =
872 NULL;
873 ret = ehca2ib_return_code(h_ret);
874 goto create_qp_exit6;
875 }
876 }
877
878 if (my_qp->send_cq) {
879 ret = ehca_cq_assign_qp(my_qp->send_cq, my_qp);
880 if (ret) {
881 ehca_err(pd->device,
882 "Couldn't assign qp to send_cq ret=%i", ret);
883 goto create_qp_exit7;
884 }
885 }
886
887 /* copy queues, galpa data to user space */
888 if (context && udata) {
889 struct ehca_create_qp_resp resp;
890 memset(&resp, 0, sizeof(resp));
891
892 resp.qp_num = my_qp->real_qp_num;
893 resp.token = my_qp->token;
894 resp.qp_type = my_qp->qp_type;
895 resp.ext_type = my_qp->ext_type;
896 resp.qkey = my_qp->qkey;
897 resp.real_qp_num = my_qp->real_qp_num;
898
899 if (HAS_SQ(my_qp))
900 queue2resp(&resp.ipz_squeue, &my_qp->ipz_squeue);
901 if (HAS_RQ(my_qp))
902 queue2resp(&resp.ipz_rqueue, &my_qp->ipz_rqueue);
903 resp.fw_handle_ofs = (u32)
904 (my_qp->galpas.user.fw_handle & (PAGE_SIZE - 1));
905
906 if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
907 ehca_err(pd->device, "Copy to udata failed");
908 ret = -EINVAL;
909 goto create_qp_exit8;
910 }
911 }
912
913 return my_qp;
914
915create_qp_exit8:
916 ehca_cq_unassign_qp(my_qp->send_cq, my_qp->real_qp_num);
917
918create_qp_exit7:
919 kfree(my_qp->mod_qp_parm);
920
921create_qp_exit6:
922 if (HAS_RQ(my_qp) && !is_user)
923 vfree(my_qp->rq_map.map);
924
925create_qp_exit5:
926 if (HAS_RQ(my_qp))
927 ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
928
929create_qp_exit4:
930 if (HAS_SQ(my_qp) && !is_user)
931 vfree(my_qp->sq_map.map);
932
933create_qp_exit3:
934 if (HAS_SQ(my_qp))
935 ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);
936
937create_qp_exit2:
938 hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
939
940create_qp_exit1:
941 write_lock_irqsave(&ehca_qp_idr_lock, flags);
942 idr_remove(&ehca_qp_idr, my_qp->token);
943 write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
944
945create_qp_exit0:
946 kmem_cache_free(qp_cache, my_qp);
947 atomic_dec(&shca->num_qps);
948 return ERR_PTR(ret);
949}
950
951struct ib_qp *ehca_create_qp(struct ib_pd *pd,
952 struct ib_qp_init_attr *qp_init_attr,
953 struct ib_udata *udata)
954{
955 struct ehca_qp *ret;
956
957 ret = internal_create_qp(pd, qp_init_attr, NULL, udata, 0);
958 return IS_ERR(ret) ? (struct ib_qp *)ret : &ret->ib_qp;
959}
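/*
 * Usage sketch (illustrative, not part of the original driver): consumers
 * normally reach ehca_create_qp() through the IB core rather than calling it
 * directly; all values below are made up.
 *
 *	struct ib_qp_init_attr ia = {
 *		.send_cq     = scq,	(CQs created beforehand)
 *		.recv_cq     = rcq,
 *		.cap         = { .max_send_wr = 64, .max_recv_wr = 64,
 *				 .max_send_sge = 1, .max_recv_sge = 1 },
 *		.sq_sig_type = IB_SIGNAL_REQ_WR,
 *		.qp_type     = IB_QPT_RC,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &ia);
 *
 * ib_create_qp() dispatches to this function via the device's create_qp verb.
 */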
960
961static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
962 struct ib_uobject *uobject);
963
964struct ib_srq *ehca_create_srq(struct ib_pd *pd,
965 struct ib_srq_init_attr *srq_init_attr,
966 struct ib_udata *udata)
967{
968 struct ib_qp_init_attr qp_init_attr;
969 struct ehca_qp *my_qp;
970 struct ib_srq *ret;
971 struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
972 ib_device);
973 struct hcp_modify_qp_control_block *mqpcb;
974 u64 hret, update_mask;
975
976 if (srq_init_attr->srq_type != IB_SRQT_BASIC)
977 return ERR_PTR(-ENOSYS);
978
979 /* For common attributes, internal_create_qp() takes its info
980 * out of qp_init_attr, so copy all common attrs there.
981 */
982 memset(&qp_init_attr, 0, sizeof(qp_init_attr));
983 qp_init_attr.event_handler = srq_init_attr->event_handler;
984 qp_init_attr.qp_context = srq_init_attr->srq_context;
985 qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
986 qp_init_attr.qp_type = IB_QPT_RC;
987 qp_init_attr.cap.max_recv_wr = srq_init_attr->attr.max_wr;
988 qp_init_attr.cap.max_recv_sge = srq_init_attr->attr.max_sge;
989
990 my_qp = internal_create_qp(pd, &qp_init_attr, srq_init_attr, udata, 1);
991 if (IS_ERR(my_qp))
992 return (struct ib_srq *)my_qp;
993
994 /* copy back return values */
995 srq_init_attr->attr.max_wr = qp_init_attr.cap.max_recv_wr;
996 srq_init_attr->attr.max_sge = 3;
997
998 /* drive SRQ into RTR state */
999 mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
1000 if (!mqpcb) {
1001 ehca_err(pd->device, "Could not get zeroed page for mqpcb "
1002 "ehca_qp=%p qp_num=%x ", my_qp, my_qp->real_qp_num);
1003 ret = ERR_PTR(-ENOMEM);
1004 goto create_srq1;
1005 }
1006
1007 mqpcb->qp_state = EHCA_QPS_INIT;
1008 mqpcb->prim_phys_port = 1;
1009 update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
1010 hret = hipz_h_modify_qp(shca->ipz_hca_handle,
1011 my_qp->ipz_qp_handle,
1012 &my_qp->pf,
1013 update_mask,
1014 mqpcb, my_qp->galpas.kernel);
1015 if (hret != H_SUCCESS) {
1016 ehca_err(pd->device, "Could not modify SRQ to INIT "
1017 "ehca_qp=%p qp_num=%x h_ret=%lli",
1018 my_qp, my_qp->real_qp_num, hret);
1019 goto create_srq2;
1020 }
1021
1022 mqpcb->qp_enable = 1;
1023 update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_ENABLE, 1);
1024 hret = hipz_h_modify_qp(shca->ipz_hca_handle,
1025 my_qp->ipz_qp_handle,
1026 &my_qp->pf,
1027 update_mask,
1028 mqpcb, my_qp->galpas.kernel);
1029 if (hret != H_SUCCESS) {
1030 ehca_err(pd->device, "Could not enable SRQ "
1031 "ehca_qp=%p qp_num=%x h_ret=%lli",
1032 my_qp, my_qp->real_qp_num, hret);
1033 goto create_srq2;
1034 }
1035
1036 mqpcb->qp_state = EHCA_QPS_RTR;
1037 update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
1038 hret = hipz_h_modify_qp(shca->ipz_hca_handle,
1039 my_qp->ipz_qp_handle,
1040 &my_qp->pf,
1041 update_mask,
1042 mqpcb, my_qp->galpas.kernel);
1043 if (hret != H_SUCCESS) {
1044 ehca_err(pd->device, "Could not modify SRQ to RTR "
1045 "ehca_qp=%p qp_num=%x h_ret=%lli",
1046 my_qp, my_qp->real_qp_num, hret);
1047 goto create_srq2;
1048 }
1049
1050 ehca_free_fw_ctrlblock(mqpcb);
1051
1052 return &my_qp->ib_srq;
1053
1054create_srq2:
1055 ret = ERR_PTR(ehca2ib_return_code(hret));
1056 ehca_free_fw_ctrlblock(mqpcb);
1057
1058create_srq1:
1059 internal_destroy_qp(pd->device, my_qp, my_qp->ib_srq.uobject);
1060
1061 return ret;
1062}
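/*
 * Usage sketch (illustrative): an SRQ reaches ehca_create_srq() through
 * ib_create_srq(), e.g.
 *
 *	struct ib_srq_init_attr sia = {
 *		.attr     = { .max_wr = 128, .max_sge = 1 },
 *		.srq_type = IB_SRQT_BASIC,
 *	};
 *	struct ib_srq *srq = ib_create_srq(pd, &sia);
 *
 * Internally the SRQ is built as an eHCA QP (is_srq = 1) and then driven
 * INIT -> enabled -> RTR by the three hipz_h_modify_qp() calls above.
 */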
1063
1064/*
1065 * prepare_sqe_rts called by internal_modify_qp() at trans sqe -> rts
1066 * set purge bit of bad wqe and subsequent wqes to avoid reentering sqe
1067 * returns total number of bad wqes in bad_wqe_cnt
1068 */
1069static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
1070 int *bad_wqe_cnt)
1071{
1072 u64 h_ret;
1073 struct ipz_queue *squeue;
1074 void *bad_send_wqe_p, *bad_send_wqe_v;
1075 u64 q_ofs;
1076 struct ehca_wqe *wqe;
1077 int qp_num = my_qp->ib_qp.qp_num;
1078
1079 /* get send wqe pointer */
1080 h_ret = hipz_h_disable_and_get_wqe(shca->ipz_hca_handle,
1081 my_qp->ipz_qp_handle, &my_qp->pf,
1082 &bad_send_wqe_p, NULL, 2);
1083 if (h_ret != H_SUCCESS) {
1084 ehca_err(&shca->ib_device, "hipz_h_disable_and_get_wqe() failed"
1085 " ehca_qp=%p qp_num=%x h_ret=%lli",
1086 my_qp, qp_num, h_ret);
1087 return ehca2ib_return_code(h_ret);
1088 }
1089 bad_send_wqe_p = (void *)((u64)bad_send_wqe_p & (~(1L << 63)));
1090 ehca_dbg(&shca->ib_device, "qp_num=%x bad_send_wqe_p=%p",
1091 qp_num, bad_send_wqe_p);
1092 /* convert wqe pointer to vadr */
1093 bad_send_wqe_v = __va((u64)bad_send_wqe_p);
1094 if (ehca_debug_level >= 2)
1095 ehca_dmp(bad_send_wqe_v, 32, "qp_num=%x bad_wqe", qp_num);
1096 squeue = &my_qp->ipz_squeue;
1097 if (ipz_queue_abs_to_offset(squeue, (u64)bad_send_wqe_p, &q_ofs)) {
1098 ehca_err(&shca->ib_device, "failed to get wqe offset qp_num=%x"
1099 " bad_send_wqe_p=%p", qp_num, bad_send_wqe_p);
1100 return -EFAULT;
1101 }
1102
1103 /* loop sets wqe's purge bit */
1104 wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs);
1105 *bad_wqe_cnt = 0;
1106 while (wqe->optype != 0xff && wqe->wqef != 0xff) {
1107 if (ehca_debug_level >= 2)
1108 ehca_dmp(wqe, 32, "qp_num=%x wqe", qp_num);
1109 wqe->nr_of_data_seg = 0; /* suppress data access */
1110 wqe->wqef = WQEF_PURGE; /* WQE to be purged */
1111 q_ofs = ipz_queue_advance_offset(squeue, q_ofs);
1112 wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs);
1113 *bad_wqe_cnt = (*bad_wqe_cnt)+1;
1114 }
1115 /*
1116 * bad wqe will be reprocessed and ignored when poll_cq() is called,
1117 * i.e. nr of wqes with flush error status is one less
1118 */
1119 ehca_dbg(&shca->ib_device, "qp_num=%x flusherr_wqe_cnt=%x",
1120 qp_num, (*bad_wqe_cnt)-1);
1121 wqe->wqef = 0;
1122
1123 return 0;
1124}
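/*
 * Note: the 0xff optype/wqef sentinel tested by the purge loop above is not
 * hardware state; for kernel QPs it is written into the next free send WQE
 * by internal_modify_qp() (see the "mark next free wqe" code in the
 * SQE -> RTS path below) right before this function runs, so the walk stops
 * at the first WQE that was never actually posted.
 */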
1125
1126static int calc_left_cqes(u64 wqe_p, struct ipz_queue *ipz_queue,
1127 struct ehca_queue_map *qmap)
1128{
1129 void *wqe_v;
1130 u64 q_ofs;
1131 u32 wqe_idx;
1132 unsigned int tail_idx;
1133
1134 /* convert real to abs address */
1135 wqe_p = wqe_p & (~(1UL << 63));
1136
1137 wqe_v = __va(wqe_p);
1138
1139 if (ipz_queue_abs_to_offset(ipz_queue, wqe_p, &q_ofs)) {
1140 ehca_gen_err("Invalid offset for calculating left cqes "
1141 "wqe_p=%#llx wqe_v=%p\n", wqe_p, wqe_v);
1142 return -EFAULT;
1143 }
1144
1145 tail_idx = next_index(qmap->tail, qmap->entries);
1146 wqe_idx = q_ofs / ipz_queue->qe_size;
1147
1148 /* check all processed wqes, whether a cqe is requested or not */
1149 while (tail_idx != wqe_idx) {
1150 if (qmap->map[tail_idx].cqe_req)
1151 qmap->left_to_poll++;
1152 tail_idx = next_index(tail_idx, qmap->entries);
1153 }
1154 /* save index in queue, where we have to start flushing */
1155 qmap->next_wqe_idx = wqe_idx;
1156 return 0;
1157}
1158
1159static int check_for_left_cqes(struct ehca_qp *my_qp, struct ehca_shca *shca)
1160{
1161 u64 h_ret;
1162 void *send_wqe_p, *recv_wqe_p;
1163 int ret;
1164 unsigned long flags;
1165 int qp_num = my_qp->ib_qp.qp_num;
1166
1167 /* this hcall is not supported on base QPs */
1168 if (my_qp->ext_type != EQPT_SRQBASE) {
1169 /* get send and receive wqe pointer */
1170 h_ret = hipz_h_disable_and_get_wqe(shca->ipz_hca_handle,
1171 my_qp->ipz_qp_handle, &my_qp->pf,
1172 &send_wqe_p, &recv_wqe_p, 4);
1173 if (h_ret != H_SUCCESS) {
1174 ehca_err(&shca->ib_device, "disable_and_get_wqe() "
1175 "failed ehca_qp=%p qp_num=%x h_ret=%lli",
1176 my_qp, qp_num, h_ret);
1177 return ehca2ib_return_code(h_ret);
1178 }
1179
1180 /*
1181 * acquire lock to ensure that nobody is polling the cq which
1182 * could mean that the qmap->tail pointer is in an
1183 * inconsistent state.
1184 */
1185 spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
1186 ret = calc_left_cqes((u64)send_wqe_p, &my_qp->ipz_squeue,
1187 &my_qp->sq_map);
1188 spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);
1189 if (ret)
1190 return ret;
1191
1192
1193 spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
1194 ret = calc_left_cqes((u64)recv_wqe_p, &my_qp->ipz_rqueue,
1195 &my_qp->rq_map);
1196 spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags);
1197 if (ret)
1198 return ret;
1199 } else {
1200 spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
1201 my_qp->sq_map.left_to_poll = 0;
1202 my_qp->sq_map.next_wqe_idx = next_index(my_qp->sq_map.tail,
1203 my_qp->sq_map.entries);
1204 spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);
1205
1206 spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
1207 my_qp->rq_map.left_to_poll = 0;
1208 my_qp->rq_map.next_wqe_idx = next_index(my_qp->rq_map.tail,
1209 my_qp->rq_map.entries);
1210 spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags);
1211 }
1212
1213 /* this ensures that flush cqes are generated only for pending wqes */
1214 if ((my_qp->sq_map.left_to_poll == 0) &&
1215 (my_qp->rq_map.left_to_poll == 0)) {
1216 spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
1217 ehca_add_to_err_list(my_qp, 1);
1218 spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);
1219
1220 if (HAS_RQ(my_qp)) {
1221 spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
1222 ehca_add_to_err_list(my_qp, 0);
1223 spin_unlock_irqrestore(&my_qp->recv_cq->spinlock,
1224 flags);
1225 }
1226 }
1227
1228 return 0;
1229}
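/*
 * Note: left_to_poll counts posted WQEs that still owe the consumer a CQE
 * (cqe_req set). Only when both the send and the receive map have nothing
 * left to poll is the QP put on its CQs' error lists right away, so that
 * flush CQEs are generated only for WQEs that are genuinely still pending.
 */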
1230
1231/*
1232 * internal_modify_qp with circumvention to handle aqp0 properly
1233 * smi_reset2init indicates if this is an internal reset-to-init-call for
1234 * smi. This flag must always be zero if called from ehca_modify_qp()!
1235 * This internal func was introduced to avoid recursion of ehca_modify_qp()!
1236 */
1237static int internal_modify_qp(struct ib_qp *ibqp,
1238 struct ib_qp_attr *attr,
1239 int attr_mask, int smi_reset2init)
1240{
1241 enum ib_qp_state qp_cur_state, qp_new_state;
1242 int cnt, qp_attr_idx, ret = 0;
1243 enum ib_qp_statetrans statetrans;
1244 struct hcp_modify_qp_control_block *mqpcb;
1245 struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
1246 struct ehca_shca *shca =
1247 container_of(ibqp->pd->device, struct ehca_shca, ib_device);
1248 u64 update_mask;
1249 u64 h_ret;
1250 int bad_wqe_cnt = 0;
1251 int is_user = 0;
1252 int squeue_locked = 0;
1253 unsigned long flags = 0;
1254
1255 /* do query_qp to obtain current attr values */
1256 mqpcb = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
1257 if (!mqpcb) {
1258 ehca_err(ibqp->device, "Could not get zeroed page for mqpcb "
1259 "ehca_qp=%p qp_num=%x ", my_qp, ibqp->qp_num);
1260 return -ENOMEM;
1261 }
1262
1263 h_ret = hipz_h_query_qp(shca->ipz_hca_handle,
1264 my_qp->ipz_qp_handle,
1265 &my_qp->pf,
1266 mqpcb, my_qp->galpas.kernel);
1267 if (h_ret != H_SUCCESS) {
1268 ehca_err(ibqp->device, "hipz_h_query_qp() failed "
1269 "ehca_qp=%p qp_num=%x h_ret=%lli",
1270 my_qp, ibqp->qp_num, h_ret);
1271 ret = ehca2ib_return_code(h_ret);
1272 goto modify_qp_exit1;
1273 }
1274 if (ibqp->uobject)
1275 is_user = 1;
1276
1277 qp_cur_state = ehca2ib_qp_state(mqpcb->qp_state);
1278
1279 if (qp_cur_state == -EINVAL) { /* invalid qp state */
1280 ret = -EINVAL;
1281 ehca_err(ibqp->device, "Invalid current ehca_qp_state=%x "
1282 "ehca_qp=%p qp_num=%x",
1283 mqpcb->qp_state, my_qp, ibqp->qp_num);
1284 goto modify_qp_exit1;
1285 }
1286 /*
1287 * circumvention to set aqp0 initial state to init
1288 * as expected by IB spec
1289 */
1290 if (smi_reset2init == 0 &&
1291 ibqp->qp_type == IB_QPT_SMI &&
1292 qp_cur_state == IB_QPS_RESET &&
1293 (attr_mask & IB_QP_STATE) &&
1294 attr->qp_state == IB_QPS_INIT) { /* RESET -> INIT */
1295 struct ib_qp_attr smiqp_attr = {
1296 .qp_state = IB_QPS_INIT,
1297 .port_num = my_qp->init_attr.port_num,
1298 .pkey_index = 0,
1299 .qkey = 0
1300 };
1301 int smiqp_attr_mask = IB_QP_STATE | IB_QP_PORT |
1302 IB_QP_PKEY_INDEX | IB_QP_QKEY;
1303 int smirc = internal_modify_qp(
1304 ibqp, &smiqp_attr, smiqp_attr_mask, 1);
1305 if (smirc) {
1306 ehca_err(ibqp->device, "SMI RESET -> INIT failed. "
1307 "ehca_modify_qp() rc=%i", smirc);
1308 ret = H_PARAMETER;
1309 goto modify_qp_exit1;
1310 }
1311 qp_cur_state = IB_QPS_INIT;
1312 ehca_dbg(ibqp->device, "SMI RESET -> INIT succeeded");
1313 }
1314 /* is transmitted current state equal to "real" current state */
1315 if ((attr_mask & IB_QP_CUR_STATE) &&
1316 qp_cur_state != attr->cur_qp_state) {
1317 ret = -EINVAL;
1318 ehca_err(ibqp->device,
1319 "Invalid IB_QP_CUR_STATE attr->curr_qp_state=%x <>"
1320 " actual cur_qp_state=%x. ehca_qp=%p qp_num=%x",
1321 attr->cur_qp_state, qp_cur_state, my_qp, ibqp->qp_num);
1322 goto modify_qp_exit1;
1323 }
1324
1325 ehca_dbg(ibqp->device, "ehca_qp=%p qp_num=%x current qp_state=%x "
1326 "new qp_state=%x attribute_mask=%x",
1327 my_qp, ibqp->qp_num, qp_cur_state, attr->qp_state, attr_mask);
1328
1329 qp_new_state = attr_mask & IB_QP_STATE ? attr->qp_state : qp_cur_state;
1330 if (!smi_reset2init &&
1331 !ib_modify_qp_is_ok(qp_cur_state, qp_new_state, ibqp->qp_type,
1332 attr_mask, IB_LINK_LAYER_UNSPECIFIED)) {
1333 ret = -EINVAL;
1334 ehca_err(ibqp->device,
1335 "Invalid qp transition new_state=%x cur_state=%x "
1336 "ehca_qp=%p qp_num=%x attr_mask=%x", qp_new_state,
1337 qp_cur_state, my_qp, ibqp->qp_num, attr_mask);
1338 goto modify_qp_exit1;
1339 }
1340
1341 mqpcb->qp_state = ib2ehca_qp_state(qp_new_state);
1342 if (mqpcb->qp_state)
1343 update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
1344 else {
1345 ret = -EINVAL;
1346 ehca_err(ibqp->device, "Invalid new qp state=%x "
1347 "ehca_qp=%p qp_num=%x",
1348 qp_new_state, my_qp, ibqp->qp_num);
1349 goto modify_qp_exit1;
1350 }
1351
1352 /* retrieve state transition struct to get req and opt attrs */
1353 statetrans = get_modqp_statetrans(qp_cur_state, qp_new_state);
1354 if (statetrans < 0) {
1355 ret = -EINVAL;
1356 ehca_err(ibqp->device, "<INVALID STATE CHANGE> qp_cur_state=%x "
1357 "new_qp_state=%x State_xsition=%x ehca_qp=%p "
1358 "qp_num=%x", qp_cur_state, qp_new_state,
1359 statetrans, my_qp, ibqp->qp_num);
1360 goto modify_qp_exit1;
1361 }
1362
1363 qp_attr_idx = ib2ehcaqptype(ibqp->qp_type);
1364
1365 if (qp_attr_idx < 0) {
1366 ret = qp_attr_idx;
1367 ehca_err(ibqp->device,
1368 "Invalid QP type=%x ehca_qp=%p qp_num=%x",
1369 ibqp->qp_type, my_qp, ibqp->qp_num);
1370 goto modify_qp_exit1;
1371 }
1372
1373 ehca_dbg(ibqp->device,
1374 "ehca_qp=%p qp_num=%x <VALID STATE CHANGE> qp_state_xsit=%x",
1375 my_qp, ibqp->qp_num, statetrans);
1376
1377 /* eHCA2 rev2 and higher require the SEND_GRH_FLAG to be set
1378 * in non-LL UD QPs.
1379 */
1380 if ((my_qp->qp_type == IB_QPT_UD) &&
1381 (my_qp->ext_type != EQPT_LLQP) &&
1382 (statetrans == IB_QPST_INIT2RTR) &&
1383 (shca->hw_level >= 0x22)) {
1384 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG, 1);
1385 mqpcb->send_grh_flag = 1;
1386 }
1387
1388 /* sqe -> rts: set purge bit of bad wqe before actual trans */
1389 if ((my_qp->qp_type == IB_QPT_UD ||
1390 my_qp->qp_type == IB_QPT_GSI ||
1391 my_qp->qp_type == IB_QPT_SMI) &&
1392 statetrans == IB_QPST_SQE2RTS) {
1393 /* mark next free wqe if kernel */
1394 if (!ibqp->uobject) {
1395 struct ehca_wqe *wqe;
1396 /* lock send queue */
1397 spin_lock_irqsave(&my_qp->spinlock_s, flags);
1398 squeue_locked = 1;
1399 /* mark next free wqe */
1400 wqe = (struct ehca_wqe *)
1401 ipz_qeit_get(&my_qp->ipz_squeue);
1402 wqe->optype = wqe->wqef = 0xff;
1403 ehca_dbg(ibqp->device, "qp_num=%x next_free_wqe=%p",
1404 ibqp->qp_num, wqe);
1405 }
1406 ret = prepare_sqe_rts(my_qp, shca, &bad_wqe_cnt);
1407 if (ret) {
1408 ehca_err(ibqp->device, "prepare_sqe_rts() failed "
1409 "ehca_qp=%p qp_num=%x ret=%i",
1410 my_qp, ibqp->qp_num, ret);
1411 goto modify_qp_exit2;
1412 }
1413 }
1414
1415 /*
1416 * enable RDMA_Atomic_Control if reset->init and reliable connection;
1417 * this is necessary since gen2 does not provide that flag,
1418 * but pHyp requires it
1419 */
1420 if (statetrans == IB_QPST_RESET2INIT &&
1421 (ibqp->qp_type == IB_QPT_RC || ibqp->qp_type == IB_QPT_UC)) {
1422 mqpcb->rdma_atomic_ctrl = 3;
1423 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RDMA_ATOMIC_CTRL, 1);
1424 }
1425 /* circ. pHyp requires #RDMA/Atomic Resp Res for UC INIT -> RTR */
1426 if (statetrans == IB_QPST_INIT2RTR &&
1427 (ibqp->qp_type == IB_QPT_UC) &&
1428 !(attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)) {
1429 mqpcb->rdma_nr_atomic_resp_res = 1; /* default to 1 */
1430 update_mask |=
1431 EHCA_BMASK_SET(MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES, 1);
1432 }
1433
1434 if (attr_mask & IB_QP_PKEY_INDEX) {
1435 if (attr->pkey_index >= 16) {
1436 ret = -EINVAL;
1437 ehca_err(ibqp->device, "Invalid pkey_index=%x. "
1438 "ehca_qp=%p qp_num=%x max_pkey_index=f",
1439 attr->pkey_index, my_qp, ibqp->qp_num);
1440 goto modify_qp_exit2;
1441 }
1442 mqpcb->prim_p_key_idx = attr->pkey_index;
1443 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_P_KEY_IDX, 1);
1444 }
1445 if (attr_mask & IB_QP_PORT) {
1446 struct ehca_sport *sport;
1447 struct ehca_qp *aqp1;
1448 if (attr->port_num < 1 || attr->port_num > shca->num_ports) {
1449 ret = -EINVAL;
1450 ehca_err(ibqp->device, "Invalid port=%x. "
1451 "ehca_qp=%p qp_num=%x num_ports=%x",
1452 attr->port_num, my_qp, ibqp->qp_num,
1453 shca->num_ports);
1454 goto modify_qp_exit2;
1455 }
1456 sport = &shca->sport[attr->port_num - 1];
1457 if (!sport->ibqp_sqp[IB_QPT_GSI]) {
1458 /* should not occur */
1459 ret = -EFAULT;
1460 ehca_err(ibqp->device, "AQP1 was not created for "
1461 "port=%x", attr->port_num);
1462 goto modify_qp_exit2;
1463 }
1464 aqp1 = container_of(sport->ibqp_sqp[IB_QPT_GSI],
1465 struct ehca_qp, ib_qp);
1466 if (ibqp->qp_type != IB_QPT_GSI &&
1467 ibqp->qp_type != IB_QPT_SMI &&
1468 aqp1->mod_qp_parm) {
1469 /*
1470 * firmware will reject this modify_qp() because
1471 * port is not activated/initialized fully
1472 */
1473 ret = -EFAULT;
1474 ehca_warn(ibqp->device, "Couldn't modify qp port=%x: "
1475 "either port is being activated (try again) "
1476 "or cabling issue", attr->port_num);
1477 goto modify_qp_exit2;
1478 }
1479 mqpcb->prim_phys_port = attr->port_num;
1480 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_PHYS_PORT, 1);
1481 }
1482 if (attr_mask & IB_QP_QKEY) {
1483 mqpcb->qkey = attr->qkey;
1484 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_QKEY, 1);
1485 }
1486 if (attr_mask & IB_QP_AV) {
1487 mqpcb->dlid = attr->ah_attr.dlid;
1488 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DLID, 1);
1489 mqpcb->source_path_bits = attr->ah_attr.src_path_bits;
1490 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SOURCE_PATH_BITS, 1);
1491 mqpcb->service_level = attr->ah_attr.sl;
1492 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SERVICE_LEVEL, 1);
1493
1494 if (ehca_calc_ipd(shca, mqpcb->prim_phys_port,
1495 attr->ah_attr.static_rate,
1496 &mqpcb->max_static_rate)) {
1497 ret = -EINVAL;
1498 goto modify_qp_exit2;
1499 }
1500 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE, 1);
1501
1502 /*
1503 * Always supply the GRH flag, even if it's zero, to give the
1504 * hypervisor a clear "yes" or "no" instead of a "perhaps"
1505 */
1506 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG, 1);
1507
1508 /*
1509 * only if GRH is TRUE we might consider SOURCE_GID_IDX
1510 * and DEST_GID otherwise pHyp will return H_ATTR_PARM!!!
1511 */
1512 if (attr->ah_attr.ah_flags == IB_AH_GRH) {
1513 mqpcb->send_grh_flag = 1;
1514
1515 mqpcb->source_gid_idx = attr->ah_attr.grh.sgid_index;
1516 update_mask |=
1517 EHCA_BMASK_SET(MQPCB_MASK_SOURCE_GID_IDX, 1);
1518
1519 for (cnt = 0; cnt < 16; cnt++)
1520 mqpcb->dest_gid.byte[cnt] =
1521 attr->ah_attr.grh.dgid.raw[cnt];
1522
1523 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DEST_GID, 1);
1524 mqpcb->flow_label = attr->ah_attr.grh.flow_label;
1525 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_FLOW_LABEL, 1);
1526 mqpcb->hop_limit = attr->ah_attr.grh.hop_limit;
1527 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_HOP_LIMIT, 1);
1528 mqpcb->traffic_class = attr->ah_attr.grh.traffic_class;
1529 update_mask |=
1530 EHCA_BMASK_SET(MQPCB_MASK_TRAFFIC_CLASS, 1);
1531 }
1532 }
1533
1534 if (attr_mask & IB_QP_PATH_MTU) {
1535 /* store ld(MTU) */
1536 my_qp->mtu_shift = attr->path_mtu + 7;
1537 mqpcb->path_mtu = attr->path_mtu;
1538 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PATH_MTU, 1);
1539 }
1540 if (attr_mask & IB_QP_TIMEOUT) {
1541 mqpcb->timeout = attr->timeout;
1542 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_TIMEOUT, 1);
1543 }
1544 if (attr_mask & IB_QP_RETRY_CNT) {
1545 mqpcb->retry_count = attr->retry_cnt;
1546 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RETRY_COUNT, 1);
1547 }
1548 if (attr_mask & IB_QP_RNR_RETRY) {
1549 mqpcb->rnr_retry_count = attr->rnr_retry;
1550 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RNR_RETRY_COUNT, 1);
1551 }
1552 if (attr_mask & IB_QP_RQ_PSN) {
1553 mqpcb->receive_psn = attr->rq_psn;
1554 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RECEIVE_PSN, 1);
1555 }
1556 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1557 mqpcb->rdma_nr_atomic_resp_res = attr->max_dest_rd_atomic < 3 ?
1558 attr->max_dest_rd_atomic : 2;
1559 update_mask |=
1560 EHCA_BMASK_SET(MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES, 1);
1561 }
1562 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1563 mqpcb->rdma_atomic_outst_dest_qp = attr->max_rd_atomic < 3 ?
1564 attr->max_rd_atomic : 2;
1565 update_mask |=
1566 EHCA_BMASK_SET
1567 (MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP, 1);
1568 }
1569 if (attr_mask & IB_QP_ALT_PATH) {
1570 if (attr->alt_port_num < 1
1571 || attr->alt_port_num > shca->num_ports) {
1572 ret = -EINVAL;
1573 ehca_err(ibqp->device, "Invalid alt_port=%x. "
1574 "ehca_qp=%p qp_num=%x num_ports=%x",
1575 attr->alt_port_num, my_qp, ibqp->qp_num,
1576 shca->num_ports);
1577 goto modify_qp_exit2;
1578 }
1579 mqpcb->alt_phys_port = attr->alt_port_num;
1580
1581 if (attr->alt_pkey_index >= 16) {
1582 ret = -EINVAL;
1583 ehca_err(ibqp->device, "Invalid alt_pkey_index=%x. "
1584 "ehca_qp=%p qp_num=%x max_pkey_index=f",
1585 attr->pkey_index, my_qp, ibqp->qp_num);
1586 goto modify_qp_exit2;
1587 }
1588 mqpcb->alt_p_key_idx = attr->alt_pkey_index;
1589
1590 mqpcb->timeout_al = attr->alt_timeout;
1591 mqpcb->dlid_al = attr->alt_ah_attr.dlid;
1592 mqpcb->source_path_bits_al = attr->alt_ah_attr.src_path_bits;
1593 mqpcb->service_level_al = attr->alt_ah_attr.sl;
1594
1595 if (ehca_calc_ipd(shca, mqpcb->alt_phys_port,
1596 attr->alt_ah_attr.static_rate,
1597 &mqpcb->max_static_rate_al)) {
1598 ret = -EINVAL;
1599 goto modify_qp_exit2;
1600 }
1601
1602 /* OpenIB doesn't support alternate retry counts - copy them */
1603 mqpcb->retry_count_al = mqpcb->retry_count;
1604 mqpcb->rnr_retry_count_al = mqpcb->rnr_retry_count;
1605
1606 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_ALT_PHYS_PORT, 1)
1607 | EHCA_BMASK_SET(MQPCB_MASK_ALT_P_KEY_IDX, 1)
1608 | EHCA_BMASK_SET(MQPCB_MASK_TIMEOUT_AL, 1)
1609 | EHCA_BMASK_SET(MQPCB_MASK_DLID_AL, 1)
1610 | EHCA_BMASK_SET(MQPCB_MASK_SOURCE_PATH_BITS_AL, 1)
1611 | EHCA_BMASK_SET(MQPCB_MASK_SERVICE_LEVEL_AL, 1)
1612 | EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE_AL, 1)
1613 | EHCA_BMASK_SET(MQPCB_MASK_RETRY_COUNT_AL, 1)
1614 | EHCA_BMASK_SET(MQPCB_MASK_RNR_RETRY_COUNT_AL, 1);
1615
1616 /*
1617 * Always supply the GRH flag, even if it's zero, to give the
1618 * hypervisor a clear "yes" or "no" instead of a "perhaps"
1619 */
1620 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG_AL, 1);
1621
1622 /*
1623 * only if GRH is TRUE we might consider SOURCE_GID_IDX
1624 * and DEST_GID otherwise pHyp will return H_ATTR_PARM!!!
1625 */
1626 if (attr->alt_ah_attr.ah_flags == IB_AH_GRH) {
1627 mqpcb->send_grh_flag_al = 1;
1628
1629 for (cnt = 0; cnt < 16; cnt++)
1630 mqpcb->dest_gid_al.byte[cnt] =
1631 attr->alt_ah_attr.grh.dgid.raw[cnt];
1632 mqpcb->source_gid_idx_al =
1633 attr->alt_ah_attr.grh.sgid_index;
1634 mqpcb->flow_label_al = attr->alt_ah_attr.grh.flow_label;
1635 mqpcb->hop_limit_al = attr->alt_ah_attr.grh.hop_limit;
1636 mqpcb->traffic_class_al =
1637 attr->alt_ah_attr.grh.traffic_class;
1638
1639 update_mask |=
1640 EHCA_BMASK_SET(MQPCB_MASK_SOURCE_GID_IDX_AL, 1)
1641 | EHCA_BMASK_SET(MQPCB_MASK_DEST_GID_AL, 1)
1642 | EHCA_BMASK_SET(MQPCB_MASK_FLOW_LABEL_AL, 1)
1643 | EHCA_BMASK_SET(MQPCB_MASK_HOP_LIMIT_AL, 1) |
1644 EHCA_BMASK_SET(MQPCB_MASK_TRAFFIC_CLASS_AL, 1);
1645 }
1646 }
1647
1648 if (attr_mask & IB_QP_MIN_RNR_TIMER) {
1649 mqpcb->min_rnr_nak_timer_field = attr->min_rnr_timer;
1650 update_mask |=
1651 EHCA_BMASK_SET(MQPCB_MASK_MIN_RNR_NAK_TIMER_FIELD, 1);
1652 }
1653
1654 if (attr_mask & IB_QP_SQ_PSN) {
1655 mqpcb->send_psn = attr->sq_psn;
1656 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_PSN, 1);
1657 }
1658
1659 if (attr_mask & IB_QP_DEST_QPN) {
1660 mqpcb->dest_qp_nr = attr->dest_qp_num;
1661 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DEST_QP_NR, 1);
1662 }
1663
1664 if (attr_mask & IB_QP_PATH_MIG_STATE) {
1665 if (attr->path_mig_state != IB_MIG_REARM
1666 && attr->path_mig_state != IB_MIG_MIGRATED) {
1667 ret = -EINVAL;
1668 ehca_err(ibqp->device, "Invalid mig_state=%x",
1669 attr->path_mig_state);
1670 goto modify_qp_exit2;
1671 }
1672 mqpcb->path_migration_state = attr->path_mig_state + 1;
1673 if (attr->path_mig_state == IB_MIG_REARM)
1674 my_qp->mig_armed = 1;
1675 update_mask |=
1676 EHCA_BMASK_SET(MQPCB_MASK_PATH_MIGRATION_STATE, 1);
1677 }
1678
1679 if (attr_mask & IB_QP_CAP) {
1680 mqpcb->max_nr_outst_send_wr = attr->cap.max_send_wr+1;
1681 update_mask |=
1682 EHCA_BMASK_SET(MQPCB_MASK_MAX_NR_OUTST_SEND_WR, 1);
1683 mqpcb->max_nr_outst_recv_wr = attr->cap.max_recv_wr+1;
1684 update_mask |=
1685 EHCA_BMASK_SET(MQPCB_MASK_MAX_NR_OUTST_RECV_WR, 1);
1686 /* no support for max_send/recv_sge yet */
1687 }
1688
1689 if (ehca_debug_level >= 2)
1690 ehca_dmp(mqpcb, 4*70, "qp_num=%x", ibqp->qp_num);
1691
1692 h_ret = hipz_h_modify_qp(shca->ipz_hca_handle,
1693 my_qp->ipz_qp_handle,
1694 &my_qp->pf,
1695 update_mask,
1696 mqpcb, my_qp->galpas.kernel);
1697
1698 if (h_ret != H_SUCCESS) {
1699 ret = ehca2ib_return_code(h_ret);
1700 ehca_err(ibqp->device, "hipz_h_modify_qp() failed h_ret=%lli "
1701 "ehca_qp=%p qp_num=%x", h_ret, my_qp, ibqp->qp_num);
1702 goto modify_qp_exit2;
1703 }
1704
1705 if ((my_qp->qp_type == IB_QPT_UD ||
1706 my_qp->qp_type == IB_QPT_GSI ||
1707 my_qp->qp_type == IB_QPT_SMI) &&
1708 statetrans == IB_QPST_SQE2RTS) {
1709 /* ring the doorbell to reprocess wqes */
1710 iosync(); /* serialize GAL register access */
1711 hipz_update_sqa(my_qp, bad_wqe_cnt-1);
1712 ehca_gen_dbg("doorbell for %x wqes", bad_wqe_cnt);
1713 }
1714
1715 if (statetrans == IB_QPST_RESET2INIT ||
1716 statetrans == IB_QPST_INIT2INIT) {
1717 mqpcb->qp_enable = 1;
1718 mqpcb->qp_state = EHCA_QPS_INIT;
1719 update_mask = 0;
1720 update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_ENABLE, 1);
1721
1722 h_ret = hipz_h_modify_qp(shca->ipz_hca_handle,
1723 my_qp->ipz_qp_handle,
1724 &my_qp->pf,
1725 update_mask,
1726 mqpcb,
1727 my_qp->galpas.kernel);
1728
1729 if (h_ret != H_SUCCESS) {
1730 ret = ehca2ib_return_code(h_ret);
1731 ehca_err(ibqp->device, "ENABLE in context of "
1732 "RESET_2_INIT failed! Maybe you didn't get "
1733 "a LID h_ret=%lli ehca_qp=%p qp_num=%x",
1734 h_ret, my_qp, ibqp->qp_num);
1735 goto modify_qp_exit2;
1736 }
1737 }
1738 if ((qp_new_state == IB_QPS_ERR) && (qp_cur_state != IB_QPS_ERR)
1739 && !is_user) {
1740 ret = check_for_left_cqes(my_qp, shca);
1741 if (ret)
1742 goto modify_qp_exit2;
1743 }
1744
1745 if (statetrans == IB_QPST_ANY2RESET) {
1746 ipz_qeit_reset(&my_qp->ipz_rqueue);
1747 ipz_qeit_reset(&my_qp->ipz_squeue);
1748
1749 if (qp_cur_state == IB_QPS_ERR && !is_user) {
1750 del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);
1751
1752 if (HAS_RQ(my_qp))
1753 del_from_err_list(my_qp->recv_cq,
1754 &my_qp->rq_err_node);
1755 }
1756 if (!is_user)
1757 reset_queue_map(&my_qp->sq_map);
1758
1759 if (HAS_RQ(my_qp) && !is_user)
1760 reset_queue_map(&my_qp->rq_map);
1761 }
1762
1763 if (attr_mask & IB_QP_QKEY)
1764 my_qp->qkey = attr->qkey;
1765
1766modify_qp_exit2:
1767 if (squeue_locked) { /* this means: sqe -> rts */
1768 spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
1769 my_qp->sqerr_purgeflag = 1;
1770 }
1771
1772modify_qp_exit1:
1773 ehca_free_fw_ctrlblock(mqpcb);
1774
1775 return ret;
1776}
1777
1778int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
1779 struct ib_udata *udata)
1780{
1781 int ret = 0;
1782
1783 struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,
1784 ib_device);
1785 struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
1786
1787 /* The if-block below caches qp_attr to be modified for GSI and SMI
1788 * qps during the initialization by ib_mad. When the respective port
1789 * is activated, ie we got an event PORT_ACTIVE, we'll replay the
1790 * cached modify calls sequence, see ehca_recover_sqp() below.
1791 * Why that is required:
1792 * 1) If only one port is connected, older code requires that port one
1793 * be connected and the module option nr_ports=1 be given by the
1794 * user, which is very inconvenient for the end user.
1795 * 2) Firmware accepts modify_qp() only if the respective port has become
1796 * active. Older code had a 30 sec wait loop in create_qp()/
1797 * define_aqp1(), which is not appropriate in practice. This
1798 * code removes that wait loop, see define_aqp1(), and always
1799 * reports all ports to ib_mad and thus to users. Only activated
1800 * ports will then be usable for the users.
1801 */
1802 if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) {
1803 int port = my_qp->init_attr.port_num;
1804 struct ehca_sport *sport = &shca->sport[port - 1];
1805 unsigned long flags;
1806 spin_lock_irqsave(&sport->mod_sqp_lock, flags);
1807 /* cache qp_attr only during init */
1808 if (my_qp->mod_qp_parm) {
1809 struct ehca_mod_qp_parm *p;
1810 if (my_qp->mod_qp_parm_idx >= EHCA_MOD_QP_PARM_MAX) {
1811 ehca_err(&shca->ib_device,
1812 "mod_qp_parm overflow state=%x port=%x"
1813 " type=%x", attr->qp_state,
1814 my_qp->init_attr.port_num,
1815 ibqp->qp_type);
1816 spin_unlock_irqrestore(&sport->mod_sqp_lock,
1817 flags);
1818 return -EINVAL;
1819 }
1820 p = &my_qp->mod_qp_parm[my_qp->mod_qp_parm_idx];
1821 p->mask = attr_mask;
1822 p->attr = *attr;
1823 my_qp->mod_qp_parm_idx++;
1824 ehca_dbg(&shca->ib_device,
1825 "Saved qp_attr for state=%x port=%x type=%x",
1826 attr->qp_state, my_qp->init_attr.port_num,
1827 ibqp->qp_type);
1828 spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
1829 goto out;
1830 }
1831 spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
1832 }
1833
1834 ret = internal_modify_qp(ibqp, attr, attr_mask, 0);
1835
1836out:
1837 if ((ret == 0) && (attr_mask & IB_QP_STATE))
1838 my_qp->state = attr->qp_state;
1839
1840 return ret;
1841}
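/*
 * Usage sketch (illustrative): a kernel consumer drives this code through
 * ib_modify_qp(), one transition at a time, e.g. RESET -> INIT for an RC QP:
 *
 *	struct ib_qp_attr a = {
 *		.qp_state        = IB_QPS_INIT,
 *		.pkey_index      = 0,
 *		.port_num        = 1,
 *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *	};
 *	ib_modify_qp(qp, &a, IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			     IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 *
 * followed by INIT -> RTR and RTR -> RTS calls with their respective masks;
 * internal_modify_qp() above validates each step via ib_modify_qp_is_ok().
 */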
1842
1843void ehca_recover_sqp(struct ib_qp *sqp)
1844{
1845 struct ehca_qp *my_sqp = container_of(sqp, struct ehca_qp, ib_qp);
1846 int port = my_sqp->init_attr.port_num;
1847 struct ib_qp_attr attr;
1848 struct ehca_mod_qp_parm *qp_parm;
1849 int i, qp_parm_idx, ret;
1850 unsigned long flags, wr_cnt;
1851
1852 if (!my_sqp->mod_qp_parm)
1853 return;
1854 ehca_dbg(sqp->device, "SQP port=%x qp_num=%x", port, sqp->qp_num);
1855
1856 qp_parm = my_sqp->mod_qp_parm;
1857 qp_parm_idx = my_sqp->mod_qp_parm_idx;
1858 for (i = 0; i < qp_parm_idx; i++) {
1859 attr = qp_parm[i].attr;
1860 ret = internal_modify_qp(sqp, &attr, qp_parm[i].mask, 0);
1861 if (ret) {
1862 ehca_err(sqp->device, "Could not modify SQP port=%x "
1863 "qp_num=%x ret=%x", port, sqp->qp_num, ret);
1864 goto free_qp_parm;
1865 }
1866 ehca_dbg(sqp->device, "SQP port=%x qp_num=%x in state=%x",
1867 port, sqp->qp_num, attr.qp_state);
1868 }
1869
1870 /* re-trigger posted recv wrs */
1871 wr_cnt = my_sqp->ipz_rqueue.current_q_offset /
1872 my_sqp->ipz_rqueue.qe_size;
1873 if (wr_cnt) {
1874 spin_lock_irqsave(&my_sqp->spinlock_r, flags);
1875 hipz_update_rqa(my_sqp, wr_cnt);
1876 spin_unlock_irqrestore(&my_sqp->spinlock_r, flags);
1877 ehca_dbg(sqp->device, "doorbell port=%x qp_num=%x wr_cnt=%lx",
1878 port, sqp->qp_num, wr_cnt);
1879 }
1880
1881free_qp_parm:
1882 kfree(qp_parm);
1883 /* this prevents subsequent calls to modify_qp() from caching qp_attr */
1884 my_sqp->mod_qp_parm = NULL;
1885}
1886
1887int ehca_query_qp(struct ib_qp *qp,
1888 struct ib_qp_attr *qp_attr,
1889 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
1890{
1891 struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
1892 struct ehca_shca *shca = container_of(qp->device, struct ehca_shca,
1893 ib_device);
1894 struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
1895 struct hcp_modify_qp_control_block *qpcb;
1896 int cnt, ret = 0;
1897 u64 h_ret;
1898
1899 if (qp_attr_mask & QP_ATTR_QUERY_NOT_SUPPORTED) {
1900 ehca_err(qp->device, "Invalid attribute mask "
1901 "ehca_qp=%p qp_num=%x qp_attr_mask=%x ",
1902 my_qp, qp->qp_num, qp_attr_mask);
1903 return -EINVAL;
1904 }
1905
1906 qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
1907 if (!qpcb) {
1908 ehca_err(qp->device, "Out of memory for qpcb "
1909 "ehca_qp=%p qp_num=%x", my_qp, qp->qp_num);
1910 return -ENOMEM;
1911 }
1912
1913 h_ret = hipz_h_query_qp(adapter_handle,
1914 my_qp->ipz_qp_handle,
1915 &my_qp->pf,
1916 qpcb, my_qp->galpas.kernel);
1917
1918 if (h_ret != H_SUCCESS) {
1919 ret = ehca2ib_return_code(h_ret);
1920 ehca_err(qp->device, "hipz_h_query_qp() failed "
1921 "ehca_qp=%p qp_num=%x h_ret=%lli",
1922 my_qp, qp->qp_num, h_ret);
1923 goto query_qp_exit1;
1924 }
1925
1926 qp_attr->cur_qp_state = ehca2ib_qp_state(qpcb->qp_state);
1927 qp_attr->qp_state = qp_attr->cur_qp_state;
1928
1929 if (qp_attr->cur_qp_state == -EINVAL) {
1930 ret = -EINVAL;
1931 ehca_err(qp->device, "Got invalid ehca_qp_state=%x "
1932 "ehca_qp=%p qp_num=%x",
1933 qpcb->qp_state, my_qp, qp->qp_num);
1934 goto query_qp_exit1;
1935 }
1936
1937 if (qp_attr->qp_state == IB_QPS_SQD)
1938 qp_attr->sq_draining = 1;
1939
1940 qp_attr->qkey = qpcb->qkey;
1941 qp_attr->path_mtu = qpcb->path_mtu;
1942 qp_attr->path_mig_state = qpcb->path_migration_state - 1;
1943 qp_attr->rq_psn = qpcb->receive_psn;
1944 qp_attr->sq_psn = qpcb->send_psn;
1945 qp_attr->min_rnr_timer = qpcb->min_rnr_nak_timer_field;
1946 qp_attr->cap.max_send_wr = qpcb->max_nr_outst_send_wr-1;
1947 qp_attr->cap.max_recv_wr = qpcb->max_nr_outst_recv_wr-1;
1948 /* UD_AV CIRCUMVENTION */
1949 if (my_qp->qp_type == IB_QPT_UD) {
1950 qp_attr->cap.max_send_sge =
1951 qpcb->actual_nr_sges_in_sq_wqe - 2;
1952 qp_attr->cap.max_recv_sge =
1953 qpcb->actual_nr_sges_in_rq_wqe - 2;
1954 } else {
1955 qp_attr->cap.max_send_sge =
1956 qpcb->actual_nr_sges_in_sq_wqe;
1957 qp_attr->cap.max_recv_sge =
1958 qpcb->actual_nr_sges_in_rq_wqe;
1959 }
1960
1961 qp_attr->cap.max_inline_data = my_qp->sq_max_inline_data_size;
1962 qp_attr->dest_qp_num = qpcb->dest_qp_nr;
1963
1964 qp_attr->pkey_index = qpcb->prim_p_key_idx;
1965 qp_attr->port_num = qpcb->prim_phys_port;
1966 qp_attr->timeout = qpcb->timeout;
1967 qp_attr->retry_cnt = qpcb->retry_count;
1968 qp_attr->rnr_retry = qpcb->rnr_retry_count;
1969
1970 qp_attr->alt_pkey_index = qpcb->alt_p_key_idx;
1971 qp_attr->alt_port_num = qpcb->alt_phys_port;
1972 qp_attr->alt_timeout = qpcb->timeout_al;
1973
1974 qp_attr->max_dest_rd_atomic = qpcb->rdma_nr_atomic_resp_res;
1975 qp_attr->max_rd_atomic = qpcb->rdma_atomic_outst_dest_qp;
1976
1977 /* primary av */
1978 qp_attr->ah_attr.sl = qpcb->service_level;
1979
1980 if (qpcb->send_grh_flag) {
1981 qp_attr->ah_attr.ah_flags = IB_AH_GRH;
1982 }
1983
1984 qp_attr->ah_attr.static_rate = qpcb->max_static_rate;
1985 qp_attr->ah_attr.dlid = qpcb->dlid;
1986 qp_attr->ah_attr.src_path_bits = qpcb->source_path_bits;
1987 qp_attr->ah_attr.port_num = qp_attr->port_num;
1988
1989 /* primary GRH */
1990 qp_attr->ah_attr.grh.traffic_class = qpcb->traffic_class;
1991 qp_attr->ah_attr.grh.hop_limit = qpcb->hop_limit;
1992 qp_attr->ah_attr.grh.sgid_index = qpcb->source_gid_idx;
1993 qp_attr->ah_attr.grh.flow_label = qpcb->flow_label;
1994
1995 for (cnt = 0; cnt < 16; cnt++)
1996 qp_attr->ah_attr.grh.dgid.raw[cnt] =
1997 qpcb->dest_gid.byte[cnt];
1998
1999 /* alternate AV */
2000 qp_attr->alt_ah_attr.sl = qpcb->service_level_al;
2001 if (qpcb->send_grh_flag_al) {
2002 qp_attr->alt_ah_attr.ah_flags = IB_AH_GRH;
2003 }
2004
2005 qp_attr->alt_ah_attr.static_rate = qpcb->max_static_rate_al;
2006 qp_attr->alt_ah_attr.dlid = qpcb->dlid_al;
2007 qp_attr->alt_ah_attr.src_path_bits = qpcb->source_path_bits_al;
2008
2009 /* alternate GRH */
2010 qp_attr->alt_ah_attr.grh.traffic_class = qpcb->traffic_class_al;
2011 qp_attr->alt_ah_attr.grh.hop_limit = qpcb->hop_limit_al;
2012 qp_attr->alt_ah_attr.grh.sgid_index = qpcb->source_gid_idx_al;
2013 qp_attr->alt_ah_attr.grh.flow_label = qpcb->flow_label_al;
2014
2015 for (cnt = 0; cnt < 16; cnt++)
2016 qp_attr->alt_ah_attr.grh.dgid.raw[cnt] =
2017 qpcb->dest_gid_al.byte[cnt];
2018
2019 /* return init attributes given in ehca_create_qp */
2020 if (qp_init_attr)
2021 *qp_init_attr = my_qp->init_attr;
2022
2023 if (ehca_debug_level >= 2)
2024 ehca_dmp(qpcb, 4*70, "qp_num=%x", qp->qp_num);
2025
2026query_qp_exit1:
2027 ehca_free_fw_ctrlblock(qpcb);
2028
2029 return ret;
2030}
2031
2032int ehca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
2033 enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
2034{
2035 struct ehca_qp *my_qp =
2036 container_of(ibsrq, struct ehca_qp, ib_srq);
2037 struct ehca_shca *shca =
2038 container_of(ibsrq->pd->device, struct ehca_shca, ib_device);
2039 struct hcp_modify_qp_control_block *mqpcb;
2040 u64 update_mask;
2041 u64 h_ret;
2042 int ret = 0;
2043
2044 mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
2045 if (!mqpcb) {
2046 ehca_err(ibsrq->device, "Could not get zeroed page for mqpcb "
2047 "ehca_qp=%p qp_num=%x ", my_qp, my_qp->real_qp_num);
2048 return -ENOMEM;
2049 }
2050
2051 update_mask = 0;
2052 if (attr_mask & IB_SRQ_LIMIT) {
2053 attr_mask &= ~IB_SRQ_LIMIT;
2054 update_mask |=
2055 EHCA_BMASK_SET(MQPCB_MASK_CURR_SRQ_LIMIT, 1)
2056 | EHCA_BMASK_SET(MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG, 1);
2057 mqpcb->curr_srq_limit = attr->srq_limit;
2058 mqpcb->qp_aff_asyn_ev_log_reg =
2059 EHCA_BMASK_SET(QPX_AAELOG_RESET_SRQ_LIMIT, 1);
2060 }
2061
2062 /* by now, all bits in attr_mask should have been cleared */
2063 if (attr_mask) {
2064 ehca_err(ibsrq->device, "invalid attribute mask bits set "
2065 "attr_mask=%x", attr_mask);
2066 ret = -EINVAL;
2067 goto modify_srq_exit0;
2068 }
2069
2070 if (ehca_debug_level >= 2)
2071 ehca_dmp(mqpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);
2072
2073 h_ret = hipz_h_modify_qp(shca->ipz_hca_handle, my_qp->ipz_qp_handle,
2074 NULL, update_mask, mqpcb,
2075 my_qp->galpas.kernel);
2076
2077 if (h_ret != H_SUCCESS) {
2078 ret = ehca2ib_return_code(h_ret);
2079 ehca_err(ibsrq->device, "hipz_h_modify_qp() failed h_ret=%lli "
2080 "ehca_qp=%p qp_num=%x",
2081 h_ret, my_qp, my_qp->real_qp_num);
2082 }
2083
2084modify_srq_exit0:
2085 ehca_free_fw_ctrlblock(mqpcb);
2086
2087 return ret;
2088}
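/*
 * Usage sketch (illustrative): only the SRQ limit can be changed here, e.g.
 *
 *	struct ib_srq_attr a = { .srq_limit = 16 };
 *	ib_modify_srq(srq, &a, IB_SRQ_LIMIT);
 *
 * which arms the "SRQ limit reached" async event once fewer than 16 receive
 * WQEs remain posted; IB_SRQ_MAX_WR (resizing) would be rejected above as an
 * invalid mask bit.
 */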
2089
2090int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr)
2091{
2092 struct ehca_qp *my_qp = container_of(srq, struct ehca_qp, ib_srq);
2093 struct ehca_shca *shca = container_of(srq->device, struct ehca_shca,
2094 ib_device);
2095 struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
2096 struct hcp_modify_qp_control_block *qpcb;
2097 int ret = 0;
2098 u64 h_ret;
2099
2100 qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
2101 if (!qpcb) {
2102 ehca_err(srq->device, "Out of memory for qpcb "
2103 "ehca_qp=%p qp_num=%x", my_qp, my_qp->real_qp_num);
2104 return -ENOMEM;
2105 }
2106
2107 h_ret = hipz_h_query_qp(adapter_handle, my_qp->ipz_qp_handle,
2108 NULL, qpcb, my_qp->galpas.kernel);
2109
2110 if (h_ret != H_SUCCESS) {
2111 ret = ehca2ib_return_code(h_ret);
2112 ehca_err(srq->device, "hipz_h_query_qp() failed "
2113 "ehca_qp=%p qp_num=%x h_ret=%lli",
2114 my_qp, my_qp->real_qp_num, h_ret);
2115 goto query_srq_exit1;
2116 }
2117
2118 srq_attr->max_wr = qpcb->max_nr_outst_recv_wr - 1;
2119 srq_attr->max_sge = 3;
2120 srq_attr->srq_limit = qpcb->curr_srq_limit;
2121
2122 if (ehca_debug_level >= 2)
2123 ehca_dmp(qpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);
2124
2125query_srq_exit1:
2126 ehca_free_fw_ctrlblock(qpcb);
2127
2128 return ret;
2129}
2130
2131static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
2132 struct ib_uobject *uobject)
2133{
2134 struct ehca_shca *shca = container_of(dev, struct ehca_shca, ib_device);
2135 struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
2136 ib_pd);
2137 struct ehca_sport *sport = &shca->sport[my_qp->init_attr.port_num - 1];
2138 u32 qp_num = my_qp->real_qp_num;
2139 int ret;
2140 u64 h_ret;
2141 u8 port_num;
2142 int is_user = 0;
2143 enum ib_qp_type qp_type;
2144 unsigned long flags;
2145
2146 if (uobject) {
2147 is_user = 1;
2148 if (my_qp->mm_count_galpa ||
2149 my_qp->mm_count_rqueue || my_qp->mm_count_squeue) {
2150 ehca_err(dev, "Resources still referenced in "
2151 "user space qp_num=%x", qp_num);
2152 return -EINVAL;
2153 }
2154 }
2155
2156 if (my_qp->send_cq) {
2157 ret = ehca_cq_unassign_qp(my_qp->send_cq, qp_num);
2158 if (ret) {
2159 ehca_err(dev, "Couldn't unassign qp from "
2160 "send_cq ret=%i qp_num=%x cq_num=%x", ret,
2161 qp_num, my_qp->send_cq->cq_number);
2162 return ret;
2163 }
2164 }
2165
2166 write_lock_irqsave(&ehca_qp_idr_lock, flags);
2167 idr_remove(&ehca_qp_idr, my_qp->token);
2168 write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
2169
2170 /*
2171 * SRQs will never get into an error list and do not have a recv_cq,
2172 * so we need to skip them here.
2173 */
2174 if (HAS_RQ(my_qp) && !IS_SRQ(my_qp) && !is_user)
2175 del_from_err_list(my_qp->recv_cq, &my_qp->rq_err_node);
2176
2177 if (HAS_SQ(my_qp) && !is_user)
2178 del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);
2179
2180 /* now wait until all pending events have completed */
2181 wait_event(my_qp->wait_completion, !atomic_read(&my_qp->nr_events));
2182
2183 h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
2184 if (h_ret != H_SUCCESS) {
2185 ehca_err(dev, "hipz_h_destroy_qp() failed h_ret=%lli "
2186 "ehca_qp=%p qp_num=%x", h_ret, my_qp, qp_num);
2187 return ehca2ib_return_code(h_ret);
2188 }
2189
2190 port_num = my_qp->init_attr.port_num;
2191 qp_type = my_qp->init_attr.qp_type;
2192
2193 if (qp_type == IB_QPT_SMI || qp_type == IB_QPT_GSI) {
2194 spin_lock_irqsave(&sport->mod_sqp_lock, flags);
2195 kfree(my_qp->mod_qp_parm);
2196 my_qp->mod_qp_parm = NULL;
2197 shca->sport[port_num - 1].ibqp_sqp[qp_type] = NULL;
2198 spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
2199 }
2200
2201 /* no support for IB_QPT_SMI yet */
2202 if (qp_type == IB_QPT_GSI) {
2203 struct ib_event event;
2204 ehca_info(dev, "device %s: port %x is inactive.",
2205 shca->ib_device.name, port_num);
2206 event.device = &shca->ib_device;
2207 event.event = IB_EVENT_PORT_ERR;
2208 event.element.port_num = port_num;
2209 shca->sport[port_num - 1].port_state = IB_PORT_DOWN;
2210 ib_dispatch_event(&event);
2211 }
2212
2213 if (HAS_RQ(my_qp)) {
2214 ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
2215 if (!is_user)
2216 vfree(my_qp->rq_map.map);
2217 }
2218 if (HAS_SQ(my_qp)) {
2219 ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);
2220 if (!is_user)
2221 vfree(my_qp->sq_map.map);
2222 }
2223 kmem_cache_free(qp_cache, my_qp);
2224 atomic_dec(&shca->num_qps);
2225 return 0;
2226}
2227
2228int ehca_destroy_qp(struct ib_qp *qp)
2229{
2230 return internal_destroy_qp(qp->device,
2231 container_of(qp, struct ehca_qp, ib_qp),
2232 qp->uobject);
2233}
2234
2235int ehca_destroy_srq(struct ib_srq *srq)
2236{
2237 return internal_destroy_qp(srq->device,
2238 container_of(srq, struct ehca_qp, ib_srq),
2239 srq->uobject);
2240}
2241
2242int ehca_init_qp_cache(void)
2243{
2244 qp_cache = kmem_cache_create("ehca_cache_qp",
2245 sizeof(struct ehca_qp), 0,
2246 SLAB_HWCACHE_ALIGN,
2247 NULL);
2248 if (!qp_cache)
2249 return -ENOMEM;
2250 return 0;
2251}
2252
2253void ehca_cleanup_qp_cache(void)
2254{
2255 kmem_cache_destroy(qp_cache);
2256}
diff --git a/drivers/staging/rdma/ehca/ehca_reqs.c b/drivers/staging/rdma/ehca/ehca_reqs.c
deleted file mode 100644
index 11813b880e16..000000000000
--- a/drivers/staging/rdma/ehca/ehca_reqs.c
+++ /dev/null
@@ -1,953 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * post_send/recv, poll_cq, req_notify
5 *
6 * Authors: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
7 * Waleri Fomin <fomin@de.ibm.com>
8 * Joachim Fenkes <fenkes@de.ibm.com>
9 * Reinhard Ernst <rernst@de.ibm.com>
10 *
11 * Copyright (c) 2005 IBM Corporation
12 *
13 * All rights reserved.
14 *
15 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
16 * BSD.
17 *
18 * OpenIB BSD License
19 *
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions are met:
22 *
23 * Redistributions of source code must retain the above copyright notice, this
24 * list of conditions and the following disclaimer.
25 *
26 * Redistributions in binary form must reproduce the above copyright notice,
27 * this list of conditions and the following disclaimer in the documentation
28 * and/or other materials
29 * provided with the distribution.
30 *
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
32 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
33 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
34 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
35 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
36 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
37 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
38 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
39 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
40 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGE.
42 */
43
44
45#include "ehca_classes.h"
46#include "ehca_tools.h"
47#include "ehca_qes.h"
48#include "ehca_iverbs.h"
49#include "hcp_if.h"
50#include "hipz_fns.h"
51
52/* in RC traffic, insert an empty RDMA READ every this many packets */
53#define ACK_CIRC_THRESHOLD 2000000
54
55static u64 replace_wr_id(u64 wr_id, u16 idx)
56{
57 u64 ret;
58
59 ret = wr_id & ~QMAP_IDX_MASK;
60 ret |= idx & QMAP_IDX_MASK;
61
62 return ret;
63}
64
65static u16 get_app_wr_id(u64 wr_id)
66{
67 return wr_id & QMAP_IDX_MASK;
68}
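/*
 * Illustration (assuming QMAP_IDX_MASK covers the low 16 bits, matching the
 * u16 index type): replace_wr_id(0xCAFE0000ULL, 0x0007) yields 0xCAFE0007,
 * i.e. the driver stores its queue-map index in the low bits of the caller's
 * wr_id, while get_app_wr_id() extracts those low bits of the original wr_id
 * so they can be saved in the qmap entry and restored at poll time.
 */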
69
70static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue,
71 struct ehca_wqe *wqe_p,
72 struct ib_recv_wr *recv_wr,
73 u32 rq_map_idx)
74{
75 u8 cnt_ds;
76 if (unlikely((recv_wr->num_sge < 0) ||
77 (recv_wr->num_sge > ipz_rqueue->act_nr_of_sg))) {
78 ehca_gen_err("Invalid number of WQE SGE. "
79 "num_sqe=%x max_nr_of_sg=%x",
80 recv_wr->num_sge, ipz_rqueue->act_nr_of_sg);
81 return -EINVAL; /* invalid SG list length */
82 }
83
84 /* clear wqe header until sglist */
85 memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list));
86
87 wqe_p->work_request_id = replace_wr_id(recv_wr->wr_id, rq_map_idx);
88 wqe_p->nr_of_data_seg = recv_wr->num_sge;
89
90 for (cnt_ds = 0; cnt_ds < recv_wr->num_sge; cnt_ds++) {
91 wqe_p->u.all_rcv.sg_list[cnt_ds].vaddr =
92 recv_wr->sg_list[cnt_ds].addr;
93 wqe_p->u.all_rcv.sg_list[cnt_ds].lkey =
94 recv_wr->sg_list[cnt_ds].lkey;
95 wqe_p->u.all_rcv.sg_list[cnt_ds].length =
96 recv_wr->sg_list[cnt_ds].length;
97 }
98
99 if (ehca_debug_level >= 3) {
100 ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p",
101 ipz_rqueue);
102 ehca_dmp(wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe");
103 }
104
105 return 0;
106}
107
108#if defined(DEBUG_GSI_SEND_WR)
109
110/* need ib_mad struct */
111#include <rdma/ib_mad.h>
112
113static void trace_ud_wr(const struct ib_ud_wr *ud_wr)
114{
115 int idx;
116 int j;
117 while (ud_wr) {
118 struct ib_mad_hdr *mad_hdr = ud_wr->mad_hdr;
119 struct ib_sge *sge = ud_wr->wr.sg_list;
120 ehca_gen_dbg("ud_wr#%x wr_id=%lx num_sge=%x "
121 "send_flags=%x opcode=%x", idx, ud_wr->wr.wr_id,
122 ud_wr->wr.num_sge, ud_wr->wr.send_flags,
123 ud_wr->wr.opcode);
124 if (mad_hdr) {
125 ehca_gen_dbg("ud_wr#%x mad_hdr base_version=%x "
126 "mgmt_class=%x class_version=%x method=%x "
127 "status=%x class_specific=%x tid=%lx "
128 "attr_id=%x resv=%x attr_mod=%x",
129 idx, mad_hdr->base_version,
130 mad_hdr->mgmt_class,
131 mad_hdr->class_version, mad_hdr->method,
132 mad_hdr->status, mad_hdr->class_specific,
133 mad_hdr->tid, mad_hdr->attr_id,
134 mad_hdr->resv,
135 mad_hdr->attr_mod);
136 }
137 for (j = 0; j < ud_wr->wr.num_sge; j++) {
138 u8 *data = __va(sge->addr);
139 ehca_gen_dbg("ud_wr#%x sge#%x addr=%p length=%x "
140 "lkey=%x",
141 idx, j, data, sge->length, sge->lkey);
142 /* assume length is n*16 */
143 ehca_dmp(data, sge->length, "ud_wr#%x sge#%x",
144 idx, j);
145 sge++;
146 } /* eof for j */
147 idx++;
148 ud_wr = ud_wr(ud_wr->wr.next);
149 } /* eof while ud_wr */
150}
151
152#endif /* DEBUG_GSI_SEND_WR */
153
154static inline int ehca_write_swqe(struct ehca_qp *qp,
155 struct ehca_wqe *wqe_p,
156 struct ib_send_wr *send_wr,
157 u32 sq_map_idx,
158 int hidden)
159{
160 u32 idx;
161 u64 dma_length;
162 struct ehca_av *my_av;
163 u32 remote_qkey;
164 struct ehca_qmap_entry *qmap_entry = &qp->sq_map.map[sq_map_idx];
165
166 if (unlikely((send_wr->num_sge < 0) ||
167 (send_wr->num_sge > qp->ipz_squeue.act_nr_of_sg))) {
168 ehca_gen_err("Invalid number of WQE SGE. "
169 "num_sqe=%x max_nr_of_sg=%x",
170 send_wr->num_sge, qp->ipz_squeue.act_nr_of_sg);
171 return -EINVAL; /* invalid SG list length */
172 }
173
174 /* clear wqe header until sglist */
175 memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list));
176
177 wqe_p->work_request_id = replace_wr_id(send_wr->wr_id, sq_map_idx);
178
179 qmap_entry->app_wr_id = get_app_wr_id(send_wr->wr_id);
180 qmap_entry->reported = 0;
181 qmap_entry->cqe_req = 0;
182
183 switch (send_wr->opcode) {
184 case IB_WR_SEND:
185 case IB_WR_SEND_WITH_IMM:
186 wqe_p->optype = WQE_OPTYPE_SEND;
187 break;
188 case IB_WR_RDMA_WRITE:
189 case IB_WR_RDMA_WRITE_WITH_IMM:
190 wqe_p->optype = WQE_OPTYPE_RDMAWRITE;
191 break;
192 case IB_WR_RDMA_READ:
193 wqe_p->optype = WQE_OPTYPE_RDMAREAD;
194 break;
195 default:
196 ehca_gen_err("Invalid opcode=%x", send_wr->opcode);
197 return -EINVAL; /* invalid opcode */
198 }
199
200 wqe_p->wqef = (send_wr->opcode) & WQEF_HIGH_NIBBLE;
201
202 wqe_p->wr_flag = 0;
203
204 if ((send_wr->send_flags & IB_SEND_SIGNALED ||
205 qp->init_attr.sq_sig_type == IB_SIGNAL_ALL_WR)
206 && !hidden) {
207 wqe_p->wr_flag |= WQE_WRFLAG_REQ_SIGNAL_COM;
208 qmap_entry->cqe_req = 1;
209 }
210
211 if (send_wr->opcode == IB_WR_SEND_WITH_IMM ||
212 send_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
213 /* this might not work as long as HW does not support it */
214 wqe_p->immediate_data = be32_to_cpu(send_wr->ex.imm_data);
215 wqe_p->wr_flag |= WQE_WRFLAG_IMM_DATA_PRESENT;
216 }
217
218 wqe_p->nr_of_data_seg = send_wr->num_sge;
219
220 switch (qp->qp_type) {
221 case IB_QPT_SMI:
222 case IB_QPT_GSI:
223 /* no break is intentional here */
224 case IB_QPT_UD:
225 /* IB 1.2 spec C10-15 compliance */
226 remote_qkey = ud_wr(send_wr)->remote_qkey;
227 if (remote_qkey & 0x80000000)
228 remote_qkey = qp->qkey;
229
230 wqe_p->destination_qp_number = ud_wr(send_wr)->remote_qpn << 8;
231 wqe_p->local_ee_context_qkey = remote_qkey;
232 if (unlikely(!ud_wr(send_wr)->ah)) {
233 ehca_gen_err("ud_wr(send_wr) is NULL. qp=%p", qp);
234 return -EINVAL;
235 }
236 if (unlikely(ud_wr(send_wr)->remote_qpn == 0)) {
237 ehca_gen_err("dest QP# is 0. qp=%x", qp->real_qp_num);
238 return -EINVAL;
239 }
240 my_av = container_of(ud_wr(send_wr)->ah, struct ehca_av, ib_ah);
241 wqe_p->u.ud_av.ud_av = my_av->av;
242
243 /*
244 * omitted check of IB_SEND_INLINE
245 * since HW does not support it
246 */
247 for (idx = 0; idx < send_wr->num_sge; idx++) {
248 wqe_p->u.ud_av.sg_list[idx].vaddr =
249 send_wr->sg_list[idx].addr;
250 wqe_p->u.ud_av.sg_list[idx].lkey =
251 send_wr->sg_list[idx].lkey;
252 wqe_p->u.ud_av.sg_list[idx].length =
253 send_wr->sg_list[idx].length;
254 } /* eof for idx */
255 if (qp->qp_type == IB_QPT_SMI ||
256 qp->qp_type == IB_QPT_GSI)
257 wqe_p->u.ud_av.ud_av.pmtu = 1;
258 if (qp->qp_type == IB_QPT_GSI) {
259 wqe_p->pkeyi = ud_wr(send_wr)->pkey_index;
260#ifdef DEBUG_GSI_SEND_WR
261 trace_ud_wr(ud_wr(send_wr));
262#endif /* DEBUG_GSI_SEND_WR */
263 }
264 break;
265
266 case IB_QPT_UC:
267 if (send_wr->send_flags & IB_SEND_FENCE)
268 wqe_p->wr_flag |= WQE_WRFLAG_FENCE;
269 /* no break is intentional here */
270 case IB_QPT_RC:
271 /* TODO: atomic not implemented */
272 wqe_p->u.nud.remote_virtual_address =
273 rdma_wr(send_wr)->remote_addr;
274 wqe_p->u.nud.rkey = rdma_wr(send_wr)->rkey;
275
276 /*
277 * omitted checking of IB_SEND_INLINE
278 * since HW does not support it
279 */
280 dma_length = 0;
281 for (idx = 0; idx < send_wr->num_sge; idx++) {
282 wqe_p->u.nud.sg_list[idx].vaddr =
283 send_wr->sg_list[idx].addr;
284 wqe_p->u.nud.sg_list[idx].lkey =
285 send_wr->sg_list[idx].lkey;
286 wqe_p->u.nud.sg_list[idx].length =
287 send_wr->sg_list[idx].length;
288 dma_length += send_wr->sg_list[idx].length;
289 } /* eof idx */
290 wqe_p->u.nud.atomic_1st_op_dma_len = dma_length;
291
292 /* unsolicited ack circumvention */
293 if (send_wr->opcode == IB_WR_RDMA_READ) {
294 /* on RDMA read, switch on and reset counters */
295 qp->message_count = qp->packet_count = 0;
296 qp->unsol_ack_circ = 1;
297 } else
298 /* else estimate #packets */
299 qp->packet_count += (dma_length >> qp->mtu_shift) + 1;
300
301 break;
302
303 default:
304 ehca_gen_err("Invalid qptype=%x", qp->qp_type);
305 return -EINVAL;
306 }
307
308 if (ehca_debug_level >= 3) {
309 ehca_gen_dbg("SEND WQE written into queue qp=%p ", qp);
310 ehca_dmp( wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "send wqe");
311 }
312 return 0;
313}
314
315/* map_ib_wc_status converts raw cqe_status to ib_wc_status */
316static inline void map_ib_wc_status(u32 cqe_status,
317 enum ib_wc_status *wc_status)
318{
319 if (unlikely(cqe_status & WC_STATUS_ERROR_BIT)) {
320 switch (cqe_status & 0x3F) {
321 case 0x01:
322 case 0x21:
323 *wc_status = IB_WC_LOC_LEN_ERR;
324 break;
325 case 0x02:
326 case 0x22:
327 *wc_status = IB_WC_LOC_QP_OP_ERR;
328 break;
329 case 0x03:
330 case 0x23:
331 *wc_status = IB_WC_LOC_EEC_OP_ERR;
332 break;
333 case 0x04:
334 case 0x24:
335 *wc_status = IB_WC_LOC_PROT_ERR;
336 break;
337 case 0x05:
338 case 0x25:
339 *wc_status = IB_WC_WR_FLUSH_ERR;
340 break;
341 case 0x06:
342 *wc_status = IB_WC_MW_BIND_ERR;
343 break;
344 case 0x07: /* remote error - look into bits 20:24 */
345 switch ((cqe_status
346 & WC_STATUS_REMOTE_ERROR_FLAGS) >> 11) {
347 case 0x0:
348 /*
349 * PSN Sequence Error!
350 * couldn't find a matching status!
351 */
352 *wc_status = IB_WC_GENERAL_ERR;
353 break;
354 case 0x1:
355 *wc_status = IB_WC_REM_INV_REQ_ERR;
356 break;
357 case 0x2:
358 *wc_status = IB_WC_REM_ACCESS_ERR;
359 break;
360 case 0x3:
361 *wc_status = IB_WC_REM_OP_ERR;
362 break;
363 case 0x4:
364 *wc_status = IB_WC_REM_INV_RD_REQ_ERR;
365 break;
366 }
367 break;
368 case 0x08:
369 *wc_status = IB_WC_RETRY_EXC_ERR;
370 break;
371 case 0x09:
372 *wc_status = IB_WC_RNR_RETRY_EXC_ERR;
373 break;
374 case 0x0A:
375 case 0x2D:
376 *wc_status = IB_WC_REM_ABORT_ERR;
377 break;
378 case 0x0B:
379 case 0x2E:
380 *wc_status = IB_WC_INV_EECN_ERR;
381 break;
382 case 0x0C:
383 case 0x2F:
384 *wc_status = IB_WC_INV_EEC_STATE_ERR;
385 break;
386 case 0x0D:
387 *wc_status = IB_WC_BAD_RESP_ERR;
388 break;
389 case 0x10:
390 /* WQE purged */
391 *wc_status = IB_WC_WR_FLUSH_ERR;
392 break;
393 default:
394 *wc_status = IB_WC_FATAL_ERR;
395
396 }
397 } else
398 *wc_status = IB_WC_SUCCESS;
399}
400
401static inline int post_one_send(struct ehca_qp *my_qp,
402 struct ib_send_wr *cur_send_wr,
403 int hidden)
404{
405 struct ehca_wqe *wqe_p;
406 int ret;
407 u32 sq_map_idx;
408 u64 start_offset = my_qp->ipz_squeue.current_q_offset;
409
410 /* get pointer next to free WQE */
411 wqe_p = ipz_qeit_get_inc(&my_qp->ipz_squeue);
412 if (unlikely(!wqe_p)) {
413 /* too many posted work requests: queue overflow */
414 ehca_err(my_qp->ib_qp.device, "Too many posted WQEs "
415 "qp_num=%x", my_qp->ib_qp.qp_num);
416 return -ENOMEM;
417 }
418
419 /*
420 * Get the index of the WQE in the send queue. The same index is used
421 * for writing into the sq_map.
422 */
423 sq_map_idx = start_offset / my_qp->ipz_squeue.qe_size;
424
425 /* write a SEND WQE into the QUEUE */
426 ret = ehca_write_swqe(my_qp, wqe_p, cur_send_wr, sq_map_idx, hidden);
427 /*
428 * if something failed,
429 * reset the free entry pointer to the start value
430 */
431 if (unlikely(ret)) {
432 my_qp->ipz_squeue.current_q_offset = start_offset;
433 ehca_err(my_qp->ib_qp.device, "Could not write WQE "
434 "qp_num=%x", my_qp->ib_qp.qp_num);
435 return -EINVAL;
436 }
437
438 return 0;
439}
440
441int ehca_post_send(struct ib_qp *qp,
442 struct ib_send_wr *send_wr,
443 struct ib_send_wr **bad_send_wr)
444{
445 struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
446 int wqe_cnt = 0;
447 int ret = 0;
448 unsigned long flags;
449
450 /* Reject WR if QP is in RESET, INIT or RTR state */
451 if (unlikely(my_qp->state < IB_QPS_RTS)) {
452 ehca_err(qp->device, "Invalid QP state qp_state=%d qpn=%x",
453 my_qp->state, qp->qp_num);
454 ret = -EINVAL;
455 goto out;
456 }
457
458 /* LOCK the QUEUE */
459 spin_lock_irqsave(&my_qp->spinlock_s, flags);
460
461 /* Send an empty extra RDMA read if:
462 * 1) there has been an RDMA read on this connection before
463 * 2) no RDMA read occurred for ACK_CIRC_THRESHOLD link packets
464 * 3) we can be sure that any previous extra RDMA read has been
465 * processed so we don't overflow the SQ
466 */
467 if (unlikely(my_qp->unsol_ack_circ &&
468 my_qp->packet_count > ACK_CIRC_THRESHOLD &&
469 my_qp->message_count > my_qp->init_attr.cap.max_send_wr)) {
470 /* insert an empty RDMA READ to fix up the remote QP state */
471 struct ib_send_wr circ_wr;
472 memset(&circ_wr, 0, sizeof(circ_wr));
473 circ_wr.opcode = IB_WR_RDMA_READ;
474 post_one_send(my_qp, &circ_wr, 1); /* ignore retcode */
475 wqe_cnt++;
476 ehca_dbg(qp->device, "posted circ wr qp_num=%x", qp->qp_num);
477 my_qp->message_count = my_qp->packet_count = 0;
478 }
479
480 /* loop processes list of send reqs */
481 while (send_wr) {
482 ret = post_one_send(my_qp, send_wr, 0);
483 if (unlikely(ret)) {
484 goto post_send_exit0;
485 }
486 wqe_cnt++;
487 send_wr = send_wr->next;
488 }
489
490post_send_exit0:
491 iosync(); /* serialize GAL register access */
492 hipz_update_sqa(my_qp, wqe_cnt);
493 if (unlikely(ret || ehca_debug_level >= 2))
494 ehca_dbg(qp->device, "ehca_qp=%p qp_num=%x wqe_cnt=%d ret=%i",
495 my_qp, qp->qp_num, wqe_cnt, ret);
496 my_qp->message_count += wqe_cnt;
497 spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
498
499out:
500 if (ret)
501 *bad_send_wr = send_wr;
502 return ret;
503}
504
505static int internal_post_recv(struct ehca_qp *my_qp,
506 struct ib_device *dev,
507 struct ib_recv_wr *recv_wr,
508 struct ib_recv_wr **bad_recv_wr)
509{
510 struct ehca_wqe *wqe_p;
511 int wqe_cnt = 0;
512 int ret = 0;
513 u32 rq_map_idx;
514 unsigned long flags;
515 struct ehca_qmap_entry *qmap_entry;
516
517 if (unlikely(!HAS_RQ(my_qp))) {
518 ehca_err(dev, "QP has no RQ ehca_qp=%p qp_num=%x ext_type=%d",
519 my_qp, my_qp->real_qp_num, my_qp->ext_type);
520 ret = -ENODEV;
521 goto out;
522 }
523
524 /* LOCK the QUEUE */
525 spin_lock_irqsave(&my_qp->spinlock_r, flags);
526
527 /* loop processes list of recv reqs */
528 while (recv_wr) {
529 u64 start_offset = my_qp->ipz_rqueue.current_q_offset;
530 /* get pointer next to free WQE */
531 wqe_p = ipz_qeit_get_inc(&my_qp->ipz_rqueue);
532 if (unlikely(!wqe_p)) {
533 /* too many posted work requests: queue overflow */
534 ret = -ENOMEM;
535 ehca_err(dev, "Too many posted WQEs "
536 "qp_num=%x", my_qp->real_qp_num);
537 goto post_recv_exit0;
538 }
539 /*
540 * Get the index of the WQE in the recv queue. The same index
541 * is used for writing into the rq_map.
542 */
543 rq_map_idx = start_offset / my_qp->ipz_rqueue.qe_size;
544
545 /* write a RECV WQE into the QUEUE */
546 ret = ehca_write_rwqe(&my_qp->ipz_rqueue, wqe_p, recv_wr,
547 rq_map_idx);
548 /*
549 * if something failed,
550 * reset the free entry pointer to the start value
551 */
552 if (unlikely(ret)) {
553 my_qp->ipz_rqueue.current_q_offset = start_offset;
554 ret = -EINVAL;
555 ehca_err(dev, "Could not write WQE "
556 "qp_num=%x", my_qp->real_qp_num);
557 goto post_recv_exit0;
558 }
559
560 qmap_entry = &my_qp->rq_map.map[rq_map_idx];
561 qmap_entry->app_wr_id = get_app_wr_id(recv_wr->wr_id);
562 qmap_entry->reported = 0;
563 qmap_entry->cqe_req = 1;
564
565 wqe_cnt++;
566 recv_wr = recv_wr->next;
567 } /* eof for recv_wr */
568
569post_recv_exit0:
570 iosync(); /* serialize GAL register access */
571 hipz_update_rqa(my_qp, wqe_cnt);
572 if (unlikely(ret || ehca_debug_level >= 2))
573 ehca_dbg(dev, "ehca_qp=%p qp_num=%x wqe_cnt=%d ret=%i",
574 my_qp, my_qp->real_qp_num, wqe_cnt, ret);
575 spin_unlock_irqrestore(&my_qp->spinlock_r, flags);
576
577out:
578 if (ret)
579 *bad_recv_wr = recv_wr;
580
581 return ret;
582}
583
584int ehca_post_recv(struct ib_qp *qp,
585 struct ib_recv_wr *recv_wr,
586 struct ib_recv_wr **bad_recv_wr)
587{
588 struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
589
590 /* Reject WR if QP is in RESET state */
591 if (unlikely(my_qp->state == IB_QPS_RESET)) {
592 ehca_err(qp->device, "Invalid QP state qp_state=%d qpn=%x",
593 my_qp->state, qp->qp_num);
594 *bad_recv_wr = recv_wr;
595 return -EINVAL;
596 }
597
598 return internal_post_recv(my_qp, qp->device, recv_wr, bad_recv_wr);
599}
600
601int ehca_post_srq_recv(struct ib_srq *srq,
602 struct ib_recv_wr *recv_wr,
603 struct ib_recv_wr **bad_recv_wr)
604{
605 return internal_post_recv(container_of(srq, struct ehca_qp, ib_srq),
606 srq->device, recv_wr, bad_recv_wr);
607}
608
609/*
610 * ib_wc_opcode table converts an ehca wc opcode to the ib equivalent.
611 * Since zero marks an invalid opcode, each entry stores the ib opcode plus
612 * one, so a value read from the table must be decremented.
613 */
614static const u8 ib_wc_opcode[255] = {
615 [0x01] = IB_WC_RECV+1,
616 [0x02] = IB_WC_RECV_RDMA_WITH_IMM+1,
617 [0x08] = IB_WC_FETCH_ADD+1,
618 [0x10] = IB_WC_COMP_SWAP+1,
619 [0x20] = IB_WC_RDMA_WRITE+1,
620 [0x40] = IB_WC_RDMA_READ+1,
621 [0x80] = IB_WC_SEND+1
622};
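A quick standalone sketch of the +1 offset this table relies on (illustration only, not driver code; demo_opcode, WC_RECV and WC_SEND are invented stand-ins for the real identifiers): a zero entry means the hardware opcode has no IB equivalent, and a valid entry is stored incremented so it must be decremented after lookup, exactly as ehca_poll_cq_one does below.

#include <stdio.h>

enum { WC_RECV = 0, WC_SEND = 4 };              /* stand-ins for ib_wc_opcode values */

static const unsigned char demo_opcode[256] = {
        [0x01] = WC_RECV + 1,
        [0x80] = WC_SEND + 1,
};

int main(void)
{
        unsigned char raw = 0x80;               /* pretend this came from a CQE */
        int op = demo_opcode[raw] - 1;          /* -1 restores the real opcode */

        if (op == -1)
                printf("invalid opcode %#x\n", raw);
        else
                printf("opcode %#x maps to %d\n", raw, op);
        return 0;
}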
623
624/* internal function to poll one entry of cq */
625static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc)
626{
627 int ret = 0, qmap_tail_idx;
628 struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
629 struct ehca_cqe *cqe;
630 struct ehca_qp *my_qp;
631 struct ehca_qmap_entry *qmap_entry;
632 struct ehca_queue_map *qmap;
633 int cqe_count = 0, is_error;
634
635repoll:
636 cqe = (struct ehca_cqe *)
637 ipz_qeit_get_inc_valid(&my_cq->ipz_queue);
638 if (!cqe) {
639 ret = -EAGAIN;
640 if (ehca_debug_level >= 3)
641 ehca_dbg(cq->device, "Completion queue is empty "
642 "my_cq=%p cq_num=%x", my_cq, my_cq->cq_number);
643 goto poll_cq_one_exit0;
644 }
645
646 /* prevents loads being reordered across this point */
647 rmb();
648
649 cqe_count++;
650 if (unlikely(cqe->status & WC_STATUS_PURGE_BIT)) {
651 struct ehca_qp *qp;
652 int purgeflag;
653 unsigned long flags;
654
655 qp = ehca_cq_get_qp(my_cq, cqe->local_qp_number);
656 if (!qp) {
657 ehca_err(cq->device, "cq_num=%x qp_num=%x "
658 "could not find qp -> ignore cqe",
659 my_cq->cq_number, cqe->local_qp_number);
660 ehca_dmp(cqe, 64, "cq_num=%x qp_num=%x",
661 my_cq->cq_number, cqe->local_qp_number);
662 /* ignore this purged cqe */
663 goto repoll;
664 }
665 spin_lock_irqsave(&qp->spinlock_s, flags);
666 purgeflag = qp->sqerr_purgeflag;
667 spin_unlock_irqrestore(&qp->spinlock_s, flags);
668
669 if (purgeflag) {
670 ehca_dbg(cq->device,
671 "Got CQE with purged bit qp_num=%x src_qp=%x",
672 cqe->local_qp_number, cqe->remote_qp_number);
673 if (ehca_debug_level >= 2)
674 ehca_dmp(cqe, 64, "qp_num=%x src_qp=%x",
675 cqe->local_qp_number,
676 cqe->remote_qp_number);
677 /*
678  * ignore this CQE to avoid duplicate CQEs for the bad WQE
679  * that caused the send queue error, and turn off the purge flag
680 */
681 qp->sqerr_purgeflag = 0;
682 goto repoll;
683 }
684 }
685
686 is_error = cqe->status & WC_STATUS_ERROR_BIT;
687
688 /* trace error CQEs if debug_level >= 1, trace all CQEs if >= 3 */
689 if (unlikely(ehca_debug_level >= 3 || (ehca_debug_level && is_error))) {
690 ehca_dbg(cq->device,
691 "Received %sCOMPLETION ehca_cq=%p cq_num=%x -----",
692 is_error ? "ERROR " : "", my_cq, my_cq->cq_number);
693 ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
694 my_cq, my_cq->cq_number);
695 ehca_dbg(cq->device,
696 "ehca_cq=%p cq_num=%x -------------------------",
697 my_cq, my_cq->cq_number);
698 }
699
700 read_lock(&ehca_qp_idr_lock);
701 my_qp = idr_find(&ehca_qp_idr, cqe->qp_token);
702 read_unlock(&ehca_qp_idr_lock);
703 if (!my_qp)
704 goto repoll;
705 wc->qp = &my_qp->ib_qp;
706
707 qmap_tail_idx = get_app_wr_id(cqe->work_request_id);
708 if (!(cqe->w_completion_flags & WC_SEND_RECEIVE_BIT))
709 /* We got a send completion. */
710 qmap = &my_qp->sq_map;
711 else
712 /* We got a receive completion. */
713 qmap = &my_qp->rq_map;
714
715 /* advance the tail pointer */
716 qmap->tail = qmap_tail_idx;
717
718 if (is_error) {
719 /*
720 * set left_to_poll to 0 because in error state, we will not
721 * get any additional CQEs
722 */
723 my_qp->sq_map.next_wqe_idx = next_index(my_qp->sq_map.tail,
724 my_qp->sq_map.entries);
725 my_qp->sq_map.left_to_poll = 0;
726 ehca_add_to_err_list(my_qp, 1);
727
728 my_qp->rq_map.next_wqe_idx = next_index(my_qp->rq_map.tail,
729 my_qp->rq_map.entries);
730 my_qp->rq_map.left_to_poll = 0;
731 if (HAS_RQ(my_qp))
732 ehca_add_to_err_list(my_qp, 0);
733 }
734
735 qmap_entry = &qmap->map[qmap_tail_idx];
736 if (qmap_entry->reported) {
737 ehca_warn(cq->device, "Double cqe on qp_num=%#x",
738 my_qp->real_qp_num);
739 /* found a double cqe, discard it and read next one */
740 goto repoll;
741 }
742
743 wc->wr_id = replace_wr_id(cqe->work_request_id, qmap_entry->app_wr_id);
744 qmap_entry->reported = 1;
745
746 /* if left_to_poll is decremented to 0, add the QP to the error list */
747 if (qmap->left_to_poll > 0) {
748 qmap->left_to_poll--;
749 if ((my_qp->sq_map.left_to_poll == 0) &&
750 (my_qp->rq_map.left_to_poll == 0)) {
751 ehca_add_to_err_list(my_qp, 1);
752 if (HAS_RQ(my_qp))
753 ehca_add_to_err_list(my_qp, 0);
754 }
755 }
756
757 /* eval ib_wc_opcode */
758 wc->opcode = ib_wc_opcode[cqe->optype]-1;
759 if (unlikely(wc->opcode == -1)) {
760 ehca_err(cq->device, "Invalid cqe->OPType=%x cqe->status=%x "
761 "ehca_cq=%p cq_num=%x",
762 cqe->optype, cqe->status, my_cq, my_cq->cq_number);
763 /* dump cqe for other infos */
764 ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
765 my_cq, my_cq->cq_number);
766  /* also update the queue adder to throw away this entry */
767 goto repoll;
768 }
769
770 /* eval ib_wc_status */
771 if (unlikely(is_error)) {
772 /* complete with errors */
773 map_ib_wc_status(cqe->status, &wc->status);
774 wc->vendor_err = wc->status;
775 } else
776 wc->status = IB_WC_SUCCESS;
777
778 wc->byte_len = cqe->nr_bytes_transferred;
779 wc->pkey_index = cqe->pkey_index;
780 wc->slid = cqe->rlid;
781 wc->dlid_path_bits = cqe->dlid;
782 wc->src_qp = cqe->remote_qp_number;
783 /*
784 * HW has "Immed data present" and "GRH present" in bits 6 and 5.
785 * SW defines those in bits 1 and 0, so we can just shift and mask.
786 */
787 wc->wc_flags = (cqe->w_completion_flags >> 5) & 3;
788 wc->ex.imm_data = cpu_to_be32(cqe->immediate_data);
789 wc->sl = cqe->service_level;
790
791poll_cq_one_exit0:
792 if (cqe_count > 0)
793 hipz_update_feca(my_cq, cqe_count);
794
795 return ret;
796}
797
798static int generate_flush_cqes(struct ehca_qp *my_qp, struct ib_cq *cq,
799 struct ib_wc *wc, int num_entries,
800 struct ipz_queue *ipz_queue, int on_sq)
801{
802 int nr = 0;
803 struct ehca_wqe *wqe;
804 u64 offset;
805 struct ehca_queue_map *qmap;
806 struct ehca_qmap_entry *qmap_entry;
807
808 if (on_sq)
809 qmap = &my_qp->sq_map;
810 else
811 qmap = &my_qp->rq_map;
812
813 qmap_entry = &qmap->map[qmap->next_wqe_idx];
814
815 while ((nr < num_entries) && (qmap_entry->reported == 0)) {
816 /* generate flush CQE */
817
818 memset(wc, 0, sizeof(*wc));
819
820 offset = qmap->next_wqe_idx * ipz_queue->qe_size;
821 wqe = (struct ehca_wqe *)ipz_qeit_calc(ipz_queue, offset);
822 if (!wqe) {
823 ehca_err(cq->device, "Invalid wqe offset=%#llx on "
824 "qp_num=%#x", offset, my_qp->real_qp_num);
825 return nr;
826 }
827
828 wc->wr_id = replace_wr_id(wqe->work_request_id,
829 qmap_entry->app_wr_id);
830
831 if (on_sq) {
832 switch (wqe->optype) {
833 case WQE_OPTYPE_SEND:
834 wc->opcode = IB_WC_SEND;
835 break;
836 case WQE_OPTYPE_RDMAWRITE:
837 wc->opcode = IB_WC_RDMA_WRITE;
838 break;
839 case WQE_OPTYPE_RDMAREAD:
840 wc->opcode = IB_WC_RDMA_READ;
841 break;
842 default:
843 ehca_err(cq->device, "Invalid optype=%x",
844 wqe->optype);
845 return nr;
846 }
847 } else
848 wc->opcode = IB_WC_RECV;
849
850 if (wqe->wr_flag & WQE_WRFLAG_IMM_DATA_PRESENT) {
851 wc->ex.imm_data = wqe->immediate_data;
852 wc->wc_flags |= IB_WC_WITH_IMM;
853 }
854
855 wc->status = IB_WC_WR_FLUSH_ERR;
856
857 wc->qp = &my_qp->ib_qp;
858
859 /* mark as reported and advance next_wqe pointer */
860 qmap_entry->reported = 1;
861 qmap->next_wqe_idx = next_index(qmap->next_wqe_idx,
862 qmap->entries);
863 qmap_entry = &qmap->map[qmap->next_wqe_idx];
864
865 wc++; nr++;
866 }
867
868 return nr;
869
870}
871
872int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
873{
874 struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
875 int nr;
876 struct ehca_qp *err_qp;
877 struct ib_wc *current_wc = wc;
878 int ret = 0;
879 unsigned long flags;
880 int entries_left = num_entries;
881
882 if (num_entries < 1) {
883 ehca_err(cq->device, "Invalid num_entries=%d ehca_cq=%p "
884 "cq_num=%x", num_entries, my_cq, my_cq->cq_number);
885 ret = -EINVAL;
886 goto poll_cq_exit0;
887 }
888
889 spin_lock_irqsave(&my_cq->spinlock, flags);
890
891 /* generate flush cqes for send queues */
892 list_for_each_entry(err_qp, &my_cq->sqp_err_list, sq_err_node) {
893 nr = generate_flush_cqes(err_qp, cq, current_wc, entries_left,
894 &err_qp->ipz_squeue, 1);
895 entries_left -= nr;
896 current_wc += nr;
897
898 if (entries_left == 0)
899 break;
900 }
901
902 /* generate flush cqes for receive queues */
903 list_for_each_entry(err_qp, &my_cq->rqp_err_list, rq_err_node) {
904 nr = generate_flush_cqes(err_qp, cq, current_wc, entries_left,
905 &err_qp->ipz_rqueue, 0);
906 entries_left -= nr;
907 current_wc += nr;
908
909 if (entries_left == 0)
910 break;
911 }
912
913 for (nr = 0; nr < entries_left; nr++) {
914 ret = ehca_poll_cq_one(cq, current_wc);
915 if (ret)
916 break;
917 current_wc++;
918 } /* eof for nr */
919 entries_left -= nr;
920
921 spin_unlock_irqrestore(&my_cq->spinlock, flags);
922 if (ret == -EAGAIN || !ret)
923 ret = num_entries - entries_left;
924
925poll_cq_exit0:
926 return ret;
927}
928
929int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags notify_flags)
930{
931 struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
932 int ret = 0;
933
934 switch (notify_flags & IB_CQ_SOLICITED_MASK) {
935 case IB_CQ_SOLICITED:
936 hipz_set_cqx_n0(my_cq, 1);
937 break;
938 case IB_CQ_NEXT_COMP:
939 hipz_set_cqx_n1(my_cq, 1);
940 break;
941 default:
942 return -EINVAL;
943 }
944
945 if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
946 unsigned long spl_flags;
947 spin_lock_irqsave(&my_cq->spinlock, spl_flags);
948 ret = ipz_qeit_is_valid(&my_cq->ipz_queue);
949 spin_unlock_irqrestore(&my_cq->spinlock, spl_flags);
950 }
951
952 return ret;
953}
diff --git a/drivers/staging/rdma/ehca/ehca_sqp.c b/drivers/staging/rdma/ehca/ehca_sqp.c
deleted file mode 100644
index 376b031c2c7f..000000000000
--- a/drivers/staging/rdma/ehca/ehca_sqp.c
+++ /dev/null
@@ -1,245 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * SQP functions
5 *
6 * Authors: Khadija Souissi <souissi@de.ibm.com>
7 * Heiko J Schick <schickhj@de.ibm.com>
8 *
9 * Copyright (c) 2005 IBM Corporation
10 *
11 * All rights reserved.
12 *
13 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
14 * BSD.
15 *
16 * OpenIB BSD License
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are met:
20 *
21 * Redistributions of source code must retain the above copyright notice, this
22 * list of conditions and the following disclaimer.
23 *
24 * Redistributions in binary form must reproduce the above copyright notice,
25 * this list of conditions and the following disclaimer in the documentation
26 * and/or other materials
27 * provided with the distribution.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
30 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
33 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
34 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
35 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
36 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
37 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
38 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
39 * POSSIBILITY OF SUCH DAMAGE.
40 */
41
42#include <rdma/ib_mad.h>
43
44#include "ehca_classes.h"
45#include "ehca_tools.h"
46#include "ehca_iverbs.h"
47#include "hcp_if.h"
48
49#define IB_MAD_STATUS_REDIRECT cpu_to_be16(0x0002)
50#define IB_MAD_STATUS_UNSUP_VERSION cpu_to_be16(0x0004)
51#define IB_MAD_STATUS_UNSUP_METHOD cpu_to_be16(0x0008)
52
53#define IB_PMA_CLASS_PORT_INFO cpu_to_be16(0x0001)
54
55/**
56 * ehca_define_sqp - Defines special queue pair 1 (GSI QP). When the special
57 * queue pair is created successfully, the corresponding port becomes active.
58 *
59 * Defining special queue pair 0 (SMI QP) is not yet supported.
60 *
61 * @qp_init_attr: Queue pair init attributes with port and queue pair type
62 */
63
64u64 ehca_define_sqp(struct ehca_shca *shca,
65 struct ehca_qp *ehca_qp,
66 struct ib_qp_init_attr *qp_init_attr)
67{
68 u32 pma_qp_nr, bma_qp_nr;
69 u64 ret;
70 u8 port = qp_init_attr->port_num;
71 int counter;
72
73 shca->sport[port - 1].port_state = IB_PORT_DOWN;
74
75 switch (qp_init_attr->qp_type) {
76 case IB_QPT_SMI:
77 /* function not supported yet */
78 break;
79 case IB_QPT_GSI:
80 ret = hipz_h_define_aqp1(shca->ipz_hca_handle,
81 ehca_qp->ipz_qp_handle,
82 ehca_qp->galpas.kernel,
83 (u32) qp_init_attr->port_num,
84 &pma_qp_nr, &bma_qp_nr);
85
86 if (ret != H_SUCCESS) {
87 ehca_err(&shca->ib_device,
88 "Can't define AQP1 for port %x. h_ret=%lli",
89 port, ret);
90 return ret;
91 }
92 shca->sport[port - 1].pma_qp_nr = pma_qp_nr;
93 ehca_dbg(&shca->ib_device, "port=%x pma_qp_nr=%x",
94 port, pma_qp_nr);
95 break;
96 default:
97 ehca_err(&shca->ib_device, "invalid qp_type=%x",
98 qp_init_attr->qp_type);
99 return H_PARAMETER;
100 }
101
102 if (ehca_nr_ports < 0) /* autodetect mode */
103 return H_SUCCESS;
104
105 for (counter = 0;
106 shca->sport[port - 1].port_state != IB_PORT_ACTIVE &&
107 counter < ehca_port_act_time;
108 counter++) {
109 ehca_dbg(&shca->ib_device, "... wait until port %x is active",
110 port);
111 msleep_interruptible(1000);
112 }
113
114 if (counter == ehca_port_act_time) {
115 ehca_err(&shca->ib_device, "Port %x is not active.", port);
116 return H_HARDWARE;
117 }
118
119 return H_SUCCESS;
120}
121
122struct ib_perf {
123 struct ib_mad_hdr mad_hdr;
124 u8 reserved[40];
125 u8 data[192];
126} __attribute__ ((packed));
127
128/* TC/SL/FL packed into 32 bits, as in ClassPortInfo */
129struct tcslfl {
130 u32 tc:8;
131 u32 sl:4;
132 u32 fl:20;
133} __attribute__ ((packed));
134
135/* IP Version/TC/FL packed into 32 bits, as in GRH */
136struct vertcfl {
137 u32 ver:4;
138 u32 tc:8;
139 u32 fl:20;
140} __attribute__ ((packed));
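Both structures depend on the compiler allocating bitfields from the most significant bit downward, which is the case for this big-endian POWER-only driver. As an explicit equivalent (a sketch under that assumption; extract32 and the sample value are invented for illustration), the GRH version_tclass_flow word splits into IP version (bits 31..28), traffic class (27..20) and flow label (19..0):

#include <stdint.h>
#include <stdio.h>

/* extract `width` bits of `word` starting at bit `shift` */
static inline uint32_t extract32(uint32_t word, int shift, int width)
{
        return (word >> shift) & ((1u << width) - 1);
}

int main(void)
{
        uint32_t vtf = 0x60a12345;      /* made-up version_tclass_flow value */

        printf("ver=%u tc=%u fl=%#x\n",
               extract32(vtf, 28, 4),   /* 6 */
               extract32(vtf, 20, 8),   /* 10 */
               extract32(vtf, 0, 20));  /* 0x12345 */
        return 0;
}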
141
142static int ehca_process_perf(struct ib_device *ibdev, u8 port_num,
143 const struct ib_wc *in_wc, const struct ib_grh *in_grh,
144 const struct ib_mad *in_mad, struct ib_mad *out_mad)
145{
146 const struct ib_perf *in_perf = (const struct ib_perf *)in_mad;
147 struct ib_perf *out_perf = (struct ib_perf *)out_mad;
148 struct ib_class_port_info *poi =
149 (struct ib_class_port_info *)out_perf->data;
150 struct tcslfl *tcslfl =
151 (struct tcslfl *)&poi->redirect_tcslfl;
152 struct ehca_shca *shca =
153 container_of(ibdev, struct ehca_shca, ib_device);
154 struct ehca_sport *sport = &shca->sport[port_num - 1];
155
156 ehca_dbg(ibdev, "method=%x", in_perf->mad_hdr.method);
157
158 *out_mad = *in_mad;
159
160 if (in_perf->mad_hdr.class_version != 1) {
161 ehca_warn(ibdev, "Unsupported class_version=%x",
162 in_perf->mad_hdr.class_version);
163 out_perf->mad_hdr.status = IB_MAD_STATUS_UNSUP_VERSION;
164 goto perf_reply;
165 }
166
167 switch (in_perf->mad_hdr.method) {
168 case IB_MGMT_METHOD_GET:
169 case IB_MGMT_METHOD_SET:
170 /* set class port info for redirection */
171 out_perf->mad_hdr.attr_id = IB_PMA_CLASS_PORT_INFO;
172 out_perf->mad_hdr.status = IB_MAD_STATUS_REDIRECT;
173 memset(poi, 0, sizeof(*poi));
174 poi->base_version = 1;
175 poi->class_version = 1;
176 poi->resp_time_value = 18;
177
178 /* copy local routing information from WC where applicable */
179 tcslfl->sl = in_wc->sl;
180 poi->redirect_lid =
181 sport->saved_attr.lid | in_wc->dlid_path_bits;
182 poi->redirect_qp = sport->pma_qp_nr;
183 poi->redirect_qkey = IB_QP1_QKEY;
184
185 ehca_query_pkey(ibdev, port_num, in_wc->pkey_index,
186 &poi->redirect_pkey);
187
188 /* if request was globally routed, copy route info */
189 if (in_grh) {
190 const struct vertcfl *vertcfl =
191 (const struct vertcfl *)&in_grh->version_tclass_flow;
192 memcpy(poi->redirect_gid, in_grh->dgid.raw,
193 sizeof(poi->redirect_gid));
194 tcslfl->tc = vertcfl->tc;
195 tcslfl->fl = vertcfl->fl;
196 } else
197 /* else only fill in default GID */
198 ehca_query_gid(ibdev, port_num, 0,
199 (union ib_gid *)&poi->redirect_gid);
200
201 ehca_dbg(ibdev, "ehca_pma_lid=%x ehca_pma_qp=%x",
202 sport->saved_attr.lid, sport->pma_qp_nr);
203 break;
204
205 case IB_MGMT_METHOD_GET_RESP:
206 return IB_MAD_RESULT_FAILURE;
207
208 default:
209 out_perf->mad_hdr.status = IB_MAD_STATUS_UNSUP_METHOD;
210 break;
211 }
212
213perf_reply:
214 out_perf->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
215
216 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
217}
218
219int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
220 const struct ib_wc *in_wc, const struct ib_grh *in_grh,
221 const struct ib_mad_hdr *in, size_t in_mad_size,
222 struct ib_mad_hdr *out, size_t *out_mad_size,
223 u16 *out_mad_pkey_index)
224{
225 int ret;
226 const struct ib_mad *in_mad = (const struct ib_mad *)in;
227 struct ib_mad *out_mad = (struct ib_mad *)out;
228
229 if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
230 *out_mad_size != sizeof(*out_mad)))
231 return IB_MAD_RESULT_FAILURE;
232
233 if (!port_num || port_num > ibdev->phys_port_cnt || !in_wc)
234 return IB_MAD_RESULT_FAILURE;
235
236 /* accept only pma request */
237 if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
238 return IB_MAD_RESULT_SUCCESS;
239
240 ehca_dbg(ibdev, "port_num=%x src_qp=%x", port_num, in_wc->src_qp);
241 ret = ehca_process_perf(ibdev, port_num, in_wc, in_grh,
242 in_mad, out_mad);
243
244 return ret;
245}
diff --git a/drivers/staging/rdma/ehca/ehca_tools.h b/drivers/staging/rdma/ehca/ehca_tools.h
deleted file mode 100644
index d280b12aae64..000000000000
--- a/drivers/staging/rdma/ehca/ehca_tools.h
+++ /dev/null
@@ -1,155 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * auxiliary functions
5 *
6 * Authors: Christoph Raisch <raisch@de.ibm.com>
7 * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
8 * Khadija Souissi <souissik@de.ibm.com>
9 * Waleri Fomin <fomin@de.ibm.com>
10 * Heiko J Schick <schickhj@de.ibm.com>
11 *
12 * Copyright (c) 2005 IBM Corporation
13 *
14 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
15 * BSD.
16 *
17 * OpenIB BSD License
18 *
19 * Redistribution and use in source and binary forms, with or without
20 * modification, are permitted provided that the following conditions are met:
21 *
22 * Redistributions of source code must retain the above copyright notice, this
23 * list of conditions and the following disclaimer.
24 *
25 * Redistributions in binary form must reproduce the above copyright notice,
26 * this list of conditions and the following disclaimer in the documentation
27 * and/or other materials
28 * provided with the distribution.
29 *
30 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
31 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
32 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
33 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
34 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
35 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
36 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
37 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
38 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
39 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
40 * POSSIBILITY OF SUCH DAMAGE.
41 */
42
43
44#ifndef EHCA_TOOLS_H
45#define EHCA_TOOLS_H
46
47#include <linux/kernel.h>
48#include <linux/spinlock.h>
49#include <linux/delay.h>
50#include <linux/idr.h>
51#include <linux/kthread.h>
52#include <linux/mm.h>
53#include <linux/mman.h>
54#include <linux/module.h>
55#include <linux/moduleparam.h>
56#include <linux/vmalloc.h>
57#include <linux/notifier.h>
58#include <linux/cpu.h>
59#include <linux/device.h>
60
61#include <linux/atomic.h>
62#include <asm/ibmebus.h>
63#include <asm/io.h>
64#include <asm/pgtable.h>
65#include <asm/hvcall.h>
66
67extern int ehca_debug_level;
68
69#define ehca_dbg(ib_dev, format, arg...) \
70 do { \
71 if (unlikely(ehca_debug_level)) \
72 dev_printk(KERN_DEBUG, (ib_dev)->dma_device, \
73 "PU%04x EHCA_DBG:%s " format "\n", \
74 raw_smp_processor_id(), __func__, \
75 ## arg); \
76 } while (0)
77
78#define ehca_info(ib_dev, format, arg...) \
79 dev_info((ib_dev)->dma_device, "PU%04x EHCA_INFO:%s " format "\n", \
80 raw_smp_processor_id(), __func__, ## arg)
81
82#define ehca_warn(ib_dev, format, arg...) \
83 dev_warn((ib_dev)->dma_device, "PU%04x EHCA_WARN:%s " format "\n", \
84 raw_smp_processor_id(), __func__, ## arg)
85
86#define ehca_err(ib_dev, format, arg...) \
87 dev_err((ib_dev)->dma_device, "PU%04x EHCA_ERR:%s " format "\n", \
88 raw_smp_processor_id(), __func__, ## arg)
89
90/* use this one only if no ib_dev available */
91#define ehca_gen_dbg(format, arg...) \
92 do { \
93 if (unlikely(ehca_debug_level)) \
94 printk(KERN_DEBUG "PU%04x EHCA_DBG:%s " format "\n", \
95 raw_smp_processor_id(), __func__, ## arg); \
96 } while (0)
97
98#define ehca_gen_warn(format, arg...) \
99 printk(KERN_INFO "PU%04x EHCA_WARN:%s " format "\n", \
100 raw_smp_processor_id(), __func__, ## arg)
101
102#define ehca_gen_err(format, arg...) \
103 printk(KERN_ERR "PU%04x EHCA_ERR:%s " format "\n", \
104 raw_smp_processor_id(), __func__, ## arg)
105
106/**
107 * ehca_dmp - printk a memory block, whose length is n*8 bytes.
108 * Each line has the following layout:
109 * <format string> adr=X ofs=Y <8 bytes hex> <8 bytes hex>
110 */
111#define ehca_dmp(adr, len, format, args...) \
112 do { \
113 unsigned int x; \
114 unsigned int l = (unsigned int)(len); \
115 unsigned char *deb = (unsigned char *)(adr); \
116 for (x = 0; x < l; x += 16) { \
117 printk(KERN_INFO "EHCA_DMP:%s " format \
118 " adr=%p ofs=%04x %016llx %016llx\n", \
119 __func__, ##args, deb, x, \
120 *((u64 *)&deb[0]), *((u64 *)&deb[8])); \
121 deb += 16; \
122 } \
123 } while (0)
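For reference, the CQ polling path removed earlier in this patch invokes the macro as, for example:

        ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x", my_cq, my_cq->cq_number);

which dumps the 64-byte CQE as four lines of two 8-byte hex words each, every line prefixed with the formatted string plus the current address and offset.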
124
125/* define a bitmask, little endian version */
126#define EHCA_BMASK(pos, length) (((pos) << 16) + (length))
127
128/* define a bitmask, the ibm way... */
129#define EHCA_BMASK_IBM(from, to) (((63 - to) << 16) + ((to) - (from) + 1))
130
131/* internal function, don't use */
132#define EHCA_BMASK_SHIFTPOS(mask) (((mask) >> 16) & 0xffff)
133
134/* internal function, don't use */
135#define EHCA_BMASK_MASK(mask) (~0ULL >> ((64 - (mask)) & 0xffff))
136
137/**
138 * EHCA_BMASK_SET - return value shifted and masked by mask
139 * variable|=EHCA_BMASK_SET(MY_MASK,0x4711) ORs the bits in variable
140 * variable&=~EHCA_BMASK_SET(MY_MASK,-1) clears the bits from the mask
141 * in variable
142 */
143#define EHCA_BMASK_SET(mask, value) \
144 ((EHCA_BMASK_MASK(mask) & ((u64)(value))) << EHCA_BMASK_SHIFTPOS(mask))
145
146/**
147 * EHCA_BMASK_GET - extract a parameter from value by mask
148 */
149#define EHCA_BMASK_GET(mask, value) \
150 (EHCA_BMASK_MASK(mask) & (((u64)(value)) >> EHCA_BMASK_SHIFTPOS(mask)))
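To make the bit arithmetic above concrete, here is a standalone round-trip check (an illustrative userspace sketch; the macro bodies are copied verbatim from this header and H_ALL_RES_QP_SERVICE_TYPE from hcp_if.c). EHCA_BMASK_IBM(13, 15) describes a 3-bit field whose low-order bit is IBM bit 15, i.e. the field sits 63 - 15 = 48 bits up in the 64-bit register:

#include <assert.h>
#include <stdint.h>

typedef uint64_t u64;

#define EHCA_BMASK_IBM(from, to) (((63 - to) << 16) + ((to) - (from) + 1))
#define EHCA_BMASK_SHIFTPOS(mask) (((mask) >> 16) & 0xffff)
#define EHCA_BMASK_MASK(mask) (~0ULL >> ((64 - (mask)) & 0xffff))
#define EHCA_BMASK_SET(mask, value) \
        ((EHCA_BMASK_MASK(mask) & ((u64)(value))) << EHCA_BMASK_SHIFTPOS(mask))
#define EHCA_BMASK_GET(mask, value) \
        (EHCA_BMASK_MASK(mask) & (((u64)(value)) >> EHCA_BMASK_SHIFTPOS(mask)))

#define H_ALL_RES_QP_SERVICE_TYPE EHCA_BMASK_IBM(13, 15)

int main(void)
{
        u64 reg = EHCA_BMASK_SET(H_ALL_RES_QP_SERVICE_TYPE, 5);

        assert(reg == (5ULL << 48));    /* 3-bit field shifted to bit 48 */
        assert(EHCA_BMASK_GET(H_ALL_RES_QP_SERVICE_TYPE, reg) == 5);
        return 0;
}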
151
152/* Converts ehca to ib return code */
153int ehca2ib_return_code(u64 ehca_rc);
154
155#endif /* EHCA_TOOLS_H */
diff --git a/drivers/staging/rdma/ehca/ehca_uverbs.c b/drivers/staging/rdma/ehca/ehca_uverbs.c
deleted file mode 100644
index 1a1d5d99fcf9..000000000000
--- a/drivers/staging/rdma/ehca/ehca_uverbs.c
+++ /dev/null
@@ -1,309 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * userspace support verbs
5 *
6 * Authors: Christoph Raisch <raisch@de.ibm.com>
7 * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
8 * Heiko J Schick <schickhj@de.ibm.com>
9 *
10 * Copyright (c) 2005 IBM Corporation
11 *
12 * All rights reserved.
13 *
14 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
15 * BSD.
16 *
17 * OpenIB BSD License
18 *
19 * Redistribution and use in source and binary forms, with or without
20 * modification, are permitted provided that the following conditions are met:
21 *
22 * Redistributions of source code must retain the above copyright notice, this
23 * list of conditions and the following disclaimer.
24 *
25 * Redistributions in binary form must reproduce the above copyright notice,
26 * this list of conditions and the following disclaimer in the documentation
27 * and/or other materials
28 * provided with the distribution.
29 *
30 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
31 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
32 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
33 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
34 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
35 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
36 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
37 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
38 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
39 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
40 * POSSIBILITY OF SUCH DAMAGE.
41 */
42
43#include <linux/slab.h>
44
45#include "ehca_classes.h"
46#include "ehca_iverbs.h"
47#include "ehca_mrmw.h"
48#include "ehca_tools.h"
49#include "hcp_if.h"
50
51struct ib_ucontext *ehca_alloc_ucontext(struct ib_device *device,
52 struct ib_udata *udata)
53{
54 struct ehca_ucontext *my_context;
55
56 my_context = kzalloc(sizeof *my_context, GFP_KERNEL);
57 if (!my_context) {
58 ehca_err(device, "Out of memory device=%p", device);
59 return ERR_PTR(-ENOMEM);
60 }
61
62 return &my_context->ib_ucontext;
63}
64
65int ehca_dealloc_ucontext(struct ib_ucontext *context)
66{
67 kfree(container_of(context, struct ehca_ucontext, ib_ucontext));
68 return 0;
69}
70
71static void ehca_mm_open(struct vm_area_struct *vma)
72{
73 u32 *count = (u32 *)vma->vm_private_data;
74 if (!count) {
75 ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx",
76 vma->vm_start, vma->vm_end);
77 return;
78 }
79 (*count)++;
80 if (!(*count))
81 ehca_gen_err("Use count overflow vm_start=%lx vm_end=%lx",
82 vma->vm_start, vma->vm_end);
83 ehca_gen_dbg("vm_start=%lx vm_end=%lx count=%x",
84 vma->vm_start, vma->vm_end, *count);
85}
86
87static void ehca_mm_close(struct vm_area_struct *vma)
88{
89 u32 *count = (u32 *)vma->vm_private_data;
90 if (!count) {
91 ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx",
92 vma->vm_start, vma->vm_end);
93 return;
94 }
95 (*count)--;
96 ehca_gen_dbg("vm_start=%lx vm_end=%lx count=%x",
97 vma->vm_start, vma->vm_end, *count);
98}
99
100static const struct vm_operations_struct vm_ops = {
101 .open = ehca_mm_open,
102 .close = ehca_mm_close,
103};
104
105static int ehca_mmap_fw(struct vm_area_struct *vma, struct h_galpas *galpas,
106 u32 *mm_count)
107{
108 int ret;
109 u64 vsize, physical;
110
111 vsize = vma->vm_end - vma->vm_start;
112 if (vsize < EHCA_PAGESIZE) {
113 ehca_gen_err("invalid vsize=%lx", vma->vm_end - vma->vm_start);
114 return -EINVAL;
115 }
116
117 physical = galpas->user.fw_handle;
118 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
119 ehca_gen_dbg("vsize=%llx physical=%llx", vsize, physical);
120 /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
121 ret = remap_4k_pfn(vma, vma->vm_start, physical >> EHCA_PAGESHIFT,
122 vma->vm_page_prot);
123 if (unlikely(ret)) {
124 ehca_gen_err("remap_pfn_range() failed ret=%i", ret);
125 return -ENOMEM;
126 }
127
128 vma->vm_private_data = mm_count;
129 (*mm_count)++;
130 vma->vm_ops = &vm_ops;
131
132 return 0;
133}
134
135static int ehca_mmap_queue(struct vm_area_struct *vma, struct ipz_queue *queue,
136 u32 *mm_count)
137{
138 int ret;
139 u64 start, ofs;
140 struct page *page;
141
142 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
143 start = vma->vm_start;
144 for (ofs = 0; ofs < queue->queue_length; ofs += PAGE_SIZE) {
145 u64 virt_addr = (u64)ipz_qeit_calc(queue, ofs);
146 page = virt_to_page(virt_addr);
147 ret = vm_insert_page(vma, start, page);
148 if (unlikely(ret)) {
149 ehca_gen_err("vm_insert_page() failed rc=%i", ret);
150 return ret;
151 }
152 start += PAGE_SIZE;
153 }
154 vma->vm_private_data = mm_count;
155 (*mm_count)++;
156 vma->vm_ops = &vm_ops;
157
158 return 0;
159}
160
161static int ehca_mmap_cq(struct vm_area_struct *vma, struct ehca_cq *cq,
162 u32 rsrc_type)
163{
164 int ret;
165
166 switch (rsrc_type) {
167 case 0: /* galpa fw handle */
168 ehca_dbg(cq->ib_cq.device, "cq_num=%x fw", cq->cq_number);
169 ret = ehca_mmap_fw(vma, &cq->galpas, &cq->mm_count_galpa);
170 if (unlikely(ret)) {
171 ehca_err(cq->ib_cq.device,
172 "ehca_mmap_fw() failed rc=%i cq_num=%x",
173 ret, cq->cq_number);
174 return ret;
175 }
176 break;
177
178 case 1: /* cq queue_addr */
179 ehca_dbg(cq->ib_cq.device, "cq_num=%x queue", cq->cq_number);
180 ret = ehca_mmap_queue(vma, &cq->ipz_queue, &cq->mm_count_queue);
181 if (unlikely(ret)) {
182 ehca_err(cq->ib_cq.device,
183 "ehca_mmap_queue() failed rc=%i cq_num=%x",
184 ret, cq->cq_number);
185 return ret;
186 }
187 break;
188
189 default:
190 ehca_err(cq->ib_cq.device, "bad resource type=%x cq_num=%x",
191 rsrc_type, cq->cq_number);
192 return -EINVAL;
193 }
194
195 return 0;
196}
197
198static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp,
199 u32 rsrc_type)
200{
201 int ret;
202
203 switch (rsrc_type) {
204 case 0: /* galpa fw handle */
205 ehca_dbg(qp->ib_qp.device, "qp_num=%x fw", qp->ib_qp.qp_num);
206 ret = ehca_mmap_fw(vma, &qp->galpas, &qp->mm_count_galpa);
207 if (unlikely(ret)) {
208 ehca_err(qp->ib_qp.device,
209 "remap_pfn_range() failed ret=%i qp_num=%x",
210 ret, qp->ib_qp.qp_num);
211 return -ENOMEM;
212 }
213 break;
214
215 case 1: /* qp rqueue_addr */
216 ehca_dbg(qp->ib_qp.device, "qp_num=%x rq", qp->ib_qp.qp_num);
217 ret = ehca_mmap_queue(vma, &qp->ipz_rqueue,
218 &qp->mm_count_rqueue);
219 if (unlikely(ret)) {
220 ehca_err(qp->ib_qp.device,
221 "ehca_mmap_queue(rq) failed rc=%i qp_num=%x",
222 ret, qp->ib_qp.qp_num);
223 return ret;
224 }
225 break;
226
227 case 2: /* qp squeue_addr */
228 ehca_dbg(qp->ib_qp.device, "qp_num=%x sq", qp->ib_qp.qp_num);
229 ret = ehca_mmap_queue(vma, &qp->ipz_squeue,
230 &qp->mm_count_squeue);
231 if (unlikely(ret)) {
232 ehca_err(qp->ib_qp.device,
233 "ehca_mmap_queue(sq) failed rc=%i qp_num=%x",
234 ret, qp->ib_qp.qp_num);
235 return ret;
236 }
237 break;
238
239 default:
240  ehca_err(qp->ib_qp.device, "bad resource type=%x qp_num=%x",
241 rsrc_type, qp->ib_qp.qp_num);
242 return -EINVAL;
243 }
244
245 return 0;
246}
247
248int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
249{
250 u64 fileoffset = vma->vm_pgoff;
251 u32 idr_handle = fileoffset & 0x1FFFFFF;
252 u32 q_type = (fileoffset >> 27) & 0x1; /* CQ, QP,... */
253 u32 rsrc_type = (fileoffset >> 25) & 0x3; /* sq,rq,cmnd_window */
254 u32 ret;
255 struct ehca_cq *cq;
256 struct ehca_qp *qp;
257 struct ib_uobject *uobject;
258
259 switch (q_type) {
260 case 0: /* CQ */
261 read_lock(&ehca_cq_idr_lock);
262 cq = idr_find(&ehca_cq_idr, idr_handle);
263 read_unlock(&ehca_cq_idr_lock);
264
265 /* make sure this mmap really belongs to the authorized user */
266 if (!cq)
267 return -EINVAL;
268
269 if (!cq->ib_cq.uobject || cq->ib_cq.uobject->context != context)
270 return -EINVAL;
271
272 ret = ehca_mmap_cq(vma, cq, rsrc_type);
273 if (unlikely(ret)) {
274 ehca_err(cq->ib_cq.device,
275 "ehca_mmap_cq() failed rc=%i cq_num=%x",
276 ret, cq->cq_number);
277 return ret;
278 }
279 break;
280
281 case 1: /* QP */
282 read_lock(&ehca_qp_idr_lock);
283 qp = idr_find(&ehca_qp_idr, idr_handle);
284 read_unlock(&ehca_qp_idr_lock);
285
286 /* make sure this mmap really belongs to the authorized user */
287 if (!qp)
288 return -EINVAL;
289
290 uobject = IS_SRQ(qp) ? qp->ib_srq.uobject : qp->ib_qp.uobject;
291 if (!uobject || uobject->context != context)
292 return -EINVAL;
293
294 ret = ehca_mmap_qp(vma, qp, rsrc_type);
295 if (unlikely(ret)) {
296 ehca_err(qp->ib_qp.device,
297 "ehca_mmap_qp() failed rc=%i qp_num=%x",
298 ret, qp->ib_qp.qp_num);
299 return ret;
300 }
301 break;
302
303 default:
304 ehca_gen_err("bad queue type %x", q_type);
305 return -EINVAL;
306 }
307
308 return 0;
309}
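The decoding at the top of ehca_mmap implies the following layout for the mmap offset (vm_pgoff): idr handle in bits 0..24, resource type in bits 25..26 and queue type in bit 27. Below is a minimal encoder sketch derived only from that decoding (the helper name and the example handle are invented; userspace would pass this value multiplied by the page size as the mmap() offset):

#include <stdint.h>
#include <stdio.h>

static uint64_t ehca_demo_pgoff(uint32_t q_type, uint32_t rsrc_type,
                                uint32_t idr_handle)
{
        return ((uint64_t)(q_type & 0x1) << 27) |
               ((uint64_t)(rsrc_type & 0x3) << 25) |
               (idr_handle & 0x1FFFFFF);
}

int main(void)
{
        /* q_type 1 = QP, rsrc_type 2 = QP send queue, handle 0x42 (made up) */
        uint64_t pgoff = ehca_demo_pgoff(1, 2, 0x42);

        printf("pgoff=%#llx -> handle=%#llx rsrc=%llu q_type=%llu\n",
               (unsigned long long)pgoff,
               (unsigned long long)(pgoff & 0x1FFFFFF),
               (unsigned long long)((pgoff >> 25) & 0x3),
               (unsigned long long)((pgoff >> 27) & 0x1));
        return 0;
}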
diff --git a/drivers/staging/rdma/ehca/hcp_if.c b/drivers/staging/rdma/ehca/hcp_if.c
deleted file mode 100644
index 89517ffb4389..000000000000
--- a/drivers/staging/rdma/ehca/hcp_if.c
+++ /dev/null
@@ -1,949 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * Firmware Infiniband Interface code for POWER
5 *
6 * Authors: Christoph Raisch <raisch@de.ibm.com>
7 * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
8 * Joachim Fenkes <fenkes@de.ibm.com>
9 * Gerd Bayer <gerd.bayer@de.ibm.com>
10 * Waleri Fomin <fomin@de.ibm.com>
11 *
12 * Copyright (c) 2005 IBM Corporation
13 *
14 * All rights reserved.
15 *
16 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
17 * BSD.
18 *
19 * OpenIB BSD License
20 *
21 * Redistribution and use in source and binary forms, with or without
22 * modification, are permitted provided that the following conditions are met:
23 *
24 * Redistributions of source code must retain the above copyright notice, this
25 * list of conditions and the following disclaimer.
26 *
27 * Redistributions in binary form must reproduce the above copyright notice,
28 * this list of conditions and the following disclaimer in the documentation
29 * and/or other materials
30 * provided with the distribution.
31 *
32 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
33 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
34 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
35 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
36 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
37 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
38 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
39 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
40 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
41 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
42 * POSSIBILITY OF SUCH DAMAGE.
43 */
44
45#include <asm/hvcall.h>
46#include "ehca_tools.h"
47#include "hcp_if.h"
48#include "hcp_phyp.h"
49#include "hipz_fns.h"
50#include "ipz_pt_fn.h"
51
52#define H_ALL_RES_QP_ENHANCED_OPS EHCA_BMASK_IBM(9, 11)
53#define H_ALL_RES_QP_PTE_PIN EHCA_BMASK_IBM(12, 12)
54#define H_ALL_RES_QP_SERVICE_TYPE EHCA_BMASK_IBM(13, 15)
55#define H_ALL_RES_QP_STORAGE EHCA_BMASK_IBM(16, 17)
56#define H_ALL_RES_QP_LL_RQ_CQE_POSTING EHCA_BMASK_IBM(18, 18)
57#define H_ALL_RES_QP_LL_SQ_CQE_POSTING EHCA_BMASK_IBM(19, 21)
58#define H_ALL_RES_QP_SIGNALING_TYPE EHCA_BMASK_IBM(22, 23)
59#define H_ALL_RES_QP_UD_AV_LKEY_CTRL EHCA_BMASK_IBM(31, 31)
60#define H_ALL_RES_QP_SMALL_SQ_PAGE_SIZE EHCA_BMASK_IBM(32, 35)
61#define H_ALL_RES_QP_SMALL_RQ_PAGE_SIZE EHCA_BMASK_IBM(36, 39)
62#define H_ALL_RES_QP_RESOURCE_TYPE EHCA_BMASK_IBM(56, 63)
63
64#define H_ALL_RES_QP_MAX_OUTST_SEND_WR EHCA_BMASK_IBM(0, 15)
65#define H_ALL_RES_QP_MAX_OUTST_RECV_WR EHCA_BMASK_IBM(16, 31)
66#define H_ALL_RES_QP_MAX_SEND_SGE EHCA_BMASK_IBM(32, 39)
67#define H_ALL_RES_QP_MAX_RECV_SGE EHCA_BMASK_IBM(40, 47)
68
69#define H_ALL_RES_QP_UD_AV_LKEY EHCA_BMASK_IBM(32, 63)
70#define H_ALL_RES_QP_SRQ_QP_TOKEN EHCA_BMASK_IBM(0, 31)
71#define H_ALL_RES_QP_SRQ_QP_HANDLE EHCA_BMASK_IBM(0, 64)
72#define H_ALL_RES_QP_SRQ_LIMIT EHCA_BMASK_IBM(48, 63)
73#define H_ALL_RES_QP_SRQ_QPN EHCA_BMASK_IBM(40, 63)
74
75#define H_ALL_RES_QP_ACT_OUTST_SEND_WR EHCA_BMASK_IBM(16, 31)
76#define H_ALL_RES_QP_ACT_OUTST_RECV_WR EHCA_BMASK_IBM(48, 63)
77#define H_ALL_RES_QP_ACT_SEND_SGE EHCA_BMASK_IBM(8, 15)
78#define H_ALL_RES_QP_ACT_RECV_SGE EHCA_BMASK_IBM(24, 31)
79
80#define H_ALL_RES_QP_SQUEUE_SIZE_PAGES EHCA_BMASK_IBM(0, 31)
81#define H_ALL_RES_QP_RQUEUE_SIZE_PAGES EHCA_BMASK_IBM(32, 63)
82
83#define H_MP_INIT_TYPE EHCA_BMASK_IBM(44, 47)
84#define H_MP_SHUTDOWN EHCA_BMASK_IBM(48, 48)
85#define H_MP_RESET_QKEY_CTR EHCA_BMASK_IBM(49, 49)
86
87#define HCALL4_REGS_FORMAT "r4=%lx r5=%lx r6=%lx r7=%lx"
88#define HCALL7_REGS_FORMAT HCALL4_REGS_FORMAT " r8=%lx r9=%lx r10=%lx"
89#define HCALL9_REGS_FORMAT HCALL7_REGS_FORMAT " r11=%lx r12=%lx"
90
91static DEFINE_SPINLOCK(hcall_lock);
92
93static long ehca_plpar_hcall_norets(unsigned long opcode,
94 unsigned long arg1,
95 unsigned long arg2,
96 unsigned long arg3,
97 unsigned long arg4,
98 unsigned long arg5,
99 unsigned long arg6,
100 unsigned long arg7)
101{
102 long ret;
103 int i, sleep_msecs;
104 unsigned long flags = 0;
105
106 if (unlikely(ehca_debug_level >= 2))
107 ehca_gen_dbg("opcode=%lx " HCALL7_REGS_FORMAT,
108 opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
109
110 for (i = 0; i < 5; i++) {
111 /* serialize hCalls to work around firmware issue */
112 if (ehca_lock_hcalls)
113 spin_lock_irqsave(&hcall_lock, flags);
114
115 ret = plpar_hcall_norets(opcode, arg1, arg2, arg3, arg4,
116 arg5, arg6, arg7);
117
118 if (ehca_lock_hcalls)
119 spin_unlock_irqrestore(&hcall_lock, flags);
120
121 if (H_IS_LONG_BUSY(ret)) {
122 sleep_msecs = get_longbusy_msecs(ret);
123 msleep_interruptible(sleep_msecs);
124 continue;
125 }
126
127 if (ret < H_SUCCESS)
128 ehca_gen_err("opcode=%lx ret=%li " HCALL7_REGS_FORMAT,
129 opcode, ret, arg1, arg2, arg3,
130 arg4, arg5, arg6, arg7);
131 else
132 if (unlikely(ehca_debug_level >= 2))
133 ehca_gen_dbg("opcode=%lx ret=%li", opcode, ret);
134
135 return ret;
136 }
137
138 return H_BUSY;
139}
140
141static long ehca_plpar_hcall9(unsigned long opcode,
142 unsigned long *outs, /* array of 9 outputs */
143 unsigned long arg1,
144 unsigned long arg2,
145 unsigned long arg3,
146 unsigned long arg4,
147 unsigned long arg5,
148 unsigned long arg6,
149 unsigned long arg7,
150 unsigned long arg8,
151 unsigned long arg9)
152{
153 long ret;
154 int i, sleep_msecs;
155 unsigned long flags = 0;
156
157 if (unlikely(ehca_debug_level >= 2))
158 ehca_gen_dbg("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT, opcode,
159 arg1, arg2, arg3, arg4, arg5,
160 arg6, arg7, arg8, arg9);
161
162 for (i = 0; i < 5; i++) {
163 /* serialize hCalls to work around firmware issue */
164 if (ehca_lock_hcalls)
165 spin_lock_irqsave(&hcall_lock, flags);
166
167 ret = plpar_hcall9(opcode, outs,
168 arg1, arg2, arg3, arg4, arg5,
169 arg6, arg7, arg8, arg9);
170
171 if (ehca_lock_hcalls)
172 spin_unlock_irqrestore(&hcall_lock, flags);
173
174 if (H_IS_LONG_BUSY(ret)) {
175 sleep_msecs = get_longbusy_msecs(ret);
176 msleep_interruptible(sleep_msecs);
177 continue;
178 }
179
180 if (ret < H_SUCCESS) {
181 ehca_gen_err("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT,
182 opcode, arg1, arg2, arg3, arg4, arg5,
183 arg6, arg7, arg8, arg9);
184 ehca_gen_err("OUTPUT -- ret=%li " HCALL9_REGS_FORMAT,
185 ret, outs[0], outs[1], outs[2], outs[3],
186 outs[4], outs[5], outs[6], outs[7],
187 outs[8]);
188 } else if (unlikely(ehca_debug_level >= 2))
189 ehca_gen_dbg("OUTPUT -- ret=%li " HCALL9_REGS_FORMAT,
190 ret, outs[0], outs[1], outs[2], outs[3],
191 outs[4], outs[5], outs[6], outs[7],
192 outs[8]);
193 return ret;
194 }
195
196 return H_BUSY;
197}
198
199u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle,
200 struct ehca_pfeq *pfeq,
201 const u32 neq_control,
202 const u32 number_of_entries,
203 struct ipz_eq_handle *eq_handle,
204 u32 *act_nr_of_entries,
205 u32 *act_pages,
206 u32 *eq_ist)
207{
208 u64 ret;
209 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
210 u64 allocate_controls;
211
212 /* resource type */
213 allocate_controls = 3ULL;
214
215 /* ISN is associated */
216 if (neq_control != 1)
217 allocate_controls = (1ULL << (63 - 7)) | allocate_controls;
218 else /* notification event queue */
219 allocate_controls = (1ULL << 63) | allocate_controls;
220
221 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
222 adapter_handle.handle, /* r4 */
223 allocate_controls, /* r5 */
224 number_of_entries, /* r6 */
225 0, 0, 0, 0, 0, 0);
226 eq_handle->handle = outs[0];
227 *act_nr_of_entries = (u32)outs[3];
228 *act_pages = (u32)outs[4];
229 *eq_ist = (u32)outs[5];
230
231 if (ret == H_NOT_ENOUGH_RESOURCES)
232  ehca_gen_err("Not enough resources. ret=%lli", ret);
233
234 return ret;
235}
236
237u64 hipz_h_reset_event(const struct ipz_adapter_handle adapter_handle,
238 struct ipz_eq_handle eq_handle,
239 const u64 event_mask)
240{
241 return ehca_plpar_hcall_norets(H_RESET_EVENTS,
242 adapter_handle.handle, /* r4 */
243 eq_handle.handle, /* r5 */
244 event_mask, /* r6 */
245 0, 0, 0, 0);
246}
247
248u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
249 struct ehca_cq *cq,
250 struct ehca_alloc_cq_parms *param)
251{
252 int rc;
253 u64 ret;
254 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
255
256 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
257 adapter_handle.handle, /* r4 */
258 2, /* r5 */
259 param->eq_handle.handle, /* r6 */
260 cq->token, /* r7 */
261 param->nr_cqe, /* r8 */
262 0, 0, 0, 0);
263 cq->ipz_cq_handle.handle = outs[0];
264 param->act_nr_of_entries = (u32)outs[3];
265 param->act_pages = (u32)outs[4];
266
267 if (ret == H_SUCCESS) {
268 rc = hcp_galpas_ctor(&cq->galpas, 0, outs[5], outs[6]);
269 if (rc) {
270 ehca_gen_err("Could not establish HW access. rc=%d paddr=%#lx",
271 rc, outs[5]);
272
273 ehca_plpar_hcall_norets(H_FREE_RESOURCE,
274 adapter_handle.handle, /* r4 */
275 cq->ipz_cq_handle.handle, /* r5 */
276 0, 0, 0, 0, 0);
277 ret = H_NO_MEM;
278 }
279 }
280
281 if (ret == H_NOT_ENOUGH_RESOURCES)
282 ehca_gen_err("Not enough resources. ret=%lli", ret);
283
284 return ret;
285}
286
287u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
288 struct ehca_alloc_qp_parms *parms, int is_user)
289{
290 int rc;
291 u64 ret;
292 u64 allocate_controls, max_r10_reg, r11, r12;
293 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
294
295 allocate_controls =
296 EHCA_BMASK_SET(H_ALL_RES_QP_ENHANCED_OPS, parms->ext_type)
297 | EHCA_BMASK_SET(H_ALL_RES_QP_PTE_PIN, 0)
298 | EHCA_BMASK_SET(H_ALL_RES_QP_SERVICE_TYPE, parms->servicetype)
299 | EHCA_BMASK_SET(H_ALL_RES_QP_SIGNALING_TYPE, parms->sigtype)
300 | EHCA_BMASK_SET(H_ALL_RES_QP_STORAGE, parms->qp_storage)
301 | EHCA_BMASK_SET(H_ALL_RES_QP_SMALL_SQ_PAGE_SIZE,
302 parms->squeue.page_size)
303 | EHCA_BMASK_SET(H_ALL_RES_QP_SMALL_RQ_PAGE_SIZE,
304 parms->rqueue.page_size)
305 | EHCA_BMASK_SET(H_ALL_RES_QP_LL_RQ_CQE_POSTING,
306 !!(parms->ll_comp_flags & LLQP_RECV_COMP))
307 | EHCA_BMASK_SET(H_ALL_RES_QP_LL_SQ_CQE_POSTING,
308 !!(parms->ll_comp_flags & LLQP_SEND_COMP))
309 | EHCA_BMASK_SET(H_ALL_RES_QP_UD_AV_LKEY_CTRL,
310 parms->ud_av_l_key_ctl)
311 | EHCA_BMASK_SET(H_ALL_RES_QP_RESOURCE_TYPE, 1);
312
313 max_r10_reg =
314 EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_SEND_WR,
315 parms->squeue.max_wr + 1)
316 | EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_RECV_WR,
317 parms->rqueue.max_wr + 1)
318 | EHCA_BMASK_SET(H_ALL_RES_QP_MAX_SEND_SGE,
319 parms->squeue.max_sge)
320 | EHCA_BMASK_SET(H_ALL_RES_QP_MAX_RECV_SGE,
321 parms->rqueue.max_sge);
322
323 r11 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_QP_TOKEN, parms->srq_token);
324
325 if (parms->ext_type == EQPT_SRQ)
326 r12 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_LIMIT, parms->srq_limit);
327 else
328 r12 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_QPN, parms->srq_qpn);
329
330 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
331 adapter_handle.handle, /* r4 */
332 allocate_controls, /* r5 */
333 parms->send_cq_handle.handle,
334 parms->recv_cq_handle.handle,
335 parms->eq_handle.handle,
336 ((u64)parms->token << 32) | parms->pd.value,
337 max_r10_reg, r11, r12);
338
339 parms->qp_handle.handle = outs[0];
340 parms->real_qp_num = (u32)outs[1];
341 parms->squeue.act_nr_wqes =
342 (u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_SEND_WR, outs[2]);
343 parms->rqueue.act_nr_wqes =
344 (u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_RECV_WR, outs[2]);
345 parms->squeue.act_nr_sges =
346 (u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_SEND_SGE, outs[3]);
347 parms->rqueue.act_nr_sges =
348 (u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_RECV_SGE, outs[3]);
349 parms->squeue.queue_size =
350 (u32)EHCA_BMASK_GET(H_ALL_RES_QP_SQUEUE_SIZE_PAGES, outs[4]);
351 parms->rqueue.queue_size =
352 (u32)EHCA_BMASK_GET(H_ALL_RES_QP_RQUEUE_SIZE_PAGES, outs[4]);
353
354 if (ret == H_SUCCESS) {
355 rc = hcp_galpas_ctor(&parms->galpas, is_user, outs[6], outs[6]);
356 if (rc) {
357 ehca_gen_err("Could not establish HW access. rc=%d paddr=%#lx",
358 rc, outs[6]);
359
360 ehca_plpar_hcall_norets(H_FREE_RESOURCE,
361 adapter_handle.handle, /* r4 */
362 parms->qp_handle.handle, /* r5 */
363 0, 0, 0, 0, 0);
364 ret = H_NO_MEM;
365 }
366 }
367
368 if (ret == H_NOT_ENOUGH_RESOURCES)
369 ehca_gen_err("Not enough resources. ret=%lli", ret);
370
371 return ret;
372}
373
374u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
375 const u8 port_id,
376 struct hipz_query_port *query_port_response_block)
377{
378 u64 ret;
379 u64 r_cb = __pa(query_port_response_block);
380
381 if (r_cb & (EHCA_PAGESIZE-1)) {
382 ehca_gen_err("response block not page aligned");
383 return H_PARAMETER;
384 }
385
386 ret = ehca_plpar_hcall_norets(H_QUERY_PORT,
387 adapter_handle.handle, /* r4 */
388 port_id, /* r5 */
389 r_cb, /* r6 */
390 0, 0, 0, 0);
391
392 if (ehca_debug_level >= 2)
393 ehca_dmp(query_port_response_block, 64, "response_block");
394
395 return ret;
396}
397
398u64 hipz_h_modify_port(const struct ipz_adapter_handle adapter_handle,
399 const u8 port_id, const u32 port_cap,
400 const u8 init_type, const int modify_mask)
401{
402 u64 port_attributes = port_cap;
403
404 if (modify_mask & IB_PORT_SHUTDOWN)
405 port_attributes |= EHCA_BMASK_SET(H_MP_SHUTDOWN, 1);
406 if (modify_mask & IB_PORT_INIT_TYPE)
407 port_attributes |= EHCA_BMASK_SET(H_MP_INIT_TYPE, init_type);
408 if (modify_mask & IB_PORT_RESET_QKEY_CNTR)
409 port_attributes |= EHCA_BMASK_SET(H_MP_RESET_QKEY_CTR, 1);
410
411 return ehca_plpar_hcall_norets(H_MODIFY_PORT,
412 adapter_handle.handle, /* r4 */
413 port_id, /* r5 */
414 port_attributes, /* r6 */
415 0, 0, 0, 0);
416}
417
418u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle,
419 struct hipz_query_hca *query_hca_rblock)
420{
421 u64 r_cb = __pa(query_hca_rblock);
422
423 if (r_cb & (EHCA_PAGESIZE-1)) {
424 ehca_gen_err("response_block=%p not page aligned",
425 query_hca_rblock);
426 return H_PARAMETER;
427 }
428
429 return ehca_plpar_hcall_norets(H_QUERY_HCA,
430 adapter_handle.handle, /* r4 */
431 r_cb, /* r5 */
432 0, 0, 0, 0, 0);
433}
434
435u64 hipz_h_register_rpage(const struct ipz_adapter_handle adapter_handle,
436 const u8 pagesize,
437 const u8 queue_type,
438 const u64 resource_handle,
439 const u64 logical_address_of_page,
440 u64 count)
441{
442 return ehca_plpar_hcall_norets(H_REGISTER_RPAGES,
443 adapter_handle.handle, /* r4 */
444 (u64)queue_type | ((u64)pagesize) << 8,
445 /* r5 */
446 resource_handle, /* r6 */
447 logical_address_of_page, /* r7 */
448 count, /* r8 */
449 0, 0);
450}
451
452u64 hipz_h_register_rpage_eq(const struct ipz_adapter_handle adapter_handle,
453 const struct ipz_eq_handle eq_handle,
454 struct ehca_pfeq *pfeq,
455 const u8 pagesize,
456 const u8 queue_type,
457 const u64 logical_address_of_page,
458 const u64 count)
459{
460 if (count != 1) {
461  ehca_gen_err("Page counter=%llx", count);
462 return H_PARAMETER;
463 }
464 return hipz_h_register_rpage(adapter_handle,
465 pagesize,
466 queue_type,
467 eq_handle.handle,
468 logical_address_of_page, count);
469}
470
471u64 hipz_h_query_int_state(const struct ipz_adapter_handle adapter_handle,
472 u32 ist)
473{
474 u64 ret;
475 ret = ehca_plpar_hcall_norets(H_QUERY_INT_STATE,
476 adapter_handle.handle, /* r4 */
477 ist, /* r5 */
478 0, 0, 0, 0, 0);
479
480 if (ret != H_SUCCESS && ret != H_BUSY)
481 ehca_gen_err("Could not query interrupt state.");
482
483 return ret;
484}
485
486u64 hipz_h_register_rpage_cq(const struct ipz_adapter_handle adapter_handle,
487 const struct ipz_cq_handle cq_handle,
488 struct ehca_pfcq *pfcq,
489 const u8 pagesize,
490 const u8 queue_type,
491 const u64 logical_address_of_page,
492 const u64 count,
493 const struct h_galpa gal)
494{
495 if (count != 1) {
496 ehca_gen_err("Page counter=%llx", count);
497 return H_PARAMETER;
498 }
499
500 return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
501 cq_handle.handle, logical_address_of_page,
502 count);
503}
504
505u64 hipz_h_register_rpage_qp(const struct ipz_adapter_handle adapter_handle,
506 const struct ipz_qp_handle qp_handle,
507 struct ehca_pfqp *pfqp,
508 const u8 pagesize,
509 const u8 queue_type,
510 const u64 logical_address_of_page,
511 const u64 count,
512 const struct h_galpa galpa)
513{
514 if (count > 1) {
515 ehca_gen_err("Page counter=%llx", count);
516 return H_PARAMETER;
517 }
518
519 return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
520 qp_handle.handle, logical_address_of_page,
521 count);
522}
523
524u64 hipz_h_disable_and_get_wqe(const struct ipz_adapter_handle adapter_handle,
525 const struct ipz_qp_handle qp_handle,
526 struct ehca_pfqp *pfqp,
527 void **log_addr_next_sq_wqe2processed,
528 void **log_addr_next_rq_wqe2processed,
529 int dis_and_get_function_code)
530{
531 u64 ret;
532 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
533
534 ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
535 adapter_handle.handle, /* r4 */
536 dis_and_get_function_code, /* r5 */
537 qp_handle.handle, /* r6 */
538 0, 0, 0, 0, 0, 0);
539 if (log_addr_next_sq_wqe2processed)
540 *log_addr_next_sq_wqe2processed = (void *)outs[0];
541 if (log_addr_next_rq_wqe2processed)
542 *log_addr_next_rq_wqe2processed = (void *)outs[1];
543
544 return ret;
545}
546
547u64 hipz_h_modify_qp(const struct ipz_adapter_handle adapter_handle,
548 const struct ipz_qp_handle qp_handle,
549 struct ehca_pfqp *pfqp,
550 const u64 update_mask,
551 struct hcp_modify_qp_control_block *mqpcb,
552 struct h_galpa gal)
553{
554 u64 ret;
555 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
556 ret = ehca_plpar_hcall9(H_MODIFY_QP, outs,
557 adapter_handle.handle, /* r4 */
558 qp_handle.handle, /* r5 */
559 update_mask, /* r6 */
560 __pa(mqpcb), /* r7 */
561 0, 0, 0, 0, 0);
562
563 if (ret == H_NOT_ENOUGH_RESOURCES)
564 ehca_gen_err("Insufficient resources ret=%lli", ret);
565
566 return ret;
567}
568
569u64 hipz_h_query_qp(const struct ipz_adapter_handle adapter_handle,
570 const struct ipz_qp_handle qp_handle,
571 struct ehca_pfqp *pfqp,
572 struct hcp_modify_qp_control_block *qqpcb,
573 struct h_galpa gal)
574{
575 return ehca_plpar_hcall_norets(H_QUERY_QP,
576 adapter_handle.handle, /* r4 */
577 qp_handle.handle, /* r5 */
578 __pa(qqpcb), /* r6 */
579 0, 0, 0, 0);
580}
581
582u64 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle,
583 struct ehca_qp *qp)
584{
585 u64 ret;
586 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
587
588 ret = hcp_galpas_dtor(&qp->galpas);
589 if (ret) {
590 ehca_gen_err("Could not destruct qp->galpas");
591 return H_RESOURCE;
592 }
593 ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
594 adapter_handle.handle, /* r4 */
595 /* function code */
596 1, /* r5 */
597 qp->ipz_qp_handle.handle, /* r6 */
598 0, 0, 0, 0, 0, 0);
599 if (ret == H_HARDWARE)
600 ehca_gen_err("HCA not operational. ret=%lli", ret);
601
602 ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
603 adapter_handle.handle, /* r4 */
604 qp->ipz_qp_handle.handle, /* r5 */
605 0, 0, 0, 0, 0);
606
607 if (ret == H_RESOURCE)
608 ehca_gen_err("Resource still in use. ret=%lli", ret);
609
610 return ret;
611}
612
613u64 hipz_h_define_aqp0(const struct ipz_adapter_handle adapter_handle,
614 const struct ipz_qp_handle qp_handle,
615 struct h_galpa gal,
616 u32 port)
617{
618 return ehca_plpar_hcall_norets(H_DEFINE_AQP0,
619 adapter_handle.handle, /* r4 */
620 qp_handle.handle, /* r5 */
621 port, /* r6 */
622 0, 0, 0, 0);
623}
624
625u64 hipz_h_define_aqp1(const struct ipz_adapter_handle adapter_handle,
626 const struct ipz_qp_handle qp_handle,
627 struct h_galpa gal,
628 u32 port, u32 * pma_qp_nr,
629 u32 * bma_qp_nr)
630{
631 u64 ret;
632 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
633
634 ret = ehca_plpar_hcall9(H_DEFINE_AQP1, outs,
635 adapter_handle.handle, /* r4 */
636 qp_handle.handle, /* r5 */
637 port, /* r6 */
638 0, 0, 0, 0, 0, 0);
639 *pma_qp_nr = (u32)outs[0];
640 *bma_qp_nr = (u32)outs[1];
641
642 if (ret == H_ALIAS_EXIST)
643 ehca_gen_err("AQP1 already exists. ret=%lli", ret);
644
645 return ret;
646}
647
648u64 hipz_h_attach_mcqp(const struct ipz_adapter_handle adapter_handle,
649 const struct ipz_qp_handle qp_handle,
650 struct h_galpa gal,
651 u16 mcg_dlid,
652 u64 subnet_prefix, u64 interface_id)
653{
654 u64 ret;
655
656 ret = ehca_plpar_hcall_norets(H_ATTACH_MCQP,
657 adapter_handle.handle, /* r4 */
658 qp_handle.handle, /* r5 */
659 mcg_dlid, /* r6 */
660 interface_id, /* r7 */
661 subnet_prefix, /* r8 */
662 0, 0);
663
664 if (ret == H_NOT_ENOUGH_RESOURCES)
665 ehca_gen_err("Not enough resources. ret=%lli", ret);
666
667 return ret;
668}
669
670u64 hipz_h_detach_mcqp(const struct ipz_adapter_handle adapter_handle,
671 const struct ipz_qp_handle qp_handle,
672 struct h_galpa gal,
673 u16 mcg_dlid,
674 u64 subnet_prefix, u64 interface_id)
675{
676 return ehca_plpar_hcall_norets(H_DETACH_MCQP,
677 adapter_handle.handle, /* r4 */
678 qp_handle.handle, /* r5 */
679 mcg_dlid, /* r6 */
680 interface_id, /* r7 */
681 subnet_prefix, /* r8 */
682 0, 0);
683}
684
685u64 hipz_h_destroy_cq(const struct ipz_adapter_handle adapter_handle,
686 struct ehca_cq *cq,
687 u8 force_flag)
688{
689 u64 ret;
690
691 ret = hcp_galpas_dtor(&cq->galpas);
692 if (ret) {
693		ehca_gen_err("Could not destruct cq->galpas");
694 return H_RESOURCE;
695 }
696
697 ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
698 adapter_handle.handle, /* r4 */
699 cq->ipz_cq_handle.handle, /* r5 */
700 force_flag != 0 ? 1L : 0L, /* r6 */
701 0, 0, 0, 0);
702
703 if (ret == H_RESOURCE)
704 ehca_gen_err("H_FREE_RESOURCE failed ret=%lli ", ret);
705
706 return ret;
707}
708
709u64 hipz_h_destroy_eq(const struct ipz_adapter_handle adapter_handle,
710 struct ehca_eq *eq)
711{
712 u64 ret;
713
714 ret = hcp_galpas_dtor(&eq->galpas);
715 if (ret) {
716 ehca_gen_err("Could not destruct eq->galpas");
717 return H_RESOURCE;
718 }
719
720 ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
721 adapter_handle.handle, /* r4 */
722 eq->ipz_eq_handle.handle, /* r5 */
723 0, 0, 0, 0, 0);
724
725 if (ret == H_RESOURCE)
726 ehca_gen_err("Resource in use. ret=%lli ", ret);
727
728 return ret;
729}
730
731u64 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle,
732 const struct ehca_mr *mr,
733 const u64 vaddr,
734 const u64 length,
735 const u32 access_ctrl,
736 const struct ipz_pd pd,
737 struct ehca_mr_hipzout_parms *outparms)
738{
739 u64 ret;
740 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
741
742 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
743 adapter_handle.handle, /* r4 */
744 5, /* r5 */
745 vaddr, /* r6 */
746 length, /* r7 */
747 (((u64)access_ctrl) << 32ULL), /* r8 */
748 pd.value, /* r9 */
749 0, 0, 0);
750 outparms->handle.handle = outs[0];
751 outparms->lkey = (u32)outs[2];
752 outparms->rkey = (u32)outs[3];
753
754 return ret;
755}
756
757u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle,
758 const struct ehca_mr *mr,
759 const u8 pagesize,
760 const u8 queue_type,
761 const u64 logical_address_of_page,
762 const u64 count)
763{
764 u64 ret;
765
766 if (unlikely(ehca_debug_level >= 3)) {
767 if (count > 1) {
768 u64 *kpage;
769 int i;
770 kpage = __va(logical_address_of_page);
771 for (i = 0; i < count; i++)
772 ehca_gen_dbg("kpage[%d]=%p",
773 i, (void *)kpage[i]);
774 } else
775 ehca_gen_dbg("kpage=%p",
776 (void *)logical_address_of_page);
777 }
778
779 if ((count > 1) && (logical_address_of_page & (EHCA_PAGESIZE-1))) {
780 ehca_gen_err("logical_address_of_page not on a 4k boundary "
781 "adapter_handle=%llx mr=%p mr_handle=%llx "
782 "pagesize=%x queue_type=%x "
783 "logical_address_of_page=%llx count=%llx",
784 adapter_handle.handle, mr,
785 mr->ipz_mr_handle.handle, pagesize, queue_type,
786 logical_address_of_page, count);
787 ret = H_PARAMETER;
788 } else
789 ret = hipz_h_register_rpage(adapter_handle, pagesize,
790 queue_type,
791 mr->ipz_mr_handle.handle,
792 logical_address_of_page, count);
793 return ret;
794}
795
796u64 hipz_h_query_mr(const struct ipz_adapter_handle adapter_handle,
797 const struct ehca_mr *mr,
798 struct ehca_mr_hipzout_parms *outparms)
799{
800 u64 ret;
801 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
802
803 ret = ehca_plpar_hcall9(H_QUERY_MR, outs,
804 adapter_handle.handle, /* r4 */
805 mr->ipz_mr_handle.handle, /* r5 */
806 0, 0, 0, 0, 0, 0, 0);
807 outparms->len = outs[0];
808 outparms->vaddr = outs[1];
809 outparms->acl = outs[4] >> 32;
810 outparms->lkey = (u32)(outs[5] >> 32);
811 outparms->rkey = (u32)(outs[5] & (0xffffffff));
812
813 return ret;
814}
815
816u64 hipz_h_free_resource_mr(const struct ipz_adapter_handle adapter_handle,
817 const struct ehca_mr *mr)
818{
819 return ehca_plpar_hcall_norets(H_FREE_RESOURCE,
820 adapter_handle.handle, /* r4 */
821 mr->ipz_mr_handle.handle, /* r5 */
822 0, 0, 0, 0, 0);
823}
824
825u64 hipz_h_reregister_pmr(const struct ipz_adapter_handle adapter_handle,
826 const struct ehca_mr *mr,
827 const u64 vaddr_in,
828 const u64 length,
829 const u32 access_ctrl,
830 const struct ipz_pd pd,
831 const u64 mr_addr_cb,
832 struct ehca_mr_hipzout_parms *outparms)
833{
834 u64 ret;
835 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
836
837 ret = ehca_plpar_hcall9(H_REREGISTER_PMR, outs,
838 adapter_handle.handle, /* r4 */
839 mr->ipz_mr_handle.handle, /* r5 */
840 vaddr_in, /* r6 */
841 length, /* r7 */
842 /* r8 */
843 ((((u64)access_ctrl) << 32ULL) | pd.value),
844 mr_addr_cb, /* r9 */
845 0, 0, 0);
846 outparms->vaddr = outs[1];
847 outparms->lkey = (u32)outs[2];
848 outparms->rkey = (u32)outs[3];
849
850 return ret;
851}
852
853u64 hipz_h_register_smr(const struct ipz_adapter_handle adapter_handle,
854 const struct ehca_mr *mr,
855 const struct ehca_mr *orig_mr,
856 const u64 vaddr_in,
857 const u32 access_ctrl,
858 const struct ipz_pd pd,
859 struct ehca_mr_hipzout_parms *outparms)
860{
861 u64 ret;
862 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
863
864 ret = ehca_plpar_hcall9(H_REGISTER_SMR, outs,
865 adapter_handle.handle, /* r4 */
866 orig_mr->ipz_mr_handle.handle, /* r5 */
867 vaddr_in, /* r6 */
868 (((u64)access_ctrl) << 32ULL), /* r7 */
869 pd.value, /* r8 */
870 0, 0, 0, 0);
871 outparms->handle.handle = outs[0];
872 outparms->lkey = (u32)outs[2];
873 outparms->rkey = (u32)outs[3];
874
875 return ret;
876}
877
878u64 hipz_h_alloc_resource_mw(const struct ipz_adapter_handle adapter_handle,
879 const struct ehca_mw *mw,
880 const struct ipz_pd pd,
881 struct ehca_mw_hipzout_parms *outparms)
882{
883 u64 ret;
884 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
885
886 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
887 adapter_handle.handle, /* r4 */
888 6, /* r5 */
889 pd.value, /* r6 */
890 0, 0, 0, 0, 0, 0);
891 outparms->handle.handle = outs[0];
892 outparms->rkey = (u32)outs[3];
893
894 return ret;
895}
896
897u64 hipz_h_query_mw(const struct ipz_adapter_handle adapter_handle,
898 const struct ehca_mw *mw,
899 struct ehca_mw_hipzout_parms *outparms)
900{
901 u64 ret;
902 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
903
904 ret = ehca_plpar_hcall9(H_QUERY_MW, outs,
905 adapter_handle.handle, /* r4 */
906 mw->ipz_mw_handle.handle, /* r5 */
907 0, 0, 0, 0, 0, 0, 0);
908 outparms->rkey = (u32)outs[3];
909
910 return ret;
911}
912
913u64 hipz_h_free_resource_mw(const struct ipz_adapter_handle adapter_handle,
914 const struct ehca_mw *mw)
915{
916 return ehca_plpar_hcall_norets(H_FREE_RESOURCE,
917 adapter_handle.handle, /* r4 */
918 mw->ipz_mw_handle.handle, /* r5 */
919 0, 0, 0, 0, 0);
920}
921
922u64 hipz_h_error_data(const struct ipz_adapter_handle adapter_handle,
923 const u64 ressource_handle,
924 void *rblock,
925 unsigned long *byte_count)
926{
927 u64 r_cb = __pa(rblock);
928
929 if (r_cb & (EHCA_PAGESIZE-1)) {
930 ehca_gen_err("rblock not page aligned.");
931 return H_PARAMETER;
932 }
933
934 return ehca_plpar_hcall_norets(H_ERROR_DATA,
935 adapter_handle.handle,
936 ressource_handle,
937 r_cb,
938 0, 0, 0, 0);
939}
940
941u64 hipz_h_eoi(int irq)
942{
943 unsigned long xirr;
944
945 iosync();
946 xirr = (0xffULL << 24) | irq;
947
948 return plpar_hcall_norets(H_EOI, xirr);
949}
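Nearly every wrapper above follows the same shape: convert the kernel-virtual response block with __pa(), reject it with H_PARAMETER if it is not EHCA_PAGESIZE aligned, then hand the adapter handle plus the physical address to the hypervisor. A minimal sketch of that pattern, assuming a hypothetical H_QUERY_FOO opcode and hipz_h_query_foo() wrapper (the helpers and return codes are the ones used throughout this file):

    /*
     * Illustrative sketch only; H_QUERY_FOO and hipz_h_query_foo are
     * hypothetical names, mirroring hipz_h_query_hca() above.
     */
    static u64 hipz_h_query_foo(const struct ipz_adapter_handle adapter_handle,
                                void *rblock)
    {
            u64 r_cb = __pa(rblock);        /* firmware wants a physical address */

            if (r_cb & (EHCA_PAGESIZE - 1)) {       /* must be 4K aligned */
                    ehca_gen_err("rblock=%p not page aligned", rblock);
                    return H_PARAMETER;
            }

            return ehca_plpar_hcall_norets(H_QUERY_FOO,
                                           adapter_handle.handle, /* r4 */
                                           r_cb,                  /* r5 */
                                           0, 0, 0, 0, 0);
    }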
diff --git a/drivers/staging/rdma/ehca/hcp_if.h b/drivers/staging/rdma/ehca/hcp_if.h
deleted file mode 100644
index a46e514c367b..000000000000
--- a/drivers/staging/rdma/ehca/hcp_if.h
+++ /dev/null
@@ -1,265 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * Firmware Infiniband Interface code for POWER
5 *
6 * Authors: Christoph Raisch <raisch@de.ibm.com>
7 * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
8 * Gerd Bayer <gerd.bayer@de.ibm.com>
9 * Waleri Fomin <fomin@de.ibm.com>
10 *
11 * Copyright (c) 2005 IBM Corporation
12 *
13 * All rights reserved.
14 *
15 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
16 * BSD.
17 *
18 * OpenIB BSD License
19 *
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions are met:
22 *
23 * Redistributions of source code must retain the above copyright notice, this
24 * list of conditions and the following disclaimer.
25 *
26 * Redistributions in binary form must reproduce the above copyright notice,
27 * this list of conditions and the following disclaimer in the documentation
28 * and/or other materials
29 * provided with the distribution.
30 *
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
32 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
33 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
34 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
35 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
36 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
37 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
38 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
39 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
40 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGE.
42 */
43
44#ifndef __HCP_IF_H__
45#define __HCP_IF_H__
46
47#include "ehca_classes.h"
48#include "ehca_tools.h"
49#include "hipz_hw.h"
50
51/*
52 * hipz_h_alloc_resource_eq allocates EQ resources in HW and FW, initializes
53 * resources, and creates the empty EQPT (ring).
54 */
55u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle,
56 struct ehca_pfeq *pfeq,
57 const u32 neq_control,
58 const u32 number_of_entries,
59 struct ipz_eq_handle *eq_handle,
60 u32 * act_nr_of_entries,
61 u32 * act_pages,
62 u32 * eq_ist);
63
64u64 hipz_h_reset_event(const struct ipz_adapter_handle adapter_handle,
65 struct ipz_eq_handle eq_handle,
66 const u64 event_mask);
67/*
68 * hipz_h_alloc_resource_cq allocates CQ resources in HW and FW, initializes
69 * resources, and creates the empty CQPT (ring).
70 */
71u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
72 struct ehca_cq *cq,
73 struct ehca_alloc_cq_parms *param);
74
75
76/*
77 * hipz_h_alloc_resource_qp allocates QP resources in HW and FW,
78 * initializes resources, and creates empty QPPTs (2 rings).
79 */
80u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
81 struct ehca_alloc_qp_parms *parms, int is_user);
82
83u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
84 const u8 port_id,
85 struct hipz_query_port *query_port_response_block);
86
87u64 hipz_h_modify_port(const struct ipz_adapter_handle adapter_handle,
88 const u8 port_id, const u32 port_cap,
89 const u8 init_type, const int modify_mask);
90
91u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle,
92 struct hipz_query_hca *query_hca_rblock);
93
94/*
95 * hipz_h_register_rpage is the internal helper used by all of the
96 * H_REGISTER_RPAGES wrappers.
97 */
98u64 hipz_h_register_rpage(const struct ipz_adapter_handle adapter_handle,
99 const u8 pagesize,
100 const u8 queue_type,
101 const u64 resource_handle,
102 const u64 logical_address_of_page,
103 u64 count);
104
105u64 hipz_h_register_rpage_eq(const struct ipz_adapter_handle adapter_handle,
106 const struct ipz_eq_handle eq_handle,
107 struct ehca_pfeq *pfeq,
108 const u8 pagesize,
109 const u8 queue_type,
110 const u64 logical_address_of_page,
111 const u64 count);
112
113u64 hipz_h_query_int_state(const struct ipz_adapter_handle
114 hcp_adapter_handle,
115 u32 ist);
116
117u64 hipz_h_register_rpage_cq(const struct ipz_adapter_handle adapter_handle,
118 const struct ipz_cq_handle cq_handle,
119 struct ehca_pfcq *pfcq,
120 const u8 pagesize,
121 const u8 queue_type,
122 const u64 logical_address_of_page,
123 const u64 count,
124 const struct h_galpa gal);
125
126u64 hipz_h_register_rpage_qp(const struct ipz_adapter_handle adapter_handle,
127 const struct ipz_qp_handle qp_handle,
128 struct ehca_pfqp *pfqp,
129 const u8 pagesize,
130 const u8 queue_type,
131 const u64 logical_address_of_page,
132 const u64 count,
133 const struct h_galpa galpa);
134
135u64 hipz_h_disable_and_get_wqe(const struct ipz_adapter_handle adapter_handle,
136 const struct ipz_qp_handle qp_handle,
137 struct ehca_pfqp *pfqp,
138 void **log_addr_next_sq_wqe_tb_processed,
139 void **log_addr_next_rq_wqe_tb_processed,
140 int dis_and_get_function_code);
141enum hcall_sigt {
142 HCALL_SIGT_NO_CQE = 0,
143 HCALL_SIGT_BY_WQE = 1,
144 HCALL_SIGT_EVERY = 2
145};
146
147u64 hipz_h_modify_qp(const struct ipz_adapter_handle adapter_handle,
148 const struct ipz_qp_handle qp_handle,
149 struct ehca_pfqp *pfqp,
150 const u64 update_mask,
151 struct hcp_modify_qp_control_block *mqpcb,
152 struct h_galpa gal);
153
154u64 hipz_h_query_qp(const struct ipz_adapter_handle adapter_handle,
155 const struct ipz_qp_handle qp_handle,
156 struct ehca_pfqp *pfqp,
157 struct hcp_modify_qp_control_block *qqpcb,
158 struct h_galpa gal);
159
160u64 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle,
161 struct ehca_qp *qp);
162
163u64 hipz_h_define_aqp0(const struct ipz_adapter_handle adapter_handle,
164 const struct ipz_qp_handle qp_handle,
165 struct h_galpa gal,
166 u32 port);
167
168u64 hipz_h_define_aqp1(const struct ipz_adapter_handle adapter_handle,
169 const struct ipz_qp_handle qp_handle,
170 struct h_galpa gal,
171 u32 port, u32 * pma_qp_nr,
172 u32 * bma_qp_nr);
173
174u64 hipz_h_attach_mcqp(const struct ipz_adapter_handle adapter_handle,
175 const struct ipz_qp_handle qp_handle,
176 struct h_galpa gal,
177 u16 mcg_dlid,
178 u64 subnet_prefix, u64 interface_id);
179
180u64 hipz_h_detach_mcqp(const struct ipz_adapter_handle adapter_handle,
181 const struct ipz_qp_handle qp_handle,
182 struct h_galpa gal,
183 u16 mcg_dlid,
184 u64 subnet_prefix, u64 interface_id);
185
186u64 hipz_h_destroy_cq(const struct ipz_adapter_handle adapter_handle,
187 struct ehca_cq *cq,
188 u8 force_flag);
189
190u64 hipz_h_destroy_eq(const struct ipz_adapter_handle adapter_handle,
191 struct ehca_eq *eq);
192
193/*
194 * hipz_h_alloc_resource_mr allocates MR resources in HW and FW and
195 * initializes them.
196 */
197u64 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle,
198 const struct ehca_mr *mr,
199 const u64 vaddr,
200 const u64 length,
201 const u32 access_ctrl,
202 const struct ipz_pd pd,
203 struct ehca_mr_hipzout_parms *outparms);
204
205/* hipz_h_register_rpage_mr registers MR resource pages in HW and FW */
206u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle,
207 const struct ehca_mr *mr,
208 const u8 pagesize,
209 const u8 queue_type,
210 const u64 logical_address_of_page,
211 const u64 count);
212
213/* hipz_h_query_mr queries MR in HW and FW */
214u64 hipz_h_query_mr(const struct ipz_adapter_handle adapter_handle,
215 const struct ehca_mr *mr,
216 struct ehca_mr_hipzout_parms *outparms);
217
218/* hipz_h_free_resource_mr frees MR resources in HW and FW */
219u64 hipz_h_free_resource_mr(const struct ipz_adapter_handle adapter_handle,
220 const struct ehca_mr *mr);
221
222/* hipz_h_reregister_pmr reregisters MR in HW and FW */
223u64 hipz_h_reregister_pmr(const struct ipz_adapter_handle adapter_handle,
224 const struct ehca_mr *mr,
225 const u64 vaddr_in,
226 const u64 length,
227 const u32 access_ctrl,
228 const struct ipz_pd pd,
229 const u64 mr_addr_cb,
230 struct ehca_mr_hipzout_parms *outparms);
231
232/* hipz_h_register_smr registers a shared MR in HW and FW */
233u64 hipz_h_register_smr(const struct ipz_adapter_handle adapter_handle,
234 const struct ehca_mr *mr,
235 const struct ehca_mr *orig_mr,
236 const u64 vaddr_in,
237 const u32 access_ctrl,
238 const struct ipz_pd pd,
239 struct ehca_mr_hipzout_parms *outparms);
240
241/*
242 * hipz_h_alloc_resource_mw allocates MW resources in HW and FW and
243 * initializes them.
244 */
245u64 hipz_h_alloc_resource_mw(const struct ipz_adapter_handle adapter_handle,
246 const struct ehca_mw *mw,
247 const struct ipz_pd pd,
248 struct ehca_mw_hipzout_parms *outparms);
249
250/* hipz_h_query_mw queries MW in HW and FW */
251u64 hipz_h_query_mw(const struct ipz_adapter_handle adapter_handle,
252 const struct ehca_mw *mw,
253 struct ehca_mw_hipzout_parms *outparms);
254
255/* hipz_h_free_resource_mw frees MW resources in HW and FW */
256u64 hipz_h_free_resource_mw(const struct ipz_adapter_handle adapter_handle,
257 const struct ehca_mw *mw);
258
259u64 hipz_h_error_data(const struct ipz_adapter_handle adapter_handle,
260 const u64 ressource_handle,
261 void *rblock,
262 unsigned long *byte_count);
263u64 hipz_h_eoi(int irq);
264
265#endif /* __HCP_IF_H__ */
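The register-rpage wrappers declared above accept only one page per hcall; hipz_h_register_rpage_eq() and friends return H_PARAMETER for any other count. A hedged sketch of how a caller might loop over a queue's pages (register_eq_pages(), page_phys and nr_pages are made-up names for illustration; the pfeq argument is ignored by the implementation, so NULL is passed here):

    /* Illustrative sketch only; not a declaration from this header. */
    static u64 register_eq_pages(const struct ipz_adapter_handle adapter_handle,
                                 const struct ipz_eq_handle eq_handle,
                                 const u64 *page_phys, u32 nr_pages)
    {
            u64 hret = H_SUCCESS;
            u32 i;

            for (i = 0; i < nr_pages; i++) {
                    /* one page per call: count must be 1 */
                    hret = hipz_h_register_rpage_eq(adapter_handle, eq_handle,
                                                    NULL, 0 /* pagesize code */,
                                                    0 /* queue_type */,
                                                    page_phys[i], 1);
                    if (hret != H_SUCCESS)
                            break;
            }
            return hret;
    }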
diff --git a/drivers/staging/rdma/ehca/hcp_phyp.c b/drivers/staging/rdma/ehca/hcp_phyp.c
deleted file mode 100644
index 077376ff3d28..000000000000
--- a/drivers/staging/rdma/ehca/hcp_phyp.c
+++ /dev/null
@@ -1,82 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * load store abstraction for ehca register access with tracing
5 *
6 * Authors: Christoph Raisch <raisch@de.ibm.com>
7 * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
8 *
9 * Copyright (c) 2005 IBM Corporation
10 *
11 * All rights reserved.
12 *
13 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
14 * BSD.
15 *
16 * OpenIB BSD License
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are met:
20 *
21 * Redistributions of source code must retain the above copyright notice, this
22 * list of conditions and the following disclaimer.
23 *
24 * Redistributions in binary form must reproduce the above copyright notice,
25 * this list of conditions and the following disclaimer in the documentation
26 * and/or other materials
27 * provided with the distribution.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
30 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
33 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
34 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
35 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
36 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
37 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
38 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
39 * POSSIBILITY OF SUCH DAMAGE.
40 */
41
42#include "ehca_classes.h"
43#include "hipz_hw.h"
44
45u64 hcall_map_page(u64 physaddr)
46{
47 return (u64)ioremap(physaddr, EHCA_PAGESIZE);
48}
49
50int hcall_unmap_page(u64 mapaddr)
51{
52 iounmap((volatile void __iomem *) mapaddr);
53 return 0;
54}
55
56int hcp_galpas_ctor(struct h_galpas *galpas, int is_user,
57 u64 paddr_kernel, u64 paddr_user)
58{
59 if (!is_user) {
60 galpas->kernel.fw_handle = hcall_map_page(paddr_kernel);
61 if (!galpas->kernel.fw_handle)
62 return -ENOMEM;
63 } else
64 galpas->kernel.fw_handle = 0;
65
66 galpas->user.fw_handle = paddr_user;
67
68 return 0;
69}
70
71int hcp_galpas_dtor(struct h_galpas *galpas)
72{
73 if (galpas->kernel.fw_handle) {
74 int ret = hcall_unmap_page(galpas->kernel.fw_handle);
75 if (ret)
76 return ret;
77 }
78
79 galpas->user.fw_handle = galpas->kernel.fw_handle = 0;
80
81 return 0;
82}
diff --git a/drivers/staging/rdma/ehca/hcp_phyp.h b/drivers/staging/rdma/ehca/hcp_phyp.h
deleted file mode 100644
index d1b029910249..000000000000
--- a/drivers/staging/rdma/ehca/hcp_phyp.h
+++ /dev/null
@@ -1,90 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * Firmware calls
5 *
6 * Authors: Christoph Raisch <raisch@de.ibm.com>
7 * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
8 * Waleri Fomin <fomin@de.ibm.com>
9 * Gerd Bayer <gerd.bayer@de.ibm.com>
10 *
11 * Copyright (c) 2005 IBM Corporation
12 *
13 * All rights reserved.
14 *
15 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
16 * BSD.
17 *
18 * OpenIB BSD License
19 *
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions are met:
22 *
23 * Redistributions of source code must retain the above copyright notice, this
24 * list of conditions and the following disclaimer.
25 *
26 * Redistributions in binary form must reproduce the above copyright notice,
27 * this list of conditions and the following disclaimer in the documentation
28 * and/or other materials
29 * provided with the distribution.
30 *
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
32 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
33 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
34 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
35 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
36 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
37 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
38 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
39 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
40 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGE.
42 */
43
44#ifndef __HCP_PHYP_H__
45#define __HCP_PHYP_H__
46
47
48/*
49 * eHCA page (mapped into memory)
50 * resource to access eHCA register pages in CPU address space
51 */
52struct h_galpa {
53 u64 fw_handle;
54 /* for pSeries this is a 64bit memory address where
55 I/O memory is mapped into CPU address space (kv) */
56};
57
58/*
59 * resource to access eHCA address space registers, all types
60 */
61struct h_galpas {
62	u32 pid;		/* PID of the user-space process, for galpa checking */
63 struct h_galpa user; /* user space accessible resource,
64 set to 0 if unused */
65 struct h_galpa kernel; /* kernel space accessible resource,
66 set to 0 if unused */
67};
68
69static inline u64 hipz_galpa_load(struct h_galpa galpa, u32 offset)
70{
71 u64 addr = galpa.fw_handle + offset;
72 return *(volatile u64 __force *)addr;
73}
74
75static inline void hipz_galpa_store(struct h_galpa galpa, u32 offset, u64 value)
76{
77 u64 addr = galpa.fw_handle + offset;
78 *(volatile u64 __force *)addr = value;
79}
80
81int hcp_galpas_ctor(struct h_galpas *galpas, int is_user,
82 u64 paddr_kernel, u64 paddr_user);
83
84int hcp_galpas_dtor(struct h_galpas *galpas);
85
86u64 hcall_map_page(u64 physaddr);
87
88int hcall_unmap_page(u64 mapaddr);
89
90#endif
diff --git a/drivers/staging/rdma/ehca/hipz_fns.h b/drivers/staging/rdma/ehca/hipz_fns.h
deleted file mode 100644
index 9dac93d02140..000000000000
--- a/drivers/staging/rdma/ehca/hipz_fns.h
+++ /dev/null
@@ -1,68 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * HW abstraction register functions
5 *
6 * Authors: Christoph Raisch <raisch@de.ibm.com>
7 * Reinhard Ernst <rernst@de.ibm.com>
8 *
9 * Copyright (c) 2005 IBM Corporation
10 *
11 * All rights reserved.
12 *
13 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
14 * BSD.
15 *
16 * OpenIB BSD License
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are met:
20 *
21 * Redistributions of source code must retain the above copyright notice, this
22 * list of conditions and the following disclaimer.
23 *
24 * Redistributions in binary form must reproduce the above copyright notice,
25 * this list of conditions and the following disclaimer in the documentation
26 * and/or other materials
27 * provided with the distribution.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
30 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
33 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
34 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
35 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
36 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
37 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
38 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
39 * POSSIBILITY OF SUCH DAMAGE.
40 */
41
42#ifndef __HIPZ_FNS_H__
43#define __HIPZ_FNS_H__
44
45#include "ehca_classes.h"
46#include "hipz_hw.h"
47
48#include "hipz_fns_core.h"
49
50#define hipz_galpa_store_eq(gal, offset, value) \
51 hipz_galpa_store(gal, EQTEMM_OFFSET(offset), value)
52
53#define hipz_galpa_load_eq(gal, offset) \
54 hipz_galpa_load(gal, EQTEMM_OFFSET(offset))
55
56#define hipz_galpa_store_qped(gal, offset, value) \
57 hipz_galpa_store(gal, QPEDMM_OFFSET(offset), value)
58
59#define hipz_galpa_load_qped(gal, offset) \
60 hipz_galpa_load(gal, QPEDMM_OFFSET(offset))
61
62#define hipz_galpa_store_mrmw(gal, offset, value) \
63 hipz_galpa_store(gal, MRMWMM_OFFSET(offset), value)
64
65#define hipz_galpa_load_mrmw(gal, offset) \
66 hipz_galpa_load(gal, MRMWMM_OFFSET(offset))
67
68#endif
diff --git a/drivers/staging/rdma/ehca/hipz_fns_core.h b/drivers/staging/rdma/ehca/hipz_fns_core.h
deleted file mode 100644
index 868735fd3187..000000000000
--- a/drivers/staging/rdma/ehca/hipz_fns_core.h
+++ /dev/null
@@ -1,100 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * HW abstraction register functions
5 *
6 * Authors: Christoph Raisch <raisch@de.ibm.com>
7 * Heiko J Schick <schickhj@de.ibm.com>
8 * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
9 * Reinhard Ernst <rernst@de.ibm.com>
10 *
11 * Copyright (c) 2005 IBM Corporation
12 *
13 * All rights reserved.
14 *
15 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
16 * BSD.
17 *
18 * OpenIB BSD License
19 *
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions are met:
22 *
23 * Redistributions of source code must retain the above copyright notice, this
24 * list of conditions and the following disclaimer.
25 *
26 * Redistributions in binary form must reproduce the above copyright notice,
27 * this list of conditions and the following disclaimer in the documentation
28 * and/or other materials
29 * provided with the distribution.
30 *
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
32 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
33 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
34 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
35 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
36 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
37 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
38 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
39 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
40 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGE.
42 */
43
44#ifndef __HIPZ_FNS_CORE_H__
45#define __HIPZ_FNS_CORE_H__
46
47#include "hcp_phyp.h"
48#include "hipz_hw.h"
49
50#define hipz_galpa_store_cq(gal, offset, value) \
51 hipz_galpa_store(gal, CQTEMM_OFFSET(offset), value)
52
53#define hipz_galpa_load_cq(gal, offset) \
54 hipz_galpa_load(gal, CQTEMM_OFFSET(offset))
55
56#define hipz_galpa_store_qp(gal, offset, value) \
57 hipz_galpa_store(gal, QPTEMM_OFFSET(offset), value)
58#define hipz_galpa_load_qp(gal, offset) \
59 hipz_galpa_load(gal, QPTEMM_OFFSET(offset))
60
61static inline void hipz_update_sqa(struct ehca_qp *qp, u16 nr_wqes)
62{
63 /* ringing doorbell :-) */
64 hipz_galpa_store_qp(qp->galpas.kernel, qpx_sqa,
65 EHCA_BMASK_SET(QPX_SQADDER, nr_wqes));
66}
67
68static inline void hipz_update_rqa(struct ehca_qp *qp, u16 nr_wqes)
69{
70 /* ringing doorbell :-) */
71 hipz_galpa_store_qp(qp->galpas.kernel, qpx_rqa,
72 EHCA_BMASK_SET(QPX_RQADDER, nr_wqes));
73}
74
75static inline void hipz_update_feca(struct ehca_cq *cq, u32 nr_cqes)
76{
77 hipz_galpa_store_cq(cq->galpas.kernel, cqx_feca,
78 EHCA_BMASK_SET(CQX_FECADDER, nr_cqes));
79}
80
81static inline void hipz_set_cqx_n0(struct ehca_cq *cq, u32 value)
82{
83 u64 cqx_n0_reg;
84
85 hipz_galpa_store_cq(cq->galpas.kernel, cqx_n0,
86 EHCA_BMASK_SET(CQX_N0_GENERATE_SOLICITED_COMP_EVENT,
87 value));
88 cqx_n0_reg = hipz_galpa_load_cq(cq->galpas.kernel, cqx_n0);
89}
90
91static inline void hipz_set_cqx_n1(struct ehca_cq *cq, u32 value)
92{
93 u64 cqx_n1_reg;
94
95 hipz_galpa_store_cq(cq->galpas.kernel, cqx_n1,
96 EHCA_BMASK_SET(CQX_N1_GENERATE_COMP_EVENT, value));
97 cqx_n1_reg = hipz_galpa_load_cq(cq->galpas.kernel, cqx_n1);
98}
99
100#endif /* __HIPZ_FNS_CORE_H__ */
diff --git a/drivers/staging/rdma/ehca/hipz_hw.h b/drivers/staging/rdma/ehca/hipz_hw.h
deleted file mode 100644
index bf996c7acc42..000000000000
--- a/drivers/staging/rdma/ehca/hipz_hw.h
+++ /dev/null
@@ -1,414 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * eHCA register definitions
5 *
6 * Authors: Waleri Fomin <fomin@de.ibm.com>
7 * Christoph Raisch <raisch@de.ibm.com>
8 * Reinhard Ernst <rernst@de.ibm.com>
9 *
10 * Copyright (c) 2005 IBM Corporation
11 *
12 * All rights reserved.
13 *
14 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
15 * BSD.
16 *
17 * OpenIB BSD License
18 *
19 * Redistribution and use in source and binary forms, with or without
20 * modification, are permitted provided that the following conditions are met:
21 *
22 * Redistributions of source code must retain the above copyright notice, this
23 * list of conditions and the following disclaimer.
24 *
25 * Redistributions in binary form must reproduce the above copyright notice,
26 * this list of conditions and the following disclaimer in the documentation
27 * and/or other materials
28 * provided with the distribution.
29 *
30 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
31 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
32 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
33 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
34 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
35 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
36 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
37 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
38 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
39 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
40 * POSSIBILITY OF SUCH DAMAGE.
41 */
42
43#ifndef __HIPZ_HW_H__
44#define __HIPZ_HW_H__
45
46#include "ehca_tools.h"
47
48#define EHCA_MAX_MTU 4
49
50/* QP Table Entry Memory Map */
51struct hipz_qptemm {
52 u64 qpx_hcr;
53 u64 qpx_c;
54 u64 qpx_herr;
55 u64 qpx_aer;
56/* 0x20*/
57 u64 qpx_sqa;
58 u64 qpx_sqc;
59 u64 qpx_rqa;
60 u64 qpx_rqc;
61/* 0x40*/
62 u64 qpx_st;
63 u64 qpx_pmstate;
64 u64 qpx_pmfa;
65 u64 qpx_pkey;
66/* 0x60*/
67 u64 qpx_pkeya;
68 u64 qpx_pkeyb;
69 u64 qpx_pkeyc;
70 u64 qpx_pkeyd;
71/* 0x80*/
72 u64 qpx_qkey;
73 u64 qpx_dqp;
74 u64 qpx_dlidp;
75 u64 qpx_portp;
76/* 0xa0*/
77 u64 qpx_slidp;
78 u64 qpx_slidpp;
79 u64 qpx_dlida;
80 u64 qpx_porta;
81/* 0xc0*/
82 u64 qpx_slida;
83 u64 qpx_slidpa;
84 u64 qpx_slvl;
85 u64 qpx_ipd;
86/* 0xe0*/
87 u64 qpx_mtu;
88 u64 qpx_lato;
89 u64 qpx_rlimit;
90 u64 qpx_rnrlimit;
91/* 0x100*/
92 u64 qpx_t;
93 u64 qpx_sqhp;
94 u64 qpx_sqptp;
95 u64 qpx_nspsn;
96/* 0x120*/
97 u64 qpx_nspsnhwm;
98 u64 reserved1;
99 u64 qpx_sdsi;
100 u64 qpx_sdsbc;
101/* 0x140*/
102 u64 qpx_sqwsize;
103 u64 qpx_sqwts;
104 u64 qpx_lsn;
105 u64 qpx_nssn;
106/* 0x160 */
107 u64 qpx_mor;
108 u64 qpx_cor;
109 u64 qpx_sqsize;
110 u64 qpx_erc;
111/* 0x180*/
112 u64 qpx_rnrrc;
113 u64 qpx_ernrwt;
114 u64 qpx_rnrresp;
115 u64 qpx_lmsna;
116/* 0x1a0 */
117 u64 qpx_sqhpc;
118 u64 qpx_sqcptp;
119 u64 qpx_sigt;
120 u64 qpx_wqecnt;
121/* 0x1c0*/
122 u64 qpx_rqhp;
123 u64 qpx_rqptp;
124 u64 qpx_rqsize;
125 u64 qpx_nrr;
126/* 0x1e0*/
127 u64 qpx_rdmac;
128 u64 qpx_nrpsn;
129 u64 qpx_lapsn;
130 u64 qpx_lcr;
131/* 0x200*/
132 u64 qpx_rwc;
133 u64 qpx_rwva;
134 u64 qpx_rdsi;
135 u64 qpx_rdsbc;
136/* 0x220*/
137 u64 qpx_rqwsize;
138 u64 qpx_crmsn;
139 u64 qpx_rdd;
140 u64 qpx_larpsn;
141/* 0x240*/
142 u64 qpx_pd;
143 u64 qpx_scqn;
144 u64 qpx_rcqn;
145 u64 qpx_aeqn;
146/* 0x260*/
147 u64 qpx_aaelog;
148 u64 qpx_ram;
149 u64 qpx_rdmaqe0;
150 u64 qpx_rdmaqe1;
151/* 0x280*/
152 u64 qpx_rdmaqe2;
153 u64 qpx_rdmaqe3;
154 u64 qpx_nrpsnhwm;
155/* 0x298*/
156 u64 reserved[(0x400 - 0x298) / 8];
157/* 0x400 extended data */
158 u64 reserved_ext[(0x500 - 0x400) / 8];
159/* 0x500 */
160 u64 reserved2[(0x1000 - 0x500) / 8];
161/* 0x1000 */
162};
163
164#define QPX_SQADDER EHCA_BMASK_IBM(48, 63)
165#define QPX_RQADDER EHCA_BMASK_IBM(48, 63)
166#define QPX_AAELOG_RESET_SRQ_LIMIT EHCA_BMASK_IBM(3, 3)
167
168#define QPTEMM_OFFSET(x) offsetof(struct hipz_qptemm, x)
169
170/* MRMWPT Entry Memory Map */
171struct hipz_mrmwmm {
172 /* 0x00 */
173 u64 mrx_hcr;
174
175 u64 mrx_c;
176 u64 mrx_herr;
177 u64 mrx_aer;
178 /* 0x20 */
179 u64 mrx_pp;
180 u64 reserved1;
181 u64 reserved2;
182 u64 reserved3;
183 /* 0x40 */
184 u64 reserved4[(0x200 - 0x40) / 8];
185 /* 0x200 */
186 u64 mrx_ctl[64];
187
188};
189
190#define MRMWMM_OFFSET(x) offsetof(struct hipz_mrmwmm, x)
191
192struct hipz_qpedmm {
193 /* 0x00 */
194 u64 reserved0[(0x400) / 8];
195 /* 0x400 */
196 u64 qpedx_phh;
197 u64 qpedx_ppsgp;
198 /* 0x410 */
199 u64 qpedx_ppsgu;
200 u64 qpedx_ppdgp;
201 /* 0x420 */
202 u64 qpedx_ppdgu;
203 u64 qpedx_aph;
204 /* 0x430 */
205 u64 qpedx_apsgp;
206 u64 qpedx_apsgu;
207 /* 0x440 */
208 u64 qpedx_apdgp;
209 u64 qpedx_apdgu;
210 /* 0x450 */
211 u64 qpedx_apav;
212 u64 qpedx_apsav;
213 /* 0x460 */
214 u64 qpedx_hcr;
215 u64 reserved1[4];
216 /* 0x488 */
217 u64 qpedx_rrl0;
218 /* 0x490 */
219 u64 qpedx_rrrkey0;
220 u64 qpedx_rrva0;
221 /* 0x4a0 */
222 u64 reserved2;
223 u64 qpedx_rrl1;
224 /* 0x4b0 */
225 u64 qpedx_rrrkey1;
226 u64 qpedx_rrva1;
227 /* 0x4c0 */
228 u64 reserved3;
229 u64 qpedx_rrl2;
230 /* 0x4d0 */
231 u64 qpedx_rrrkey2;
232 u64 qpedx_rrva2;
233 /* 0x4e0 */
234 u64 reserved4;
235 u64 qpedx_rrl3;
236 /* 0x4f0 */
237 u64 qpedx_rrrkey3;
238 u64 qpedx_rrva3;
239};
240
241#define QPEDMM_OFFSET(x) offsetof(struct hipz_qpedmm, x)
242
243/* CQ Table Entry Memory Map */
244struct hipz_cqtemm {
245 u64 cqx_hcr;
246 u64 cqx_c;
247 u64 cqx_herr;
248 u64 cqx_aer;
249/* 0x20 */
250 u64 cqx_ptp;
251 u64 cqx_tp;
252 u64 cqx_fec;
253 u64 cqx_feca;
254/* 0x40 */
255 u64 cqx_ep;
256 u64 cqx_eq;
257/* 0x50 */
258 u64 reserved1;
259 u64 cqx_n0;
260/* 0x60 */
261 u64 cqx_n1;
262 u64 reserved2[(0x1000 - 0x60) / 8];
263/* 0x1000 */
264};
265
266#define CQX_FEC_CQE_CNT EHCA_BMASK_IBM(32, 63)
267#define CQX_FECADDER EHCA_BMASK_IBM(32, 63)
268#define CQX_N0_GENERATE_SOLICITED_COMP_EVENT EHCA_BMASK_IBM(0, 0)
269#define CQX_N1_GENERATE_COMP_EVENT EHCA_BMASK_IBM(0, 0)
270
271#define CQTEMM_OFFSET(x) offsetof(struct hipz_cqtemm, x)
272
273/* EQ Table Entry Memory Map */
274struct hipz_eqtemm {
275 u64 eqx_hcr;
276 u64 eqx_c;
277
278 u64 eqx_herr;
279 u64 eqx_aer;
280/* 0x20 */
281 u64 eqx_ptp;
282 u64 eqx_tp;
283 u64 eqx_ssba;
284 u64 eqx_psba;
285
286/* 0x40 */
287 u64 eqx_cec;
288 u64 eqx_meql;
289 u64 eqx_xisbi;
290 u64 eqx_xisc;
291/* 0x60 */
292 u64 eqx_it;
293
294};
295
296#define EQTEMM_OFFSET(x) offsetof(struct hipz_eqtemm, x)
297
298/* access control defines for MR/MW */
299#define HIPZ_ACCESSCTRL_L_WRITE 0x00800000
300#define HIPZ_ACCESSCTRL_R_WRITE 0x00400000
301#define HIPZ_ACCESSCTRL_R_READ 0x00200000
302#define HIPZ_ACCESSCTRL_R_ATOMIC 0x00100000
303#define HIPZ_ACCESSCTRL_MW_BIND 0x00080000
304
305/* query hca response block */
306struct hipz_query_hca {
307 u32 cur_reliable_dg;
308 u32 cur_qp;
309 u32 cur_cq;
310 u32 cur_eq;
311 u32 cur_mr;
312 u32 cur_mw;
313 u32 cur_ee_context;
314 u32 cur_mcast_grp;
315 u32 cur_qp_attached_mcast_grp;
316 u32 reserved1;
317 u32 cur_ipv6_qp;
318 u32 cur_eth_qp;
319 u32 cur_hp_mr;
320 u32 reserved2[3];
321 u32 max_rd_domain;
322 u32 max_qp;
323 u32 max_cq;
324 u32 max_eq;
325 u32 max_mr;
326 u32 max_hp_mr;
327 u32 max_mw;
328 u32 max_mrwpte;
329 u32 max_special_mrwpte;
330 u32 max_rd_ee_context;
331 u32 max_mcast_grp;
332 u32 max_total_mcast_qp_attach;
333 u32 max_mcast_qp_attach;
334 u32 max_raw_ipv6_qp;
335 u32 max_raw_ethy_qp;
336 u32 internal_clock_frequency;
337 u32 max_pd;
338 u32 max_ah;
339 u32 max_cqe;
340 u32 max_wqes_wq;
341 u32 max_partitions;
342 u32 max_rr_ee_context;
343 u32 max_rr_qp;
344 u32 max_rr_hca;
345 u32 max_act_wqs_ee_context;
346 u32 max_act_wqs_qp;
347 u32 max_sge;
348 u32 max_sge_rd;
349 u32 memory_page_size_supported;
350 u64 max_mr_size;
351 u32 local_ca_ack_delay;
352 u32 num_ports;
353 u32 vendor_id;
354 u32 vendor_part_id;
355 u32 hw_ver;
356 u64 node_guid;
357 u64 hca_cap_indicators;
358 u32 data_counter_register_size;
359 u32 max_shared_rq;
360 u32 max_isns_eq;
361 u32 max_neq;
362} __attribute__ ((packed));
363
364#define HCA_CAP_AH_PORT_NR_CHECK EHCA_BMASK_IBM( 0, 0)
365#define HCA_CAP_ATOMIC EHCA_BMASK_IBM( 1, 1)
366#define HCA_CAP_AUTO_PATH_MIG EHCA_BMASK_IBM( 2, 2)
367#define HCA_CAP_BAD_P_KEY_CTR EHCA_BMASK_IBM( 3, 3)
368#define HCA_CAP_SQD_RTS_PORT_CHANGE EHCA_BMASK_IBM( 4, 4)
369#define HCA_CAP_CUR_QP_STATE_MOD EHCA_BMASK_IBM( 5, 5)
370#define HCA_CAP_INIT_TYPE EHCA_BMASK_IBM( 6, 6)
371#define HCA_CAP_PORT_ACTIVE_EVENT EHCA_BMASK_IBM( 7, 7)
372#define HCA_CAP_Q_KEY_VIOL_CTR EHCA_BMASK_IBM( 8, 8)
373#define HCA_CAP_WQE_RESIZE EHCA_BMASK_IBM( 9, 9)
374#define HCA_CAP_RAW_PACKET_MCAST EHCA_BMASK_IBM(10, 10)
375#define HCA_CAP_SHUTDOWN_PORT EHCA_BMASK_IBM(11, 11)
376#define HCA_CAP_RC_LL_QP EHCA_BMASK_IBM(12, 12)
377#define HCA_CAP_SRQ EHCA_BMASK_IBM(13, 13)
378#define HCA_CAP_UD_LL_QP EHCA_BMASK_IBM(16, 16)
379#define HCA_CAP_RESIZE_MR EHCA_BMASK_IBM(17, 17)
380#define HCA_CAP_MINI_QP EHCA_BMASK_IBM(18, 18)
381#define HCA_CAP_H_ALLOC_RES_SYNC EHCA_BMASK_IBM(19, 19)
382
383/* query port response block */
384struct hipz_query_port {
385 u32 state;
386 u32 bad_pkey_cntr;
387 u32 lmc;
388 u32 lid;
389 u32 subnet_timeout;
390 u32 qkey_viol_cntr;
391 u32 sm_sl;
392 u32 sm_lid;
393 u32 capability_mask;
394 u32 init_type_reply;
395 u32 pkey_tbl_len;
396 u32 gid_tbl_len;
397 u64 gid_prefix;
398 u32 port_nr;
399 u16 pkey_entries[16];
400 u8 reserved1[32];
401 u32 trent_size;
402 u32 trbuf_size;
403 u64 max_msg_sz;
404 u32 max_mtu;
405 u32 vl_cap;
406 u32 phys_pstate;
407 u32 phys_state;
408 u32 phys_speed;
409 u32 phys_width;
410 u8 reserved2[1884];
411 u64 guid_entries[255];
412} __attribute__ ((packed));
413
414#endif
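The HCA_CAP_* masks above are meant to be tested against the hca_cap_indicators word returned in struct hipz_query_hca. A small hedged sketch, assuming the EHCA_BMASK_GET() field-extraction helper from ehca_tools.h (not part of this hunk); hca_supports_srq() is a made-up name:

    /* Illustrative sketch only. */
    static int hca_supports_srq(const struct hipz_query_hca *rblock)
    {
            return EHCA_BMASK_GET(HCA_CAP_SRQ, rblock->hca_cap_indicators) != 0;
    }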
diff --git a/drivers/staging/rdma/ehca/ipz_pt_fn.c b/drivers/staging/rdma/ehca/ipz_pt_fn.c
deleted file mode 100644
index 7ffc748cb973..000000000000
--- a/drivers/staging/rdma/ehca/ipz_pt_fn.c
+++ /dev/null
@@ -1,289 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * internal queue handling
5 *
6 * Authors: Waleri Fomin <fomin@de.ibm.com>
7 * Reinhard Ernst <rernst@de.ibm.com>
8 * Christoph Raisch <raisch@de.ibm.com>
9 *
10 * Copyright (c) 2005 IBM Corporation
11 *
12 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
13 * BSD.
14 *
15 * OpenIB BSD License
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are met:
19 *
20 * Redistributions of source code must retain the above copyright notice, this
21 * list of conditions and the following disclaimer.
22 *
23 * Redistributions in binary form must reproduce the above copyright notice,
24 * this list of conditions and the following disclaimer in the documentation
25 * and/or other materials
26 * provided with the distribution.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
29 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
32 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
33 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
34 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
35 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
36 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
37 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
38 * POSSIBILITY OF SUCH DAMAGE.
39 */
40
41#include <linux/slab.h>
42
43#include "ehca_tools.h"
44#include "ipz_pt_fn.h"
45#include "ehca_classes.h"
46
47#define PAGES_PER_KPAGE (PAGE_SIZE >> EHCA_PAGESHIFT)
48
49struct kmem_cache *small_qp_cache;
50
51void *ipz_qpageit_get_inc(struct ipz_queue *queue)
52{
53 void *ret = ipz_qeit_get(queue);
54 queue->current_q_offset += queue->pagesize;
55 if (queue->current_q_offset > queue->queue_length) {
56 queue->current_q_offset -= queue->pagesize;
57 ret = NULL;
58 }
59 if (((u64)ret) % queue->pagesize) {
60 ehca_gen_err("ERROR!! not at PAGE-Boundary");
61 return NULL;
62 }
63 return ret;
64}
65
66void *ipz_qeit_eq_get_inc(struct ipz_queue *queue)
67{
68 void *ret = ipz_qeit_get(queue);
69 u64 last_entry_in_q = queue->queue_length - queue->qe_size;
70
71 queue->current_q_offset += queue->qe_size;
72 if (queue->current_q_offset > last_entry_in_q) {
73 queue->current_q_offset = 0;
74 queue->toggle_state = (~queue->toggle_state) & 1;
75 }
76
77 return ret;
78}
79
80int ipz_queue_abs_to_offset(struct ipz_queue *queue, u64 addr, u64 *q_offset)
81{
82 int i;
83 for (i = 0; i < queue->queue_length / queue->pagesize; i++) {
84 u64 page = __pa(queue->queue_pages[i]);
85 if (addr >= page && addr < page + queue->pagesize) {
86 *q_offset = addr - page + i * queue->pagesize;
87 return 0;
88 }
89 }
90 return -EINVAL;
91}
92
93#if PAGE_SHIFT < EHCA_PAGESHIFT
94#error Kernel pages must be at least as large as eHCA pages (4K)!
95#endif
96
97/*
98 * allocate pages for queue:
99 * outer loop allocates whole kernel pages (page aligned) and
100 * inner loop divides a kernel page into smaller hca queue pages
101 */
102static int alloc_queue_pages(struct ipz_queue *queue, const u32 nr_of_pages)
103{
104 int k, f = 0;
105 u8 *kpage;
106
107 while (f < nr_of_pages) {
108 kpage = (u8 *)get_zeroed_page(GFP_KERNEL);
109 if (!kpage)
110 goto out;
111
112 for (k = 0; k < PAGES_PER_KPAGE && f < nr_of_pages; k++) {
113 queue->queue_pages[f] = (struct ipz_page *)kpage;
114 kpage += EHCA_PAGESIZE;
115 f++;
116 }
117 }
118 return 1;
119
120out:
121 for (f = 0; f < nr_of_pages && queue->queue_pages[f];
122 f += PAGES_PER_KPAGE)
123 free_page((unsigned long)(queue->queue_pages)[f]);
124 return 0;
125}
126
127static int alloc_small_queue_page(struct ipz_queue *queue, struct ehca_pd *pd)
128{
129 int order = ilog2(queue->pagesize) - 9;
130 struct ipz_small_queue_page *page;
131 unsigned long bit;
132
133 mutex_lock(&pd->lock);
134
135 if (!list_empty(&pd->free[order]))
136 page = list_entry(pd->free[order].next,
137 struct ipz_small_queue_page, list);
138 else {
139 page = kmem_cache_zalloc(small_qp_cache, GFP_KERNEL);
140 if (!page)
141 goto out;
142
143 page->page = get_zeroed_page(GFP_KERNEL);
144 if (!page->page) {
145 kmem_cache_free(small_qp_cache, page);
146 goto out;
147 }
148
149 list_add(&page->list, &pd->free[order]);
150 }
151
152 bit = find_first_zero_bit(page->bitmap, IPZ_SPAGE_PER_KPAGE >> order);
153 __set_bit(bit, page->bitmap);
154 page->fill++;
155
156 if (page->fill == IPZ_SPAGE_PER_KPAGE >> order)
157 list_move(&page->list, &pd->full[order]);
158
159 mutex_unlock(&pd->lock);
160
161 queue->queue_pages[0] = (void *)(page->page | (bit << (order + 9)));
162 queue->small_page = page;
163 queue->offset = bit << (order + 9);
164 return 1;
165
166out:
167 ehca_err(pd->ib_pd.device, "failed to allocate small queue page");
168 mutex_unlock(&pd->lock);
169 return 0;
170}
171
172static void free_small_queue_page(struct ipz_queue *queue, struct ehca_pd *pd)
173{
174 int order = ilog2(queue->pagesize) - 9;
175 struct ipz_small_queue_page *page = queue->small_page;
176 unsigned long bit;
177 int free_page = 0;
178
179 bit = ((unsigned long)queue->queue_pages[0] & ~PAGE_MASK)
180 >> (order + 9);
181
182 mutex_lock(&pd->lock);
183
184 __clear_bit(bit, page->bitmap);
185 page->fill--;
186
187 if (page->fill == 0) {
188 list_del(&page->list);
189 free_page = 1;
190 }
191
192 if (page->fill == (IPZ_SPAGE_PER_KPAGE >> order) - 1)
193 /* the page was full until we freed the chunk */
194 list_move_tail(&page->list, &pd->free[order]);
195
196 mutex_unlock(&pd->lock);
197
198 if (free_page) {
199 free_page(page->page);
200 kmem_cache_free(small_qp_cache, page);
201 }
202}
203
204int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue,
205 const u32 nr_of_pages, const u32 pagesize,
206 const u32 qe_size, const u32 nr_of_sg,
207 int is_small)
208{
209 if (pagesize > PAGE_SIZE) {
210 ehca_gen_err("FATAL ERROR: pagesize=%x "
211 "is greater than kernel page size", pagesize);
212 return 0;
213 }
214
215 /* init queue fields */
216 queue->queue_length = nr_of_pages * pagesize;
217 queue->pagesize = pagesize;
218 queue->qe_size = qe_size;
219 queue->act_nr_of_sg = nr_of_sg;
220 queue->current_q_offset = 0;
221 queue->toggle_state = 1;
222 queue->small_page = NULL;
223
224 /* allocate queue page pointers */
225 queue->queue_pages = kzalloc(nr_of_pages * sizeof(void *),
226 GFP_KERNEL | __GFP_NOWARN);
227 if (!queue->queue_pages) {
228 queue->queue_pages = vzalloc(nr_of_pages * sizeof(void *));
229 if (!queue->queue_pages) {
230 ehca_gen_err("Couldn't allocate queue page list");
231 return 0;
232 }
233 }
234
235 /* allocate actual queue pages */
236 if (is_small) {
237 if (!alloc_small_queue_page(queue, pd))
238 goto ipz_queue_ctor_exit0;
239 } else
240 if (!alloc_queue_pages(queue, nr_of_pages))
241 goto ipz_queue_ctor_exit0;
242
243 return 1;
244
245ipz_queue_ctor_exit0:
246 ehca_gen_err("Couldn't alloc pages queue=%p "
247 "nr_of_pages=%x", queue, nr_of_pages);
248 kvfree(queue->queue_pages);
249
250 return 0;
251}
252
253int ipz_queue_dtor(struct ehca_pd *pd, struct ipz_queue *queue)
254{
255 int i, nr_pages;
256
257 if (!queue || !queue->queue_pages) {
258 ehca_gen_dbg("queue or queue_pages is NULL");
259 return 0;
260 }
261
262 if (queue->small_page)
263 free_small_queue_page(queue, pd);
264 else {
265 nr_pages = queue->queue_length / queue->pagesize;
266 for (i = 0; i < nr_pages; i += PAGES_PER_KPAGE)
267 free_page((unsigned long)queue->queue_pages[i]);
268 }
269
270 kvfree(queue->queue_pages);
271
272 return 1;
273}
274
275int ehca_init_small_qp_cache(void)
276{
277 small_qp_cache = kmem_cache_create("ehca_cache_small_qp",
278 sizeof(struct ipz_small_queue_page),
279 0, SLAB_HWCACHE_ALIGN, NULL);
280 if (!small_qp_cache)
281 return -ENOMEM;
282
283 return 0;
284}
285
286void ehca_cleanup_small_qp_cache(void)
287{
288 kmem_cache_destroy(small_qp_cache);
289}
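Note that ipz_queue_ctor() and ipz_queue_dtor() return 1 on success and 0 on failure rather than 0/-errno. A hedged sketch of the expected pairing (setup_demo_queue() and the page count and entry size are made-up values for illustration):

    /* Illustrative sketch only; a hypothetical caller of the functions above. */
    static int setup_demo_queue(struct ehca_pd *pd, struct ipz_queue *queue)
    {
            const u32 nr_of_pages = 4;      /* 4 * EHCA_PAGESIZE bytes */
            const u32 qe_size = 64;         /* bytes per queue entry */

            if (!ipz_queue_ctor(pd, queue, nr_of_pages, EHCA_PAGESIZE,
                                qe_size, 0 /* nr_of_sg */, 0 /* !is_small */))
                    return -ENOMEM;         /* ctor returns 0 on failure */

            /* ... walk the ring with ipz_qeit_get_inc() ... */

            ipz_queue_dtor(pd, queue);
            return 0;
    }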
diff --git a/drivers/staging/rdma/ehca/ipz_pt_fn.h b/drivers/staging/rdma/ehca/ipz_pt_fn.h
deleted file mode 100644
index a801274ea337..000000000000
--- a/drivers/staging/rdma/ehca/ipz_pt_fn.h
+++ /dev/null
@@ -1,289 +0,0 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * internal queue handling
5 *
6 * Authors: Waleri Fomin <fomin@de.ibm.com>
7 * Reinhard Ernst <rernst@de.ibm.com>
8 * Christoph Raisch <raisch@de.ibm.com>
9 *
10 * Copyright (c) 2005 IBM Corporation
11 *
12 * All rights reserved.
13 *
14 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
15 * BSD.
16 *
17 * OpenIB BSD License
18 *
19 * Redistribution and use in source and binary forms, with or without
20 * modification, are permitted provided that the following conditions are met:
21 *
22 * Redistributions of source code must retain the above copyright notice, this
23 * list of conditions and the following disclaimer.
24 *
25 * Redistributions in binary form must reproduce the above copyright notice,
26 * this list of conditions and the following disclaimer in the documentation
27 * and/or other materials
28 * provided with the distribution.
29 *
30 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
31 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
32 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
33 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
34 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
35 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
36 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
37 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
38 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
39 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
40 * POSSIBILITY OF SUCH DAMAGE.
41 */
42
43#ifndef __IPZ_PT_FN_H__
44#define __IPZ_PT_FN_H__
45
46#define EHCA_PAGESHIFT 12
47#define EHCA_PAGESIZE 4096UL
48#define EHCA_PAGEMASK (~(EHCA_PAGESIZE-1))
49#define EHCA_PT_ENTRIES 512UL
50
51#include "ehca_tools.h"
52#include "ehca_qes.h"
53
54struct ehca_pd;
55struct ipz_small_queue_page;
56
57extern struct kmem_cache *small_qp_cache;
58
59/* struct generic ehca page */
60struct ipz_page {
61 u8 entries[EHCA_PAGESIZE];
62};
63
64#define IPZ_SPAGE_PER_KPAGE (PAGE_SIZE / 512)
65
66struct ipz_small_queue_page {
67 unsigned long page;
68 unsigned long bitmap[IPZ_SPAGE_PER_KPAGE / BITS_PER_LONG];
69 int fill;
70 void *mapped_addr;
71 u32 mmap_count;
72 struct list_head list;
73};
74
75/* struct generic queue in linux kernel virtual memory (kv) */
76struct ipz_queue {
77 u64 current_q_offset; /* current queue entry */
78
79 struct ipz_page **queue_pages; /* array of pages belonging to queue */
80 u32 qe_size; /* queue entry size */
81 u32 act_nr_of_sg;
82 u32 queue_length; /* queue length allocated in bytes */
83 u32 pagesize;
84 u32 toggle_state; /* toggle flag - per page */
85 u32 offset; /* save offset within page for small_qp */
86 struct ipz_small_queue_page *small_page;
87};
88
89/*
90 * return current Queue Entry for a certain q_offset
91 * returns address (kv) of Queue Entry
92 */
93static inline void *ipz_qeit_calc(struct ipz_queue *queue, u64 q_offset)
94{
95 struct ipz_page *current_page;
96 if (q_offset >= queue->queue_length)
97 return NULL;
98 current_page = (queue->queue_pages)[q_offset >> EHCA_PAGESHIFT];
99 return &current_page->entries[q_offset & (EHCA_PAGESIZE - 1)];
100}
101
102/*
103 * return current Queue Entry
104 * returns address (kv) of Queue Entry
105 */
106static inline void *ipz_qeit_get(struct ipz_queue *queue)
107{
108 return ipz_qeit_calc(queue, queue->current_q_offset);
109}
110
111/*
112 * return current Queue Page, increment Queue Page iterator from
113 * page to page in struct ipz_queue; the last increment returns NULL and
114 * does NOT wrap
115 * returns address (kv) of Queue Page
116 * warning don't use in parallel with ipz_qeit_get_inc()
117 */
118void *ipz_qpageit_get_inc(struct ipz_queue *queue);
119
120/*
121 * return current Queue Entry, increment Queue Entry iterator by one
122 * step in struct ipz_queue, will wrap in ringbuffer
123 * returns address (kv) of Queue Entry BEFORE increment
124 * warning don't use in parallel with ipz_qpageit_get_inc()
125 */
126static inline void *ipz_qeit_get_inc(struct ipz_queue *queue)
127{
128 void *ret = ipz_qeit_get(queue);
129 queue->current_q_offset += queue->qe_size;
130 if (queue->current_q_offset >= queue->queue_length) {
131 queue->current_q_offset = 0;
132 /* toggle the valid flag */
133 queue->toggle_state = (~queue->toggle_state) & 1;
134 }
135
136 return ret;
137}
138
139/*
140 * return a bool indicating whether current Queue Entry is valid
141 */
142static inline int ipz_qeit_is_valid(struct ipz_queue *queue)
143{
144 struct ehca_cqe *cqe = ipz_qeit_get(queue);
145 return ((cqe->cqe_flags >> 7) == (queue->toggle_state & 1));
146}
147
148/*
149 * return current Queue Entry, increment Queue Entry iterator by one
150 * step in struct ipz_queue, will wrap in ringbuffer
151 * returns address (kv) of Queue Entry BEFORE increment
152 * returns 0 and does not increment, if wrong valid state
153 * warning don't use in parallel with ipz_qpageit_get_inc()
154 */
155static inline void *ipz_qeit_get_inc_valid(struct ipz_queue *queue)
156{
157 return ipz_qeit_is_valid(queue) ? ipz_qeit_get_inc(queue) : NULL;
158}
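/*
 * Illustrative consumer sketch, not part of the original header: drain
 * every currently valid completion entry using the helpers above.  The
 * struct ehca_cqe layout is assumed to come from ehca_qes.h, as used by
 * ipz_qeit_is_valid() above.
 */
static inline int ipz_drain_cq_example(struct ipz_queue *queue)
{
	struct ehca_cqe *cqe;
	int n = 0;

	while ((cqe = ipz_qeit_get_inc_valid(queue)) != NULL)
		n++;		/* a real caller would decode *cqe here */

	return n;
}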
159
160/*
161 * resets the Queue Entry iterator and
162 * returns address (kv) of first Queue Entry
163 */
164static inline void *ipz_qeit_reset(struct ipz_queue *queue)
165{
166 queue->current_q_offset = 0;
167 return ipz_qeit_get(queue);
168}
169
170/*
171 * return the q_offset corresponding to an absolute address
172 */
173int ipz_queue_abs_to_offset(struct ipz_queue *queue, u64 addr, u64 *q_offset);
174
175/*
176 * return the next queue offset. don't modify the queue.
177 */
178static inline u64 ipz_queue_advance_offset(struct ipz_queue *queue, u64 offset)
179{
180 offset += queue->qe_size;
181 if (offset >= queue->queue_length) offset = 0;
182 return offset;
183}
184
185/* struct generic page table */
186struct ipz_pt {
187 u64 entries[EHCA_PT_ENTRIES];
188};
189
190/* struct page table for a queue, only to be used in pf */
191struct ipz_qpt {
192 /* queue page tables (kv), use u64 because we know the element length */
193 u64 *qpts;
194 u32 n_qpts;
195 u32 n_ptes; /* number of page table entries */
196 u64 *current_pte_addr;
197};
198
199/*
200 * constructor for an ipz_queue_t, placement new for ipz_queue_t,
201 * new for all dependent data structures
202 * all QP Tables are the same
203 * flow:
204 * allocate+pin queue
205 * see ipz_qpt_ctor()
206 * returns true if ok, false if out of memory
207 */
208int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue,
209 const u32 nr_of_pages, const u32 pagesize,
210 const u32 qe_size, const u32 nr_of_sg,
211 int is_small);
212
213/*
214 * destructor for an ipz_queue_t
215 * -# free queue
216 * see ipz_queue_ctor()
217 * returns true if ok, false if queue was NULL-ptr or free failed
218 */
219int ipz_queue_dtor(struct ehca_pd *pd, struct ipz_queue *queue);
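/*
 * Hedged usage sketch, not part of the original header: construct a
 * 16-page queue of 64-byte entries and tear it down again.  The chosen
 * nr_of_pages, qe_size, nr_of_sg and is_small arguments are illustrative
 * assumptions only; real callers derive them from the QP/CQ attributes.
 */
static inline int ipz_queue_ctor_example(struct ehca_pd *pd,
					 struct ipz_queue *queue)
{
	if (!ipz_queue_ctor(pd, queue, 16, EHCA_PAGESIZE, 64, 0, 0))
		return -ENOMEM;		/* ctor returns false on failure */

	/* ... fill and drain the queue via ipz_qeit_get_inc() ... */

	return ipz_queue_dtor(pd, queue) ? 0 : -EINVAL;
}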
220
221/*
222 * constructor for an ipz_qpt_t,
223 * placement new for struct ipz_queue, new for all dependent data structures
224 * all QP Tables are the same,
225 * flow:
226 * -# allocate+pin queue
227 * -# initialise ptcb
228 * -# allocate+pin PTs
229 * -# link PTs to a ring, according to HCA Arch, set bit62 if needed
230 * -# the ring must have room for exactly nr_of_PTEs
231 * see ipz_qpt_ctor()
232 */
233void ipz_qpt_ctor(struct ipz_qpt *qpt,
234 const u32 nr_of_qes,
235 const u32 pagesize,
236 const u32 qe_size,
237 const u8 lowbyte, const u8 toggle,
238 u32 * act_nr_of_QEs, u32 * act_nr_of_pages);
239
240/*
241 * return current Queue Entry, increment Queue Entry iterator by one
242 * step in struct ipz_queue, will wrap in ringbuffer
243 * returns address (kv) of Queue Entry BEFORE increment
244 * warning don't use in parallel with ipz_qpageit_get_inc()
245 * warning unpredictable results may occur if steps>act_nr_of_queue_entries
246 * fix EQ page problems
247 */
248void *ipz_qeit_eq_get_inc(struct ipz_queue *queue);
249
250/*
251 * return current Event Queue Entry, increment Queue Entry iterator
252 * by one step in struct ipz_queue if valid, will wrap in ringbuffer
253 * returns address (kv) of Queue Entry BEFORE increment
254 * returns 0 and does not increment, if wrong valid state
255 * warning don't use in parallel with ipz_qpageit_get_inc()
256 * warning unpredictable results may occur if steps>act_nr_of_queue_entries
257 */
258static inline void *ipz_eqit_eq_get_inc_valid(struct ipz_queue *queue)
259{
260 void *ret = ipz_qeit_get(queue);
261 u32 qe = *(u8 *)ret;
262 if ((qe >> 7) != (queue->toggle_state & 1))
263 return NULL;
264 ipz_qeit_eq_get_inc(queue); /* this is a good one */
265 return ret;
266}
267
268static inline void *ipz_eqit_eq_peek_valid(struct ipz_queue *queue)
269{
270 void *ret = ipz_qeit_get(queue);
271 u32 qe = *(u8 *)ret;
272 if ((qe >> 7) != (queue->toggle_state & 1))
273 return NULL;
274 return ret;
275}
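/*
 * Illustrative event-queue polling sketch, not part of the original
 * header: consume all currently valid EQEs with the helper above.
 */
static inline int ipz_eq_drain_example(struct ipz_queue *eq)
{
	void *eqe;
	int n = 0;

	while ((eqe = ipz_eqit_eq_get_inc_valid(eq)) != NULL)
		n++;		/* a real caller would decode the EQE here */

	return n;
}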
276
277/* returns address (GX) of first queue entry */
278static inline u64 ipz_qpt_get_firstpage(struct ipz_qpt *qpt)
279{
280 return be64_to_cpu(qpt->qpts[0]);
281}
282
283/* returns address (kv) of first page of queue page table */
284static inline void *ipz_qpt_get_qpt(struct ipz_qpt *qpt)
285{
286 return qpt->qpts;
287}
288
289#endif /* __IPZ_PT_FN_H__ */
diff --git a/drivers/staging/rdma/ipath/Kconfig b/drivers/staging/rdma/ipath/Kconfig
deleted file mode 100644
index 041ce0634968..000000000000
--- a/drivers/staging/rdma/ipath/Kconfig
+++ /dev/null
@@ -1,16 +0,0 @@
1config INFINIBAND_IPATH
2 tristate "QLogic HTX HCA support"
3 depends on 64BIT && NET && HT_IRQ
4 ---help---
5 This is a driver for the deprecated QLogic Hyper-Transport
6 IB host channel adapter (model QHT7140),
7 including InfiniBand verbs support. This driver allows these
8 devices to be used with both kernel upper level protocols such
9 as IP-over-InfiniBand as well as with userspace applications
10 (in conjunction with InfiniBand userspace access).
11 For QLogic PCIe QLE based cards, use the QIB driver instead.
12
13 If you have this hardware you will need to boot with PAT disabled
14	  on your x86-64 systems; use the nopat kernel parameter.
15
16 Note that this driver will soon be removed entirely from the kernel.
diff --git a/drivers/staging/rdma/ipath/Makefile b/drivers/staging/rdma/ipath/Makefile
deleted file mode 100644
index 4496f2820c92..000000000000
--- a/drivers/staging/rdma/ipath/Makefile
+++ /dev/null
@@ -1,37 +0,0 @@
1ccflags-y := -DIPATH_IDSTR='"QLogic kernel.org driver"' \
2 -DIPATH_KERN_TYPE=0
3
4obj-$(CONFIG_INFINIBAND_IPATH) += ib_ipath.o
5
6ib_ipath-y := \
7 ipath_cq.o \
8 ipath_diag.o \
9 ipath_dma.o \
10 ipath_driver.o \
11 ipath_eeprom.o \
12 ipath_file_ops.o \
13 ipath_fs.o \
14 ipath_init_chip.o \
15 ipath_intr.o \
16 ipath_keys.o \
17 ipath_mad.o \
18 ipath_mmap.o \
19 ipath_mr.o \
20 ipath_qp.o \
21 ipath_rc.o \
22 ipath_ruc.o \
23 ipath_sdma.o \
24 ipath_srq.o \
25 ipath_stats.o \
26 ipath_sysfs.o \
27 ipath_uc.o \
28 ipath_ud.o \
29 ipath_user_pages.o \
30 ipath_user_sdma.o \
31 ipath_verbs_mcast.o \
32 ipath_verbs.o
33
34ib_ipath-$(CONFIG_HT_IRQ) += ipath_iba6110.o
35
36ib_ipath-$(CONFIG_X86_64) += ipath_wc_x86_64.o
37ib_ipath-$(CONFIG_PPC64) += ipath_wc_ppc64.o
diff --git a/drivers/staging/rdma/ipath/TODO b/drivers/staging/rdma/ipath/TODO
deleted file mode 100644
index cb00158d64c8..000000000000
--- a/drivers/staging/rdma/ipath/TODO
+++ /dev/null
@@ -1,5 +0,0 @@
1The ipath driver has been moved to staging in preparation for its removal in a
2few releases. The driver will be deleted during the 4.6 merge window.
3
4Contact Dennis Dalessandro <dennis.dalessandro@intel.com> and
5Cc: linux-rdma@vger.kernel.org
diff --git a/drivers/staging/rdma/ipath/ipath_common.h b/drivers/staging/rdma/ipath/ipath_common.h
deleted file mode 100644
index 28cfe97cf1e9..000000000000
--- a/drivers/staging/rdma/ipath/ipath_common.h
+++ /dev/null
@@ -1,851 +0,0 @@
1/*
2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#ifndef _IPATH_COMMON_H
35#define _IPATH_COMMON_H
36
37/*
38 * This file contains defines, structures, etc. that are used
39 * to communicate between kernel and user code.
40 */
41
42
43/* This is the IEEE-assigned OUI for QLogic Inc. InfiniPath */
44#define IPATH_SRC_OUI_1 0x00
45#define IPATH_SRC_OUI_2 0x11
46#define IPATH_SRC_OUI_3 0x75
47
48/* version of protocol header (known to chip also). In the long run,
49 * we should be able to generate and accept a range of version numbers;
50 * for now we only accept one, and it's compiled in.
51 */
52#define IPS_PROTO_VERSION 2
53
54/*
55 * These are compile time constants that you may want to enable or disable
56 * if you are trying to debug problems with code or performance.
57 * IPATH_VERBOSE_TRACING define as 1 if you want additional tracing in
58 * fastpath code
59 * IPATH_TRACE_REGWRITES define as 1 if you want register writes to be
60 * traced in fastpath code
61 * _IPATH_TRACING define as 0 if you want to remove all tracing in a
62 * compilation unit
63 * _IPATH_DEBUGGING define as 0 if you want to remove debug prints
64 */
65
66/*
67 * The value in the BTH QP field that InfiniPath uses to differentiate
68 * an infinipath protocol IB packet vs standard IB transport
69 */
70#define IPATH_KD_QP 0x656b79
71
72/*
73 * valid states passed to ipath_set_linkstate() user call
74 */
75#define IPATH_IB_LINKDOWN 0
76#define IPATH_IB_LINKARM 1
77#define IPATH_IB_LINKACTIVE 2
78#define IPATH_IB_LINKDOWN_ONLY 3
79#define IPATH_IB_LINKDOWN_SLEEP 4
80#define IPATH_IB_LINKDOWN_DISABLE 5
81#define IPATH_IB_LINK_LOOPBACK 6 /* enable local loopback */
82#define IPATH_IB_LINK_EXTERNAL 7 /* normal, disable local loopback */
83#define IPATH_IB_LINK_NO_HRTBT 8 /* disable Heartbeat, e.g. for loopback */
84#define IPATH_IB_LINK_HRTBT 9 /* enable heartbeat, normal, non-loopback */
85
86/*
87 * These 3 values (SDR and DDR may be ORed for auto-speed
88 * negotiation) are used for the 3rd argument to path_f_set_ib_cfg
89 * with cmd IPATH_IB_CFG_SPD_ENB, by direct calls or via sysfs. They
90 * are also the possible values for ipath_link_speed_enabled and active
91 * The values were chosen to match values used within the IB spec.
92 */
93#define IPATH_IB_SDR 1
94#define IPATH_IB_DDR 2
95
96/*
97 * stats maintained by the driver. For now, at least, this is global
98 * to all minor devices.
99 */
100struct infinipath_stats {
101 /* number of interrupts taken */
102 __u64 sps_ints;
103 /* number of interrupts for errors */
104 __u64 sps_errints;
105 /* number of errors from chip (not incl. packet errors or CRC) */
106 __u64 sps_errs;
107 /* number of packet errors from chip other than CRC */
108 __u64 sps_pkterrs;
109 /* number of packets with CRC errors (ICRC and VCRC) */
110 __u64 sps_crcerrs;
111 /* number of hardware errors reported (parity, etc.) */
112 __u64 sps_hwerrs;
113 /* number of times IB link changed state unexpectedly */
114 __u64 sps_iblink;
115 __u64 sps_unused; /* was fastrcvint, no longer implemented */
116 /* number of kernel (port0) packets received */
117 __u64 sps_port0pkts;
118 /* number of "ethernet" packets sent by driver */
119 __u64 sps_ether_spkts;
120 /* number of "ethernet" packets received by driver */
121 __u64 sps_ether_rpkts;
122 /* number of SMA packets sent by driver. Obsolete. */
123 __u64 sps_sma_spkts;
124 /* number of SMA packets received by driver. Obsolete. */
125 __u64 sps_sma_rpkts;
126 /* number of times all ports rcvhdrq was full and packet dropped */
127 __u64 sps_hdrqfull;
128 /* number of times all ports egrtid was full and packet dropped */
129 __u64 sps_etidfull;
130 /*
131 * number of times we tried to send from driver, but no pio buffers
132 * avail
133 */
134 __u64 sps_nopiobufs;
135 /* number of ports currently open */
136 __u64 sps_ports;
137 /* list of pkeys (other than default) accepted (0 means not set) */
138 __u16 sps_pkeys[4];
139 __u16 sps_unused16[4]; /* available; maintaining compatible layout */
140 /* number of user ports per chip (not IB ports) */
141 __u32 sps_nports;
142 /* not our interrupt, or already handled */
143 __u32 sps_nullintr;
144 /* max number of packets handled per receive call */
145 __u32 sps_maxpkts_call;
146 /* avg number of packets handled per receive call */
147 __u32 sps_avgpkts_call;
148 /* total number of pages locked */
149 __u64 sps_pagelocks;
150 /* total number of pages unlocked */
151 __u64 sps_pageunlocks;
152 /*
153 * Number of packets dropped in kernel other than errors (ether
154 * packets if ipath not configured, etc.)
155 */
156 __u64 sps_krdrops;
157 __u64 sps_txeparity; /* PIO buffer parity error, recovered */
158 /* pad for future growth */
159 __u64 __sps_pad[45];
160};
161
162/*
163 * These are the status bits readable (in ascii form, 64bit value)
164 * from the "status" sysfs file.
165 */
166#define IPATH_STATUS_INITTED 0x1 /* basic initialization done */
167#define IPATH_STATUS_DISABLED 0x2 /* hardware disabled */
168/* Device has been disabled via admin request */
169#define IPATH_STATUS_ADMIN_DISABLED 0x4
170/* Chip has been found and initted */
171#define IPATH_STATUS_CHIP_PRESENT 0x20
172/* IB link is at ACTIVE, usable for data traffic */
173#define IPATH_STATUS_IB_READY 0x40
174/* link is configured, LID, MTU, etc. have been set */
175#define IPATH_STATUS_IB_CONF 0x80
176/* no link established, probably no cable */
177#define IPATH_STATUS_IB_NOCABLE 0x100
178/* A Fatal hardware error has occurred. */
179#define IPATH_STATUS_HWERROR 0x200
180
181/*
182 * The list of usermode accessible registers. Also see Reg_* later in file.
183 */
184typedef enum _ipath_ureg {
185 /* (RO) DMA RcvHdr to be used next. */
186 ur_rcvhdrtail = 0,
187 /* (RW) RcvHdr entry to be processed next by host. */
188 ur_rcvhdrhead = 1,
189 /* (RO) Index of next Eager index to use. */
190 ur_rcvegrindextail = 2,
191 /* (RW) Eager TID to be processed next */
192 ur_rcvegrindexhead = 3,
193 /* For internal use only; max register number. */
194 _IPATH_UregMax
195} ipath_ureg;
196
197/* bit values for spi_runtime_flags */
198#define IPATH_RUNTIME_HT 0x1
199#define IPATH_RUNTIME_PCIE 0x2
200#define IPATH_RUNTIME_FORCE_WC_ORDER 0x4
201#define IPATH_RUNTIME_RCVHDR_COPY 0x8
202#define IPATH_RUNTIME_MASTER 0x10
203#define IPATH_RUNTIME_NODMA_RTAIL 0x80
204#define IPATH_RUNTIME_SDMA 0x200
205#define IPATH_RUNTIME_FORCE_PIOAVAIL 0x400
206#define IPATH_RUNTIME_PIO_REGSWAPPED 0x800
207
208/*
209 * This structure is returned by ipath_userinit() immediately after
210 * open to get implementation-specific info, and info specific to this
211 * instance.
212 *
213 * This struct must have explicit pad fields where type sizes
214 * may result in different alignments between 32 and 64 bit
215 * programs, since the 64 bit kernel requires the user code
216 * to have matching offsets
217 */
218struct ipath_base_info {
219 /* version of hardware, for feature checking. */
220 __u32 spi_hw_version;
221 /* version of software, for feature checking. */
222 __u32 spi_sw_version;
223 /* InfiniPath port assigned, goes into sent packets */
224 __u16 spi_port;
225 __u16 spi_subport;
226 /*
227 * IB MTU, a packet's IB data must be less than this.
228 * The MTU is in bytes, and will be a multiple of 4 bytes.
229 */
230 __u32 spi_mtu;
231 /*
232 * Size of a PIO buffer. Any given packet's total size must be less
233 * than this (in words). Included is the starting control word, so
234 * if 513 is returned, then total pkt size is 512 words or less.
235 */
236 __u32 spi_piosize;
237 /* size of the TID cache in infinipath, in entries */
238 __u32 spi_tidcnt;
239 /* size of the TID Eager list in infinipath, in entries */
240 __u32 spi_tidegrcnt;
241 /* size of a single receive header queue entry in words. */
242 __u32 spi_rcvhdrent_size;
243 /*
244 * Count of receive header queue entries allocated.
245 * This may be less than the spu_rcvhdrcnt passed in!
246 */
247 __u32 spi_rcvhdr_cnt;
248
249 /* per-chip and other runtime features bitmap (IPATH_RUNTIME_*) */
250 __u32 spi_runtime_flags;
251
252 /* address where receive buffer queue is mapped into */
253 __u64 spi_rcvhdr_base;
254
255 /* these receive buffers are mapped into the user program */
256
257 /* base address of eager TID receive buffers. */
258 __u64 spi_rcv_egrbufs;
259
260 /* Allocated by initialization code, not by protocol. */
261
262 /*
263 * Size of each TID buffer in host memory, starting at
264 * spi_rcv_egrbufs. The buffers are virtually contiguous.
265 */
266 __u32 spi_rcv_egrbufsize;
267 /*
268 * The special QP (queue pair) value that identifies an infinipath
269 * protocol packet from standard IB packets. More, probably much
270 * more, to be added.
271 */
272 __u32 spi_qpair;
273
274 /*
275 * User register base for init code, not to be used directly by
276 * protocol or applications.
277 */
278 __u64 __spi_uregbase;
279 /*
280 * Maximum buffer size in bytes that can be used in a single TID
281 * entry (assuming the buffer is aligned to this boundary). This is
282 * the minimum of what the hardware and software support. Guaranteed
283 * to be a power of 2.
284 */
285 __u32 spi_tid_maxsize;
286 /*
287 * alignment of each pio send buffer (byte count
288 * to add to spi_piobufbase to get to second buffer)
289 */
290 __u32 spi_pioalign;
291 /*
292 * The index of the first pio buffer available to this process;
293 * needed to do lookup in spi_pioavailaddr; not added to
294 * spi_piobufbase.
295 */
296 __u32 spi_pioindex;
297 /* number of buffers mapped for this process */
298 __u32 spi_piocnt;
299
300 /*
301 * Base address of writeonly pio buffers for this process.
302 * Each buffer has spi_piosize words, and is aligned on spi_pioalign
303 * boundaries. spi_piocnt buffers are mapped from this address
304 */
305 __u64 spi_piobufbase;
306
307 /*
308 * Base address of readonly memory copy of the pioavail registers.
309 * There are 2 bits for each buffer.
310 */
311 __u64 spi_pioavailaddr;
312
313 /*
314 * Address where driver updates a copy of the interface and driver
315 * status (IPATH_STATUS_*) as a 64 bit value. It's followed by a
316 * string indicating hardware error, if there was one.
317 */
318 __u64 spi_status;
319
320 /* number of chip ports available to user processes */
321 __u32 spi_nports;
322 /* unit number of chip we are using */
323 __u32 spi_unit;
324 /* num bufs in each contiguous set */
325 __u32 spi_rcv_egrperchunk;
326 /* size in bytes of each contiguous set */
327 __u32 spi_rcv_egrchunksize;
328 /* total size of mmap to cover full rcvegrbuffers */
329 __u32 spi_rcv_egrbuftotlen;
330 __u32 spi_filler_for_align;
331 /* address of readonly memory copy of the rcvhdrq tail register. */
332 __u64 spi_rcvhdr_tailaddr;
333
334 /* shared memory pages for subports if port is shared */
335 __u64 spi_subport_uregbase;
336 __u64 spi_subport_rcvegrbuf;
337 __u64 spi_subport_rcvhdr_base;
338
339 /* shared memory page for hardware port if it is shared */
340 __u64 spi_port_uregbase;
341 __u64 spi_port_rcvegrbuf;
342 __u64 spi_port_rcvhdr_base;
343 __u64 spi_port_rcvhdr_tailaddr;
344
345} __attribute__ ((aligned(8)));
346
347
348/*
349 * This version number is given to the driver by the user code during
350 * initialization in the spu_userversion field of ipath_user_info, so
351 * the driver can check for compatibility with user code.
352 *
353 * The major version changes when data structures
354 * change in an incompatible way. The driver must be the same or higher
355 * for initialization to succeed. In some cases, a higher version
356 * driver will not interoperate with older software, and initialization
357 * will return an error.
358 */
359#define IPATH_USER_SWMAJOR 1
360
361/*
362 * Minor version differences are always compatible
363 * within a major version, however if user software is larger
364 * than driver software, some new features and/or structure fields
365 * may not be implemented; the user code must deal with this if it
366 * cares, or it must abort after initialization reports the difference.
367 */
368#define IPATH_USER_SWMINOR 6
369
370#define IPATH_USER_SWVERSION ((IPATH_USER_SWMAJOR<<16) | IPATH_USER_SWMINOR)
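/*
 * Hedged illustration, not part of the original header: splitting the
 * version word back into its parts.  The bare equality test on the
 * major number is a simplification of the compatibility rules described
 * above; the real driver applies the full policy.
 */
static inline int ipath_user_swmajor_matches(__u32 spu_userversion)
{
	/* major number is in the high 16 bits, minor in the low 16 */
	return (spu_userversion >> 16) == (IPATH_USER_SWVERSION >> 16);
}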
371
372#define IPATH_KERN_TYPE 0
373
374/*
375 * Similarly, this is the kernel version going back to the user. It's
376 * slightly different, in that we want to tell if the driver was built as
377 * part of a QLogic release, or built from the driver at openfabrics.org,
378 * kernel.org, or a standard distribution, for support reasons.
379 * The high bit is 0 for non-QLogic and 1 for QLogic-built/supplied.
380 *
381 * It's returned by the driver to the user code during initialization in the
382 * spi_sw_version field of ipath_base_info, so the user code can in turn
383 * check for compatibility with the kernel.
384*/
385#define IPATH_KERN_SWVERSION ((IPATH_KERN_TYPE<<31) | IPATH_USER_SWVERSION)
386
387/*
388 * This structure is passed to ipath_userinit() to tell the driver where
389 * user code buffers are, sizes, etc. The offsets and sizes of the
390 * fields must remain unchanged, for binary compatibility. It can
391 * be extended, provided userversion is changed so user code can detect it, if needed
392 */
393struct ipath_user_info {
394 /*
395 * version of user software, to detect compatibility issues.
396 * Should be set to IPATH_USER_SWVERSION.
397 */
398 __u32 spu_userversion;
399
400 /* desired number of receive header queue entries */
401 __u32 spu_rcvhdrcnt;
402
403 /* size of struct base_info to write to */
404 __u32 spu_base_info_size;
405
406 /*
407 * number of words in KD protocol header
408 * This tells InfiniPath how many words to copy to rcvhdrq. If 0,
409 * kernel uses a default. Once set, attempts to set any other value
410 * are an error (EAGAIN) until driver is reloaded.
411 */
412 __u32 spu_rcvhdrsize;
413
414 /*
415 * If two or more processes wish to share a port, each process
416 * must set the spu_subport_cnt and spu_subport_id to the same
417 * values. The only restriction on the spu_subport_id is that
418 * it be unique for a given node.
419 */
420 __u16 spu_subport_cnt;
421 __u16 spu_subport_id;
422
423 __u32 spu_unused; /* kept for compatible layout */
424
425 /*
426 * address of struct base_info to write to
427 */
428 __u64 spu_base_info;
429
430} __attribute__ ((aligned(8)));
431
432/* User commands. */
433
434#define IPATH_CMD_MIN 16
435
436#define __IPATH_CMD_USER_INIT 16 /* old set up userspace (for old user code) */
437#define IPATH_CMD_PORT_INFO 17 /* find out what resources we got */
438#define IPATH_CMD_RECV_CTRL 18 /* control receipt of packets */
439#define IPATH_CMD_TID_UPDATE 19 /* update expected TID entries */
440#define IPATH_CMD_TID_FREE 20 /* free expected TID entries */
441#define IPATH_CMD_SET_PART_KEY 21 /* add partition key */
442#define __IPATH_CMD_SLAVE_INFO 22 /* return info on slave processes (for old user code) */
443#define IPATH_CMD_ASSIGN_PORT 23 /* allocate HCA and port */
444#define IPATH_CMD_USER_INIT 24 /* set up userspace */
445#define IPATH_CMD_UNUSED_1 25
446#define IPATH_CMD_UNUSED_2 26
447#define IPATH_CMD_PIOAVAILUPD 27 /* force an update of PIOAvail reg */
448#define IPATH_CMD_POLL_TYPE 28 /* set the kind of polling we want */
449#define IPATH_CMD_ARMLAUNCH_CTRL 29 /* armlaunch detection control */
450/* 30 is unused */
451#define IPATH_CMD_SDMA_INFLIGHT 31 /* sdma inflight counter request */
452#define IPATH_CMD_SDMA_COMPLETE 32 /* sdma completion counter request */
453
454/*
455 * Poll types
456 */
457#define IPATH_POLL_TYPE_URGENT 0x01
458#define IPATH_POLL_TYPE_OVERFLOW 0x02
459
460struct ipath_port_info {
461 __u32 num_active; /* number of active units */
462 __u32 unit; /* unit (chip) assigned to caller */
463 __u16 port; /* port on unit assigned to caller */
464 __u16 subport; /* subport on unit assigned to caller */
465 __u16 num_ports; /* number of ports available on unit */
466 __u16 num_subports; /* number of subports opened on port */
467};
468
469struct ipath_tid_info {
470 __u32 tidcnt;
471 /* make structure same size in 32 and 64 bit */
472 __u32 tid__unused;
473 /* virtual address of first page in transfer */
474 __u64 tidvaddr;
475 /* pointer (same size 32/64 bit) to __u16 tid array */
476 __u64 tidlist;
477
478 /*
479 * pointer (same size 32/64 bit) to bitmap of TIDs used
480 * for this call; checked for being large enough at open
481 */
482 __u64 tidmap;
483};
484
485struct ipath_cmd {
486 __u32 type; /* command type */
487 union {
488 struct ipath_tid_info tid_info;
489 struct ipath_user_info user_info;
490
491 /*
492 * address in userspace where we should put the sdma
493 * inflight counter
494 */
495 __u64 sdma_inflight;
496 /*
497 * address in userspace where we should put the sdma
498 * completion counter
499 */
500 __u64 sdma_complete;
501 /* address in userspace of struct ipath_port_info to
502 write result to */
503 __u64 port_info;
504 /* enable/disable receipt of packets */
505 __u32 recv_ctrl;
506 /* enable/disable armlaunch errors (non-zero to enable) */
507 __u32 armlaunch_ctrl;
508 /* partition key to set */
509 __u16 part_key;
510 /* user address of __u32 bitmask of active slaves */
511 __u64 slave_mask_addr;
512 /* type of polling we want */
513 __u16 poll_type;
514 } cmd;
515};
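/*
 * Userspace sketch only, not part of the original header: one way a
 * user program might fill in struct ipath_user_info and hand it to the
 * driver as an IPATH_CMD_USER_INIT command.  The spu_rcvhdrcnt value is
 * an illustrative assumption, and the real user library performs a
 * fuller handshake (IPATH_CMD_ASSIGN_PORT first, then USER_INIT):
 *
 *	struct ipath_base_info base;
 *	struct ipath_cmd cmd = { 0 };
 *
 *	cmd.type = IPATH_CMD_USER_INIT;
 *	cmd.cmd.user_info.spu_userversion = IPATH_USER_SWVERSION;
 *	cmd.cmd.user_info.spu_rcvhdrcnt = 64;
 *	cmd.cmd.user_info.spu_base_info_size = sizeof(base);
 *	cmd.cmd.user_info.spu_base_info = (__u64)(unsigned long)&base;
 *
 *	if (write(fd, &cmd, sizeof(cmd)) != sizeof(cmd))
 *		... handle error ...
 */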
516
517struct ipath_iovec {
518 /* Pointer to data, but same size 32 and 64 bit */
519 __u64 iov_base;
520
521 /*
522 * Length of data; don't need 64 bits, but want
523 * ipath_sendpkt to remain same size as before 32 bit changes, so...
524 */
525 __u64 iov_len;
526};
527
528/*
529 * Describes a single packet for send. Each packet can have one or more
530 * buffers, but the total length (exclusive of IB headers) must be less
531 * than the MTU, and if using the PIO method, entire packet length,
532 * including IB headers, must be less than the ipath_piosize value (words).
533 * Use of this necessitates including sys/uio.h
534 */
535struct __ipath_sendpkt {
536 __u32 sps_flags; /* flags for packet (TBD) */
537 __u32 sps_cnt; /* number of entries to use in sps_iov */
538 /* array of iov's describing packet. TEMPORARY */
539 struct ipath_iovec sps_iov[4];
540};
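/*
 * Illustrative helper, not part of the original header: describing a
 * single-buffer packet with the structures above.  Buffer ownership and
 * the actual submission path are outside the scope of this file.
 */
static inline void ipath_sendpkt_fill_example(struct __ipath_sendpkt *pkt,
					      void *buf, __u64 len)
{
	pkt->sps_flags = 0;
	pkt->sps_cnt = 1;			/* one iovec in sps_iov[] */
	pkt->sps_iov[0].iov_base = (__u64)(unsigned long)buf;
	pkt->sps_iov[0].iov_len = len;
}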
541
542/*
543 * diagnostics can send a packet by "writing" one of the following
544 * two structs to the diag data special file.
545 * The first is the legacy version, for backward compatibility.
546 */
547struct ipath_diag_pkt {
548 __u32 unit;
549 __u64 data;
550 __u32 len;
551};
552
553/* The second diag_pkt struct is the expanded version that allows
554 * more control over the packet, specifically, by allowing a custom
555 * pbc (+ static rate) qword, so that special modes and deliberate
556 * changes to CRCs can be used. The elements were also re-ordered
557 * for better alignment and to avoid padding issues.
558 */
559struct ipath_diag_xpkt {
560 __u64 data;
561 __u64 pbc_wd;
562 __u32 unit;
563 __u32 len;
564};
565
566/*
567 * Data layout in I2C flash (for GUID, etc.)
568 * All fields are little-endian binary unless otherwise stated
569 */
570#define IPATH_FLASH_VERSION 2
571struct ipath_flash {
572 /* flash layout version (IPATH_FLASH_VERSION) */
573 __u8 if_fversion;
574 /* checksum protecting if_length bytes */
575 __u8 if_csum;
576 /*
577 * valid length (in use, protected by if_csum), including
578 * if_fversion and if_csum themselves
579 */
580 __u8 if_length;
581 /* the GUID, in network order */
582 __u8 if_guid[8];
583 /* number of GUIDs to use, starting from if_guid */
584 __u8 if_numguid;
585 /* the (last 10 characters of) board serial number, in ASCII */
586 char if_serial[12];
587 /* board mfg date (YYYYMMDD ASCII) */
588 char if_mfgdate[8];
589 /* last board rework/test date (YYYYMMDD ASCII) */
590 char if_testdate[8];
591 /* logging of error counts, TBD */
592 __u8 if_errcntp[4];
593 /* powered on hours, updated at driver unload */
594 __u8 if_powerhour[2];
595 /* ASCII free-form comment field */
596 char if_comment[32];
597 /* Backwards compatible prefix for longer QLogic Serial Numbers */
598 char if_sprefix[4];
599 /* 82 bytes used, min flash size is 128 bytes */
600 __u8 if_future[46];
601};
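/*
 * Hedged illustration, not part of the original header: if_guid is
 * stored in network (big-endian) byte order, so a host-order value can
 * be assembled byte by byte regardless of host endianness.
 */
static inline __u64 ipath_flash_guid_example(const struct ipath_flash *ifp)
{
	__u64 guid = 0;
	int i;

	for (i = 0; i < 8; i++)
		guid = (guid << 8) | ifp->if_guid[i];

	return guid;
}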
602
603/*
604 * These are the counters implemented in the chip, and are listed in order.
605 * The InterCaps naming is taken straight from the chip spec.
606 */
607struct infinipath_counters {
608 __u64 LBIntCnt;
609 __u64 LBFlowStallCnt;
610 __u64 TxSDmaDescCnt; /* was Reserved1 */
611 __u64 TxUnsupVLErrCnt;
612 __u64 TxDataPktCnt;
613 __u64 TxFlowPktCnt;
614 __u64 TxDwordCnt;
615 __u64 TxLenErrCnt;
616 __u64 TxMaxMinLenErrCnt;
617 __u64 TxUnderrunCnt;
618 __u64 TxFlowStallCnt;
619 __u64 TxDroppedPktCnt;
620 __u64 RxDroppedPktCnt;
621 __u64 RxDataPktCnt;
622 __u64 RxFlowPktCnt;
623 __u64 RxDwordCnt;
624 __u64 RxLenErrCnt;
625 __u64 RxMaxMinLenErrCnt;
626 __u64 RxICRCErrCnt;
627 __u64 RxVCRCErrCnt;
628 __u64 RxFlowCtrlErrCnt;
629 __u64 RxBadFormatCnt;
630 __u64 RxLinkProblemCnt;
631 __u64 RxEBPCnt;
632 __u64 RxLPCRCErrCnt;
633 __u64 RxBufOvflCnt;
634 __u64 RxTIDFullErrCnt;
635 __u64 RxTIDValidErrCnt;
636 __u64 RxPKeyMismatchCnt;
637 __u64 RxP0HdrEgrOvflCnt;
638 __u64 RxP1HdrEgrOvflCnt;
639 __u64 RxP2HdrEgrOvflCnt;
640 __u64 RxP3HdrEgrOvflCnt;
641 __u64 RxP4HdrEgrOvflCnt;
642 __u64 RxP5HdrEgrOvflCnt;
643 __u64 RxP6HdrEgrOvflCnt;
644 __u64 RxP7HdrEgrOvflCnt;
645 __u64 RxP8HdrEgrOvflCnt;
646 __u64 RxP9HdrEgrOvflCnt; /* was Reserved6 */
647 __u64 RxP10HdrEgrOvflCnt; /* was Reserved7 */
648 __u64 RxP11HdrEgrOvflCnt; /* new for IBA7220 */
649 __u64 RxP12HdrEgrOvflCnt; /* new for IBA7220 */
650 __u64 RxP13HdrEgrOvflCnt; /* new for IBA7220 */
651 __u64 RxP14HdrEgrOvflCnt; /* new for IBA7220 */
652 __u64 RxP15HdrEgrOvflCnt; /* new for IBA7220 */
653 __u64 RxP16HdrEgrOvflCnt; /* new for IBA7220 */
654 __u64 IBStatusChangeCnt;
655 __u64 IBLinkErrRecoveryCnt;
656 __u64 IBLinkDownedCnt;
657 __u64 IBSymbolErrCnt;
658 /* The following are new for IBA7220 */
659 __u64 RxVL15DroppedPktCnt;
660 __u64 RxOtherLocalPhyErrCnt;
661 __u64 PcieRetryBufDiagQwordCnt;
662 __u64 ExcessBufferOvflCnt;
663 __u64 LocalLinkIntegrityErrCnt;
664 __u64 RxVlErrCnt;
665 __u64 RxDlidFltrCnt;
666};
667
668/*
669 * The next set of defines are for packet headers, and chip register
670 * and memory bits that are visible to and/or used by user-mode software
671 * The other bits that are used only by the driver or diags are in
672 * ipath_registers.h
673 */
674
675/* RcvHdrFlags bits */
676#define INFINIPATH_RHF_LENGTH_MASK 0x7FF
677#define INFINIPATH_RHF_LENGTH_SHIFT 0
678#define INFINIPATH_RHF_RCVTYPE_MASK 0x7
679#define INFINIPATH_RHF_RCVTYPE_SHIFT 11
680#define INFINIPATH_RHF_EGRINDEX_MASK 0xFFF
681#define INFINIPATH_RHF_EGRINDEX_SHIFT 16
682#define INFINIPATH_RHF_SEQ_MASK 0xF
683#define INFINIPATH_RHF_SEQ_SHIFT 0
684#define INFINIPATH_RHF_HDRQ_OFFSET_MASK 0x7FF
685#define INFINIPATH_RHF_HDRQ_OFFSET_SHIFT 4
686#define INFINIPATH_RHF_H_ICRCERR 0x80000000
687#define INFINIPATH_RHF_H_VCRCERR 0x40000000
688#define INFINIPATH_RHF_H_PARITYERR 0x20000000
689#define INFINIPATH_RHF_H_LENERR 0x10000000
690#define INFINIPATH_RHF_H_MTUERR 0x08000000
691#define INFINIPATH_RHF_H_IHDRERR 0x04000000
692#define INFINIPATH_RHF_H_TIDERR 0x02000000
693#define INFINIPATH_RHF_H_MKERR 0x01000000
694#define INFINIPATH_RHF_H_IBERR 0x00800000
695#define INFINIPATH_RHF_H_ERR_MASK 0xFF800000
696#define INFINIPATH_RHF_L_USE_EGR 0x80000000
697#define INFINIPATH_RHF_L_SWA 0x00008000
698#define INFINIPATH_RHF_L_SWB 0x00004000
699
700/* infinipath header fields */
701#define INFINIPATH_I_VERS_MASK 0xF
702#define INFINIPATH_I_VERS_SHIFT 28
703#define INFINIPATH_I_PORT_MASK 0xF
704#define INFINIPATH_I_PORT_SHIFT 24
705#define INFINIPATH_I_TID_MASK 0x7FF
706#define INFINIPATH_I_TID_SHIFT 13
707#define INFINIPATH_I_OFFSET_MASK 0x1FFF
708#define INFINIPATH_I_OFFSET_SHIFT 0
709
710/* K_PktFlags bits */
711#define INFINIPATH_KPF_INTR 0x1
712#define INFINIPATH_KPF_SUBPORT_MASK 0x3
713#define INFINIPATH_KPF_SUBPORT_SHIFT 1
714
715#define INFINIPATH_MAX_SUBPORT 4
716
717/* SendPIO per-buffer control */
718#define INFINIPATH_SP_TEST 0x40
719#define INFINIPATH_SP_TESTEBP 0x20
720#define INFINIPATH_SP_TRIGGER_SHIFT 15
721
722/* SendPIOAvail bits */
723#define INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT 1
724#define INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT 0
725
726/* infinipath header format */
727struct ipath_header {
728 /*
729 * Version - 4 bits, Port - 4 bits, TID - 10 bits and Offset -
730 * 14 bits before ECO change ~28 Dec 03. After that, Vers 4,
731 * Port 4, TID 11, offset 13.
732 */
733 __le32 ver_port_tid_offset;
734 __le16 chksum;
735 __le16 pkt_flags;
736};
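/*
 * Illustrative decode, not part of the original header: unpacking the
 * post-ECO layout of ver_port_tid_offset with the INFINIPATH_I_* masks
 * and shifts defined above.
 */
static inline void ipath_hdr_decode_example(const struct ipath_header *hdr,
					    __u32 *port, __u32 *tid)
{
	__u32 w = __le32_to_cpu(hdr->ver_port_tid_offset);

	*port = (w >> INFINIPATH_I_PORT_SHIFT) & INFINIPATH_I_PORT_MASK;
	*tid  = (w >> INFINIPATH_I_TID_SHIFT) & INFINIPATH_I_TID_MASK;
}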
737
738/* infinipath user message header format.
739 * This structure contains the first 4 fields common to all protocols
740 * that employ infinipath.
741 */
742struct ipath_message_header {
743 __be16 lrh[4];
744 __be32 bth[3];
745 /* fields below this point are in host byte order */
746 struct ipath_header iph;
747 __u8 sub_opcode;
748};
749
750/* infinipath ethernet header format */
751struct ether_header {
752 __be16 lrh[4];
753 __be32 bth[3];
754 struct ipath_header iph;
755 __u8 sub_opcode;
756 __u8 cmd;
757 __be16 lid;
758 __u16 mac[3];
759 __u8 frag_num;
760 __u8 seq_num;
761 __le32 len;
762 /* MUST be of word size due to PIO write requirements */
763 __le32 csum;
764 __le16 csum_offset;
765 __le16 flags;
766 __u16 first_2_bytes;
767 __u8 unused[2]; /* currently unused */
768};
769
770
771/* IB - LRH header consts */
772#define IPATH_LRH_GRH 0x0003 /* 1st word of IB LRH - next header: GRH */
773#define IPATH_LRH_BTH 0x0002 /* 1st word of IB LRH - next header: BTH */
774
775/* misc. */
776#define SIZE_OF_CRC 1
777
778#define IPATH_DEFAULT_P_KEY 0xFFFF
779#define IPATH_PERMISSIVE_LID 0xFFFF
780#define IPATH_AETH_CREDIT_SHIFT 24
781#define IPATH_AETH_CREDIT_MASK 0x1F
782#define IPATH_AETH_CREDIT_INVAL 0x1F
783#define IPATH_PSN_MASK 0xFFFFFF
784#define IPATH_MSN_MASK 0xFFFFFF
785#define IPATH_QPN_MASK 0xFFFFFF
786#define IPATH_MULTICAST_LID_BASE 0xC000
787#define IPATH_EAGER_TID_ID INFINIPATH_I_TID_MASK
788#define IPATH_MULTICAST_QPN 0xFFFFFF
789
790/* Receive Header Queue: receive type (from infinipath) */
791#define RCVHQ_RCV_TYPE_EXPECTED 0
792#define RCVHQ_RCV_TYPE_EAGER 1
793#define RCVHQ_RCV_TYPE_NON_KD 2
794#define RCVHQ_RCV_TYPE_ERROR 3
795
796
797/* sub OpCodes - ith4x */
798#define IPATH_ITH4X_OPCODE_ENCAP 0x81
799#define IPATH_ITH4X_OPCODE_LID_ARP 0x82
800
801#define IPATH_HEADER_QUEUE_WORDS 9
802
803/* functions for extracting fields from rcvhdrq entries for the driver.
804 */
805static inline __u32 ipath_hdrget_err_flags(const __le32 * rbuf)
806{
807 return __le32_to_cpu(rbuf[1]) & INFINIPATH_RHF_H_ERR_MASK;
808}
809
810static inline __u32 ipath_hdrget_rcv_type(const __le32 * rbuf)
811{
812 return (__le32_to_cpu(rbuf[0]) >> INFINIPATH_RHF_RCVTYPE_SHIFT)
813 & INFINIPATH_RHF_RCVTYPE_MASK;
814}
815
816static inline __u32 ipath_hdrget_length_in_bytes(const __le32 * rbuf)
817{
818 return ((__le32_to_cpu(rbuf[0]) >> INFINIPATH_RHF_LENGTH_SHIFT)
819 & INFINIPATH_RHF_LENGTH_MASK) << 2;
820}
821
822static inline __u32 ipath_hdrget_index(const __le32 * rbuf)
823{
824 return (__le32_to_cpu(rbuf[0]) >> INFINIPATH_RHF_EGRINDEX_SHIFT)
825 & INFINIPATH_RHF_EGRINDEX_MASK;
826}
827
828static inline __u32 ipath_hdrget_seq(const __le32 *rbuf)
829{
830 return (__le32_to_cpu(rbuf[1]) >> INFINIPATH_RHF_SEQ_SHIFT)
831 & INFINIPATH_RHF_SEQ_MASK;
832}
833
834static inline __u32 ipath_hdrget_offset(const __le32 *rbuf)
835{
836 return (__le32_to_cpu(rbuf[1]) >> INFINIPATH_RHF_HDRQ_OFFSET_SHIFT)
837 & INFINIPATH_RHF_HDRQ_OFFSET_MASK;
838}
839
840static inline __u32 ipath_hdrget_use_egr_buf(const __le32 *rbuf)
841{
842 return __le32_to_cpu(rbuf[0]) & INFINIPATH_RHF_L_USE_EGR;
843}
844
845static inline __u32 ipath_hdrget_ipath_ver(__le32 hdrword)
846{
847 return (__le32_to_cpu(hdrword) >> INFINIPATH_I_VERS_SHIFT)
848 & INFINIPATH_I_VERS_MASK;
849}
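/*
 * Hedged receive-side sketch, not part of the original header: one way
 * a consumer could classify a rcvhdrq entry with the helpers above.
 * The layout of the entry pointed to by 'rbuf' is driver-specific.
 */
static inline __u32 ipath_rhf_classify_example(const __le32 *rbuf)
{
	if (ipath_hdrget_err_flags(rbuf))
		return RCVHQ_RCV_TYPE_ERROR;

	/* for RCVHQ_RCV_TYPE_EAGER the payload lives in the eager buffer
	 * selected by ipath_hdrget_index(rbuf) and is
	 * ipath_hdrget_length_in_bytes(rbuf) bytes long */
	return ipath_hdrget_rcv_type(rbuf);
}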
850
851#endif /* _IPATH_COMMON_H */
diff --git a/drivers/staging/rdma/ipath/ipath_cq.c b/drivers/staging/rdma/ipath/ipath_cq.c
deleted file mode 100644
index e9dd9112e718..000000000000
--- a/drivers/staging/rdma/ipath/ipath_cq.c
+++ /dev/null
@@ -1,483 +0,0 @@
1/*
2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/err.h>
35#include <linux/slab.h>
36#include <linux/vmalloc.h>
37
38#include "ipath_verbs.h"
39
40/**
41 * ipath_cq_enter - add a new entry to the completion queue
42 * @cq: completion queue
43 * @entry: work completion entry to add
44 * @solicited: true if @entry is a solicited entry
45 *
46 * This may be called with qp->s_lock held.
47 */
48void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
49{
50 struct ipath_cq_wc *wc;
51 unsigned long flags;
52 u32 head;
53 u32 next;
54
55 spin_lock_irqsave(&cq->lock, flags);
56
57 /*
58 * Note that the head pointer might be writable by user processes.
59 * Take care to verify it is a sane value.
60 */
61 wc = cq->queue;
62 head = wc->head;
63 if (head >= (unsigned) cq->ibcq.cqe) {
64 head = cq->ibcq.cqe;
65 next = 0;
66 } else
67 next = head + 1;
68 if (unlikely(next == wc->tail)) {
69 spin_unlock_irqrestore(&cq->lock, flags);
70 if (cq->ibcq.event_handler) {
71 struct ib_event ev;
72
73 ev.device = cq->ibcq.device;
74 ev.element.cq = &cq->ibcq;
75 ev.event = IB_EVENT_CQ_ERR;
76 cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
77 }
78 return;
79 }
80 if (cq->ip) {
81 wc->uqueue[head].wr_id = entry->wr_id;
82 wc->uqueue[head].status = entry->status;
83 wc->uqueue[head].opcode = entry->opcode;
84 wc->uqueue[head].vendor_err = entry->vendor_err;
85 wc->uqueue[head].byte_len = entry->byte_len;
86 wc->uqueue[head].ex.imm_data = (__u32 __force) entry->ex.imm_data;
87 wc->uqueue[head].qp_num = entry->qp->qp_num;
88 wc->uqueue[head].src_qp = entry->src_qp;
89 wc->uqueue[head].wc_flags = entry->wc_flags;
90 wc->uqueue[head].pkey_index = entry->pkey_index;
91 wc->uqueue[head].slid = entry->slid;
92 wc->uqueue[head].sl = entry->sl;
93 wc->uqueue[head].dlid_path_bits = entry->dlid_path_bits;
94 wc->uqueue[head].port_num = entry->port_num;
95 /* Make sure entry is written before the head index. */
96 smp_wmb();
97 } else
98 wc->kqueue[head] = *entry;
99 wc->head = next;
100
101 if (cq->notify == IB_CQ_NEXT_COMP ||
102 (cq->notify == IB_CQ_SOLICITED && solicited)) {
103 cq->notify = IB_CQ_NONE;
104 cq->triggered++;
105 /*
106 * This will cause send_complete() to be called in
107 * another thread.
108 */
109 tasklet_hi_schedule(&cq->comptask);
110 }
111
112 spin_unlock_irqrestore(&cq->lock, flags);
113
114 if (entry->status != IB_WC_SUCCESS)
115 to_idev(cq->ibcq.device)->n_wqe_errs++;
116}
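/*
 * Hedged worked example, not from the original source: with
 * cq->ibcq.cqe == 4 the queue allocated in ipath_create_cq() has five
 * slots (indices 0..4).  A head of 4 wraps so that 'next' becomes 0; if
 * that equals wc->tail the ring is full and the overflow path above
 * raises IB_EVENT_CQ_ERR instead of storing the entry.
 */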
117
118/**
119 * ipath_poll_cq - poll for work completion entries
120 * @ibcq: the completion queue to poll
121 * @num_entries: the maximum number of entries to return
122 * @entry: pointer to array where work completions are placed
123 *
124 * Returns the number of completion entries polled.
125 *
126 * This may be called from interrupt context. Also called by ib_poll_cq()
127 * in the generic verbs code.
128 */
129int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
130{
131 struct ipath_cq *cq = to_icq(ibcq);
132 struct ipath_cq_wc *wc;
133 unsigned long flags;
134 int npolled;
135 u32 tail;
136
137 /* The kernel can only poll a kernel completion queue */
138 if (cq->ip) {
139 npolled = -EINVAL;
140 goto bail;
141 }
142
143 spin_lock_irqsave(&cq->lock, flags);
144
145 wc = cq->queue;
146 tail = wc->tail;
147 if (tail > (u32) cq->ibcq.cqe)
148 tail = (u32) cq->ibcq.cqe;
149 for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
150 if (tail == wc->head)
151 break;
152 /* The kernel doesn't need a RMB since it has the lock. */
153 *entry = wc->kqueue[tail];
154 if (tail >= cq->ibcq.cqe)
155 tail = 0;
156 else
157 tail++;
158 }
159 wc->tail = tail;
160
161 spin_unlock_irqrestore(&cq->lock, flags);
162
163bail:
164 return npolled;
165}
166
167static void send_complete(unsigned long data)
168{
169 struct ipath_cq *cq = (struct ipath_cq *)data;
170
171 /*
172 * The completion handler will most likely rearm the notification
173 * and poll for all pending entries. If a new completion entry
174 * is added while we are in this routine, tasklet_hi_schedule()
175 * won't call us again until we return so we check triggered to
176 * see if we need to call the handler again.
177 */
178 for (;;) {
179 u8 triggered = cq->triggered;
180
181 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
182
183 if (cq->triggered == triggered)
184 return;
185 }
186}
187
188/**
189 * ipath_create_cq - create a completion queue
190 * @ibdev: the device this completion queue is attached to
191 * @attr: creation attributes
192 * @context: the user context (used when the CQ is mapped into user space)
193 * @udata: used to return the mmap offset to user-space clients
194 *
195 * Returns a pointer to the completion queue or negative errno values
196 * for failure.
197 *
198 * Called by ib_create_cq() in the generic verbs code.
199 */
200struct ib_cq *ipath_create_cq(struct ib_device *ibdev,
201 const struct ib_cq_init_attr *attr,
202 struct ib_ucontext *context,
203 struct ib_udata *udata)
204{
205 int entries = attr->cqe;
206 struct ipath_ibdev *dev = to_idev(ibdev);
207 struct ipath_cq *cq;
208 struct ipath_cq_wc *wc;
209 struct ib_cq *ret;
210 u32 sz;
211
212 if (attr->flags)
213 return ERR_PTR(-EINVAL);
214
215 if (entries < 1 || entries > ib_ipath_max_cqes) {
216 ret = ERR_PTR(-EINVAL);
217 goto done;
218 }
219
220 /* Allocate the completion queue structure. */
221 cq = kmalloc(sizeof(*cq), GFP_KERNEL);
222 if (!cq) {
223 ret = ERR_PTR(-ENOMEM);
224 goto done;
225 }
226
227 /*
228 * Allocate the completion queue entries and head/tail pointers.
229 * This is allocated separately so that it can be resized and
230 * also mapped into user space.
231 * We need to use vmalloc() in order to support mmap and large
232 * numbers of entries.
233 */
234 sz = sizeof(*wc);
235 if (udata && udata->outlen >= sizeof(__u64))
236 sz += sizeof(struct ib_uverbs_wc) * (entries + 1);
237 else
238 sz += sizeof(struct ib_wc) * (entries + 1);
239 wc = vmalloc_user(sz);
240 if (!wc) {
241 ret = ERR_PTR(-ENOMEM);
242 goto bail_cq;
243 }
244
245 /*
246 * Return the address of the WC as the offset to mmap.
247 * See ipath_mmap() for details.
248 */
249 if (udata && udata->outlen >= sizeof(__u64)) {
250 int err;
251
252 cq->ip = ipath_create_mmap_info(dev, sz, context, wc);
253 if (!cq->ip) {
254 ret = ERR_PTR(-ENOMEM);
255 goto bail_wc;
256 }
257
258 err = ib_copy_to_udata(udata, &cq->ip->offset,
259 sizeof(cq->ip->offset));
260 if (err) {
261 ret = ERR_PTR(err);
262 goto bail_ip;
263 }
264 } else
265 cq->ip = NULL;
266
267 spin_lock(&dev->n_cqs_lock);
268 if (dev->n_cqs_allocated == ib_ipath_max_cqs) {
269 spin_unlock(&dev->n_cqs_lock);
270 ret = ERR_PTR(-ENOMEM);
271 goto bail_ip;
272 }
273
274 dev->n_cqs_allocated++;
275 spin_unlock(&dev->n_cqs_lock);
276
277 if (cq->ip) {
278 spin_lock_irq(&dev->pending_lock);
279 list_add(&cq->ip->pending_mmaps, &dev->pending_mmaps);
280 spin_unlock_irq(&dev->pending_lock);
281 }
282
283 /*
284 * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
285 * The number of entries should be >= the number requested, otherwise
286 * an error is returned.
287 */
288 cq->ibcq.cqe = entries;
289 cq->notify = IB_CQ_NONE;
290 cq->triggered = 0;
291 spin_lock_init(&cq->lock);
292 tasklet_init(&cq->comptask, send_complete, (unsigned long)cq);
293 wc->head = 0;
294 wc->tail = 0;
295 cq->queue = wc;
296
297 ret = &cq->ibcq;
298
299 goto done;
300
301bail_ip:
302 kfree(cq->ip);
303bail_wc:
304 vfree(wc);
305bail_cq:
306 kfree(cq);
307done:
308 return ret;
309}
310
311/**
312 * ipath_destroy_cq - destroy a completion queue
313 * @ibcq: the completion queue to destroy.
314 *
315 * Returns 0 for success.
316 *
317 * Called by ib_destroy_cq() in the generic verbs code.
318 */
319int ipath_destroy_cq(struct ib_cq *ibcq)
320{
321 struct ipath_ibdev *dev = to_idev(ibcq->device);
322 struct ipath_cq *cq = to_icq(ibcq);
323
324 tasklet_kill(&cq->comptask);
325 spin_lock(&dev->n_cqs_lock);
326 dev->n_cqs_allocated--;
327 spin_unlock(&dev->n_cqs_lock);
328 if (cq->ip)
329 kref_put(&cq->ip->ref, ipath_release_mmap_info);
330 else
331 vfree(cq->queue);
332 kfree(cq);
333
334 return 0;
335}
336
337/**
338 * ipath_req_notify_cq - change the notification type for a completion queue
339 * @ibcq: the completion queue
340 * @notify_flags: the type of notification to request
341 *
342 * Returns 0 for success.
343 *
344 * This may be called from interrupt context. Also called by
345 * ib_req_notify_cq() in the generic verbs code.
346 */
347int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
348{
349 struct ipath_cq *cq = to_icq(ibcq);
350 unsigned long flags;
351 int ret = 0;
352
353 spin_lock_irqsave(&cq->lock, flags);
354 /*
355 * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
356 * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2).
357 */
358 if (cq->notify != IB_CQ_NEXT_COMP)
359 cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;
360
361 if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
362 cq->queue->head != cq->queue->tail)
363 ret = 1;
364
365 spin_unlock_irqrestore(&cq->lock, flags);
366
367 return ret;
368}
369
370/**
371 * ipath_resize_cq - change the size of the CQ
372 * @ibcq: the completion queue
373 *
374 * Returns 0 for success.
375 */
376int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
377{
378 struct ipath_cq *cq = to_icq(ibcq);
379 struct ipath_cq_wc *old_wc;
380 struct ipath_cq_wc *wc;
381 u32 head, tail, n;
382 int ret;
383 u32 sz;
384
385 if (cqe < 1 || cqe > ib_ipath_max_cqes) {
386 ret = -EINVAL;
387 goto bail;
388 }
389
390 /*
391 * Need to use vmalloc() if we want to support large #s of entries.
392 */
393 sz = sizeof(*wc);
394 if (udata && udata->outlen >= sizeof(__u64))
395 sz += sizeof(struct ib_uverbs_wc) * (cqe + 1);
396 else
397 sz += sizeof(struct ib_wc) * (cqe + 1);
398 wc = vmalloc_user(sz);
399 if (!wc) {
400 ret = -ENOMEM;
401 goto bail;
402 }
403
404 /* Check that we can write the offset to mmap. */
405 if (udata && udata->outlen >= sizeof(__u64)) {
406 __u64 offset = 0;
407
408 ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
409 if (ret)
410 goto bail_free;
411 }
412
413 spin_lock_irq(&cq->lock);
414 /*
415 * Make sure head and tail are sane since they
416 * might be user writable.
417 */
418 old_wc = cq->queue;
419 head = old_wc->head;
420 if (head > (u32) cq->ibcq.cqe)
421 head = (u32) cq->ibcq.cqe;
422 tail = old_wc->tail;
423 if (tail > (u32) cq->ibcq.cqe)
424 tail = (u32) cq->ibcq.cqe;
425 if (head < tail)
426 n = cq->ibcq.cqe + 1 + head - tail;
427 else
428 n = head - tail;
429 if (unlikely((u32)cqe < n)) {
430 ret = -EINVAL;
431 goto bail_unlock;
432 }
433 for (n = 0; tail != head; n++) {
434 if (cq->ip)
435 wc->uqueue[n] = old_wc->uqueue[tail];
436 else
437 wc->kqueue[n] = old_wc->kqueue[tail];
438 if (tail == (u32) cq->ibcq.cqe)
439 tail = 0;
440 else
441 tail++;
442 }
443 cq->ibcq.cqe = cqe;
444 wc->head = n;
445 wc->tail = 0;
446 cq->queue = wc;
447 spin_unlock_irq(&cq->lock);
448
449 vfree(old_wc);
450
451 if (cq->ip) {
452 struct ipath_ibdev *dev = to_idev(ibcq->device);
453 struct ipath_mmap_info *ip = cq->ip;
454
455 ipath_update_mmap_info(dev, ip, sz, wc);
456
457 /*
458 * Return the offset to mmap.
459 * See ipath_mmap() for details.
460 */
461 if (udata && udata->outlen >= sizeof(__u64)) {
462 ret = ib_copy_to_udata(udata, &ip->offset,
463 sizeof(ip->offset));
464 if (ret)
465 goto bail;
466 }
467
468 spin_lock_irq(&dev->pending_lock);
469 if (list_empty(&ip->pending_mmaps))
470 list_add(&ip->pending_mmaps, &dev->pending_mmaps);
471 spin_unlock_irq(&dev->pending_lock);
472 }
473
474 ret = 0;
475 goto bail;
476
477bail_unlock:
478 spin_unlock_irq(&cq->lock);
479bail_free:
480 vfree(wc);
481bail:
482 return ret;
483}
diff --git a/drivers/staging/rdma/ipath/ipath_debug.h b/drivers/staging/rdma/ipath/ipath_debug.h
deleted file mode 100644
index 65926cd35759..000000000000
--- a/drivers/staging/rdma/ipath/ipath_debug.h
+++ /dev/null
@@ -1,99 +0,0 @@
1/*
2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#ifndef _IPATH_DEBUG_H
35#define _IPATH_DEBUG_H
36
37#ifndef _IPATH_DEBUGGING /* debugging enabled or not */
38#define _IPATH_DEBUGGING 1
39#endif
40
41#if _IPATH_DEBUGGING
42
43/*
44 * Mask values for debugging. The scheme allows us to compile out any
45 * of the debug tracing stuff, and if compiled in, to enable or disable
46 * dynamically. This can be set at modprobe time also:
47 * modprobe infinipath.ko infinipath_debug=7
48 */
49
50#define __IPATH_INFO 0x1 /* generic low verbosity stuff */
51#define __IPATH_DBG 0x2 /* generic debug */
52#define __IPATH_TRSAMPLE 0x8 /* generate trace buffer sample entries */
53/* leave some low verbosity spots open */
54#define __IPATH_VERBDBG 0x40 /* very verbose debug */
55#define __IPATH_PKTDBG 0x80 /* print packet data */
56/* print process startup (init)/exit messages */
57#define __IPATH_PROCDBG 0x100
58/* print mmap/fault stuff, not using VDBG any more */
59#define __IPATH_MMDBG 0x200
60#define __IPATH_ERRPKTDBG 0x400
61#define __IPATH_USER_SEND 0x1000 /* use user mode send */
62#define __IPATH_KERNEL_SEND 0x2000 /* use kernel mode send */
63#define __IPATH_EPKTDBG 0x4000 /* print ethernet packet data */
64#define __IPATH_IPATHDBG 0x10000 /* Ethernet (IPATH) gen debug */
65#define __IPATH_IPATHWARN 0x20000 /* Ethernet (IPATH) warnings */
66#define __IPATH_IPATHERR 0x40000 /* Ethernet (IPATH) errors */
67#define __IPATH_IPATHPD 0x80000 /* Ethernet (IPATH) packet dump */
68#define __IPATH_IPATHTABLE 0x100000 /* Ethernet (IPATH) table dump */
69#define __IPATH_LINKVERBDBG 0x200000 /* very verbose linkchange debug */
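/*
 * Hedged example, not part of the original header: the mask bits OR
 * together, so (per the Makefile in this series the module is built as
 * ib_ipath)
 *
 *	modprobe ib_ipath infinipath_debug=0x103
 *
 * would enable __IPATH_INFO | __IPATH_DBG | __IPATH_PROCDBG
 * (0x1 | 0x2 | 0x100 = 0x103).
 */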
70
71#else /* _IPATH_DEBUGGING */
72
73/*
74 * define all of these even with debugging off, for the few places that do
75 * if(infinipath_debug & _IPATH_xyzzy), but in a way that will make the
76 * compiler eliminate the code
77 */
78
79#define __IPATH_INFO 0x0 /* generic low verbosity stuff */
80#define __IPATH_DBG 0x0 /* generic debug */
81#define __IPATH_TRSAMPLE 0x0 /* generate trace buffer sample entries */
82#define __IPATH_VERBDBG 0x0 /* very verbose debug */
83#define __IPATH_PKTDBG 0x0 /* print packet data */
84#define __IPATH_PROCDBG 0x0 /* process startup (init)/exit messages */
85/* print mmap/fault stuff, not using VDBG any more */
86#define __IPATH_MMDBG 0x0
87#define __IPATH_EPKTDBG 0x0 /* print ethernet packet data */
88#define __IPATH_IPATHDBG 0x0 /* Ethernet (IPATH) gen debug on */
89#define __IPATH_IPATHWARN 0x0 /* Ethernet (IPATH) warnings on */
90#define __IPATH_IPATHERR 0x0 /* Ethernet (IPATH) errors on */
91#define __IPATH_IPATHPD 0x0 /* Ethernet (IPATH) packet dump on */
92#define __IPATH_IPATHTABLE 0x0 /* Ethernet (IPATH) table dump on */
93#define __IPATH_LINKVERBDBG 0x0 /* very verbose linkchange debug */
94
95#endif /* _IPATH_DEBUGGING */
96
97#define __IPATH_VERBOSEDBG __IPATH_VERBDBG
98
99#endif /* _IPATH_DEBUG_H */
diff --git a/drivers/staging/rdma/ipath/ipath_diag.c b/drivers/staging/rdma/ipath/ipath_diag.c
deleted file mode 100644
index 45802e97332e..000000000000
--- a/drivers/staging/rdma/ipath/ipath_diag.c
+++ /dev/null
@@ -1,551 +0,0 @@
1/*
2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34/*
35 * This file contains support for diagnostic functions. It is accessed by
36 * opening the ipath_diag device, normally minor number 129. Diagnostic use
37 * of the InfiniPath chip may render the chip or board unusable until the
38 * driver is unloaded, or in some cases, until the system is rebooted.
39 *
40 * Accesses to the chip through this interface are not similar to going
41 * through the /sys/bus/pci resource mmap interface.
42 */
43
44#include <linux/io.h>
45#include <linux/pci.h>
46#include <linux/vmalloc.h>
47#include <linux/fs.h>
48#include <linux/export.h>
49#include <asm/uaccess.h>
50
51#include "ipath_kernel.h"
52#include "ipath_common.h"
53
54int ipath_diag_inuse;
55static int diag_set_link;
56
57static int ipath_diag_open(struct inode *in, struct file *fp);
58static int ipath_diag_release(struct inode *in, struct file *fp);
59static ssize_t ipath_diag_read(struct file *fp, char __user *data,
60 size_t count, loff_t *off);
61static ssize_t ipath_diag_write(struct file *fp, const char __user *data,
62 size_t count, loff_t *off);
63
64static const struct file_operations diag_file_ops = {
65 .owner = THIS_MODULE,
66 .write = ipath_diag_write,
67 .read = ipath_diag_read,
68 .open = ipath_diag_open,
69 .release = ipath_diag_release,
70 .llseek = default_llseek,
71};
72
73static ssize_t ipath_diagpkt_write(struct file *fp,
74 const char __user *data,
75 size_t count, loff_t *off);
76
77static const struct file_operations diagpkt_file_ops = {
78 .owner = THIS_MODULE,
79 .write = ipath_diagpkt_write,
80 .llseek = noop_llseek,
81};
82
83static atomic_t diagpkt_count = ATOMIC_INIT(0);
84static struct cdev *diagpkt_cdev;
85static struct device *diagpkt_dev;
86
87int ipath_diag_add(struct ipath_devdata *dd)
88{
89 char name[16];
90 int ret = 0;
91
92 if (atomic_inc_return(&diagpkt_count) == 1) {
93 ret = ipath_cdev_init(IPATH_DIAGPKT_MINOR,
94 "ipath_diagpkt", &diagpkt_file_ops,
95 &diagpkt_cdev, &diagpkt_dev);
96
97 if (ret) {
98 ipath_dev_err(dd, "Couldn't create ipath_diagpkt "
99 "device: %d", ret);
100 goto done;
101 }
102 }
103
104 snprintf(name, sizeof(name), "ipath_diag%d", dd->ipath_unit);
105
106 ret = ipath_cdev_init(IPATH_DIAG_MINOR_BASE + dd->ipath_unit, name,
107 &diag_file_ops, &dd->diag_cdev,
108 &dd->diag_dev);
109 if (ret)
110 ipath_dev_err(dd, "Couldn't create %s device: %d",
111 name, ret);
112
113done:
114 return ret;
115}
116
117void ipath_diag_remove(struct ipath_devdata *dd)
118{
119 if (atomic_dec_and_test(&diagpkt_count))
120 ipath_cdev_cleanup(&diagpkt_cdev, &diagpkt_dev);
121
122 ipath_cdev_cleanup(&dd->diag_cdev, &dd->diag_dev);
123}
124
125/**
126 * ipath_read_umem64 - read a 64-bit quantity from the chip into user space
127 * @dd: the infinipath device
128 * @uaddr: the location to store the data in user memory
129 * @caddr: the source chip address (full pointer, not offset)
130 * @count: number of bytes to copy (multiple of 32 bits)
131 *
132 * This function also localizes all chip memory accesses.
133 * The copy should be written such that we read full cacheline packets
134 * from the chip. This is usually used for a single qword
135 *
136 * NOTE: This assumes the chip address is 64-bit aligned.
137 */
138static int ipath_read_umem64(struct ipath_devdata *dd, void __user *uaddr,
139 const void __iomem *caddr, size_t count)
140{
141 const u64 __iomem *reg_addr = caddr;
142 const u64 __iomem *reg_end = reg_addr + (count / sizeof(u64));
143 int ret;
144
145 /* not very efficient, but it works for now */
146 if (reg_addr < dd->ipath_kregbase || reg_end > dd->ipath_kregend) {
147 ret = -EINVAL;
148 goto bail;
149 }
150 while (reg_addr < reg_end) {
151 u64 data = readq(reg_addr);
152 if (copy_to_user(uaddr, &data, sizeof(u64))) {
153 ret = -EFAULT;
154 goto bail;
155 }
156 reg_addr++;
157 uaddr += sizeof(u64);
158 }
159 ret = 0;
160bail:
161 return ret;
162}
163
164/**
165 * ipath_write_umem64 - write a 64-bit quantity to the chip from user space
166 * @dd: the infinipath device
167 * @caddr: the destination chip address (full pointer, not offset)
168 * @uaddr: the source of the data in user memory
169 * @count: the number of bytes to copy (multiple of 32 bits)
170 *
171 * This is usually used for a single qword
172 * NOTE: This assumes the chip address is 64-bit aligned.
173 */
174
175static int ipath_write_umem64(struct ipath_devdata *dd, void __iomem *caddr,
176 const void __user *uaddr, size_t count)
177{
178 u64 __iomem *reg_addr = caddr;
179 const u64 __iomem *reg_end = reg_addr + (count / sizeof(u64));
180 int ret;
181
182 /* not very efficient, but it works for now */
183 if (reg_addr < dd->ipath_kregbase || reg_end > dd->ipath_kregend) {
184 ret = -EINVAL;
185 goto bail;
186 }
187 while (reg_addr < reg_end) {
188 u64 data;
189 if (copy_from_user(&data, uaddr, sizeof(data))) {
190 ret = -EFAULT;
191 goto bail;
192 }
193 writeq(data, reg_addr);
194
195 reg_addr++;
196 uaddr += sizeof(u64);
197 }
198 ret = 0;
199bail:
200 return ret;
201}
202
203/**
204 * ipath_read_umem32 - read a 32-bit quantity from the chip into user space
205 * @dd: the infinipath device
206 * @uaddr: the location to store the data in user memory
207 * @caddr: the source chip address (full pointer, not offset)
208 * @count: number of bytes to copy
209 *
210 * read 32 bit values, not 64 bit; for memories that only
211 * support 32 bit reads; usually a single dword.
212 */
213static int ipath_read_umem32(struct ipath_devdata *dd, void __user *uaddr,
214 const void __iomem *caddr, size_t count)
215{
216 const u32 __iomem *reg_addr = caddr;
217 const u32 __iomem *reg_end = reg_addr + (count / sizeof(u32));
218 int ret;
219
220 if (reg_addr < (u32 __iomem *) dd->ipath_kregbase ||
221 reg_end > (u32 __iomem *) dd->ipath_kregend) {
222 ret = -EINVAL;
223 goto bail;
224 }
225 /* not very efficient, but it works for now */
226 while (reg_addr < reg_end) {
227 u32 data = readl(reg_addr);
228 if (copy_to_user(uaddr, &data, sizeof(data))) {
229 ret = -EFAULT;
230 goto bail;
231 }
232
233 reg_addr++;
234 uaddr += sizeof(u32);
235
236 }
237 ret = 0;
238bail:
239 return ret;
240}
241
242/**
243 * ipath_write_umem32 - write a 32-bit quantity to the chip from user space
244 * @dd: the infinipath device
245 * @caddr: the destination chip address (full pointer, not offset)
246 * @uaddr: the source of the data in user memory
247 * @count: number of bytes to copy
248 *
249 * write 32 bit values, not 64 bit; for memories that only
250 * support 32 bit writes; usually a single dword.
251 */
252
253static int ipath_write_umem32(struct ipath_devdata *dd, void __iomem *caddr,
254 const void __user *uaddr, size_t count)
255{
256 u32 __iomem *reg_addr = caddr;
257 const u32 __iomem *reg_end = reg_addr + (count / sizeof(u32));
258 int ret;
259
260 if (reg_addr < (u32 __iomem *) dd->ipath_kregbase ||
261 reg_end > (u32 __iomem *) dd->ipath_kregend) {
262 ret = -EINVAL;
263 goto bail;
264 }
265 while (reg_addr < reg_end) {
266 u32 data;
267 if (copy_from_user(&data, uaddr, sizeof(data))) {
268 ret = -EFAULT;
269 goto bail;
270 }
271 writel(data, reg_addr);
272
273 reg_addr++;
274 uaddr += sizeof(u32);
275 }
276 ret = 0;
277bail:
278 return ret;
279}
280
281static int ipath_diag_open(struct inode *in, struct file *fp)
282{
283 int unit = iminor(in) - IPATH_DIAG_MINOR_BASE;
284 struct ipath_devdata *dd;
285 int ret;
286
287 mutex_lock(&ipath_mutex);
288
289 if (ipath_diag_inuse) {
290 ret = -EBUSY;
291 goto bail;
292 }
293
294 dd = ipath_lookup(unit);
295
296 if (dd == NULL || !(dd->ipath_flags & IPATH_PRESENT) ||
297 !dd->ipath_kregbase) {
298 ret = -ENODEV;
299 goto bail;
300 }
301
302 fp->private_data = dd;
303 ipath_diag_inuse = -2;
304 diag_set_link = 0;
305 ret = 0;
306
307 /* Only expose a way to reset the device if we
308 make it into diag mode. */
309 ipath_expose_reset(&dd->pcidev->dev);
310
311bail:
312 mutex_unlock(&ipath_mutex);
313
314 return ret;
315}
316
317/**
318 * ipath_diagpkt_write - write an IB packet
319 * @fp: the diag data device file pointer
320 * @data: ipath_diag_pkt structure saying where to get the packet
321 * @count: size of data to write
322 * @off: unused by this code
323 */
324static ssize_t ipath_diagpkt_write(struct file *fp,
325 const char __user *data,
326 size_t count, loff_t *off)
327{
328 u32 __iomem *piobuf;
329 u32 plen, pbufn, maxlen_reserve;
330 struct ipath_diag_pkt odp;
331 struct ipath_diag_xpkt dp;
332 u32 *tmpbuf = NULL;
333 struct ipath_devdata *dd;
334 ssize_t ret = 0;
335 u64 val;
336 u32 l_state, lt_state; /* LinkState, LinkTrainingState */
337
338
339 if (count == sizeof(dp)) {
340 if (copy_from_user(&dp, data, sizeof(dp))) {
341 ret = -EFAULT;
342 goto bail;
343 }
344 } else if (count == sizeof(odp)) {
345 if (copy_from_user(&odp, data, sizeof(odp))) {
346 ret = -EFAULT;
347 goto bail;
348 }
349 dp.len = odp.len;
350 dp.unit = odp.unit;
351 dp.data = odp.data;
352 dp.pbc_wd = 0;
353 } else {
354 ret = -EINVAL;
355 goto bail;
356 }
357
358 /* send count must be an exact number of dwords */
359 if (dp.len & 3) {
360 ret = -EINVAL;
361 goto bail;
362 }
363
364 plen = dp.len >> 2;
365
366 dd = ipath_lookup(dp.unit);
367 if (!dd || !(dd->ipath_flags & IPATH_PRESENT) ||
368 !dd->ipath_kregbase) {
369 ipath_cdbg(VERBOSE, "illegal unit %u for diag data send\n",
370 dp.unit);
371 ret = -ENODEV;
372 goto bail;
373 }
374
375 if (ipath_diag_inuse && !diag_set_link &&
376 !(dd->ipath_flags & IPATH_LINKACTIVE)) {
377 diag_set_link = 1;
378 ipath_cdbg(VERBOSE, "Trying to set link active for "
379 "diag pkt\n");
380 ipath_set_linkstate(dd, IPATH_IB_LINKARM);
381 ipath_set_linkstate(dd, IPATH_IB_LINKACTIVE);
382 }
383
384 if (!(dd->ipath_flags & IPATH_INITTED)) {
385 /* no hardware, freeze, etc. */
386 ipath_cdbg(VERBOSE, "unit %u not usable\n", dd->ipath_unit);
387 ret = -ENODEV;
388 goto bail;
389 }
390 /*
391 * Want to skip check for l_state if using custom PBC,
392 * because we might be trying to force an SM packet out.
393 * first-cut, skip _all_ state checking in that case.
394 */
395 val = ipath_ib_state(dd, dd->ipath_lastibcstat);
396 lt_state = ipath_ib_linktrstate(dd, dd->ipath_lastibcstat);
397 l_state = ipath_ib_linkstate(dd, dd->ipath_lastibcstat);
398 if (!dp.pbc_wd && (lt_state != INFINIPATH_IBCS_LT_STATE_LINKUP ||
399 (val != dd->ib_init && val != dd->ib_arm &&
400 val != dd->ib_active))) {
401 ipath_cdbg(VERBOSE, "unit %u not ready (state %llx)\n",
402 dd->ipath_unit, (unsigned long long) val);
403 ret = -EINVAL;
404 goto bail;
405 }
406
407 /*
408 * need total length before first word written, plus 2 Dwords. One Dword
409 * is for padding so we get the full user data when not aligned on
410 * a word boundary. The other Dword is to make sure we have room for the
411 * ICRC which gets tacked on later.
412 */
413 maxlen_reserve = 2 * sizeof(u32);
414 if (dp.len > dd->ipath_ibmaxlen - maxlen_reserve) {
415 ipath_dbg("Pkt len 0x%x > ibmaxlen %x\n",
416 dp.len, dd->ipath_ibmaxlen);
417 ret = -EINVAL;
418 goto bail;
419 }
420
421 plen = sizeof(u32) + dp.len;
422
423 tmpbuf = vmalloc(plen);
424 if (!tmpbuf) {
425 dev_info(&dd->pcidev->dev, "Unable to allocate tmp buffer, "
426 "failing\n");
427 ret = -ENOMEM;
428 goto bail;
429 }
430
431 if (copy_from_user(tmpbuf,
432 (const void __user *) (unsigned long) dp.data,
433 dp.len)) {
434 ret = -EFAULT;
435 goto bail;
436 }
437
438 plen >>= 2; /* in dwords */
439
440 piobuf = ipath_getpiobuf(dd, plen, &pbufn);
441 if (!piobuf) {
442 ipath_cdbg(VERBOSE, "No PIO buffers avail for unit %u\n",
443 dd->ipath_unit);
444 ret = -EBUSY;
445 goto bail;
446 }
447 /* disarm it just to be extra sure */
448 ipath_disarm_piobufs(dd, pbufn, 1);
449
450 if (ipath_debug & __IPATH_PKTDBG)
451 ipath_cdbg(VERBOSE, "unit %u 0x%x+1w pio%d\n",
452 dd->ipath_unit, plen - 1, pbufn);
453
454 if (dp.pbc_wd == 0)
455 dp.pbc_wd = plen;
456 writeq(dp.pbc_wd, piobuf);
457 /*
458 * Copy all but the trigger word, then flush, so it's written
459 * to chip before trigger word, then write trigger word, then
460 * flush again, so packet is sent.
461 */
462 if (dd->ipath_flags & IPATH_PIO_FLUSH_WC) {
463 ipath_flush_wc();
464 __iowrite32_copy(piobuf + 2, tmpbuf, plen - 1);
465 ipath_flush_wc();
466 __raw_writel(tmpbuf[plen - 1], piobuf + plen + 1);
467 } else
468 __iowrite32_copy(piobuf + 2, tmpbuf, plen);
469
470 ipath_flush_wc();
471
472 ret = sizeof(dp);
473
474bail:
475 vfree(tmpbuf);
476 return ret;
477}
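
The comment above spells out the ordering this function relies on for write-combined PIO buffers: copy everything except the final trigger word, flush, write the trigger word, then flush again so the packet actually goes out. A minimal, hedged sketch of that ordering in isolation; pio_send_sketch and its parameters are illustrative, not part of the driver, and wmb() stands in for ipath_flush_wc():

#include <linux/io.h>
#include <linux/types.h>

/*
 * Hedged sketch: PIO send ordering for write-combined buffers.  'pio'
 * points at the mapped PIO buffer (the PBC qword occupies the first two
 * dwords, hence the +2), 'payload' holds n_dwords little-endian dwords
 * and the final dword is the hardware trigger.
 */
static void pio_send_sketch(u32 __iomem *pio, const u32 *payload,
                            unsigned int n_dwords)
{
        /* everything except the final (trigger) dword */
        __iowrite32_copy(pio + 2, payload, n_dwords - 1);
        wmb();          /* drain write combining before the trigger */
        __raw_writel(payload[n_dwords - 1], pio + 2 + (n_dwords - 1));
        wmb();          /* push the trigger word so the packet is sent */
}

The +2 offset mirrors ipath_diagpkt_write() above, which writes the 64-bit PBC word with writeq() before the payload copy.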
478
479static int ipath_diag_release(struct inode *in, struct file *fp)
480{
481 mutex_lock(&ipath_mutex);
482 ipath_diag_inuse = 0;
483 fp->private_data = NULL;
484 mutex_unlock(&ipath_mutex);
485 return 0;
486}
487
488static ssize_t ipath_diag_read(struct file *fp, char __user *data,
489 size_t count, loff_t *off)
490{
491 struct ipath_devdata *dd = fp->private_data;
492 void __iomem *kreg_base;
493 ssize_t ret;
494
495 kreg_base = dd->ipath_kregbase;
496
497 if (count == 0)
498 ret = 0;
499 else if ((count % 4) || (*off % 4))
500 /* address or length is not 32-bit aligned, hence invalid */
501 ret = -EINVAL;
502 else if (ipath_diag_inuse < 1 && (*off || count != 8))
503 ret = -EINVAL; /* prevent cat /dev/ipath_diag* */
504 else if ((count % 8) || (*off % 8))
505 /* address or length not 64-bit aligned; do 32-bit reads */
506 ret = ipath_read_umem32(dd, data, kreg_base + *off, count);
507 else
508 ret = ipath_read_umem64(dd, data, kreg_base + *off, count);
509
510 if (ret >= 0) {
511 *off += count;
512 ret = count;
513 if (ipath_diag_inuse == -2)
514 ipath_diag_inuse++;
515 }
516
517 return ret;
518}
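
Together with ipath_diag_open() above and ipath_diag_write() below, these checks imply a small unlock sequence for the diag device: after open, the first access must be an 8-byte read at offset 0, then an 8-byte write at offset 0; only then are other 4- or 8-byte aligned accesses accepted. A hypothetical user-space sketch of that sequence; the device path and the 0x100 register offset are illustrative only, and poking registers on real hardware is of course not risk-free:

/* Hypothetical diag tool sketch, inferred from the checks in
 * ipath_diag_read()/ipath_diag_write().
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        uint64_t qw = 0;
        int fd = open("/dev/ipath_diag0", O_RDWR);      /* illustrative path */

        if (fd < 0)
                return 1;
        /* first access must be an 8-byte read at offset 0 */
        if (pread(fd, &qw, sizeof(qw), 0) != sizeof(qw))
                goto out;
        /* second access must be an 8-byte write at offset 0 */
        if (pwrite(fd, &qw, sizeof(qw), 0) != sizeof(qw))
                goto out;
        /* other aligned register offsets are accepted from here on */
        if (pread(fd, &qw, sizeof(qw), 0x100) == sizeof(qw))    /* hypothetical offset */
                printf("reg@0x100 = 0x%llx\n", (unsigned long long)qw);
out:
        close(fd);
        return 0;
}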
519
520static ssize_t ipath_diag_write(struct file *fp, const char __user *data,
521 size_t count, loff_t *off)
522{
523 struct ipath_devdata *dd = fp->private_data;
524 void __iomem *kreg_base;
525 ssize_t ret;
526
527 kreg_base = dd->ipath_kregbase;
528
529 if (count == 0)
530 ret = 0;
531 else if ((count % 4) || (*off % 4))
532 /* address or length is not 32-bit aligned, hence invalid */
533 ret = -EINVAL;
534 else if ((ipath_diag_inuse == -1 && (*off || count != 8)) ||
535 ipath_diag_inuse == -2) /* read qw off 0, write qw off 0 */
536 ret = -EINVAL; /* before any other write allowed */
537 else if ((count % 8) || (*off % 8))
538 /* address or length not 64-bit aligned; do 32-bit writes */
539 ret = ipath_write_umem32(dd, kreg_base + *off, data, count);
540 else
541 ret = ipath_write_umem64(dd, kreg_base + *off, data, count);
542
543 if (ret >= 0) {
544 *off += count;
545 ret = count;
546 if (ipath_diag_inuse == -1)
547 ipath_diag_inuse = 1; /* all read/write OK now */
548 }
549
550 return ret;
551}
diff --git a/drivers/staging/rdma/ipath/ipath_dma.c b/drivers/staging/rdma/ipath/ipath_dma.c
deleted file mode 100644
index 123a8c053539..000000000000
--- a/drivers/staging/rdma/ipath/ipath_dma.c
+++ /dev/null
@@ -1,179 +0,0 @@
1/*
2 * Copyright (c) 2006 QLogic, Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/scatterlist.h>
34#include <linux/gfp.h>
35#include <rdma/ib_verbs.h>
36
37#include "ipath_verbs.h"
38
39#define BAD_DMA_ADDRESS ((u64) 0)
40
41/*
42 * The following functions implement driver specific replacements
43 * for the ib_dma_*() functions.
44 *
45 * These functions return kernel virtual addresses instead of
46 * device bus addresses since the driver uses the CPU to copy
47 * data instead of using hardware DMA.
48 */
49
50static int ipath_mapping_error(struct ib_device *dev, u64 dma_addr)
51{
52 return dma_addr == BAD_DMA_ADDRESS;
53}
54
55static u64 ipath_dma_map_single(struct ib_device *dev,
56 void *cpu_addr, size_t size,
57 enum dma_data_direction direction)
58{
59 BUG_ON(!valid_dma_direction(direction));
60 return (u64) cpu_addr;
61}
62
63static void ipath_dma_unmap_single(struct ib_device *dev,
64 u64 addr, size_t size,
65 enum dma_data_direction direction)
66{
67 BUG_ON(!valid_dma_direction(direction));
68}
69
70static u64 ipath_dma_map_page(struct ib_device *dev,
71 struct page *page,
72 unsigned long offset,
73 size_t size,
74 enum dma_data_direction direction)
75{
76 u64 addr;
77
78 BUG_ON(!valid_dma_direction(direction));
79
80 if (offset + size > PAGE_SIZE) {
81 addr = BAD_DMA_ADDRESS;
82 goto done;
83 }
84
85 addr = (u64) page_address(page);
86 if (addr)
87 addr += offset;
88 /* TODO: handle highmem pages */
89
90done:
91 return addr;
92}
93
94static void ipath_dma_unmap_page(struct ib_device *dev,
95 u64 addr, size_t size,
96 enum dma_data_direction direction)
97{
98 BUG_ON(!valid_dma_direction(direction));
99}
100
101static int ipath_map_sg(struct ib_device *dev, struct scatterlist *sgl,
102 int nents, enum dma_data_direction direction)
103{
104 struct scatterlist *sg;
105 u64 addr;
106 int i;
107 int ret = nents;
108
109 BUG_ON(!valid_dma_direction(direction));
110
111 for_each_sg(sgl, sg, nents, i) {
112 addr = (u64) page_address(sg_page(sg));
113 /* TODO: handle highmem pages */
114 if (!addr) {
115 ret = 0;
116 break;
117 }
118 sg->dma_address = addr + sg->offset;
119#ifdef CONFIG_NEED_SG_DMA_LENGTH
120 sg->dma_length = sg->length;
121#endif
122 }
123 return ret;
124}
125
126static void ipath_unmap_sg(struct ib_device *dev,
127 struct scatterlist *sg, int nents,
128 enum dma_data_direction direction)
129{
130 BUG_ON(!valid_dma_direction(direction));
131}
132
133static void ipath_sync_single_for_cpu(struct ib_device *dev,
134 u64 addr,
135 size_t size,
136 enum dma_data_direction dir)
137{
138}
139
140static void ipath_sync_single_for_device(struct ib_device *dev,
141 u64 addr,
142 size_t size,
143 enum dma_data_direction dir)
144{
145}
146
147static void *ipath_dma_alloc_coherent(struct ib_device *dev, size_t size,
148 u64 *dma_handle, gfp_t flag)
149{
150 struct page *p;
151 void *addr = NULL;
152
153 p = alloc_pages(flag, get_order(size));
154 if (p)
155 addr = page_address(p);
156 if (dma_handle)
157 *dma_handle = (u64) addr;
158 return addr;
159}
160
161static void ipath_dma_free_coherent(struct ib_device *dev, size_t size,
162 void *cpu_addr, u64 dma_handle)
163{
164 free_pages((unsigned long) cpu_addr, get_order(size));
165}
166
167struct ib_dma_mapping_ops ipath_dma_mapping_ops = {
168 .mapping_error = ipath_mapping_error,
169 .map_single = ipath_dma_map_single,
170 .unmap_single = ipath_dma_unmap_single,
171 .map_page = ipath_dma_map_page,
172 .unmap_page = ipath_dma_unmap_page,
173 .map_sg = ipath_map_sg,
174 .unmap_sg = ipath_unmap_sg,
175 .sync_single_for_cpu = ipath_sync_single_for_cpu,
176 .sync_single_for_device = ipath_sync_single_for_device,
177 .alloc_coherent = ipath_dma_alloc_coherent,
178 .free_coherent = ipath_dma_free_coherent
179};
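
Per the comment at the top of this file, these ib_dma_*() replacements hand back kernel virtual addresses because the driver copies data with the CPU instead of doing real DMA. A hedged sketch of what that means for a consumer, assuming the old struct ib_dma_mapping_ops layout used above; copy_from_mapped is illustrative, not a driver function:

#include <linux/string.h>
#include <rdma/ib_verbs.h>

/*
 * Hedged sketch: consuming a CPU-copy "DMA" mapping like the one above.
 * 'dma_addr' came from .map_single()/.map_page(); in this scheme it is
 * really a kernel virtual address, so a plain memcpy() is all it takes.
 */
static int copy_from_mapped(struct ib_device *dev,
                            struct ib_dma_mapping_ops *ops,
                            u64 dma_addr, void *dst, size_t len)
{
        if (ops->mapping_error(dev, dma_addr))
                return -EINVAL;         /* BAD_DMA_ADDRESS, i.e. 0 */
        memcpy(dst, (void *)(unsigned long)dma_addr, len);
        return 0;
}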
diff --git a/drivers/staging/rdma/ipath/ipath_driver.c b/drivers/staging/rdma/ipath/ipath_driver.c
deleted file mode 100644
index 2ab22f98e3ba..000000000000
--- a/drivers/staging/rdma/ipath/ipath_driver.c
+++ /dev/null
@@ -1,2784 +0,0 @@
1/*
2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
35
36#include <linux/spinlock.h>
37#include <linux/idr.h>
38#include <linux/pci.h>
39#include <linux/io.h>
40#include <linux/delay.h>
41#include <linux/netdevice.h>
42#include <linux/vmalloc.h>
43#include <linux/bitmap.h>
44#include <linux/slab.h>
45#include <linux/module.h>
46#ifdef CONFIG_X86_64
47#include <asm/pat.h>
48#endif
49
50#include "ipath_kernel.h"
51#include "ipath_verbs.h"
52
53static void ipath_update_pio_bufs(struct ipath_devdata *);
54
55const char *ipath_get_unit_name(int unit)
56{
57 static char iname[16];
58 snprintf(iname, sizeof iname, "infinipath%u", unit);
59 return iname;
60}
61
62#define DRIVER_LOAD_MSG "QLogic " IPATH_DRV_NAME " loaded: "
63#define PFX IPATH_DRV_NAME ": "
64
65/*
66 * The size has to be longer than this string, so we can append
67 * board/chip information to it in the init code.
68 */
69const char ib_ipath_version[] = IPATH_IDSTR "\n";
70
71static struct idr unit_table;
72DEFINE_SPINLOCK(ipath_devs_lock);
73LIST_HEAD(ipath_dev_list);
74
75wait_queue_head_t ipath_state_wait;
76
77unsigned ipath_debug = __IPATH_INFO;
78
79module_param_named(debug, ipath_debug, uint, S_IWUSR | S_IRUGO);
80MODULE_PARM_DESC(debug, "mask for debug prints");
81EXPORT_SYMBOL_GPL(ipath_debug);
82
83unsigned ipath_mtu4096 = 1; /* max 4KB IB mtu by default, if supported */
84module_param_named(mtu4096, ipath_mtu4096, uint, S_IRUGO);
85MODULE_PARM_DESC(mtu4096, "enable MTU of 4096 bytes, if supported");
86
87static unsigned ipath_hol_timeout_ms = 13000;
88module_param_named(hol_timeout_ms, ipath_hol_timeout_ms, uint, S_IRUGO);
89MODULE_PARM_DESC(hol_timeout_ms,
90 "duration of user app suspension after link failure");
91
92unsigned ipath_linkrecovery = 1;
93module_param_named(linkrecovery, ipath_linkrecovery, uint, S_IWUSR | S_IRUGO);
94MODULE_PARM_DESC(linkrecovery, "enable workaround for link recovery issue");
95
96MODULE_LICENSE("GPL");
97MODULE_AUTHOR("QLogic <support@qlogic.com>");
98MODULE_DESCRIPTION("QLogic InfiniPath driver");
99
100/*
101 * Table to translate the LINKTRAININGSTATE portion of
102 * IBCStatus to a human-readable form.
103 */
104const char *ipath_ibcstatus_str[] = {
105 "Disabled",
106 "LinkUp",
107 "PollActive",
108 "PollQuiet",
109 "SleepDelay",
110 "SleepQuiet",
111 "LState6", /* unused */
112 "LState7", /* unused */
113 "CfgDebounce",
114 "CfgRcvfCfg",
115 "CfgWaitRmt",
116 "CfgIdle",
117 "RecovRetrain",
118 "CfgTxRevLane", /* unused before IBA7220 */
119 "RecovWaitRmt",
120 "RecovIdle",
121 /* below were added for IBA7220 */
122 "CfgEnhanced",
123 "CfgTest",
124 "CfgWaitRmtTest",
125 "CfgWaitCfgEnhanced",
126 "SendTS_T",
127 "SendTstIdles",
128 "RcvTS_T",
129 "SendTst_TS1s",
130 "LTState18", "LTState19", "LTState1A", "LTState1B",
131 "LTState1C", "LTState1D", "LTState1E", "LTState1F"
132};
133
134static void ipath_remove_one(struct pci_dev *);
135static int ipath_init_one(struct pci_dev *, const struct pci_device_id *);
136
137/* Only needed for registration, nothing else needs this info */
138#define PCI_VENDOR_ID_PATHSCALE 0x1fc1
139#define PCI_DEVICE_ID_INFINIPATH_HT 0xd
140
141/* Number of seconds before our card status check... */
142#define STATUS_TIMEOUT 60
143
144static const struct pci_device_id ipath_pci_tbl[] = {
145 { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_HT) },
146 { 0, }
147};
148
149MODULE_DEVICE_TABLE(pci, ipath_pci_tbl);
150
151static struct pci_driver ipath_driver = {
152 .name = IPATH_DRV_NAME,
153 .probe = ipath_init_one,
154 .remove = ipath_remove_one,
155 .id_table = ipath_pci_tbl,
156 .driver = {
157 .groups = ipath_driver_attr_groups,
158 },
159};
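
The module init/exit code that actually registers this pci_driver is not among the lines shown here. As a hedged sketch, the standard pairing would look like the following; the *_sketch names are placeholders, and the driver's real init path does more than just this call:

#include <linux/module.h>
#include <linux/pci.h>

static int __init ipath_register_sketch(void)
{
        return pci_register_driver(&ipath_driver);
}

static void __exit ipath_unregister_sketch(void)
{
        pci_unregister_driver(&ipath_driver);
}

module_init(ipath_register_sketch);
module_exit(ipath_unregister_sketch);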
160
161static inline void read_bars(struct ipath_devdata *dd, struct pci_dev *dev,
162 u32 *bar0, u32 *bar1)
163{
164 int ret;
165
166 ret = pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, bar0);
167 if (ret)
168 ipath_dev_err(dd, "failed to read bar0 before enable: "
169 "error %d\n", -ret);
170
171 ret = pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, bar1);
172 if (ret)
173 ipath_dev_err(dd, "failed to read bar1 before enable: "
174 "error %d\n", -ret);
175
176 ipath_dbg("Read bar0 %x bar1 %x\n", *bar0, *bar1);
177}
178
179static void ipath_free_devdata(struct pci_dev *pdev,
180 struct ipath_devdata *dd)
181{
182 unsigned long flags;
183
184 pci_set_drvdata(pdev, NULL);
185
186 if (dd->ipath_unit != -1) {
187 spin_lock_irqsave(&ipath_devs_lock, flags);
188 idr_remove(&unit_table, dd->ipath_unit);
189 list_del(&dd->ipath_list);
190 spin_unlock_irqrestore(&ipath_devs_lock, flags);
191 }
192 vfree(dd);
193}
194
195static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
196{
197 unsigned long flags;
198 struct ipath_devdata *dd;
199 int ret;
200
201 dd = vzalloc(sizeof(*dd));
202 if (!dd) {
203 dd = ERR_PTR(-ENOMEM);
204 goto bail;
205 }
206 dd->ipath_unit = -1;
207
208 idr_preload(GFP_KERNEL);
209 spin_lock_irqsave(&ipath_devs_lock, flags);
210
211 ret = idr_alloc(&unit_table, dd, 0, 0, GFP_NOWAIT);
212 if (ret < 0) {
213 printk(KERN_ERR IPATH_DRV_NAME
214 ": Could not allocate unit ID: error %d\n", -ret);
215 ipath_free_devdata(pdev, dd);
216 dd = ERR_PTR(ret);
217 goto bail_unlock;
218 }
219 dd->ipath_unit = ret;
220
221 dd->pcidev = pdev;
222 pci_set_drvdata(pdev, dd);
223
224 list_add(&dd->ipath_list, &ipath_dev_list);
225
226bail_unlock:
227 spin_unlock_irqrestore(&ipath_devs_lock, flags);
228 idr_preload_end();
229bail:
230 return dd;
231}
232
233static inline struct ipath_devdata *__ipath_lookup(int unit)
234{
235 return idr_find(&unit_table, unit);
236}
237
238struct ipath_devdata *ipath_lookup(int unit)
239{
240 struct ipath_devdata *dd;
241 unsigned long flags;
242
243 spin_lock_irqsave(&ipath_devs_lock, flags);
244 dd = __ipath_lookup(unit);
245 spin_unlock_irqrestore(&ipath_devs_lock, flags);
246
247 return dd;
248}
249
250int ipath_count_units(int *npresentp, int *nupp, int *maxportsp)
251{
252 int nunits, npresent, nup;
253 struct ipath_devdata *dd;
254 unsigned long flags;
255 int maxports;
256
257 nunits = npresent = nup = maxports = 0;
258
259 spin_lock_irqsave(&ipath_devs_lock, flags);
260
261 list_for_each_entry(dd, &ipath_dev_list, ipath_list) {
262 nunits++;
263 if ((dd->ipath_flags & IPATH_PRESENT) && dd->ipath_kregbase)
264 npresent++;
265 if (dd->ipath_lid &&
266 !(dd->ipath_flags & (IPATH_DISABLED | IPATH_LINKDOWN
267 | IPATH_LINKUNK)))
268 nup++;
269 if (dd->ipath_cfgports > maxports)
270 maxports = dd->ipath_cfgports;
271 }
272
273 spin_unlock_irqrestore(&ipath_devs_lock, flags);
274
275 if (npresentp)
276 *npresentp = npresent;
277 if (nupp)
278 *nupp = nup;
279 if (maxportsp)
280 *maxportsp = maxports;
281
282 return nunits;
283}
284
285/*
286 * These next two routines are placeholders in case we don't have per-arch
287 * code for controlling write combining. If explicit control of write
288 * combining is not available, performance will probably be awful.
289 */
290
291int __attribute__((weak)) ipath_enable_wc(struct ipath_devdata *dd)
292{
293 return -EOPNOTSUPP;
294}
295
296void __attribute__((weak)) ipath_disable_wc(struct ipath_devdata *dd)
297{
298}
299
300/*
301 * Perform a PIO buffer bandwidth write test, to verify proper system
302 * configuration. Even when all the setup calls work, occasionally
303 * BIOS or other issues can prevent write combining from working, or
304 * can cause other bandwidth problems to the chip.
305 *
306 * This test simply writes the same buffer over and over again, and
307 * measures close to the peak bandwidth to the chip (not testing
308 * data bandwidth to the wire). On chips that use an address-based
309 * trigger to send packets to the wire, this is easy. On chips that
310 * use a count to trigger, we want to make sure that the packet doesn't
311 * go out on the wire, or trigger flow control checks.
312 */
313static void ipath_verify_pioperf(struct ipath_devdata *dd)
314{
315 u32 pbnum, cnt, lcnt;
316 u32 __iomem *piobuf;
317 u32 *addr;
318 u64 msecs, emsecs;
319
320 piobuf = ipath_getpiobuf(dd, 0, &pbnum);
321 if (!piobuf) {
322 dev_info(&dd->pcidev->dev,
323 "No PIObufs for checking perf, skipping\n");
324 return;
325 }
326
327 /*
328 * Enough to give us a reasonable test, less than piobuf size, and
329 * likely multiple of store buffer length.
330 */
331 cnt = 1024;
332
333 addr = vmalloc(cnt);
334 if (!addr) {
335 dev_info(&dd->pcidev->dev,
336 "Couldn't get memory for checking PIO perf,"
337 " skipping\n");
338 goto done;
339 }
340
341 preempt_disable(); /* we want reasonably accurate elapsed time */
342 msecs = 1 + jiffies_to_msecs(jiffies);
343 for (lcnt = 0; lcnt < 10000U; lcnt++) {
344 /* wait until we cross msec boundary */
345 if (jiffies_to_msecs(jiffies) >= msecs)
346 break;
347 udelay(1);
348 }
349
350 ipath_disable_armlaunch(dd);
351
352 /*
353 * length 0, no dwords actually sent, and mark as VL15
354 * on chips where that may matter (due to IB flowcontrol)
355 */
356 if ((dd->ipath_flags & IPATH_HAS_PBC_CNT))
357 writeq(1UL << 63, piobuf);
358 else
359 writeq(0, piobuf);
360 ipath_flush_wc();
361
362 /*
363 * this is only roughly accurate, since even with preempt we
364 * still take interrupts that could take a while. Running for
365 * >= 5 msec seems to get us "close enough" to accurate values
366 */
367 msecs = jiffies_to_msecs(jiffies);
368 for (emsecs = lcnt = 0; emsecs <= 5UL; lcnt++) {
369 __iowrite32_copy(piobuf + 64, addr, cnt >> 2);
370 emsecs = jiffies_to_msecs(jiffies) - msecs;
371 }
372
373 /* 1 GiB/sec, slightly over IB SDR line rate */
374 if (lcnt < (emsecs * 1024U))
375 ipath_dev_err(dd,
376 "Performance problem: bandwidth to PIO buffers is "
377 "only %u MiB/sec\n",
378 lcnt / (u32) emsecs);
379 else
380 ipath_dbg("PIO buffer bandwidth %u MiB/sec is OK\n",
381 lcnt / (u32) emsecs);
382
383 preempt_enable();
384
385 vfree(addr);
386
387done:
388 /* disarm piobuf, so it's available again */
389 ipath_disarm_piobufs(dd, pbnum, 1);
390 ipath_enable_armlaunch(dd);
391}
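
For reference, the threshold above works out as follows: each loop iteration copies cnt = 1024 bytes, so lcnt iterations in emsecs milliseconds is roughly lcnt/emsecs MiB/s (1 KiB per ms is about 1 MiB/s), and requiring lcnt >= emsecs * 1024 therefore demands roughly 1 GiB/s to the PIO buffers. A tiny helper restating that arithmetic; pio_bw_ok_sketch is illustrative only:

#include <linux/types.h>

static inline int pio_bw_ok_sketch(u32 lcnt, u64 emsecs)
{
        u32 mibps = emsecs ? lcnt / (u32)emsecs : 0;    /* approx. MiB/s */

        return mibps >= 1024;                           /* approx. 1 GiB/s */
}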
392
393static void cleanup_device(struct ipath_devdata *dd);
394
395static int ipath_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
396{
397 int ret, len, j;
398 struct ipath_devdata *dd;
399 unsigned long long addr;
400 u32 bar0 = 0, bar1 = 0;
401
402#ifdef CONFIG_X86_64
403 if (pat_enabled()) {
404 pr_warn("ipath needs PAT disabled, boot with nopat kernel parameter\n");
405 ret = -ENODEV;
406 goto bail;
407 }
408#endif
409
410 dd = ipath_alloc_devdata(pdev);
411 if (IS_ERR(dd)) {
412 ret = PTR_ERR(dd);
413 printk(KERN_ERR IPATH_DRV_NAME
414 ": Could not allocate devdata: error %d\n", -ret);
415 goto bail;
416 }
417
418 ipath_cdbg(VERBOSE, "initializing unit #%u\n", dd->ipath_unit);
419
420 ret = pci_enable_device(pdev);
421 if (ret) {
422 /* This can happen iff:
423 *
424 * We did a chip reset, and then failed to reprogram the
425 * BAR, or the chip reset due to an internal error. We then
426 * unloaded the driver and reloaded it.
427 *
428 * Both reset cases set the BAR back to initial state. For
429 * the latter case, the AER sticky error bit at offset 0x718
430 * should be set, but the Linux kernel doesn't yet know
431 * about that, it appears. If the original BAR was retained
432 * in the kernel data structures, this may be OK.
433 */
434 ipath_dev_err(dd, "enable unit %d failed: error %d\n",
435 dd->ipath_unit, -ret);
436 goto bail_devdata;
437 }
438 addr = pci_resource_start(pdev, 0);
439 len = pci_resource_len(pdev, 0);
440 ipath_cdbg(VERBOSE, "regbase (0) %llx len %d irq %d, vend %x/%x "
441 "driver_data %lx\n", addr, len, pdev->irq, ent->vendor,
442 ent->device, ent->driver_data);
443
444 read_bars(dd, pdev, &bar0, &bar1);
445
446 if (!bar1 && !(bar0 & ~0xf)) {
447 if (addr) {
448 dev_info(&pdev->dev, "BAR is 0 (probable RESET), "
449 "rewriting as %llx\n", addr);
450 ret = pci_write_config_dword(
451 pdev, PCI_BASE_ADDRESS_0, addr);
452 if (ret) {
453 ipath_dev_err(dd, "rewrite of BAR0 "
454 "failed: err %d\n", -ret);
455 goto bail_disable;
456 }
457 ret = pci_write_config_dword(
458 pdev, PCI_BASE_ADDRESS_1, addr >> 32);
459 if (ret) {
460 ipath_dev_err(dd, "rewrite of BAR1 "
461 "failed: err %d\n", -ret);
462 goto bail_disable;
463 }
464 } else {
465 ipath_dev_err(dd, "BAR is 0 (probable RESET), "
466 "not usable until reboot\n");
467 ret = -ENODEV;
468 goto bail_disable;
469 }
470 }
471
472 ret = pci_request_regions(pdev, IPATH_DRV_NAME);
473 if (ret) {
474 dev_info(&pdev->dev, "pci_request_regions unit %u fails: "
475 "err %d\n", dd->ipath_unit, -ret);
476 goto bail_disable;
477 }
478
479 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
480 if (ret) {
481 /*
482 * if the 64 bit setup fails, try 32 bit. Some systems
483 * do not setup 64 bit maps on systems with 2GB or less
484 * memory installed.
485 */
486 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
487 if (ret) {
488 dev_info(&pdev->dev,
489 "Unable to set DMA mask for unit %u: %d\n",
490 dd->ipath_unit, ret);
491 goto bail_regions;
492 } else {
493 ipath_dbg("No 64bit DMA mask, used 32 bit mask\n");
494 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
495 if (ret)
496 dev_info(&pdev->dev,
497 "Unable to set DMA consistent mask "
498 "for unit %u: %d\n",
499 dd->ipath_unit, ret);
500
501 }
502 } else {
503 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
504 if (ret)
505 dev_info(&pdev->dev,
506 "Unable to set DMA consistent mask "
507 "for unit %u: %d\n",
508 dd->ipath_unit, ret);
509 }
510
511 pci_set_master(pdev);
512
513 /*
514 * Save BARs to rewrite after device reset. Save all 64 bits of
515 * BAR, just in case.
516 */
517 dd->ipath_pcibar0 = addr;
518 dd->ipath_pcibar1 = addr >> 32;
519 dd->ipath_deviceid = ent->device; /* save for later use */
520 dd->ipath_vendorid = ent->vendor;
521
522 /* setup the chip-specific functions, as early as possible. */
523 switch (ent->device) {
524 case PCI_DEVICE_ID_INFINIPATH_HT:
525 ipath_init_iba6110_funcs(dd);
526 break;
527
528 default:
529 ipath_dev_err(dd, "Found unknown QLogic deviceid 0x%x, "
530 "failing\n", ent->device);
531 return -ENODEV;
532 }
533
534 for (j = 0; j < 6; j++) {
535 if (!pdev->resource[j].start)
536 continue;
537 ipath_cdbg(VERBOSE, "BAR %d %pR, len %llx\n",
538 j, &pdev->resource[j],
539 (unsigned long long)pci_resource_len(pdev, j));
540 }
541
542 if (!addr) {
543 ipath_dev_err(dd, "No valid address in BAR 0!\n");
544 ret = -ENODEV;
545 goto bail_regions;
546 }
547
548 dd->ipath_pcirev = pdev->revision;
549
550#if defined(__powerpc__)
551 /* There isn't a generic way to specify writethrough mappings */
552 dd->ipath_kregbase = __ioremap(addr, len,
553 (_PAGE_NO_CACHE|_PAGE_WRITETHRU));
554#else
555 /* XXX: split this properly to enable on PAT */
556 dd->ipath_kregbase = ioremap_nocache(addr, len);
557#endif
558
559 if (!dd->ipath_kregbase) {
560 ipath_dbg("Unable to map io addr %llx to kvirt, failing\n",
561 addr);
562 ret = -ENOMEM;
563 goto bail_iounmap;
564 }
565 dd->ipath_kregend = (u64 __iomem *)
566 ((void __iomem *)dd->ipath_kregbase + len);
567 dd->ipath_physaddr = addr; /* used for io_remap, etc. */
568 /* for user mmap */
569 ipath_cdbg(VERBOSE, "mapped io addr %llx to kregbase %p\n",
570 addr, dd->ipath_kregbase);
571
572 if (dd->ipath_f_bus(dd, pdev))
573 ipath_dev_err(dd, "Failed to setup config space; "
574 "continuing anyway\n");
575
576 /*
577 * set up our interrupt handler; IRQF_SHARED probably not needed,
578 * since MSI interrupts shouldn't be shared, but it won't hurt for now.
579 * check for a 0 irq after we return from chip-specific bus setup,
580 * since that setup can affect this.
581 */
582 if (!dd->ipath_irq)
583 ipath_dev_err(dd, "irq is 0, BIOS error? Interrupts won't "
584 "work\n");
585 else {
586 ret = request_irq(dd->ipath_irq, ipath_intr, IRQF_SHARED,
587 IPATH_DRV_NAME, dd);
588 if (ret) {
589 ipath_dev_err(dd, "Couldn't setup irq handler, "
590 "irq=%d: %d\n", dd->ipath_irq, ret);
591 goto bail_iounmap;
592 }
593 }
594
595 ret = ipath_init_chip(dd, 0); /* do the chip-specific init */
596 if (ret)
597 goto bail_irqsetup;
598
599 ret = ipath_enable_wc(dd);
600
601 if (ret)
602 ret = 0;
603
604 ipath_verify_pioperf(dd);
605
606 ipath_device_create_group(&pdev->dev, dd);
607 ipathfs_add_device(dd);
608 ipath_user_add(dd);
609 ipath_diag_add(dd);
610 ipath_register_ib_device(dd);
611
612 goto bail;
613
614bail_irqsetup:
615 cleanup_device(dd);
616
617 if (dd->ipath_irq)
618 dd->ipath_f_free_irq(dd);
619
620 if (dd->ipath_f_cleanup)
621 dd->ipath_f_cleanup(dd);
622
623bail_iounmap:
624 iounmap((volatile void __iomem *) dd->ipath_kregbase);
625
626bail_regions:
627 pci_release_regions(pdev);
628
629bail_disable:
630 pci_disable_device(pdev);
631
632bail_devdata:
633 ipath_free_devdata(pdev, dd);
634
635bail:
636 return ret;
637}
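
The DMA-mask handling in ipath_init_one() tries a 64-bit mask first and falls back to 32-bit, then sets a matching consistent mask. A hedged helper capturing just that fallback with the same pci_set_dma_mask()/pci_set_consistent_dma_mask() calls; unlike the function above, which only warns when the consistent mask cannot be set, this sketch simply propagates the error:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Hedged sketch of the 64-then-32-bit DMA mask fallback used above. */
static int ipath_set_dma_masks_sketch(struct pci_dev *pdev)
{
        int ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));

        if (ret) {
                /* no 64-bit mapping available; try 32-bit before giving up */
                ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (ret)
                        return ret;
                return pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
        }
        return pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
}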
638
639static void cleanup_device(struct ipath_devdata *dd)
640{
641 int port;
642 struct ipath_portdata **tmp;
643 unsigned long flags;
644
645 if (*dd->ipath_statusp & IPATH_STATUS_CHIP_PRESENT) {
646 /* can't do anything more with chip; needs re-init */
647 *dd->ipath_statusp &= ~IPATH_STATUS_CHIP_PRESENT;
648 if (dd->ipath_kregbase) {
649 /*
650 * if we haven't already cleaned up, clear these now
651 * to ensure any register reads/writes "fail" until
652 * re-init
653 */
654 dd->ipath_kregbase = NULL;
655 dd->ipath_uregbase = 0;
656 dd->ipath_sregbase = 0;
657 dd->ipath_cregbase = 0;
658 dd->ipath_kregsize = 0;
659 }
660 ipath_disable_wc(dd);
661 }
662
663 if (dd->ipath_spectriggerhit)
664 dev_info(&dd->pcidev->dev, "%lu special trigger hits\n",
665 dd->ipath_spectriggerhit);
666
667 if (dd->ipath_pioavailregs_dma) {
668 dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
669 (void *) dd->ipath_pioavailregs_dma,
670 dd->ipath_pioavailregs_phys);
671 dd->ipath_pioavailregs_dma = NULL;
672 }
673 if (dd->ipath_dummy_hdrq) {
674 dma_free_coherent(&dd->pcidev->dev,
675 dd->ipath_pd[0]->port_rcvhdrq_size,
676 dd->ipath_dummy_hdrq, dd->ipath_dummy_hdrq_phys);
677 dd->ipath_dummy_hdrq = NULL;
678 }
679
680 if (dd->ipath_pageshadow) {
681 struct page **tmpp = dd->ipath_pageshadow;
682 dma_addr_t *tmpd = dd->ipath_physshadow;
683 int i, cnt = 0;
684
685 ipath_cdbg(VERBOSE, "Unlocking any expTID pages still "
686 "locked\n");
687 for (port = 0; port < dd->ipath_cfgports; port++) {
688 int port_tidbase = port * dd->ipath_rcvtidcnt;
689 int maxtid = port_tidbase + dd->ipath_rcvtidcnt;
690 for (i = port_tidbase; i < maxtid; i++) {
691 if (!tmpp[i])
692 continue;
693 pci_unmap_page(dd->pcidev, tmpd[i],
694 PAGE_SIZE, PCI_DMA_FROMDEVICE);
695 ipath_release_user_pages(&tmpp[i], 1);
696 tmpp[i] = NULL;
697 cnt++;
698 }
699 }
700 if (cnt) {
701 ipath_stats.sps_pageunlocks += cnt;
702 ipath_cdbg(VERBOSE, "There were still %u expTID "
703 "entries locked\n", cnt);
704 }
705 if (ipath_stats.sps_pagelocks ||
706 ipath_stats.sps_pageunlocks)
707 ipath_cdbg(VERBOSE, "%llu pages locked, %llu "
708 "unlocked via ipath_m{un}lock\n",
709 (unsigned long long)
710 ipath_stats.sps_pagelocks,
711 (unsigned long long)
712 ipath_stats.sps_pageunlocks);
713
714 ipath_cdbg(VERBOSE, "Free shadow page tid array at %p\n",
715 dd->ipath_pageshadow);
716 tmpp = dd->ipath_pageshadow;
717 dd->ipath_pageshadow = NULL;
718 vfree(tmpp);
719
720 dd->ipath_egrtidbase = NULL;
721 }
722
723 /*
724 * free any resources still in use (usually just kernel ports)
725 * at unload; we loop over portcnt, because that's what we allocate.
726 * We acquire lock to be really paranoid that ipath_pd isn't being
727 * accessed from some interrupt-related code (that should not happen,
728 * but best to be sure).
729 */
730 spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
731 tmp = dd->ipath_pd;
732 dd->ipath_pd = NULL;
733 spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
734 for (port = 0; port < dd->ipath_portcnt; port++) {
735 struct ipath_portdata *pd = tmp[port];
736 tmp[port] = NULL; /* debugging paranoia */
737 ipath_free_pddata(dd, pd);
738 }
739 kfree(tmp);
740}
741
742static void ipath_remove_one(struct pci_dev *pdev)
743{
744 struct ipath_devdata *dd = pci_get_drvdata(pdev);
745
746 ipath_cdbg(VERBOSE, "removing, pdev=%p, dd=%p\n", pdev, dd);
747
748 /*
749 * disable the IB link early, to be sure no new packets arrive, which
750 * complicates the shutdown process
751 */
752 ipath_shutdown_device(dd);
753
754 flush_workqueue(ib_wq);
755
756 if (dd->verbs_dev)
757 ipath_unregister_ib_device(dd->verbs_dev);
758
759 ipath_diag_remove(dd);
760 ipath_user_remove(dd);
761 ipathfs_remove_device(dd);
762 ipath_device_remove_group(&pdev->dev, dd);
763
764 ipath_cdbg(VERBOSE, "Releasing pci memory regions, dd %p, "
765 "unit %u\n", dd, (u32) dd->ipath_unit);
766
767 cleanup_device(dd);
768
769 /*
770 * turn off rcv, send, and interrupts for all ports, all drivers
771 * should also hard reset the chip here?
772 * free up port 0 (kernel) rcvhdr, egr bufs, and eventually tid bufs
773 * for all versions of the driver, if they were allocated
774 */
775 if (dd->ipath_irq) {
776 ipath_cdbg(VERBOSE, "unit %u free irq %d\n",
777 dd->ipath_unit, dd->ipath_irq);
778 dd->ipath_f_free_irq(dd);
779 } else
780 ipath_dbg("irq is 0, not doing free_irq "
781 "for unit %u\n", dd->ipath_unit);
782 /*
783 * we check for NULL here, because it's outside
784 * the kregbase check, and we need to call it
785 * after the free_irq. Thus it's possible that
786 * the function pointers were never initialized.
787 */
788 if (dd->ipath_f_cleanup)
789 /* clean up chip-specific stuff */
790 dd->ipath_f_cleanup(dd);
791
792 ipath_cdbg(VERBOSE, "Unmapping kregbase %p\n", dd->ipath_kregbase);
793 iounmap((volatile void __iomem *) dd->ipath_kregbase);
794 pci_release_regions(pdev);
795 ipath_cdbg(VERBOSE, "calling pci_disable_device\n");
796 pci_disable_device(pdev);
797
798 ipath_free_devdata(pdev, dd);
799}
800
801/* general driver use */
802DEFINE_MUTEX(ipath_mutex);
803
804static DEFINE_SPINLOCK(ipath_pioavail_lock);
805
806/**
807 * ipath_disarm_piobufs - cancel a range of PIO buffers
808 * @dd: the infinipath device
809 * @first: the first PIO buffer to cancel
810 * @cnt: the number of PIO buffers to cancel
811 *
812 * cancel a range of PIO buffers, used when they might be armed, but
813 * not triggered. Used at init to ensure buffer state, and also user
814 * process close, in case it died while writing to a PIO buffer.
815 * Also used after errors.
816 */
817void ipath_disarm_piobufs(struct ipath_devdata *dd, unsigned first,
818 unsigned cnt)
819{
820 unsigned i, last = first + cnt;
821 unsigned long flags;
822
823 ipath_cdbg(PKT, "disarm %u PIObufs first=%u\n", cnt, first);
824 for (i = first; i < last; i++) {
825 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
826 /*
827 * The disarm-related bits are write-only, so it
828 * is ok to OR them in with our copy of sendctrl
829 * while we hold the lock.
830 */
831 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
832 dd->ipath_sendctrl | INFINIPATH_S_DISARM |
833 (i << INFINIPATH_S_DISARMPIOBUF_SHIFT));
834 /* can't disarm bufs back-to-back per iba7220 spec */
835 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
836 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
837 }
838 /* on some older chips, update may not happen after cancel */
839 ipath_force_pio_avail_update(dd);
840}
841
842/**
843 * ipath_wait_linkstate - wait for an IB link state change to occur
844 * @dd: the infinipath device
845 * @state: the state to wait for
846 * @msecs: the number of milliseconds to wait
847 *
848 * Wait up to msecs milliseconds for an IB link state change to occur.
849 * For now, take the easy polling route. Currently used only by
850 * ipath_set_linkstate. Returns 0 if the state is reached, otherwise
851 * -ETIMEDOUT. The state can have multiple bits set, to allow for any
852 * of several transitions.
853 */
854int ipath_wait_linkstate(struct ipath_devdata *dd, u32 state, int msecs)
855{
856 dd->ipath_state_wanted = state;
857 wait_event_interruptible_timeout(ipath_state_wait,
858 (dd->ipath_flags & state),
859 msecs_to_jiffies(msecs));
860 dd->ipath_state_wanted = 0;
861
862 if (!(dd->ipath_flags & state)) {
863 u64 val;
864 ipath_cdbg(VERBOSE, "Didn't reach linkstate %s within %u"
865 " ms\n",
866 /* test INIT ahead of DOWN, both can be set */
867 (state & IPATH_LINKINIT) ? "INIT" :
868 ((state & IPATH_LINKDOWN) ? "DOWN" :
869 ((state & IPATH_LINKARMED) ? "ARM" : "ACTIVE")),
870 msecs);
871 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
872 ipath_cdbg(VERBOSE, "ibcc=%llx ibcstatus=%llx (%s)\n",
873 (unsigned long long) ipath_read_kreg64(
874 dd, dd->ipath_kregs->kr_ibcctrl),
875 (unsigned long long) val,
876 ipath_ibcstatus_str[val & dd->ibcs_lts_mask]);
877 }
878 return (dd->ipath_flags & state) ? 0 : -ETIMEDOUT;
879}
880
881static void decode_sdma_errs(struct ipath_devdata *dd, ipath_err_t err,
882 char *buf, size_t blen)
883{
884 static const struct {
885 ipath_err_t err;
886 const char *msg;
887 } errs[] = {
888 { INFINIPATH_E_SDMAGENMISMATCH, "SDmaGenMismatch" },
889 { INFINIPATH_E_SDMAOUTOFBOUND, "SDmaOutOfBound" },
890 { INFINIPATH_E_SDMATAILOUTOFBOUND, "SDmaTailOutOfBound" },
891 { INFINIPATH_E_SDMABASE, "SDmaBase" },
892 { INFINIPATH_E_SDMA1STDESC, "SDma1stDesc" },
893 { INFINIPATH_E_SDMARPYTAG, "SDmaRpyTag" },
894 { INFINIPATH_E_SDMADWEN, "SDmaDwEn" },
895 { INFINIPATH_E_SDMAMISSINGDW, "SDmaMissingDw" },
896 { INFINIPATH_E_SDMAUNEXPDATA, "SDmaUnexpData" },
897 { INFINIPATH_E_SDMADESCADDRMISALIGN, "SDmaDescAddrMisalign" },
898 { INFINIPATH_E_SENDBUFMISUSE, "SendBufMisuse" },
899 { INFINIPATH_E_SDMADISABLED, "SDmaDisabled" },
900 };
901 int i;
902 int expected;
903 size_t bidx = 0;
904
905 for (i = 0; i < ARRAY_SIZE(errs); i++) {
906 expected = (errs[i].err != INFINIPATH_E_SDMADISABLED) ? 0 :
907 test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
908 if ((err & errs[i].err) && !expected)
909 bidx += snprintf(buf + bidx, blen - bidx,
910 "%s ", errs[i].msg);
911 }
912}
913
914/*
915 * Decode the error status into strings, deciding whether to always
916 * print it or not depending on "normal packet errors" vs everything
917 * else. Return 1 if "real" errors, otherwise 0 if only packet
918 * errors, so caller can decide what to print with the string.
919 */
920int ipath_decode_err(struct ipath_devdata *dd, char *buf, size_t blen,
921 ipath_err_t err)
922{
923 int iserr = 1;
924 *buf = '\0';
925 if (err & INFINIPATH_E_PKTERRS) {
926 if (!(err & ~INFINIPATH_E_PKTERRS))
927 iserr = 0; // if only packet errors.
928 if (ipath_debug & __IPATH_ERRPKTDBG) {
929 if (err & INFINIPATH_E_REBP)
930 strlcat(buf, "EBP ", blen);
931 if (err & INFINIPATH_E_RVCRC)
932 strlcat(buf, "VCRC ", blen);
933 if (err & INFINIPATH_E_RICRC) {
934 strlcat(buf, "CRC ", blen);
935 // clear for check below, so only once
936 err &= INFINIPATH_E_RICRC;
937 }
938 if (err & INFINIPATH_E_RSHORTPKTLEN)
939 strlcat(buf, "rshortpktlen ", blen);
940 if (err & INFINIPATH_E_SDROPPEDDATAPKT)
941 strlcat(buf, "sdroppeddatapkt ", blen);
942 if (err & INFINIPATH_E_SPKTLEN)
943 strlcat(buf, "spktlen ", blen);
944 }
945 if ((err & INFINIPATH_E_RICRC) &&
946 !(err&(INFINIPATH_E_RVCRC|INFINIPATH_E_REBP)))
947 strlcat(buf, "CRC ", blen);
948 if (!iserr)
949 goto done;
950 }
951 if (err & INFINIPATH_E_RHDRLEN)
952 strlcat(buf, "rhdrlen ", blen);
953 if (err & INFINIPATH_E_RBADTID)
954 strlcat(buf, "rbadtid ", blen);
955 if (err & INFINIPATH_E_RBADVERSION)
956 strlcat(buf, "rbadversion ", blen);
957 if (err & INFINIPATH_E_RHDR)
958 strlcat(buf, "rhdr ", blen);
959 if (err & INFINIPATH_E_SENDSPECIALTRIGGER)
960 strlcat(buf, "sendspecialtrigger ", blen);
961 if (err & INFINIPATH_E_RLONGPKTLEN)
962 strlcat(buf, "rlongpktlen ", blen);
963 if (err & INFINIPATH_E_RMAXPKTLEN)
964 strlcat(buf, "rmaxpktlen ", blen);
965 if (err & INFINIPATH_E_RMINPKTLEN)
966 strlcat(buf, "rminpktlen ", blen);
967 if (err & INFINIPATH_E_SMINPKTLEN)
968 strlcat(buf, "sminpktlen ", blen);
969 if (err & INFINIPATH_E_RFORMATERR)
970 strlcat(buf, "rformaterr ", blen);
971 if (err & INFINIPATH_E_RUNSUPVL)
972 strlcat(buf, "runsupvl ", blen);
973 if (err & INFINIPATH_E_RUNEXPCHAR)
974 strlcat(buf, "runexpchar ", blen);
975 if (err & INFINIPATH_E_RIBFLOW)
976 strlcat(buf, "ribflow ", blen);
977 if (err & INFINIPATH_E_SUNDERRUN)
978 strlcat(buf, "sunderrun ", blen);
979 if (err & INFINIPATH_E_SPIOARMLAUNCH)
980 strlcat(buf, "spioarmlaunch ", blen);
981 if (err & INFINIPATH_E_SUNEXPERRPKTNUM)
982 strlcat(buf, "sunexperrpktnum ", blen);
983 if (err & INFINIPATH_E_SDROPPEDSMPPKT)
984 strlcat(buf, "sdroppedsmppkt ", blen);
985 if (err & INFINIPATH_E_SMAXPKTLEN)
986 strlcat(buf, "smaxpktlen ", blen);
987 if (err & INFINIPATH_E_SUNSUPVL)
988 strlcat(buf, "sunsupVL ", blen);
989 if (err & INFINIPATH_E_INVALIDADDR)
990 strlcat(buf, "invalidaddr ", blen);
991 if (err & INFINIPATH_E_RRCVEGRFULL)
992 strlcat(buf, "rcvegrfull ", blen);
993 if (err & INFINIPATH_E_RRCVHDRFULL)
994 strlcat(buf, "rcvhdrfull ", blen);
995 if (err & INFINIPATH_E_IBSTATUSCHANGED)
996 strlcat(buf, "ibcstatuschg ", blen);
997 if (err & INFINIPATH_E_RIBLOSTLINK)
998 strlcat(buf, "riblostlink ", blen);
999 if (err & INFINIPATH_E_HARDWARE)
1000 strlcat(buf, "hardware ", blen);
1001 if (err & INFINIPATH_E_RESET)
1002 strlcat(buf, "reset ", blen);
1003 if (err & INFINIPATH_E_SDMAERRS)
1004 decode_sdma_errs(dd, err, buf, blen);
1005 if (err & INFINIPATH_E_INVALIDEEPCMD)
1006 strlcat(buf, "invalideepromcmd ", blen);
1007done:
1008 return iserr;
1009}
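
A hedged sketch of how a caller might use ipath_decode_err() as documented above: decode into a stack buffer, then report loudly only when the return value says the errors were more than ordinary packet errors. report_errs_sketch is illustrative; the driver's own error reporting lives elsewhere, and the sketch assumes "ipath_kernel.h" as included at the top of this file:

static void report_errs_sketch(struct ipath_devdata *dd, ipath_err_t err)
{
        char msg[128];

        if (ipath_decode_err(dd, msg, sizeof(msg), err))
                ipath_dev_err(dd, "errors: %s\n", msg);         /* "real" errors */
        else
                ipath_cdbg(ERRPKT, "packet errors: %s\n", msg);
}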
1010
1011/**
1012 * get_rhf_errstring - decode RHF errors
1013 * @err: the err number
1014 * @msg: the output buffer
1015 * @len: the length of the output buffer
1016 *
1017 * only used in one place now; may want more later
1018 */
1019static void get_rhf_errstring(u32 err, char *msg, size_t len)
1020{
1021 /* start with an empty string, so we needn't check what's first */
1022 *msg = '\0';
1023
1024 if (err & INFINIPATH_RHF_H_ICRCERR)
1025 strlcat(msg, "icrcerr ", len);
1026 if (err & INFINIPATH_RHF_H_VCRCERR)
1027 strlcat(msg, "vcrcerr ", len);
1028 if (err & INFINIPATH_RHF_H_PARITYERR)
1029 strlcat(msg, "parityerr ", len);
1030 if (err & INFINIPATH_RHF_H_LENERR)
1031 strlcat(msg, "lenerr ", len);
1032 if (err & INFINIPATH_RHF_H_MTUERR)
1033 strlcat(msg, "mtuerr ", len);
1034 if (err & INFINIPATH_RHF_H_IHDRERR)
1035 /* infinipath hdr checksum error */
1036 strlcat(msg, "ipathhdrerr ", len);
1037 if (err & INFINIPATH_RHF_H_TIDERR)
1038 strlcat(msg, "tiderr ", len);
1039 if (err & INFINIPATH_RHF_H_MKERR)
1040 /* bad port, offset, etc. */
1041 strlcat(msg, "invalid ipathhdr ", len);
1042 if (err & INFINIPATH_RHF_H_IBERR)
1043 strlcat(msg, "iberr ", len);
1044 if (err & INFINIPATH_RHF_L_SWA)
1045 strlcat(msg, "swA ", len);
1046 if (err & INFINIPATH_RHF_L_SWB)
1047 strlcat(msg, "swB ", len);
1048}
1049
1050/**
1051 * ipath_get_egrbuf - get an eager buffer
1052 * @dd: the infinipath device
1053 * @bufnum: the eager buffer to get
1054 *
1055 * must only be called if ipath_pd[port] is known to be allocated
1056 */
1057static inline void *ipath_get_egrbuf(struct ipath_devdata *dd, u32 bufnum)
1058{
1059 return dd->ipath_port0_skbinfo ?
1060 (void *) dd->ipath_port0_skbinfo[bufnum].skb->data : NULL;
1061}
1062
1063/**
1064 * ipath_alloc_skb - allocate an skb and buffer with possible constraints
1065 * @dd: the infinipath device
1066 * @gfp_mask: the sk_buff GFP allocation mask
1067 */
1068struct sk_buff *ipath_alloc_skb(struct ipath_devdata *dd,
1069 gfp_t gfp_mask)
1070{
1071 struct sk_buff *skb;
1072 u32 len;
1073
1074 /*
1075 * The only fully supported way to handle this is to allocate lots of
1076 * extra space, align as needed, and then do skb_reserve(). That wastes
1077 * a lot of memory... I'll have to hack this into infinipath_copy
1078 * also.
1079 */
1080
1081 /*
1082 * We need 2 extra bytes for ipath_ether data sent in the
1083 * key header. In order to keep everything dword aligned,
1084 * we'll reserve 4 bytes.
1085 */
1086 len = dd->ipath_ibmaxlen + 4;
1087
1088 if (dd->ipath_flags & IPATH_4BYTE_TID) {
1089 /* We need a 2KB multiple alignment, and there is no way
1090 * to do it except to allocate extra and then skb_reserve
1091 * enough to bring it up to the right alignment.
1092 */
1093 len += 2047;
1094 }
1095
1096 skb = __dev_alloc_skb(len, gfp_mask);
1097 if (!skb) {
1098 ipath_dev_err(dd, "Failed to allocate skbuff, length %u\n",
1099 len);
1100 goto bail;
1101 }
1102
1103 skb_reserve(skb, 4);
1104
1105 if (dd->ipath_flags & IPATH_4BYTE_TID) {
1106 u32 una = (unsigned long)skb->data & 2047;
1107 if (una)
1108 skb_reserve(skb, 2048 - una);
1109 }
1110
1111bail:
1112 return skb;
1113}
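
The IPATH_4BYTE_TID branch above over-allocates by 2047 bytes and then skb_reserve()s up to the next 2 KiB boundary: una = addr & 2047 is the misalignment, so reserving 2048 - una lands exactly on a boundary (e.g. una = 2032 means a 16-byte reserve). A tiny restatement of that arithmetic; pad_to_2k_sketch is illustrative:

#include <linux/types.h>

/*
 * Hedged sketch: bytes to skb_reserve() so 'addr' lands on the next
 * 2 KiB boundary, mirroring the IPATH_4BYTE_TID branch above
 * (returns 0 when already aligned).
 */
static inline unsigned int pad_to_2k_sketch(unsigned long addr)
{
        unsigned long una = addr & 2047;        /* offset within a 2 KiB block */

        return una ? 2048 - una : 0;
}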
1114
1115static void ipath_rcv_hdrerr(struct ipath_devdata *dd,
1116 u32 eflags,
1117 u32 l,
1118 u32 etail,
1119 __le32 *rhf_addr,
1120 struct ipath_message_header *hdr)
1121{
1122 char emsg[128];
1123
1124 get_rhf_errstring(eflags, emsg, sizeof emsg);
1125 ipath_cdbg(PKT, "RHFerrs %x hdrqtail=%x typ=%u "
1126 "tlen=%x opcode=%x egridx=%x: %s\n",
1127 eflags, l,
1128 ipath_hdrget_rcv_type(rhf_addr),
1129 ipath_hdrget_length_in_bytes(rhf_addr),
1130 be32_to_cpu(hdr->bth[0]) >> 24,
1131 etail, emsg);
1132
1133 /* Count local link integrity errors. */
1134 if (eflags & (INFINIPATH_RHF_H_ICRCERR | INFINIPATH_RHF_H_VCRCERR)) {
1135 u8 n = (dd->ipath_ibcctrl >>
1136 INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
1137 INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
1138
1139 if (++dd->ipath_lli_counter > n) {
1140 dd->ipath_lli_counter = 0;
1141 dd->ipath_lli_errors++;
1142 }
1143 }
1144}
1145
1146/*
1147 * ipath_kreceive - receive a packet
1148 * @pd: the infinipath port
1149 *
1150 * called from interrupt handler for errors or receive interrupt
1151 */
1152void ipath_kreceive(struct ipath_portdata *pd)
1153{
1154 struct ipath_devdata *dd = pd->port_dd;
1155 __le32 *rhf_addr;
1156 void *ebuf;
1157 const u32 rsize = dd->ipath_rcvhdrentsize; /* words */
1158 const u32 maxcnt = dd->ipath_rcvhdrcnt * rsize; /* words */
1159 u32 etail = -1, l, hdrqtail;
1160 struct ipath_message_header *hdr;
1161 u32 eflags, i, etype, tlen, pkttot = 0, updegr = 0, reloop = 0;
1162 static u64 totcalls; /* stats, may eventually remove */
1163 int last;
1164
1165 l = pd->port_head;
1166 rhf_addr = (__le32 *) pd->port_rcvhdrq + l + dd->ipath_rhf_offset;
1167 if (dd->ipath_flags & IPATH_NODMA_RTAIL) {
1168 u32 seq = ipath_hdrget_seq(rhf_addr);
1169
1170 if (seq != pd->port_seq_cnt)
1171 goto bail;
1172 hdrqtail = 0;
1173 } else {
1174 hdrqtail = ipath_get_rcvhdrtail(pd);
1175 if (l == hdrqtail)
1176 goto bail;
1177 smp_rmb();
1178 }
1179
1180reloop:
1181 for (last = 0, i = 1; !last; i += !last) {
1182 hdr = dd->ipath_f_get_msgheader(dd, rhf_addr);
1183 eflags = ipath_hdrget_err_flags(rhf_addr);
1184 etype = ipath_hdrget_rcv_type(rhf_addr);
1185 /* total length */
1186 tlen = ipath_hdrget_length_in_bytes(rhf_addr);
1187 ebuf = NULL;
1188 if ((dd->ipath_flags & IPATH_NODMA_RTAIL) ?
1189 ipath_hdrget_use_egr_buf(rhf_addr) :
1190 (etype != RCVHQ_RCV_TYPE_EXPECTED)) {
1191 /*
1192 * It turns out that the chip uses an eager buffer
1193 * for all non-expected packets, whether it "needs"
1194 * one or not. So always get the index, but don't
1195 * set ebuf (so we try to copy data) unless the
1196 * length requires it.
1197 */
1198 etail = ipath_hdrget_index(rhf_addr);
1199 updegr = 1;
1200 if (tlen > sizeof(*hdr) ||
1201 etype == RCVHQ_RCV_TYPE_NON_KD)
1202 ebuf = ipath_get_egrbuf(dd, etail);
1203 }
1204
1205 /*
1206 * both tiderr and ipathhdrerr are set for all plain IB
1207 * packets; only ipathhdrerr should be set.
1208 */
1209
1210 if (etype != RCVHQ_RCV_TYPE_NON_KD &&
1211 etype != RCVHQ_RCV_TYPE_ERROR &&
1212 ipath_hdrget_ipath_ver(hdr->iph.ver_port_tid_offset) !=
1213 IPS_PROTO_VERSION)
1214 ipath_cdbg(PKT, "Bad InfiniPath protocol version "
1215 "%x\n", etype);
1216
1217 if (unlikely(eflags))
1218 ipath_rcv_hdrerr(dd, eflags, l, etail, rhf_addr, hdr);
1219 else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
1220 ipath_ib_rcv(dd->verbs_dev, (u32 *)hdr, ebuf, tlen);
1221 if (dd->ipath_lli_counter)
1222 dd->ipath_lli_counter--;
1223 } else if (etype == RCVHQ_RCV_TYPE_EAGER) {
1224 u8 opcode = be32_to_cpu(hdr->bth[0]) >> 24;
1225 u32 qp = be32_to_cpu(hdr->bth[1]) & 0xffffff;
1226 ipath_cdbg(PKT, "typ %x, opcode %x (eager, "
1227 "qp=%x), len %x; ignored\n",
1228 etype, opcode, qp, tlen);
1229 } else if (etype == RCVHQ_RCV_TYPE_EXPECTED) {
1230 ipath_dbg("Bug: Expected TID, opcode %x; ignored\n",
1231 be32_to_cpu(hdr->bth[0]) >> 24);
1232 } else {
1233 /*
1234	 * Error packet, type of error unknown.
1235	 * Probably type 3, but we don't know, so don't
1236	 * even try to print the opcode, etc.
1237	 * Usually caused by a "bad packet" that has no
1238	 * BTH even though the LRH says it should.
1239 */
1240 ipath_cdbg(ERRPKT, "Error Pkt, but no eflags! egrbuf"
1241 " %x, len %x hdrq+%x rhf: %Lx\n",
1242 etail, tlen, l, (unsigned long long)
1243 le64_to_cpu(*(__le64 *) rhf_addr));
1244 if (ipath_debug & __IPATH_ERRPKTDBG) {
1245 u32 j, *d, dw = rsize-2;
1246 if (rsize > (tlen>>2))
1247 dw = tlen>>2;
1248 d = (u32 *)hdr;
1249 printk(KERN_DEBUG "EPkt rcvhdr(%x dw):\n",
1250 dw);
1251 for (j = 0; j < dw; j++)
1252 printk(KERN_DEBUG "%8x%s", d[j],
1253 (j%8) == 7 ? "\n" : " ");
1254 printk(KERN_DEBUG ".\n");
1255 }
1256 }
1257 l += rsize;
1258 if (l >= maxcnt)
1259 l = 0;
1260 rhf_addr = (__le32 *) pd->port_rcvhdrq +
1261 l + dd->ipath_rhf_offset;
1262 if (dd->ipath_flags & IPATH_NODMA_RTAIL) {
1263 u32 seq = ipath_hdrget_seq(rhf_addr);
1264
1265 if (++pd->port_seq_cnt > 13)
1266 pd->port_seq_cnt = 1;
1267 if (seq != pd->port_seq_cnt)
1268 last = 1;
1269 } else if (l == hdrqtail) {
1270 last = 1;
1271 }
1272 /*
1273	 * Update the head registers on the last packet, and every 16 packets,
1274	 * to reduce bus traffic while still trying to prevent rcvhdrq
1275	 * overflows when the queue is nearly full.
1276 */
1277 if (last || !(i & 0xf)) {
1278 u64 lval = l;
1279
1280 /* request IBA6120 and 7220 interrupt only on last */
1281 if (last)
1282 lval |= dd->ipath_rhdrhead_intr_off;
1283 ipath_write_ureg(dd, ur_rcvhdrhead, lval,
1284 pd->port_port);
1285 if (updegr) {
1286 ipath_write_ureg(dd, ur_rcvegrindexhead,
1287 etail, pd->port_port);
1288 updegr = 0;
1289 }
1290 }
1291 }
1292
1293 if (!dd->ipath_rhdrhead_intr_off && !reloop &&
1294 !(dd->ipath_flags & IPATH_NODMA_RTAIL)) {
1295 /* IBA6110 workaround; we can have a race clearing chip
1296 * interrupt with another interrupt about to be delivered,
1297 * and can clear it before it is delivered on the GPIO
1298 * workaround. By doing the extra check here for the
1299 * in-memory tail register updating while we were doing
1300 * earlier packets, we "almost" guarantee we have covered
1301 * that case.
1302 */
1303 u32 hqtail = ipath_get_rcvhdrtail(pd);
1304 if (hqtail != hdrqtail) {
1305 hdrqtail = hqtail;
1306 reloop = 1; /* loop 1 extra time at most */
1307 goto reloop;
1308 }
1309 }
1310
1311 pkttot += i;
1312
1313 pd->port_head = l;
1314
1315 if (pkttot > ipath_stats.sps_maxpkts_call)
1316 ipath_stats.sps_maxpkts_call = pkttot;
1317 ipath_stats.sps_port0pkts += pkttot;
1318 ipath_stats.sps_avgpkts_call =
1319 ipath_stats.sps_port0pkts / ++totcalls;
1320
1321bail:;
1322}
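/*
 * Illustrative sketch, not part of the driver proper.  In the
 * IPATH_NODMA_RTAIL case above, ipath_kreceive() has no DMA'd tail
 * pointer to compare against; instead each rcvhdrq entry carries a
 * sequence number, and software keeps an expected value that cycles
 * through 1..13.  The loop stops as soon as an entry's sequence number
 * does not match the expected value, i.e. the chip has not written that
 * entry yet.  A minimal model of that check (helper name is
 * hypothetical):
 */
static inline int sketch_rhf_entry_valid(u32 entry_seq, u32 *expected)
{
	if (entry_seq != *expected)
		return 0;		/* chip hasn't written this entry yet */
	if (++(*expected) > 13)		/* sequence numbers cycle 1..13 */
		*expected = 1;
	return 1;
}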
1323
1324/**
1325 * ipath_update_pio_bufs - update shadow copy of the PIO availability map
1326 * @dd: the infinipath device
1327 *
1328 * called whenever our local copy indicates we have run out of send buffers
1329 * NOTE: This can be called from interrupt context by some code
1330 * and from non-interrupt context by ipath_getpiobuf().
1331 */
1332
1333static void ipath_update_pio_bufs(struct ipath_devdata *dd)
1334{
1335 unsigned long flags;
1336 int i;
1337 const unsigned piobregs = (unsigned)dd->ipath_pioavregs;
1338
1339 /* If the generation (check) bits have changed, then we update the
1340 * busy bit for the corresponding PIO buffer. This algorithm will
1341 * modify positions to the value they already have in some cases
1342 * (i.e., no change), but it's faster than changing only the bits
1343 * that have changed.
1344 *
1345	 * We would like to do this atomically, to avoid spinlocks in the
1346	 * critical send path, but that's not really possible, given the
1347	 * type of changes, and that this routine could be called on
1348	 * multiple CPUs simultaneously, so we lock in this routine only,
1349	 * to avoid conflicting updates; all we change is the shadow, and
1350	 * it's a single 64-bit memory location, so by definition the update
1351	 * is atomic in terms of what other CPUs can see when testing the
1352	 * bits. The spin_lock overhead isn't too bad, since it only
1353	 * happens when all buffers are in use, so only CPU overhead, not
1354	 * latency or bandwidth, is affected.
1355 */
1356 if (!dd->ipath_pioavailregs_dma) {
1357 ipath_dbg("Update shadow pioavail, but regs_dma NULL!\n");
1358 return;
1359 }
1360 if (ipath_debug & __IPATH_VERBDBG) {
1361 /* only if packet debug and verbose */
1362 volatile __le64 *dma = dd->ipath_pioavailregs_dma;
1363 unsigned long *shadow = dd->ipath_pioavailshadow;
1364
1365 ipath_cdbg(PKT, "Refill avail, dma0=%llx shad0=%lx, "
1366 "d1=%llx s1=%lx, d2=%llx s2=%lx, d3=%llx "
1367 "s3=%lx\n",
1368 (unsigned long long) le64_to_cpu(dma[0]),
1369 shadow[0],
1370 (unsigned long long) le64_to_cpu(dma[1]),
1371 shadow[1],
1372 (unsigned long long) le64_to_cpu(dma[2]),
1373 shadow[2],
1374 (unsigned long long) le64_to_cpu(dma[3]),
1375 shadow[3]);
1376 if (piobregs > 4)
1377 ipath_cdbg(
1378 PKT, "2nd group, dma4=%llx shad4=%lx, "
1379 "d5=%llx s5=%lx, d6=%llx s6=%lx, "
1380 "d7=%llx s7=%lx\n",
1381 (unsigned long long) le64_to_cpu(dma[4]),
1382 shadow[4],
1383 (unsigned long long) le64_to_cpu(dma[5]),
1384 shadow[5],
1385 (unsigned long long) le64_to_cpu(dma[6]),
1386 shadow[6],
1387 (unsigned long long) le64_to_cpu(dma[7]),
1388 shadow[7]);
1389 }
1390 spin_lock_irqsave(&ipath_pioavail_lock, flags);
1391 for (i = 0; i < piobregs; i++) {
1392 u64 pchbusy, pchg, piov, pnew;
1393 /*
1394 * Chip Errata: bug 6641; even and odd qwords>3 are swapped
1395 */
1396 if (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS))
1397 piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i ^ 1]);
1398 else
1399 piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i]);
1400 pchg = dd->ipath_pioavailkernel[i] &
1401 ~(dd->ipath_pioavailshadow[i] ^ piov);
1402 pchbusy = pchg << INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT;
1403 if (pchg && (pchbusy & dd->ipath_pioavailshadow[i])) {
1404 pnew = dd->ipath_pioavailshadow[i] & ~pchbusy;
1405 pnew |= piov & pchbusy;
1406 dd->ipath_pioavailshadow[i] = pnew;
1407 }
1408 }
1409 spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
1410}
1411
1412/*
1413 * Used to force an update of pioavailshadow if we can't get a PIO buffer.
1414 * Needed primarily when exiting freeze mode after recovering
1415 * from errors. Done lazily, because it's safer (we are known not to
1416 * be writing PIO buffers).
1417 */
1418static void ipath_reset_availshadow(struct ipath_devdata *dd)
1419{
1420 int i, im;
1421 unsigned long flags;
1422
1423 spin_lock_irqsave(&ipath_pioavail_lock, flags);
1424 for (i = 0; i < dd->ipath_pioavregs; i++) {
1425 u64 val, oldval;
1426 /* deal with 6110 chip bug on high register #s */
1427 im = (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS)) ?
1428 i ^ 1 : i;
1429 val = le64_to_cpu(dd->ipath_pioavailregs_dma[im]);
1430 /*
1431 * busy out the buffers not in the kernel avail list,
1432 * without changing the generation bits.
1433 */
1434 oldval = dd->ipath_pioavailshadow[i];
1435 dd->ipath_pioavailshadow[i] = val |
1436 ((~dd->ipath_pioavailkernel[i] <<
1437 INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT) &
1438 0xaaaaaaaaaaaaaaaaULL); /* All BUSY bits in qword */
1439 if (oldval != dd->ipath_pioavailshadow[i])
1440 ipath_dbg("shadow[%d] was %Lx, now %lx\n",
1441 i, (unsigned long long) oldval,
1442 dd->ipath_pioavailshadow[i]);
1443 }
1444 spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
1445}
1446
1447/**
1448 * ipath_setrcvhdrsize - set the receive header size
1449 * @dd: the infinipath device
1450 * @rhdrsize: the receive header size
1451 *
1452 * called from user init code, and also layered driver init
1453 */
1454int ipath_setrcvhdrsize(struct ipath_devdata *dd, unsigned rhdrsize)
1455{
1456 int ret = 0;
1457
1458 if (dd->ipath_flags & IPATH_RCVHDRSZ_SET) {
1459 if (dd->ipath_rcvhdrsize != rhdrsize) {
1460 dev_info(&dd->pcidev->dev,
1461 "Error: can't set protocol header "
1462 "size %u, already %u\n",
1463 rhdrsize, dd->ipath_rcvhdrsize);
1464 ret = -EAGAIN;
1465 } else
1466 ipath_cdbg(VERBOSE, "Reuse same protocol header "
1467 "size %u\n", dd->ipath_rcvhdrsize);
1468 } else if (rhdrsize > (dd->ipath_rcvhdrentsize -
1469 (sizeof(u64) / sizeof(u32)))) {
1470 ipath_dbg("Error: can't set protocol header size %u "
1471 "(> max %u)\n", rhdrsize,
1472 dd->ipath_rcvhdrentsize -
1473 (u32) (sizeof(u64) / sizeof(u32)));
1474 ret = -EOVERFLOW;
1475 } else {
1476 dd->ipath_flags |= IPATH_RCVHDRSZ_SET;
1477 dd->ipath_rcvhdrsize = rhdrsize;
1478 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrsize,
1479 dd->ipath_rcvhdrsize);
1480 ipath_cdbg(VERBOSE, "Set protocol header size to %u\n",
1481 dd->ipath_rcvhdrsize);
1482 }
1483 return ret;
1484}
1485
1486/*
1487 * Debugging code and stats updates when no PIO buffers are available.
1488 */
1489static noinline void no_pio_bufs(struct ipath_devdata *dd)
1490{
1491 unsigned long *shadow = dd->ipath_pioavailshadow;
1492 __le64 *dma = (__le64 *)dd->ipath_pioavailregs_dma;
1493
1494 dd->ipath_upd_pio_shadow = 1;
1495
1496 /*
1497 * not atomic, but if we lose a stat count in a while, that's OK
1498 */
1499 ipath_stats.sps_nopiobufs++;
1500 if (!(++dd->ipath_consec_nopiobuf % 100000)) {
1501 ipath_force_pio_avail_update(dd); /* at start */
1502 ipath_dbg("%u tries no piobufavail ts%lx; dmacopy: "
1503 "%llx %llx %llx %llx\n"
1504 "ipath shadow: %lx %lx %lx %lx\n",
1505 dd->ipath_consec_nopiobuf,
1506 (unsigned long)get_cycles(),
1507 (unsigned long long) le64_to_cpu(dma[0]),
1508 (unsigned long long) le64_to_cpu(dma[1]),
1509 (unsigned long long) le64_to_cpu(dma[2]),
1510 (unsigned long long) le64_to_cpu(dma[3]),
1511 shadow[0], shadow[1], shadow[2], shadow[3]);
1512 /*
1513 * 4 buffers per byte, 4 registers above, cover rest
1514 * below
1515 */
1516 if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) >
1517 (sizeof(shadow[0]) * 4 * 4))
1518 ipath_dbg("2nd group: dmacopy: "
1519 "%llx %llx %llx %llx\n"
1520 "ipath shadow: %lx %lx %lx %lx\n",
1521 (unsigned long long)le64_to_cpu(dma[4]),
1522 (unsigned long long)le64_to_cpu(dma[5]),
1523 (unsigned long long)le64_to_cpu(dma[6]),
1524 (unsigned long long)le64_to_cpu(dma[7]),
1525 shadow[4], shadow[5], shadow[6], shadow[7]);
1526
1527 /* at end, so update likely happened */
1528 ipath_reset_availshadow(dd);
1529 }
1530}
1531
1532/*
1533 * Common code for normal driver PIO buffer allocation and reserved
1534 * allocation.
1535 *
1536 * Does the appropriate marking as busy, etc.
1537 * Returns a pointer to the buffer (and sets *pbufnum) if one is found; NULL if none are available.
1538 */
1539static u32 __iomem *ipath_getpiobuf_range(struct ipath_devdata *dd,
1540 u32 *pbufnum, u32 first, u32 last, u32 firsti)
1541{
1542 int i, j, updated = 0;
1543 unsigned piobcnt;
1544 unsigned long flags;
1545 unsigned long *shadow = dd->ipath_pioavailshadow;
1546 u32 __iomem *buf;
1547
1548 piobcnt = last - first;
1549 if (dd->ipath_upd_pio_shadow) {
1550 /*
1551		 * Minor optimization: if we had no buffers on the last call,
1552		 * start out by doing the update; continue and do the scan even
1553		 * if no buffers were updated, to be paranoid.
1554 */
1555 ipath_update_pio_bufs(dd);
1556 updated++;
1557 i = first;
1558 } else
1559 i = firsti;
1560rescan:
1561 /*
1562	 * While test_and_set_bit() is atomic, the combination of that
1563	 * and the following change_bit() is not. See if this is the cause
1564	 * of the remaining armlaunch errors.
1565 */
1566 spin_lock_irqsave(&ipath_pioavail_lock, flags);
1567 for (j = 0; j < piobcnt; j++, i++) {
1568 if (i >= last)
1569 i = first;
1570 if (__test_and_set_bit((2 * i) + 1, shadow))
1571 continue;
1572 /* flip generation bit */
1573 __change_bit(2 * i, shadow);
1574 break;
1575 }
1576 spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
1577
1578 if (j == piobcnt) {
1579 if (!updated) {
1580 /*
1581 * first time through; shadow exhausted, but may be
1582 * buffers available, try an update and then rescan.
1583 */
1584 ipath_update_pio_bufs(dd);
1585 updated++;
1586 i = first;
1587 goto rescan;
1588 } else if (updated == 1 && piobcnt <=
1589 ((dd->ipath_sendctrl
1590 >> INFINIPATH_S_UPDTHRESH_SHIFT) &
1591 INFINIPATH_S_UPDTHRESH_MASK)) {
1592 /*
1593			 * For chips supporting and using the update
1594			 * threshold, we need to force an update of the
1595			 * in-memory copy if the count is less than the
1596			 * threshold, then check one more time.
1597 */
1598 ipath_force_pio_avail_update(dd);
1599 ipath_update_pio_bufs(dd);
1600 updated++;
1601 i = first;
1602 goto rescan;
1603 }
1604
1605 no_pio_bufs(dd);
1606 buf = NULL;
1607 } else {
1608 if (i < dd->ipath_piobcnt2k)
1609 buf = (u32 __iomem *) (dd->ipath_pio2kbase +
1610 i * dd->ipath_palign);
1611 else
1612 buf = (u32 __iomem *)
1613 (dd->ipath_pio4kbase +
1614 (i - dd->ipath_piobcnt2k) * dd->ipath_4kalign);
1615 if (pbufnum)
1616 *pbufnum = i;
1617 }
1618
1619 return buf;
1620}
1621
1622/**
1623 * ipath_getpiobuf - find an available pio buffer
1624 * @dd: the infinipath device
1625 * @plen: the size of the PIO buffer needed in 32-bit words
1626 * @pbufnum: the buffer number is placed here
1627 */
1628u32 __iomem *ipath_getpiobuf(struct ipath_devdata *dd, u32 plen, u32 *pbufnum)
1629{
1630 u32 __iomem *buf;
1631 u32 pnum, nbufs;
1632 u32 first, lasti;
1633
1634 if (plen + 1 >= IPATH_SMALLBUF_DWORDS) {
1635 first = dd->ipath_piobcnt2k;
1636 lasti = dd->ipath_lastpioindexl;
1637 } else {
1638 first = 0;
1639 lasti = dd->ipath_lastpioindex;
1640 }
1641 nbufs = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
1642 buf = ipath_getpiobuf_range(dd, &pnum, first, nbufs, lasti);
1643
1644 if (buf) {
1645 /*
1646 * Set next starting place. It's just an optimization,
1647 * it doesn't matter who wins on this, so no locking
1648 */
1649 if (plen + 1 >= IPATH_SMALLBUF_DWORDS)
1650 dd->ipath_lastpioindexl = pnum + 1;
1651 else
1652 dd->ipath_lastpioindex = pnum + 1;
1653 if (dd->ipath_upd_pio_shadow)
1654 dd->ipath_upd_pio_shadow = 0;
1655 if (dd->ipath_consec_nopiobuf)
1656 dd->ipath_consec_nopiobuf = 0;
1657 ipath_cdbg(VERBOSE, "Return piobuf%u %uk @ %p\n",
1658 pnum, (pnum < dd->ipath_piobcnt2k) ? 2 : 4, buf);
1659 if (pbufnum)
1660 *pbufnum = pnum;
1661
1662 }
1663 return buf;
1664}
1665
1666/**
1667 * ipath_chg_pioavailkernel - change which send buffers are available for kernel
1668 * @dd: the infinipath device
1669 * @start: the starting send buffer number
1670 * @len: the number of send buffers
1671 * @avail: true if the buffers are available for kernel use, false otherwise
1672 */
1673void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start,
1674 unsigned len, int avail)
1675{
1676 unsigned long flags;
1677 unsigned end, cnt = 0;
1678
1679 /* There are two bits per send buffer (busy and generation) */
1680 start *= 2;
1681 end = start + len * 2;
1682
1683 spin_lock_irqsave(&ipath_pioavail_lock, flags);
1684 /* Set or clear the busy bit in the shadow. */
1685 while (start < end) {
1686 if (avail) {
1687 unsigned long dma;
1688 int i, im;
1689 /*
1690			 * The BUSY bit will never be set, because we disarm
1691			 * the user buffers before we hand them back to the
1692			 * kernel. We do have to make sure the generation
1693			 * bit is set correctly in the shadow, since it could
1694			 * have changed many times while allocated to the user.
1695			 * We can't use the bitmap functions on the full
1696			 * dma array because it is always little-endian, so
1697			 * we have to flip to host order first.
1698			 * BITS_PER_LONG is slightly wrong here, since it's
1699			 * always 64 bits per register in the chip...
1700			 * We only work on 64-bit kernels, so that's OK.
1701 */
1702 /* deal with 6110 chip bug on high register #s */
1703 i = start / BITS_PER_LONG;
1704 im = (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS)) ?
1705 i ^ 1 : i;
1706 __clear_bit(INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT
1707 + start, dd->ipath_pioavailshadow);
1708 dma = (unsigned long) le64_to_cpu(
1709 dd->ipath_pioavailregs_dma[im]);
1710 if (test_bit((INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT
1711 + start) % BITS_PER_LONG, &dma))
1712 __set_bit(INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT
1713 + start, dd->ipath_pioavailshadow);
1714 else
1715 __clear_bit(INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT
1716 + start, dd->ipath_pioavailshadow);
1717 __set_bit(start, dd->ipath_pioavailkernel);
1718 } else {
1719 __set_bit(start + INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT,
1720 dd->ipath_pioavailshadow);
1721 __clear_bit(start, dd->ipath_pioavailkernel);
1722 }
1723 start += 2;
1724 }
1725
1726 if (dd->ipath_pioupd_thresh) {
1727 end = 2 * (dd->ipath_piobcnt2k + dd->ipath_piobcnt4k);
1728 cnt = bitmap_weight(dd->ipath_pioavailkernel, end);
1729 }
1730 spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
1731
1732 /*
1733	 * When moving buffers from kernel to user, if the number assigned to
1734	 * the user is less than the PIO update threshold, and the threshold
1735	 * is supported (cnt was computed > 0), drop the update threshold
1736	 * so we update at least once per that many allocated buffers.
1737	 * In any case, if the kernel buffers are fewer than the threshold,
1738	 * drop the threshold. We don't bother increasing it, having once
1739	 * decreased it, since it would typically just cycle back and forth.
1740	 * If we don't decrease below the buffers in use, we can wait a long
1741	 * time for an update, until some other context uses PIO buffers.
1742 */
1743 if (!avail && len < cnt)
1744 cnt = len;
1745 if (cnt < dd->ipath_pioupd_thresh) {
1746 dd->ipath_pioupd_thresh = cnt;
1747 ipath_dbg("Decreased pio update threshold to %u\n",
1748 dd->ipath_pioupd_thresh);
1749 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
1750 dd->ipath_sendctrl &= ~(INFINIPATH_S_UPDTHRESH_MASK
1751 << INFINIPATH_S_UPDTHRESH_SHIFT);
1752 dd->ipath_sendctrl |= dd->ipath_pioupd_thresh
1753 << INFINIPATH_S_UPDTHRESH_SHIFT;
1754 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1755 dd->ipath_sendctrl);
1756 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
1757 }
1758}
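/*
 * Illustrative sketch, not part of the driver proper.  The pioavail
 * shadow manipulated above uses two bits per send buffer: a busy bit
 * and a generation ("check") bit, at offsets
 * INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT and
 * INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT within each pair.  Assuming that
 * layout, these hypothetical helpers show how a buffer number maps to
 * bit positions in the shadow bitmap (cf. the "2 * i" arithmetic in
 * ipath_getpiobuf_range() and the "start *= 2" above).
 */
static inline unsigned sketch_pioavail_busy_bit(unsigned bufnum)
{
	return 2 * bufnum + INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT;
}

static inline unsigned sketch_pioavail_check_bit(unsigned bufnum)
{
	return 2 * bufnum + INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT;
}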
1759
1760/**
1761 * ipath_create_rcvhdrq - create a receive header queue
1762 * @dd: the infinipath device
1763 * @pd: the port data
1764 *
1765 * this must be contiguous memory (from an i/o perspective), and must be
1766 * DMA'able (which means for some systems, it will go through an IOMMU,
1767 * or be forced into a low address range).
1768 */
1769int ipath_create_rcvhdrq(struct ipath_devdata *dd,
1770 struct ipath_portdata *pd)
1771{
1772 int ret = 0;
1773
1774 if (!pd->port_rcvhdrq) {
1775 dma_addr_t phys_hdrqtail;
1776 gfp_t gfp_flags = GFP_USER | __GFP_COMP;
1777 int amt = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
1778 sizeof(u32), PAGE_SIZE);
1779
1780 pd->port_rcvhdrq = dma_alloc_coherent(
1781 &dd->pcidev->dev, amt, &pd->port_rcvhdrq_phys,
1782 gfp_flags);
1783
1784 if (!pd->port_rcvhdrq) {
1785 ipath_dev_err(dd, "attempt to allocate %d bytes "
1786 "for port %u rcvhdrq failed\n",
1787 amt, pd->port_port);
1788 ret = -ENOMEM;
1789 goto bail;
1790 }
1791
1792 if (!(dd->ipath_flags & IPATH_NODMA_RTAIL)) {
1793 pd->port_rcvhdrtail_kvaddr = dma_alloc_coherent(
1794 &dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
1795 GFP_KERNEL);
1796 if (!pd->port_rcvhdrtail_kvaddr) {
1797 ipath_dev_err(dd, "attempt to allocate 1 page "
1798 "for port %u rcvhdrqtailaddr "
1799 "failed\n", pd->port_port);
1800 ret = -ENOMEM;
1801 dma_free_coherent(&dd->pcidev->dev, amt,
1802 pd->port_rcvhdrq,
1803 pd->port_rcvhdrq_phys);
1804 pd->port_rcvhdrq = NULL;
1805 goto bail;
1806 }
1807 pd->port_rcvhdrqtailaddr_phys = phys_hdrqtail;
1808 ipath_cdbg(VERBOSE, "port %d hdrtailaddr, %llx "
1809 "physical\n", pd->port_port,
1810 (unsigned long long) phys_hdrqtail);
1811 }
1812
1813 pd->port_rcvhdrq_size = amt;
1814
1815 ipath_cdbg(VERBOSE, "%d pages at %p (phys %lx) size=%lu "
1816 "for port %u rcvhdr Q\n",
1817 amt >> PAGE_SHIFT, pd->port_rcvhdrq,
1818 (unsigned long) pd->port_rcvhdrq_phys,
1819 (unsigned long) pd->port_rcvhdrq_size,
1820 pd->port_port);
1821 } else {
1822 ipath_cdbg(VERBOSE, "reuse port %d rcvhdrq @%p %llx phys; "
1823 "hdrtailaddr@%p %llx physical\n",
1824 pd->port_port, pd->port_rcvhdrq,
1825 (unsigned long long) pd->port_rcvhdrq_phys,
1826 pd->port_rcvhdrtail_kvaddr, (unsigned long long)
1827 pd->port_rcvhdrqtailaddr_phys);
1828 }
1829 /* clear for security and sanity on each use */
1830 memset(pd->port_rcvhdrq, 0, pd->port_rcvhdrq_size);
1831 if (pd->port_rcvhdrtail_kvaddr)
1832 memset(pd->port_rcvhdrtail_kvaddr, 0, PAGE_SIZE);
1833
1834 /*
1835 * tell chip each time we init it, even if we are re-using previous
1836 * memory (we zero the register at process close)
1837 */
1838 ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdrtailaddr,
1839 pd->port_port, pd->port_rcvhdrqtailaddr_phys);
1840 ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr,
1841 pd->port_port, pd->port_rcvhdrq_phys);
1842
1843bail:
1844 return ret;
1845}
1846
1847
1848/*
1849 * Flush all sends that might be in the ready to send state, as well as any
1850 * that are in the process of being sent. Used whenever we need to be
1851 * sure the send side is idle. Cleans up all buffer state by canceling
1852 * all pio buffers, and issuing an abort, which cleans up anything in the
1853 * launch fifo. The cancel is superfluous on some chip versions, but
1854 * it's safer to always do it.
1855 * PIOAvail bits are updated by the chip as if normal send had happened.
1856 */
1857void ipath_cancel_sends(struct ipath_devdata *dd, int restore_sendctrl)
1858{
1859 unsigned long flags;
1860
1861 if (dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) {
1862 ipath_cdbg(VERBOSE, "Ignore while in autonegotiation\n");
1863 goto bail;
1864 }
1865 /*
1866 * If we have SDMA, and it's not disabled, we have to kick off the
1867 * abort state machine, provided we aren't already aborting.
1868 * If we are in the process of aborting SDMA (!DISABLED, but ABORTING),
1869 * we skip the rest of this routine. It is already "in progress"
1870 */
1871 if (dd->ipath_flags & IPATH_HAS_SEND_DMA) {
1872 int skip_cancel;
1873 unsigned long *statp = &dd->ipath_sdma_status;
1874
1875 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
1876 skip_cancel =
1877 test_and_set_bit(IPATH_SDMA_ABORTING, statp)
1878 && !test_bit(IPATH_SDMA_DISABLED, statp);
1879 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
1880 if (skip_cancel)
1881 goto bail;
1882 }
1883
1884 ipath_dbg("Cancelling all in-progress send buffers\n");
1885
1886 /* skip armlaunch errs for a while */
1887 dd->ipath_lastcancel = jiffies + HZ / 2;
1888
1889 /*
1890 * The abort bit is auto-clearing. We also don't want pioavail
1891 * update happening during this, and we don't want any other
1892 * sends going out, so turn those off for the duration. We read
1893	 * the scratch register to be sure that the cancels and the abort
1894	 * have taken effect in the chip. Otherwise the two parts are the
1895	 * same as ipath_force_pio_avail_update().
1896 */
1897 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
1898 dd->ipath_sendctrl &= ~(INFINIPATH_S_PIOBUFAVAILUPD
1899 | INFINIPATH_S_PIOENABLE);
1900 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1901 dd->ipath_sendctrl | INFINIPATH_S_ABORT);
1902 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
1903 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
1904
1905 /* disarm all send buffers */
1906 ipath_disarm_piobufs(dd, 0,
1907 dd->ipath_piobcnt2k + dd->ipath_piobcnt4k);
1908
1909 if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
1910 set_bit(IPATH_SDMA_DISARMED, &dd->ipath_sdma_status);
1911
1912 if (restore_sendctrl) {
1913 /* else done by caller later if needed */
1914 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
1915 dd->ipath_sendctrl |= INFINIPATH_S_PIOBUFAVAILUPD |
1916 INFINIPATH_S_PIOENABLE;
1917 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1918 dd->ipath_sendctrl);
1919 /* and again, be sure all have hit the chip */
1920 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
1921 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
1922 }
1923
1924 if ((dd->ipath_flags & IPATH_HAS_SEND_DMA) &&
1925 !test_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status) &&
1926 test_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status)) {
1927 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
1928 /* only wait so long for intr */
1929 dd->ipath_sdma_abort_intr_timeout = jiffies + HZ;
1930 dd->ipath_sdma_reset_wait = 200;
1931 if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
1932 tasklet_hi_schedule(&dd->ipath_sdma_abort_task);
1933 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
1934 }
1935bail:;
1936}
1937
1938/*
1939 * Force an update of in-memory copy of the pioavail registers, when
1940 * needed for any of a variety of reasons. We read the scratch register
1941 * to make it highly likely that the update will have happened by the
1942 * time we return. If already off (as in cancel_sends above), this
1943 * routine is a nop, on the assumption that the caller will "do the
1944 * right thing".
1945 */
1946void ipath_force_pio_avail_update(struct ipath_devdata *dd)
1947{
1948 unsigned long flags;
1949
1950 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
1951 if (dd->ipath_sendctrl & INFINIPATH_S_PIOBUFAVAILUPD) {
1952 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1953 dd->ipath_sendctrl & ~INFINIPATH_S_PIOBUFAVAILUPD);
1954 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
1955 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1956 dd->ipath_sendctrl);
1957 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
1958 }
1959 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
1960}
1961
1962static void ipath_set_ib_lstate(struct ipath_devdata *dd, int linkcmd,
1963 int linitcmd)
1964{
1965 u64 mod_wd;
1966 static const char *what[4] = {
1967 [0] = "NOP",
1968 [INFINIPATH_IBCC_LINKCMD_DOWN] = "DOWN",
1969 [INFINIPATH_IBCC_LINKCMD_ARMED] = "ARMED",
1970 [INFINIPATH_IBCC_LINKCMD_ACTIVE] = "ACTIVE"
1971 };
1972
1973 if (linitcmd == INFINIPATH_IBCC_LINKINITCMD_DISABLE) {
1974 /*
1975 * If we are told to disable, note that so link-recovery
1976 * code does not attempt to bring us back up.
1977 */
1978 preempt_disable();
1979 dd->ipath_flags |= IPATH_IB_LINK_DISABLED;
1980 preempt_enable();
1981 } else if (linitcmd) {
1982 /*
1983 * Any other linkinitcmd will lead to LINKDOWN and then
1984 * to INIT (if all is well), so clear flag to let
1985 * link-recovery code attempt to bring us back up.
1986 */
1987 preempt_disable();
1988 dd->ipath_flags &= ~IPATH_IB_LINK_DISABLED;
1989 preempt_enable();
1990 }
1991
1992 mod_wd = (linkcmd << dd->ibcc_lc_shift) |
1993 (linitcmd << INFINIPATH_IBCC_LINKINITCMD_SHIFT);
1994 ipath_cdbg(VERBOSE,
1995 "Moving unit %u to %s (initcmd=0x%x), current ltstate is %s\n",
1996 dd->ipath_unit, what[linkcmd], linitcmd,
1997 ipath_ibcstatus_str[ipath_ib_linktrstate(dd,
1998 ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus))]);
1999
2000 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
2001 dd->ipath_ibcctrl | mod_wd);
2002 /* read from chip so write is flushed */
2003 (void) ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
2004}
2005
2006int ipath_set_linkstate(struct ipath_devdata *dd, u8 newstate)
2007{
2008 u32 lstate;
2009 int ret;
2010
2011 switch (newstate) {
2012 case IPATH_IB_LINKDOWN_ONLY:
2013 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN, 0);
2014 /* don't wait */
2015 ret = 0;
2016 goto bail;
2017
2018 case IPATH_IB_LINKDOWN:
2019 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN,
2020 INFINIPATH_IBCC_LINKINITCMD_POLL);
2021 /* don't wait */
2022 ret = 0;
2023 goto bail;
2024
2025 case IPATH_IB_LINKDOWN_SLEEP:
2026 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN,
2027 INFINIPATH_IBCC_LINKINITCMD_SLEEP);
2028 /* don't wait */
2029 ret = 0;
2030 goto bail;
2031
2032 case IPATH_IB_LINKDOWN_DISABLE:
2033 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN,
2034 INFINIPATH_IBCC_LINKINITCMD_DISABLE);
2035 /* don't wait */
2036 ret = 0;
2037 goto bail;
2038
2039 case IPATH_IB_LINKARM:
2040 if (dd->ipath_flags & IPATH_LINKARMED) {
2041 ret = 0;
2042 goto bail;
2043 }
2044 if (!(dd->ipath_flags &
2045 (IPATH_LINKINIT | IPATH_LINKACTIVE))) {
2046 ret = -EINVAL;
2047 goto bail;
2048 }
2049 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED, 0);
2050
2051 /*
2052 * Since the port can transition to ACTIVE by receiving
2053 * a non VL 15 packet, wait for either state.
2054 */
2055 lstate = IPATH_LINKARMED | IPATH_LINKACTIVE;
2056 break;
2057
2058 case IPATH_IB_LINKACTIVE:
2059 if (dd->ipath_flags & IPATH_LINKACTIVE) {
2060 ret = 0;
2061 goto bail;
2062 }
2063 if (!(dd->ipath_flags & IPATH_LINKARMED)) {
2064 ret = -EINVAL;
2065 goto bail;
2066 }
2067 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE, 0);
2068 lstate = IPATH_LINKACTIVE;
2069 break;
2070
2071 case IPATH_IB_LINK_LOOPBACK:
2072 dev_info(&dd->pcidev->dev, "Enabling IB local loopback\n");
2073 dd->ipath_ibcctrl |= INFINIPATH_IBCC_LOOPBACK;
2074 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
2075 dd->ipath_ibcctrl);
2076
2077 /* turn heartbeat off, as it causes loopback to fail */
2078 dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
2079 IPATH_IB_HRTBT_OFF);
2080 /* don't wait */
2081 ret = 0;
2082 goto bail;
2083
2084 case IPATH_IB_LINK_EXTERNAL:
2085 dev_info(&dd->pcidev->dev,
2086 "Disabling IB local loopback (normal)\n");
2087 dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
2088 IPATH_IB_HRTBT_ON);
2089 dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LOOPBACK;
2090 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
2091 dd->ipath_ibcctrl);
2092 /* don't wait */
2093 ret = 0;
2094 goto bail;
2095
2096 /*
2097	 * Heartbeat can be explicitly enabled by the user via the
2098	 * "hrtbt_enable" file, and if disabled there, trying to enable it here
2099	 * will have no effect. Implicit changes (heartbeat off when
2100 * loopback on, and vice versa) are included to ease testing.
2101 */
2102 case IPATH_IB_LINK_HRTBT:
2103 ret = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
2104 IPATH_IB_HRTBT_ON);
2105 goto bail;
2106
2107 case IPATH_IB_LINK_NO_HRTBT:
2108 ret = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
2109 IPATH_IB_HRTBT_OFF);
2110 goto bail;
2111
2112 default:
2113 ipath_dbg("Invalid linkstate 0x%x requested\n", newstate);
2114 ret = -EINVAL;
2115 goto bail;
2116 }
2117 ret = ipath_wait_linkstate(dd, lstate, 2000);
2118
2119bail:
2120 return ret;
2121}
2122
2123/**
2124 * ipath_set_mtu - set the MTU
2125 * @dd: the infinipath device
2126 * @arg: the new MTU
2127 *
2128 * We can handle "any" incoming size; the issue here is whether we
2129 * need to restrict our outgoing size. For now, we don't do any
2130 * sanity checking on this, and we don't deal with what happens to
2131 * programs that are already running when the size changes.
2132 * NOTE: changing the MTU will usually cause the IBC to go back to
2133 * link INIT state...
2134 */
2135int ipath_set_mtu(struct ipath_devdata *dd, u16 arg)
2136{
2137 u32 piosize;
2138 int changed = 0;
2139 int ret;
2140
2141 /*
2142	 * The MTU is the maximum IB data payload. It's the largest power of 2 less
2143	 * than piosize (or can even be larger, since it only really controls the
2144	 * largest size we can receive; we can send the max of the MTU and
2145	 * piosize). We check that it's one of the valid IB sizes.
2146 */
2147 if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
2148 (arg != 4096 || !ipath_mtu4096)) {
2149 ipath_dbg("Trying to set invalid mtu %u, failing\n", arg);
2150 ret = -EINVAL;
2151 goto bail;
2152 }
2153 if (dd->ipath_ibmtu == arg) {
2154 ret = 0; /* same as current */
2155 goto bail;
2156 }
2157
2158 piosize = dd->ipath_ibmaxlen;
2159 dd->ipath_ibmtu = arg;
2160
2161 if (arg >= (piosize - IPATH_PIO_MAXIBHDR)) {
2162 /* Only if it's not the initial value (or reset to it) */
2163 if (piosize != dd->ipath_init_ibmaxlen) {
2164 if (arg > piosize && arg <= dd->ipath_init_ibmaxlen)
2165 piosize = dd->ipath_init_ibmaxlen;
2166 dd->ipath_ibmaxlen = piosize;
2167 changed = 1;
2168 }
2169 } else if ((arg + IPATH_PIO_MAXIBHDR) != dd->ipath_ibmaxlen) {
2170 piosize = arg + IPATH_PIO_MAXIBHDR;
2171 ipath_cdbg(VERBOSE, "ibmaxlen was 0x%x, setting to 0x%x "
2172 "(mtu 0x%x)\n", dd->ipath_ibmaxlen, piosize,
2173 arg);
2174 dd->ipath_ibmaxlen = piosize;
2175 changed = 1;
2176 }
2177
2178 if (changed) {
2179 u64 ibc = dd->ipath_ibcctrl, ibdw;
2180 /*
2181 * update our housekeeping variables, and set IBC max
2182 * size, same as init code; max IBC is max we allow in
2183 * buffer, less the qword pbc, plus 1 for ICRC, in dwords
2184 */
2185 dd->ipath_ibmaxlen = piosize - 2 * sizeof(u32);
2186 ibdw = (dd->ipath_ibmaxlen >> 2) + 1;
2187 ibc &= ~(INFINIPATH_IBCC_MAXPKTLEN_MASK <<
2188 dd->ibcc_mpl_shift);
2189 ibc |= ibdw << dd->ibcc_mpl_shift;
2190 dd->ipath_ibcctrl = ibc;
2191 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
2192 dd->ipath_ibcctrl);
2193 dd->ipath_f_tidtemplate(dd);
2194 }
2195
2196 ret = 0;
2197
2198bail:
2199 return ret;
2200}
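/*
 * Minimal sketch, not the driver's actual code.  The validity test at
 * the top of ipath_set_mtu() accepts only the discrete IB MTU values,
 * with 4096 allowed only when the ipath_mtu4096 flag is set.  The same
 * check written as a hypothetical helper:
 */
static inline int sketch_valid_ib_mtu(unsigned mtu, int allow_4096)
{
	switch (mtu) {
	case 256:
	case 512:
	case 1024:
	case 2048:
		return 1;
	case 4096:
		return allow_4096;	/* only when 4 KB MTUs are enabled */
	default:
		return 0;
	}
}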
2201
2202int ipath_set_lid(struct ipath_devdata *dd, u32 lid, u8 lmc)
2203{
2204 dd->ipath_lid = lid;
2205 dd->ipath_lmc = lmc;
2206
2207 dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_LIDLMC, lid |
2208 (~((1U << lmc) - 1)) << 16);
2209
2210 dev_info(&dd->pcidev->dev, "We got a lid: 0x%x\n", lid);
2211
2212 return 0;
2213}
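/*
 * Worked example (added for clarity, not in the driver proper): the
 * value passed to IPATH_IB_CFG_LIDLMC above packs the LID into the low
 * 16 bits and a mask with the low 'lmc' bits cleared into the high 16
 * bits.  With lid = 0x10 and lmc = 2:
 *   (1U << 2) - 1           = 0x3
 *   ~0x3 (as u32)           = 0xfffffffc
 *   (~0x3) << 16 (as u32)   = 0xfffc0000
 *   lid | that              = 0xfffc0010
 * so the port matches any destination LID that equals the assigned LID
 * in the bits above the low lmc bits.
 */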
2214
2215
2216/**
2217 * ipath_write_kreg_port - write a device's per-port 64-bit kernel register
2218 * @dd: the infinipath device
2219 * @regno: the register number to write
2220 * @port: the port containing the register
2221 * @value: the value to write
2222 *
2223 * Registers that vary with the chip implementation constants (port)
2224 * use this routine.
2225 */
2226void ipath_write_kreg_port(const struct ipath_devdata *dd, ipath_kreg regno,
2227 unsigned port, u64 value)
2228{
2229 u16 where;
2230
2231 if (port < dd->ipath_portcnt &&
2232 (regno == dd->ipath_kregs->kr_rcvhdraddr ||
2233 regno == dd->ipath_kregs->kr_rcvhdrtailaddr))
2234 where = regno + port;
2235 else
2236 where = -1;
2237
2238 ipath_write_kreg(dd, where, value);
2239}
2240
2241/*
2242 * The following routines deal with the "obviously simple" task of overriding
2243 * the state of the LEDs, which normally indicate link physical and logical
2244 * status. The complications arise from dealing with different hardware
2245 * mappings, the board-dependent routine being called from interrupts,
2246 * and the requirement to _flash_ them.
2247 */
2248#define LED_OVER_FREQ_SHIFT 8
2249#define LED_OVER_FREQ_MASK (0xFF<<LED_OVER_FREQ_SHIFT)
2250/* Below is "non-zero" to force override, but both actual LEDs are off */
2251#define LED_OVER_BOTH_OFF (8)
2252
2253static void ipath_run_led_override(unsigned long opaque)
2254{
2255 struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
2256 int timeoff;
2257 int pidx;
2258 u64 lstate, ltstate, val;
2259
2260 if (!(dd->ipath_flags & IPATH_INITTED))
2261 return;
2262
2263 pidx = dd->ipath_led_override_phase++ & 1;
2264 dd->ipath_led_override = dd->ipath_led_override_vals[pidx];
2265 timeoff = dd->ipath_led_override_timeoff;
2266
2267 /*
2268	 * The call below potentially restores the LED values per the current
2269	 * status; it could also set up the traffic-blink register,
2270	 * but we leave that to the per-chip functions.
2271 */
2272 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
2273 ltstate = ipath_ib_linktrstate(dd, val);
2274 lstate = ipath_ib_linkstate(dd, val);
2275
2276 dd->ipath_f_setextled(dd, lstate, ltstate);
2277 mod_timer(&dd->ipath_led_override_timer, jiffies + timeoff);
2278}
2279
2280void ipath_set_led_override(struct ipath_devdata *dd, unsigned int val)
2281{
2282 int timeoff, freq;
2283
2284 if (!(dd->ipath_flags & IPATH_INITTED))
2285 return;
2286
2287	/* First check if we are blinking. If not, use 1 Hz polling. */
2288 timeoff = HZ;
2289 freq = (val & LED_OVER_FREQ_MASK) >> LED_OVER_FREQ_SHIFT;
2290
2291 if (freq) {
2292 /* For blink, set each phase from one nybble of val */
2293 dd->ipath_led_override_vals[0] = val & 0xF;
2294 dd->ipath_led_override_vals[1] = (val >> 4) & 0xF;
2295 timeoff = (HZ << 4)/freq;
2296 } else {
2297 /* Non-blink set both phases the same. */
2298 dd->ipath_led_override_vals[0] = val & 0xF;
2299 dd->ipath_led_override_vals[1] = val & 0xF;
2300 }
2301 dd->ipath_led_override_timeoff = timeoff;
2302
2303 /*
2304 * If the timer has not already been started, do so. Use a "quick"
2305 * timeout so the function will be called soon, to look at our request.
2306 */
2307 if (atomic_inc_return(&dd->ipath_led_override_timer_active) == 1) {
2308 /* Need to start timer */
2309 setup_timer(&dd->ipath_led_override_timer,
2310 ipath_run_led_override, (unsigned long)dd);
2311
2312 dd->ipath_led_override_timer.expires = jiffies + 1;
2313 add_timer(&dd->ipath_led_override_timer);
2314 } else
2315 atomic_dec(&dd->ipath_led_override_timer_active);
2316}
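/*
 * Worked example (added for clarity, not in the driver proper): the
 * override value is decoded above as bits 3:0 = LED state for phase 0,
 * bits 7:4 = LED state for phase 1, bits 15:8 = blink "frequency" f,
 * and each phase then lasts (HZ << 4) / f jiffies.  For val = 0x208f:
 * f = 0x20 = 32, phase 0 = 0xf, phase 1 = 0x8, per-phase timeout =
 * (HZ * 16) / 32 = HZ / 2.  With f = 0, both phases are the low nybble
 * and the timer simply polls once per second (timeoff = HZ).
 */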
2317
2318/**
2319 * ipath_shutdown_device - shut down a device
2320 * @dd: the infinipath device
2321 *
2322 * This is called to make the device quiet when we are about to
2323 * unload the driver, and also when the device is administratively
2324 * disabled. It does not free any data structures.
2325 * Everything it does has to be set up again by ipath_init_chip(dd, 1).
2326 */
2327void ipath_shutdown_device(struct ipath_devdata *dd)
2328{
2329 unsigned long flags;
2330
2331 ipath_dbg("Shutting down the device\n");
2332
2333 ipath_hol_up(dd); /* make sure user processes aren't suspended */
2334
2335 dd->ipath_flags |= IPATH_LINKUNK;
2336 dd->ipath_flags &= ~(IPATH_INITTED | IPATH_LINKDOWN |
2337 IPATH_LINKINIT | IPATH_LINKARMED |
2338 IPATH_LINKACTIVE);
2339 *dd->ipath_statusp &= ~(IPATH_STATUS_IB_CONF |
2340 IPATH_STATUS_IB_READY);
2341
2342 /* mask interrupts, but not errors */
2343 ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
2344
2345 dd->ipath_rcvctrl = 0;
2346 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
2347 dd->ipath_rcvctrl);
2348
2349 if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
2350 teardown_sdma(dd);
2351
2352 /*
2353	 * Gracefully stop all sends, allowing any in progress to trickle out
2354 * first.
2355 */
2356 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
2357 dd->ipath_sendctrl = 0;
2358 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
2359 /* flush it */
2360 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
2361 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
2362
2363 /*
2364	 * Wait long enough for anything that's going to trickle out to have
2365	 * actually done so.
2366 */
2367 udelay(5);
2368
2369 dd->ipath_f_setextled(dd, 0, 0); /* make sure LEDs are off */
2370
2371 ipath_set_ib_lstate(dd, 0, INFINIPATH_IBCC_LINKINITCMD_DISABLE);
2372 ipath_cancel_sends(dd, 0);
2373
2374 /*
2375	 * We are shutting down, so tell the components that care. We don't do
2376	 * this on just a link state change; much like ethernet, where a cable
2377	 * unplug, etc. doesn't change driver state.
2378 */
2379 signal_ib_event(dd, IB_EVENT_PORT_ERR);
2380
2381 /* disable IBC */
2382 dd->ipath_control &= ~INFINIPATH_C_LINKENABLE;
2383 ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
2384 dd->ipath_control | INFINIPATH_C_FREEZEMODE);
2385
2386 /*
2387	 * Clear SerdesEnable and turn the LEDs off; do this here because
2388	 * we are unloading, so we can't count on interrupts to move things
2389	 * along. Turn the LEDs off explicitly for the same reason.
2390 */
2391 dd->ipath_f_quiet_serdes(dd);
2392
2393 /* stop all the timers that might still be running */
2394 del_timer_sync(&dd->ipath_hol_timer);
2395 if (dd->ipath_stats_timer_active) {
2396 del_timer_sync(&dd->ipath_stats_timer);
2397 dd->ipath_stats_timer_active = 0;
2398 }
2399 if (dd->ipath_intrchk_timer.data) {
2400 del_timer_sync(&dd->ipath_intrchk_timer);
2401 dd->ipath_intrchk_timer.data = 0;
2402 }
2403 if (atomic_read(&dd->ipath_led_override_timer_active)) {
2404 del_timer_sync(&dd->ipath_led_override_timer);
2405 atomic_set(&dd->ipath_led_override_timer_active, 0);
2406 }
2407
2408 /*
2409 * clear all interrupts and errors, so that the next time the driver
2410 * is loaded or device is enabled, we know that whatever is set
2411 * happened while we were unloaded
2412 */
2413 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
2414 ~0ULL & ~INFINIPATH_HWE_MEMBISTFAILED);
2415 ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, -1LL);
2416 ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, -1LL);
2417
2418 ipath_cdbg(VERBOSE, "Flush time and errors to EEPROM\n");
2419 ipath_update_eeprom_log(dd);
2420}
2421
2422/**
2423 * ipath_free_pddata - free a port's allocated data
2424 * @dd: the infinipath device
2425 * @pd: the portdata structure
2426 *
2427 * Free up any allocated data for a port.
2428 * This should not touch anything that would affect a simultaneous
2429 * re-allocation of port data, because it is called after ipath_mutex
2430 * is released (and can be called from reinit as well).
2431 * It should never change any chip state, or global driver state.
2432 * (The only exception to global state is freeing the port0 port0_skbs.)
2433 */
2434void ipath_free_pddata(struct ipath_devdata *dd, struct ipath_portdata *pd)
2435{
2436 if (!pd)
2437 return;
2438
2439 if (pd->port_rcvhdrq) {
2440 ipath_cdbg(VERBOSE, "free closed port %d rcvhdrq @ %p "
2441 "(size=%lu)\n", pd->port_port, pd->port_rcvhdrq,
2442 (unsigned long) pd->port_rcvhdrq_size);
2443 dma_free_coherent(&dd->pcidev->dev, pd->port_rcvhdrq_size,
2444 pd->port_rcvhdrq, pd->port_rcvhdrq_phys);
2445 pd->port_rcvhdrq = NULL;
2446 if (pd->port_rcvhdrtail_kvaddr) {
2447 dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
2448 pd->port_rcvhdrtail_kvaddr,
2449 pd->port_rcvhdrqtailaddr_phys);
2450 pd->port_rcvhdrtail_kvaddr = NULL;
2451 }
2452 }
2453 if (pd->port_port && pd->port_rcvegrbuf) {
2454 unsigned e;
2455
2456 for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) {
2457 void *base = pd->port_rcvegrbuf[e];
2458 size_t size = pd->port_rcvegrbuf_size;
2459
2460 ipath_cdbg(VERBOSE, "egrbuf free(%p, %lu), "
2461 "chunk %u/%u\n", base,
2462 (unsigned long) size,
2463 e, pd->port_rcvegrbuf_chunks);
2464 dma_free_coherent(&dd->pcidev->dev, size,
2465 base, pd->port_rcvegrbuf_phys[e]);
2466 }
2467 kfree(pd->port_rcvegrbuf);
2468 pd->port_rcvegrbuf = NULL;
2469 kfree(pd->port_rcvegrbuf_phys);
2470 pd->port_rcvegrbuf_phys = NULL;
2471 pd->port_rcvegrbuf_chunks = 0;
2472 } else if (pd->port_port == 0 && dd->ipath_port0_skbinfo) {
2473 unsigned e;
2474 struct ipath_skbinfo *skbinfo = dd->ipath_port0_skbinfo;
2475
2476 dd->ipath_port0_skbinfo = NULL;
2477 ipath_cdbg(VERBOSE, "free closed port %d "
2478 "ipath_port0_skbinfo @ %p\n", pd->port_port,
2479 skbinfo);
2480 for (e = 0; e < dd->ipath_p0_rcvegrcnt; e++)
2481 if (skbinfo[e].skb) {
2482 pci_unmap_single(dd->pcidev, skbinfo[e].phys,
2483 dd->ipath_ibmaxlen,
2484 PCI_DMA_FROMDEVICE);
2485 dev_kfree_skb(skbinfo[e].skb);
2486 }
2487 vfree(skbinfo);
2488 }
2489 kfree(pd->port_tid_pg_list);
2490 vfree(pd->subport_uregbase);
2491 vfree(pd->subport_rcvegrbuf);
2492 vfree(pd->subport_rcvhdr_base);
2493 kfree(pd);
2494}
2495
2496static int __init infinipath_init(void)
2497{
2498 int ret;
2499
2500 if (ipath_debug & __IPATH_DBG)
2501 printk(KERN_INFO DRIVER_LOAD_MSG "%s", ib_ipath_version);
2502
2503 /*
2504 * These must be called before the driver is registered with
2505 * the PCI subsystem.
2506 */
2507 idr_init(&unit_table);
2508
2509 ret = pci_register_driver(&ipath_driver);
2510 if (ret < 0) {
2511 printk(KERN_ERR IPATH_DRV_NAME
2512 ": Unable to register driver: error %d\n", -ret);
2513 goto bail_unit;
2514 }
2515
2516 ret = ipath_init_ipathfs();
2517 if (ret < 0) {
2518 printk(KERN_ERR IPATH_DRV_NAME ": Unable to create "
2519 "ipathfs: error %d\n", -ret);
2520 goto bail_pci;
2521 }
2522
2523 goto bail;
2524
2525bail_pci:
2526 pci_unregister_driver(&ipath_driver);
2527
2528bail_unit:
2529 idr_destroy(&unit_table);
2530
2531bail:
2532 return ret;
2533}
2534
2535static void __exit infinipath_cleanup(void)
2536{
2537 ipath_exit_ipathfs();
2538
2539 ipath_cdbg(VERBOSE, "Unregistering pci driver\n");
2540 pci_unregister_driver(&ipath_driver);
2541
2542 idr_destroy(&unit_table);
2543}
2544
2545/**
2546 * ipath_reset_device - reset the chip if possible
2547 * @unit: the device to reset
2548 *
2549 * Whether or not reset is successful, we attempt to re-initialize the chip
2550 * (that is, much like a driver unload/reload). We clear the INITTED flag
2551 * so that the various entry points will fail until we reinitialize. For
2552 * now, we only allow this if no user ports are open that use chip resources
2553 */
2554int ipath_reset_device(int unit)
2555{
2556 int ret, i;
2557 struct ipath_devdata *dd = ipath_lookup(unit);
2558 unsigned long flags;
2559
2560 if (!dd) {
2561 ret = -ENODEV;
2562 goto bail;
2563 }
2564
2565 if (atomic_read(&dd->ipath_led_override_timer_active)) {
2566 /* Need to stop LED timer, _then_ shut off LEDs */
2567 del_timer_sync(&dd->ipath_led_override_timer);
2568 atomic_set(&dd->ipath_led_override_timer_active, 0);
2569 }
2570
2571 /* Shut off LEDs after we are sure timer is not running */
2572 dd->ipath_led_override = LED_OVER_BOTH_OFF;
2573 dd->ipath_f_setextled(dd, 0, 0);
2574
2575 dev_info(&dd->pcidev->dev, "Reset on unit %u requested\n", unit);
2576
2577 if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT)) {
2578 dev_info(&dd->pcidev->dev, "Invalid unit number %u or "
2579 "not initialized or not present\n", unit);
2580 ret = -ENXIO;
2581 goto bail;
2582 }
2583
2584 spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
2585 if (dd->ipath_pd)
2586 for (i = 1; i < dd->ipath_cfgports; i++) {
2587 if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt)
2588 continue;
2589 spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
2590 ipath_dbg("unit %u port %d is in use "
2591 "(PID %u cmd %s), can't reset\n",
2592 unit, i,
2593 pid_nr(dd->ipath_pd[i]->port_pid),
2594 dd->ipath_pd[i]->port_comm);
2595 ret = -EBUSY;
2596 goto bail;
2597 }
2598 spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
2599
2600 if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
2601 teardown_sdma(dd);
2602
2603 dd->ipath_flags &= ~IPATH_INITTED;
2604 ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
2605 ret = dd->ipath_f_reset(dd);
2606 if (ret == 1) {
2607 ipath_dbg("Reinitializing unit %u after reset attempt\n",
2608 unit);
2609 ret = ipath_init_chip(dd, 1);
2610 } else
2611 ret = -EAGAIN;
2612 if (ret)
2613 ipath_dev_err(dd, "Reinitialize unit %u after "
2614 "reset failed with %d\n", unit, ret);
2615 else
2616 dev_info(&dd->pcidev->dev, "Reinitialized unit %u after "
2617 "resetting\n", unit);
2618
2619bail:
2620 return ret;
2621}
2622
2623/*
2624 * send a signal to all the processes that have the driver open
2625 * through the normal interfaces (i.e., everything other than the diags
2626 * interface). Returns the number of signalled processes.
2627 */
2628static int ipath_signal_procs(struct ipath_devdata *dd, int sig)
2629{
2630 int i, sub, any = 0;
2631 struct pid *pid;
2632 unsigned long flags;
2633
2634 if (!dd->ipath_pd)
2635 return 0;
2636
2637 spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
2638 for (i = 1; i < dd->ipath_cfgports; i++) {
2639 if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt)
2640 continue;
2641 pid = dd->ipath_pd[i]->port_pid;
2642 if (!pid)
2643 continue;
2644
2645 dev_info(&dd->pcidev->dev, "context %d in use "
2646 "(PID %u), sending signal %d\n",
2647 i, pid_nr(pid), sig);
2648 kill_pid(pid, sig, 1);
2649 any++;
2650 for (sub = 0; sub < INFINIPATH_MAX_SUBPORT; sub++) {
2651 pid = dd->ipath_pd[i]->port_subpid[sub];
2652 if (!pid)
2653 continue;
2654 dev_info(&dd->pcidev->dev, "sub-context "
2655 "%d:%d in use (PID %u), sending "
2656 "signal %d\n", i, sub, pid_nr(pid), sig);
2657 kill_pid(pid, sig, 1);
2658 any++;
2659 }
2660 }
2661 spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
2662 return any;
2663}
2664
2665static void ipath_hol_signal_down(struct ipath_devdata *dd)
2666{
2667 if (ipath_signal_procs(dd, SIGSTOP))
2668 ipath_dbg("Stopped some processes\n");
2669 ipath_cancel_sends(dd, 1);
2670}
2671
2672
2673static void ipath_hol_signal_up(struct ipath_devdata *dd)
2674{
2675 if (ipath_signal_procs(dd, SIGCONT))
2676 ipath_dbg("Continued some processes\n");
2677}
2678
2679/*
2680 * The link is down: stop any user processes, and flush pending sends
2681 * to prevent HoL blocking, then start the HoL timer, which
2682 * periodically continues and then re-stops the processes, so they can
2683 * detect link down if they want, and do something about it.
2684 * Timer may already be running, so use mod_timer, not add_timer.
2685 */
2686void ipath_hol_down(struct ipath_devdata *dd)
2687{
2688 dd->ipath_hol_state = IPATH_HOL_DOWN;
2689 ipath_hol_signal_down(dd);
2690 dd->ipath_hol_next = IPATH_HOL_DOWNCONT;
2691 dd->ipath_hol_timer.expires = jiffies +
2692 msecs_to_jiffies(ipath_hol_timeout_ms);
2693 mod_timer(&dd->ipath_hol_timer, dd->ipath_hol_timer.expires);
2694}
2695
2696/*
2697 * The link is up: continue any user processes, and ensure the timer
2698 * is a nop if it is running. Let the timer keep running, if set; it
2699 * will be a nop when it sees the link is up.
2700 */
2701void ipath_hol_up(struct ipath_devdata *dd)
2702{
2703 ipath_hol_signal_up(dd);
2704 dd->ipath_hol_state = IPATH_HOL_UP;
2705}
2706
2707/*
2708 * Toggle the running/not-running state of user processes
2709 * to prevent HoL blocking on chip resources, but still allow
2710 * user processes to do link-down special-case handling.
2711 * Should only be called via the timer.
2712 */
2713void ipath_hol_event(unsigned long opaque)
2714{
2715 struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
2716
2717 if (dd->ipath_hol_next == IPATH_HOL_DOWNSTOP
2718 && dd->ipath_hol_state != IPATH_HOL_UP) {
2719 dd->ipath_hol_next = IPATH_HOL_DOWNCONT;
2720 ipath_dbg("Stopping processes\n");
2721 ipath_hol_signal_down(dd);
2722 } else { /* may do "extra" if also in ipath_hol_up() */
2723 dd->ipath_hol_next = IPATH_HOL_DOWNSTOP;
2724 ipath_dbg("Continuing processes\n");
2725 ipath_hol_signal_up(dd);
2726 }
2727 if (dd->ipath_hol_state == IPATH_HOL_UP)
2728 ipath_dbg("link's up, don't resched timer\n");
2729 else {
2730 dd->ipath_hol_timer.expires = jiffies +
2731 msecs_to_jiffies(ipath_hol_timeout_ms);
2732 mod_timer(&dd->ipath_hol_timer,
2733 dd->ipath_hol_timer.expires);
2734 }
2735}
2736
2737int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv)
2738{
2739 u64 val;
2740
2741 if (new_pol_inv > INFINIPATH_XGXS_RX_POL_MASK)
2742 return -1;
2743 if (dd->ipath_rx_pol_inv != new_pol_inv) {
2744 dd->ipath_rx_pol_inv = new_pol_inv;
2745 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
2746 val &= ~(INFINIPATH_XGXS_RX_POL_MASK <<
2747 INFINIPATH_XGXS_RX_POL_SHIFT);
2748 val |= ((u64)dd->ipath_rx_pol_inv) <<
2749 INFINIPATH_XGXS_RX_POL_SHIFT;
2750 ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
2751 }
2752 return 0;
2753}
2754
2755/*
2756 * Disable and enable the armlaunch error. Used for PIO bandwidth testing on
2757 * the 7220, which is count-based, rather than trigger-based. Safe for the
2758 * driver check, since it's at init. Not completely safe when used for
2759 * user-mode checking, since some error checking can be lost, but not
2760 * particularly risky, and only has problematic side-effects in the face of
2761 * very buggy user code. There is no reference counting, but that's also
2762 * fine, given the intended use.
2763 */
2764void ipath_enable_armlaunch(struct ipath_devdata *dd)
2765{
2766 dd->ipath_lasterror &= ~INFINIPATH_E_SPIOARMLAUNCH;
2767 ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear,
2768 INFINIPATH_E_SPIOARMLAUNCH);
2769 dd->ipath_errormask |= INFINIPATH_E_SPIOARMLAUNCH;
2770 ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
2771 dd->ipath_errormask);
2772}
2773
2774void ipath_disable_armlaunch(struct ipath_devdata *dd)
2775{
2776 /* so don't re-enable if already set */
2777 dd->ipath_maskederrs &= ~INFINIPATH_E_SPIOARMLAUNCH;
2778 dd->ipath_errormask &= ~INFINIPATH_E_SPIOARMLAUNCH;
2779 ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
2780 dd->ipath_errormask);
2781}
2782
2783module_init(infinipath_init);
2784module_exit(infinipath_cleanup);
diff --git a/drivers/staging/rdma/ipath/ipath_eeprom.c b/drivers/staging/rdma/ipath/ipath_eeprom.c
deleted file mode 100644
index ef84107c7ce0..000000000000
--- a/drivers/staging/rdma/ipath/ipath_eeprom.c
+++ /dev/null
@@ -1,1183 +0,0 @@
1/*
2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/delay.h>
35#include <linux/pci.h>
36#include <linux/vmalloc.h>
37
38#include "ipath_kernel.h"
39
40/*
41 * InfiniPath I2C driver for a serial eeprom. This is not a generic
42 * I2C interface. For a start, the device we're using (Atmel AT24C11)
43 * doesn't work like a regular I2C device. It looks like one
44 * electrically, but not logically. Normal I2C devices have a single
45 * 7-bit or 10-bit I2C address that they respond to. Valid 7-bit
46 * addresses range from 0x03 to 0x77. Addresses 0x00 to 0x02 and 0x78
47 * to 0x7F are special reserved addresses (e.g. 0x00 is the "general
48 * call" address.) The Atmel device, on the other hand, responds to ALL
49 * 7-bit addresses. It's designed to be the only device on a given I2C
50 * bus. A 7-bit address corresponds to the memory address within the
51 * Atmel device itself.
52 *
53 * Also, the timing requirements mean more than simple software
54 * bitbanging, with readbacks from the chip to ensure timing (a simple
55 * udelay is not enough).
56 *
57 * This all means that accessing the device is specialized enough
58 * that using the standard kernel I2C bitbanging interface would be
59 * impossible. For example, the core I2C eeprom driver expects to find
60 * a device at one or more of a limited set of addresses only. It doesn't
61 * allow writing to an eeprom. It also doesn't provide any means of
62 * accessing eeprom contents from within the kernel, only via sysfs.
63 */
64
65/* Added functionality for IBA7220-based cards */
66#define IPATH_EEPROM_DEV_V1 0xA0
67#define IPATH_EEPROM_DEV_V2 0xA2
68#define IPATH_TEMP_DEV 0x98
69#define IPATH_BAD_DEV (IPATH_EEPROM_DEV_V2+2)
70#define IPATH_NO_DEV (0xFF)
71
72/*
73 * The number of I2C chains is proliferating. The table below brings
74 * some order to the madness. The basic principle is that the
75 * table is scanned from the top, and a "probe" is made to the
76 * device probe_dev. If that succeeds, the chain is considered
77 * to be of that type, and dd->i2c_chain_type is set to the index+1
78 * of the entry.
79 * The +1 is so static initialization can mean "unknown, do probe."
80 */
81static struct i2c_chain_desc {
82 u8 probe_dev; /* If seen at probe, chain is this type */
83 u8 eeprom_dev; /* Dev addr (if any) for EEPROM */
84 u8 temp_dev; /* Dev Addr (if any) for Temp-sense */
85} i2c_chains[] = {
86 { IPATH_BAD_DEV, IPATH_NO_DEV, IPATH_NO_DEV }, /* pre-iba7220 bds */
87 { IPATH_EEPROM_DEV_V1, IPATH_EEPROM_DEV_V1, IPATH_TEMP_DEV}, /* V1 */
88 { IPATH_EEPROM_DEV_V2, IPATH_EEPROM_DEV_V2, IPATH_TEMP_DEV}, /* V2 */
89 { IPATH_NO_DEV }
90};
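/*
 * Minimal sketch of the scan described above, not the driver's actual
 * probe code.  The table is walked from the top; the first probe_dev
 * that answers determines the chain type, recorded as index + 1 so that
 * a static zero can mean "unknown, do probe".  The probe callback is a
 * hypothetical stand-in for whatever routine performs the real I2C
 * probe transaction (0 assumed to mean success).
 */
static int sketch_classify_i2c_chain(int (*probe)(u8 dev))
{
	int i;

	for (i = 0; i2c_chains[i].probe_dev != IPATH_NO_DEV; i++)
		if (probe(i2c_chains[i].probe_dev) == 0)
			return i + 1;	/* chain is of type i2c_chains[i] */

	return 0;			/* nothing answered: type still unknown */
}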
91
92enum i2c_type {
93 i2c_line_scl = 0,
94 i2c_line_sda
95};
96
97enum i2c_state {
98 i2c_line_low = 0,
99 i2c_line_high
100};
101
102#define READ_CMD 1
103#define WRITE_CMD 0
104
105/**
106 * i2c_gpio_set - set a GPIO line
107 * @dd: the infinipath device
108 * @line: the line to set
109 * @new_line_state: the state to set
110 *
111 * Returns 0 if the line was set to the new state successfully, non-zero
112 * on error.
113 */
114static int i2c_gpio_set(struct ipath_devdata *dd,
115 enum i2c_type line,
116 enum i2c_state new_line_state)
117{
118 u64 out_mask, dir_mask, *gpioval;
119 unsigned long flags = 0;
120
121 gpioval = &dd->ipath_gpio_out;
122
123 if (line == i2c_line_scl) {
124 dir_mask = dd->ipath_gpio_scl;
125 out_mask = (1UL << dd->ipath_gpio_scl_num);
126 } else {
127 dir_mask = dd->ipath_gpio_sda;
128 out_mask = (1UL << dd->ipath_gpio_sda_num);
129 }
130
131 spin_lock_irqsave(&dd->ipath_gpio_lock, flags);
132 if (new_line_state == i2c_line_high) {
133 /* tri-state the output rather than force high */
134 dd->ipath_extctrl &= ~dir_mask;
135 } else {
136 /* config line to be an output */
137 dd->ipath_extctrl |= dir_mask;
138 }
139 ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, dd->ipath_extctrl);
140
141 /* set output as well (no real verify) */
142 if (new_line_state == i2c_line_high)
143 *gpioval |= out_mask;
144 else
145 *gpioval &= ~out_mask;
146
147 ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_out, *gpioval);
148 spin_unlock_irqrestore(&dd->ipath_gpio_lock, flags);
149
150 return 0;
151}
152
153/**
154 * i2c_gpio_get - get a GPIO line state
155 * @dd: the infinipath device
156 * @line: the line to get
157 * @curr_statep: where to put the line state
158 *
159 * Returns 0 if the line state was read successfully, non-zero
160 * on error. *curr_statep is not set on error.
161 */
162static int i2c_gpio_get(struct ipath_devdata *dd,
163 enum i2c_type line,
164 enum i2c_state *curr_statep)
165{
166 u64 read_val, mask;
167 int ret;
168 unsigned long flags = 0;
169
170 /* check args */
171 if (curr_statep == NULL) {
172 ret = 1;
173 goto bail;
174 }
175
176 /* config line to be an input */
177 if (line == i2c_line_scl)
178 mask = dd->ipath_gpio_scl;
179 else
180 mask = dd->ipath_gpio_sda;
181
182 spin_lock_irqsave(&dd->ipath_gpio_lock, flags);
183 dd->ipath_extctrl &= ~mask;
184 ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, dd->ipath_extctrl);
185 /*
186 * Below is very unlikely to reflect true input state if Output
187 * Enable actually changed.
188 */
189 read_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extstatus);
190 spin_unlock_irqrestore(&dd->ipath_gpio_lock, flags);
191
192 if (read_val & mask)
193 *curr_statep = i2c_line_high;
194 else
195 *curr_statep = i2c_line_low;
196
197 ret = 0;
198
199bail:
200 return ret;
201}
202
203/**
204 * i2c_wait_for_writes - wait for a write
205 * @dd: the infinipath device
206 *
207 * We use this instead of udelay directly, so we can make sure
208 * that previous register writes have been flushed all the way
209 * to the chip. Since we are delaying anyway, the cost doesn't
210 * hurt, and it makes the bit twiddling more regular.
211 */
212static void i2c_wait_for_writes(struct ipath_devdata *dd)
213{
214 (void)ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
215 rmb();
216}
217
218static void scl_out(struct ipath_devdata *dd, u8 bit)
219{
220 udelay(1);
221 i2c_gpio_set(dd, i2c_line_scl, bit ? i2c_line_high : i2c_line_low);
222
223 i2c_wait_for_writes(dd);
224}
225
226static void sda_out(struct ipath_devdata *dd, u8 bit)
227{
228 i2c_gpio_set(dd, i2c_line_sda, bit ? i2c_line_high : i2c_line_low);
229
230 i2c_wait_for_writes(dd);
231}
232
233static u8 sda_in(struct ipath_devdata *dd, int wait)
234{
235 enum i2c_state bit;
236
237 if (i2c_gpio_get(dd, i2c_line_sda, &bit))
238 ipath_dbg("get bit failed!\n");
239
240 if (wait)
241 i2c_wait_for_writes(dd);
242
243 return bit == i2c_line_high ? 1U : 0;
244}
245
246/**
247 * i2c_ackrcv - see if ack following write is true
248 * @dd: the infinipath device
249 */
250static int i2c_ackrcv(struct ipath_devdata *dd)
251{
252 u8 ack_received;
253
254 /* AT ENTRY SCL = LOW */
255 /* change direction, ignore data */
256 ack_received = sda_in(dd, 1);
257 scl_out(dd, i2c_line_high);
258 ack_received = sda_in(dd, 1) == 0;
259 scl_out(dd, i2c_line_low);
260 return ack_received;
261}
262
263/**
264 * rd_byte - read a byte, leaving ACK, STOP, etc up to caller
265 * @dd: the infinipath device
266 *
267 * Returns byte shifted out of device
268 */
269static int rd_byte(struct ipath_devdata *dd)
270{
271 int bit_cntr, data;
272
273 data = 0;
274
275 for (bit_cntr = 7; bit_cntr >= 0; --bit_cntr) {
276 data <<= 1;
277 scl_out(dd, i2c_line_high);
278 data |= sda_in(dd, 0);
279 scl_out(dd, i2c_line_low);
280 }
281 return data;
282}
283
284/**
285 * wr_byte - write a byte, one bit at a time
286 * @dd: the infinipath device
287 * @data: the byte to write
288 *
289 * Returns 0 if we got the following ack, otherwise 1
290 */
291static int wr_byte(struct ipath_devdata *dd, u8 data)
292{
293 int bit_cntr;
294 u8 bit;
295
296 for (bit_cntr = 7; bit_cntr >= 0; bit_cntr--) {
297 bit = (data >> bit_cntr) & 1;
298 sda_out(dd, bit);
299 scl_out(dd, i2c_line_high);
300 scl_out(dd, i2c_line_low);
301 }
302 return (!i2c_ackrcv(dd)) ? 1 : 0;
303}
304
305static void send_ack(struct ipath_devdata *dd)
306{
307 sda_out(dd, i2c_line_low);
308 scl_out(dd, i2c_line_high);
309 scl_out(dd, i2c_line_low);
310 sda_out(dd, i2c_line_high);
311}
312
313/**
314 * i2c_startcmd - transmit the start condition, followed by address/cmd
315 * @dd: the infinipath device
316 * @offset_dir: direction byte
317 *
318 * (both clock/data high, clock high, data low while clock is high)
319 */
320static int i2c_startcmd(struct ipath_devdata *dd, u8 offset_dir)
321{
322 int res;
323
324 /* issue start sequence */
325 sda_out(dd, i2c_line_high);
326 scl_out(dd, i2c_line_high);
327 sda_out(dd, i2c_line_low);
328 scl_out(dd, i2c_line_low);
329
330 /* issue length and direction byte */
331 res = wr_byte(dd, offset_dir);
332
333 if (res)
334 ipath_cdbg(VERBOSE, "No ack to complete start\n");
335
336 return res;
337}
338
339/**
340 * stop_cmd - transmit the stop condition
341 * @dd: the infinipath device
342 *
343 * (both clock/data low, clock high, data high while clock is high)
344 */
345static void stop_cmd(struct ipath_devdata *dd)
346{
347 scl_out(dd, i2c_line_low);
348 sda_out(dd, i2c_line_low);
349 scl_out(dd, i2c_line_high);
350 sda_out(dd, i2c_line_high);
351 udelay(2);
352}
353
354/**
355 * eeprom_reset - reset I2C communication
356 * @dd: the infinipath device
357 */
358
359static int eeprom_reset(struct ipath_devdata *dd)
360{
361 int clock_cycles_left = 9;
362 u64 *gpioval = &dd->ipath_gpio_out;
363 int ret;
364 unsigned long flags;
365
366 spin_lock_irqsave(&dd->ipath_gpio_lock, flags);
367 /* Make sure shadows are consistent */
368 dd->ipath_extctrl = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extctrl);
369 *gpioval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_gpio_out);
370 spin_unlock_irqrestore(&dd->ipath_gpio_lock, flags);
371
372 ipath_cdbg(VERBOSE, "Resetting i2c eeprom; initial gpioout reg "
373 "is %llx\n", (unsigned long long) *gpioval);
374
375 /*
376 * This is to get the i2c into a known state, by first going low,
377 * then tristate sda (and then tristate scl as first thing
378 * in loop)
379 */
380 scl_out(dd, i2c_line_low);
381 sda_out(dd, i2c_line_high);
382
383 /* Clock up to 9 cycles looking for SDA hi, then issue START and STOP */
384 while (clock_cycles_left--) {
385 scl_out(dd, i2c_line_high);
386
387 /* SDA seen high, issue START by dropping it while SCL high */
388 if (sda_in(dd, 0)) {
389 sda_out(dd, i2c_line_low);
390 scl_out(dd, i2c_line_low);
391 /* ATMEL spec says must be followed by STOP. */
392 scl_out(dd, i2c_line_high);
393 sda_out(dd, i2c_line_high);
394 ret = 0;
395 goto bail;
396 }
397
398 scl_out(dd, i2c_line_low);
399 }
400
401 ret = 1;
402
403bail:
404 return ret;
405}
406
407/*
408 * Probe for I2C device at specified address. Returns 0 for "success"
409 * to match rest of this file.
410 * Leave bus in "reasonable" state for further commands.
411 */
412static int i2c_probe(struct ipath_devdata *dd, int devaddr)
413{
414 int ret;
415
416 ret = eeprom_reset(dd);
417 if (ret) {
418 ipath_dev_err(dd, "Failed reset probing device 0x%02X\n",
419 devaddr);
420 return ret;
421 }
422 /*
423 * Reset no longer leaves bus in start condition, so normal
424 * i2c_startcmd() will do.
425 */
426 ret = i2c_startcmd(dd, devaddr | READ_CMD);
427 if (ret)
428 ipath_cdbg(VERBOSE, "Failed startcmd for device 0x%02X\n",
429 devaddr);
430 else {
431 /*
432 * Device did respond. Complete a single-byte read, because some
433 * devices apparently cannot handle STOP immediately after they
434 * ACK the start-cmd.
435 */
436 int data;
437 data = rd_byte(dd);
438 stop_cmd(dd);
439 ipath_cdbg(VERBOSE, "Response from device 0x%02X\n", devaddr);
440 }
441 return ret;
442}
443
444/*
445 * Returns the "i2c type". This is a pointer to a struct that describes
446 * the I2C chain on this board. To minimize impact on struct ipath_devdata,
447 * the (small integer) index into the table is actually memoized, rather
448 * than the pointer.
449 * Memoization is because the type is determined on the first call per chip.
450 * An alternative would be to move type determination to early
451 * init code.
452 */
453static struct i2c_chain_desc *ipath_i2c_type(struct ipath_devdata *dd)
454{
455 int idx;
456
457 /* Get memoized index, from previous successful probes */
458 idx = dd->ipath_i2c_chain_type - 1;
459 if (idx >= 0 && idx < (ARRAY_SIZE(i2c_chains) - 1))
460 goto done;
461
462 idx = 0;
463 while (i2c_chains[idx].probe_dev != IPATH_NO_DEV) {
464 /* if probe succeeds, this is type */
465 if (!i2c_probe(dd, i2c_chains[idx].probe_dev))
466 break;
467 ++idx;
468 }
469
470 /*
471 * Old EEPROM (first entry) may require a reset after probe,
472 * rather than being able to "start" after "stop"
473 */
474 if (idx == 0)
475 eeprom_reset(dd);
476
477 if (i2c_chains[idx].probe_dev == IPATH_NO_DEV)
478 idx = -1;
479 else
480 dd->ipath_i2c_chain_type = idx + 1;
481done:
482 return (idx >= 0) ? i2c_chains + idx : NULL;
483}
484
485static int ipath_eeprom_internal_read(struct ipath_devdata *dd,
486 u8 eeprom_offset, void *buffer, int len)
487{
488 int ret;
489 struct i2c_chain_desc *icd;
490 u8 *bp = buffer;
491
492 ret = 1;
493 icd = ipath_i2c_type(dd);
494 if (!icd)
495 goto bail;
496
497 if (icd->eeprom_dev == IPATH_NO_DEV) {
498 /* legacy not-really-I2C */
499 ipath_cdbg(VERBOSE, "Start command only address\n");
500 eeprom_offset = (eeprom_offset << 1) | READ_CMD;
501 ret = i2c_startcmd(dd, eeprom_offset);
502 } else {
503 /* Actual I2C */
504 ipath_cdbg(VERBOSE, "Start command uses devaddr\n");
505 if (i2c_startcmd(dd, icd->eeprom_dev | WRITE_CMD)) {
506 ipath_dbg("Failed EEPROM startcmd\n");
507 stop_cmd(dd);
508 ret = 1;
509 goto bail;
510 }
511 ret = wr_byte(dd, eeprom_offset);
512 stop_cmd(dd);
513 if (ret) {
514 ipath_dev_err(dd, "Failed to write EEPROM address\n");
515 ret = 1;
516 goto bail;
517 }
518 ret = i2c_startcmd(dd, icd->eeprom_dev | READ_CMD);
519 }
520 if (ret) {
521 ipath_dbg("Failed startcmd for dev %02X\n", icd->eeprom_dev);
522 stop_cmd(dd);
523 ret = 1;
524 goto bail;
525 }
526
527 /*
528 * eeprom keeps clocking data out as long as we ack, automatically
529 * incrementing the address.
530 */
531 while (len-- > 0) {
532 /* get and store data */
533 *bp++ = rd_byte(dd);
534 /* send ack if not the last byte */
535 if (len)
536 send_ack(dd);
537 }
538
539 stop_cmd(dd);
540
541 ret = 0;
542
543bail:
544 return ret;
545}
546
547static int ipath_eeprom_internal_write(struct ipath_devdata *dd, u8 eeprom_offset,
548 const void *buffer, int len)
549{
550 int sub_len;
551 const u8 *bp = buffer;
552 int max_wait_time, i;
553 int ret;
554 struct i2c_chain_desc *icd;
555
556 ret = 1;
557 icd = ipath_i2c_type(dd);
558 if (!icd)
559 goto bail;
560
561 while (len > 0) {
562 if (icd->eeprom_dev == IPATH_NO_DEV) {
563 if (i2c_startcmd(dd,
564 (eeprom_offset << 1) | WRITE_CMD)) {
565 ipath_dbg("Failed to start cmd offset %u\n",
566 eeprom_offset);
567 goto failed_write;
568 }
569 } else {
570 /* Real I2C */
571 if (i2c_startcmd(dd, icd->eeprom_dev | WRITE_CMD)) {
572 ipath_dbg("Failed EEPROM startcmd\n");
573 goto failed_write;
574 }
575 ret = wr_byte(dd, eeprom_offset);
576 if (ret) {
577 ipath_dev_err(dd, "Failed to write EEPROM "
578 "address\n");
579 goto failed_write;
580 }
581 }
582
583 sub_len = min(len, 4);
584 eeprom_offset += sub_len;
585 len -= sub_len;
586
587 for (i = 0; i < sub_len; i++) {
588 if (wr_byte(dd, *bp++)) {
589 ipath_dbg("no ack after byte %u/%u (%u "
590 "total remain)\n", i, sub_len,
591 len + sub_len - i);
592 goto failed_write;
593 }
594 }
595
596 stop_cmd(dd);
597
598 /*
599 * wait for write complete by waiting for a successful
600 * read (the chip replies with a zero after the write
601 * cmd completes, and before it writes to the eeprom).
602 * The startcmd for the read will fail the ack until
603 * the writes have completed. We do this inline to avoid
604 * the debug prints that are in the real read routine
605 * if the startcmd fails.
606 * We also use the proper device address, so it doesn't matter
607 * whether we have a real eeprom_dev; the legacy part accepts any address.
608 */
609 max_wait_time = 100;
610 while (i2c_startcmd(dd, icd->eeprom_dev | READ_CMD)) {
611 stop_cmd(dd);
612 if (!--max_wait_time) {
613 ipath_dbg("Did not get successful read to "
614 "complete write\n");
615 goto failed_write;
616 }
617 }
618 /* now read (and ignore) the resulting byte */
619 rd_byte(dd);
620 stop_cmd(dd);
621 }
622
623 ret = 0;
624 goto bail;
625
626failed_write:
627 stop_cmd(dd);
628 ret = 1;
629
630bail:
631 return ret;
632}
633
634/**
635 * ipath_eeprom_read - receives bytes from the eeprom via I2C
636 * @dd: the infinipath device
637 * @eeprom_offset: address to read from
638 * @buffer: where to store result
639 * @len: number of bytes to receive
640 */
641int ipath_eeprom_read(struct ipath_devdata *dd, u8 eeprom_offset,
642 void *buff, int len)
643{
644 int ret;
645
646 ret = mutex_lock_interruptible(&dd->ipath_eep_lock);
647 if (!ret) {
648 ret = ipath_eeprom_internal_read(dd, eeprom_offset, buff, len);
649 mutex_unlock(&dd->ipath_eep_lock);
650 }
651
652 return ret;
653}
654
655/**
656 * ipath_eeprom_write - writes data to the eeprom via I2C
657 * @dd: the infinipath device
658 * @eeprom_offset: where to place data
659 * @buffer: data to write
660 * @len: number of bytes to write
661 */
662int ipath_eeprom_write(struct ipath_devdata *dd, u8 eeprom_offset,
663 const void *buff, int len)
664{
665 int ret;
666
667 ret = mutex_lock_interruptible(&dd->ipath_eep_lock);
668 if (!ret) {
669 ret = ipath_eeprom_internal_write(dd, eeprom_offset, buff, len);
670 mutex_unlock(&dd->ipath_eep_lock);
671 }
672
673 return ret;
674}
675
676static u8 flash_csum(struct ipath_flash *ifp, int adjust)
677{
678 u8 *ip = (u8 *) ifp;
679 u8 csum = 0, len;
680
681 /*
682 * Limit length checksummed to max length of actual data.
683 * Checksum of erased eeprom will still be bad, but we avoid
684 * reading past the end of the buffer we were passed.
685 */
686 len = ifp->if_length;
687 if (len > sizeof(struct ipath_flash))
688 len = sizeof(struct ipath_flash);
689 while (len--)
690 csum += *ip++;
691 csum -= ifp->if_csum;
692 csum = ~csum;
693 if (adjust)
694 ifp->if_csum = csum;
695
696 return csum;
697}
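A standalone sketch of the checksum scheme used by flash_csum(), over a toy layout rather than the real struct ipath_flash: the stored byte is the complement of the sum of all other bytes covered by if_length, so a verify pass after an adjust pass matches.

/*
 * Illustrative only (toy layout, not the real struct ipath_flash): the
 * checksum is the complement of the sum of all bytes except the stored
 * csum byte, so recomputing after an adjust pass matches what was stored.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct toy_flash {
	uint8_t if_length;
	uint8_t if_csum;
	uint8_t payload[6];
};

static uint8_t toy_csum(struct toy_flash *fp, int adjust)
{
	uint8_t *ip = (uint8_t *)fp;
	uint8_t csum = 0, len = fp->if_length;

	if (len > sizeof(*fp))
		len = sizeof(*fp);
	while (len--)
		csum += *ip++;
	csum -= fp->if_csum;	/* stored csum is excluded from the sum */
	csum = ~csum;
	if (adjust)
		fp->if_csum = csum;
	return csum;
}

int main(void)
{
	struct toy_flash f = { .if_length = sizeof(f) };

	memcpy(f.payload, "\x01\x02\x03\x04\x05\x06", 6);
	toy_csum(&f, 1);			/* write checksum */
	printf("stored 0x%02X, recomputed 0x%02X\n",
	       f.if_csum, toy_csum(&f, 0));	/* should match */
	return 0;
}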
698
699/**
700 * ipath_get_guid - get the GUID from the i2c device
701 * @dd: the infinipath device
702 *
703 * We have the capability to use the ipath_nguid field, and get
704 * the guid from the first chip's flash, to use for all of them.
705 */
706void ipath_get_eeprom_info(struct ipath_devdata *dd)
707{
708 void *buf;
709 struct ipath_flash *ifp;
710 __be64 guid;
711 int len, eep_stat;
712 u8 csum, *bguid;
713 int t = dd->ipath_unit;
714 struct ipath_devdata *dd0 = ipath_lookup(0);
715
716 if (t && dd0->ipath_nguid > 1 && t <= dd0->ipath_nguid) {
717 u8 oguid;
718 dd->ipath_guid = dd0->ipath_guid;
719 bguid = (u8 *) & dd->ipath_guid;
720
721 oguid = bguid[7];
722 bguid[7] += t;
723 if (oguid > bguid[7]) {
724 if (bguid[6] == 0xff) {
725 if (bguid[5] == 0xff) {
726 ipath_dev_err(
727 dd,
728 "Can't set %s GUID from "
729 "base, wraps to OUI!\n",
730 ipath_get_unit_name(t));
731 dd->ipath_guid = 0;
732 goto bail;
733 }
734 bguid[5]++;
735 }
736 bguid[6]++;
737 }
738 dd->ipath_nguid = 1;
739
740 ipath_dbg("nguid %u, so adding %u to device 0 guid, "
741 "for %llx\n",
742 dd0->ipath_nguid, t,
743 (unsigned long long) be64_to_cpu(dd->ipath_guid));
744 goto bail;
745 }
746
747 /*
748 * read full flash, not just currently used part, since it may have
749 * been written with a newer definition
750 * */
751 len = sizeof(struct ipath_flash);
752 buf = vmalloc(len);
753 if (!buf) {
754 ipath_dev_err(dd, "Couldn't allocate memory to read %u "
755 "bytes from eeprom for GUID\n", len);
756 goto bail;
757 }
758
759 mutex_lock(&dd->ipath_eep_lock);
760 eep_stat = ipath_eeprom_internal_read(dd, 0, buf, len);
761 mutex_unlock(&dd->ipath_eep_lock);
762
763 if (eep_stat) {
764 ipath_dev_err(dd, "Failed reading GUID from eeprom\n");
765 goto done;
766 }
767 ifp = (struct ipath_flash *)buf;
768
769 csum = flash_csum(ifp, 0);
770 if (csum != ifp->if_csum) {
771 dev_info(&dd->pcidev->dev, "Bad I2C flash checksum: "
772 "0x%x, not 0x%x\n", csum, ifp->if_csum);
773 goto done;
774 }
775 if (*(__be64 *) ifp->if_guid == cpu_to_be64(0) ||
776 *(__be64 *) ifp->if_guid == ~cpu_to_be64(0)) {
777 ipath_dev_err(dd, "Invalid GUID %llx from flash; "
778 "ignoring\n",
779 *(unsigned long long *) ifp->if_guid);
780 /* don't allow GUID if all 0 or all 1's */
781 goto done;
782 }
783
784 /* complain, but allow it */
785 if (*(u64 *) ifp->if_guid == 0x100007511000000ULL)
786 dev_info(&dd->pcidev->dev, "Warning, GUID %llx is "
787 "default, probably not correct!\n",
788 *(unsigned long long *) ifp->if_guid);
789
790 bguid = ifp->if_guid;
791 if (!bguid[0] && !bguid[1] && !bguid[2]) {
792 /* original incorrect GUID format in flash; fix in
793 * core copy, by shifting up 2 octets; don't need to
794 * change top octet, since both it and shifted are
795 * 0.. */
796 bguid[1] = bguid[3];
797 bguid[2] = bguid[4];
798 bguid[3] = bguid[4] = 0;
799 guid = *(__be64 *) ifp->if_guid;
800 ipath_cdbg(VERBOSE, "Old GUID format in flash, top 3 zero, "
801 "shifting 2 octets\n");
802 } else
803 guid = *(__be64 *) ifp->if_guid;
804 dd->ipath_guid = guid;
805 dd->ipath_nguid = ifp->if_numguid;
806 /*
807 * Things are slightly complicated by the desire to transparently
808 * support both the Pathscale 10-digit serial number and the QLogic
809 * 13-character version.
810 */
811 if ((ifp->if_fversion > 1) && ifp->if_sprefix[0]
812 && ((u8 *)ifp->if_sprefix)[0] != 0xFF) {
813 /* This board has a Serial-prefix, which is stored
814 * elsewhere for backward-compatibility.
815 */
816 char *snp = dd->ipath_serial;
817 memcpy(snp, ifp->if_sprefix, sizeof ifp->if_sprefix);
818 snp[sizeof ifp->if_sprefix] = '\0';
819 len = strlen(snp);
820 snp += len;
821 len = (sizeof dd->ipath_serial) - len;
822 if (len > sizeof ifp->if_serial) {
823 len = sizeof ifp->if_serial;
824 }
825 memcpy(snp, ifp->if_serial, len);
826 } else
827 memcpy(dd->ipath_serial, ifp->if_serial,
828 sizeof ifp->if_serial);
829 if (!strstr(ifp->if_comment, "Tested successfully"))
830 ipath_dev_err(dd, "Board SN %s did not pass functional "
831 "test: %s\n", dd->ipath_serial,
832 ifp->if_comment);
833
834 ipath_cdbg(VERBOSE, "Initted GUID to %llx from eeprom\n",
835 (unsigned long long) be64_to_cpu(dd->ipath_guid));
836
837 memcpy(&dd->ipath_eep_st_errs, &ifp->if_errcntp, IPATH_EEP_LOG_CNT);
838 /*
839 * Power-on (actually "active") hours are kept as little-endian value
840 * in EEPROM, but as seconds in a (possibly as small as 24-bit)
841 * atomic_t while running.
842 */
843 atomic_set(&dd->ipath_active_time, 0);
844 dd->ipath_eep_hrs = ifp->if_powerhour[0] | (ifp->if_powerhour[1] << 8);
845
846done:
847 vfree(buf);
848
849bail:;
850}
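A worked, standalone example of the per-unit GUID derivation performed above (GUID bytes and unit number are hypothetical): the unit index is added to the low octet, a carry ripples into octets 6 and 5, and wrapping into the OUI portion is refused.

/*
 * Illustrative only: the per-unit GUID derivation, on a plain 8-byte
 * big-endian GUID. Unit t is added to the low octet; a carry propagates
 * into octets 6 and 5, and wrapping further (into the OUI) is an error.
 */
#include <stdint.h>
#include <stdio.h>

static int derive_guid(uint8_t g[8], unsigned t)
{
	uint8_t old = g[7];

	g[7] += (uint8_t)t;
	if (old > g[7]) {		/* carry out of the low octet */
		if (g[6] == 0xff) {
			if (g[5] == 0xff)
				return -1;	/* would wrap into OUI */
			g[5]++;
		}
		g[6]++;
	}
	return 0;
}

int main(void)
{
	uint8_t guid[8] = { 0x00, 0x11, 0x75, 0x00, 0x00, 0x00, 0x00, 0xFE };

	if (!derive_guid(guid, 3))	/* hypothetical unit 3 */
		printf("low octets now %02X %02X %02X\n",
		       guid[5], guid[6], guid[7]);
	return 0;
}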
851
852/**
853 * ipath_update_eeprom_log - copy active-time and error counters to eeprom
854 * @dd: the infinipath device
855 *
856 * Although the time is kept as seconds in the ipath_devdata struct, it is
857 * rounded to hours for re-write, as we have only 16 bits in EEPROM.
858 * First-cut code reads whole (expected) struct ipath_flash, modifies,
859 * re-writes. Future direction: read/write only what we need, assuming
860 * that the EEPROM had to have been "good enough" for driver init, and
861 * if not, we aren't making it worse.
862 *
863 */
864
865int ipath_update_eeprom_log(struct ipath_devdata *dd)
866{
867 void *buf;
868 struct ipath_flash *ifp;
869 int len, hi_water;
870 uint32_t new_time, new_hrs;
871 u8 csum;
872 int ret, idx;
873 unsigned long flags;
874
875 /* first, check if we actually need to do anything. */
876 ret = 0;
877 for (idx = 0; idx < IPATH_EEP_LOG_CNT; ++idx) {
878 if (dd->ipath_eep_st_new_errs[idx]) {
879 ret = 1;
880 break;
881 }
882 }
883 new_time = atomic_read(&dd->ipath_active_time);
884
885 if (ret == 0 && new_time < 3600)
886 return 0;
887
888 /*
889 * The quick-check above determined that there is something worthy
890 * of logging, so get the current contents and take a more detailed look.
891 * read full flash, not just currently used part, since it may have
892 * been written with a newer definition
893 */
894 len = sizeof(struct ipath_flash);
895 buf = vmalloc(len);
896 ret = 1;
897 if (!buf) {
898 ipath_dev_err(dd, "Couldn't allocate memory to read %u "
899 "bytes from eeprom for logging\n", len);
900 goto bail;
901 }
902
903 /* Grab semaphore and read current EEPROM. If we get an
904 * error, let go, but if not, keep it until we finish write.
905 */
906 ret = mutex_lock_interruptible(&dd->ipath_eep_lock);
907 if (ret) {
908 ipath_dev_err(dd, "Unable to acquire EEPROM for logging\n");
909 goto free_bail;
910 }
911 ret = ipath_eeprom_internal_read(dd, 0, buf, len);
912 if (ret) {
913 mutex_unlock(&dd->ipath_eep_lock);
914		ipath_dev_err(dd, "Unable to read EEPROM for logging\n");
915 goto free_bail;
916 }
917 ifp = (struct ipath_flash *)buf;
918
919 csum = flash_csum(ifp, 0);
920 if (csum != ifp->if_csum) {
921 mutex_unlock(&dd->ipath_eep_lock);
922 ipath_dev_err(dd, "EEPROM cks err (0x%02X, S/B 0x%02X)\n",
923 csum, ifp->if_csum);
924 ret = 1;
925 goto free_bail;
926 }
927 hi_water = 0;
928 spin_lock_irqsave(&dd->ipath_eep_st_lock, flags);
929 for (idx = 0; idx < IPATH_EEP_LOG_CNT; ++idx) {
930 int new_val = dd->ipath_eep_st_new_errs[idx];
931 if (new_val) {
932 /*
933 * If we have seen any errors, add to EEPROM values
934 * We need to saturate at 0xFF (255) and we also
935 * would need to adjust the checksum if we were
936 * trying to minimize EEPROM traffic
937 * Note that we add to actual current count in EEPROM,
938 * in case it was altered while we were running.
939 */
940 new_val += ifp->if_errcntp[idx];
941 if (new_val > 0xFF)
942 new_val = 0xFF;
943 if (ifp->if_errcntp[idx] != new_val) {
944 ifp->if_errcntp[idx] = new_val;
945 hi_water = offsetof(struct ipath_flash,
946 if_errcntp) + idx;
947 }
948 /*
949 * update our shadow (used to minimize EEPROM
950 * traffic), to match what we are about to write.
951 */
952 dd->ipath_eep_st_errs[idx] = new_val;
953 dd->ipath_eep_st_new_errs[idx] = 0;
954 }
955 }
956 /*
957 * now update active-time. We would like to round to the nearest hour
958 * but unless atomic_t is guaranteed to be a proper signed int we cannot,
959 * because we need to account for what we "transfer" to EEPROM and
960 * if we log an hour at 31 minutes, then we would need to set
961 * active_time to -29 to accurately count the _next_ hour.
962 */
963 if (new_time >= 3600) {
964 new_hrs = new_time / 3600;
965 atomic_sub((new_hrs * 3600), &dd->ipath_active_time);
966 new_hrs += dd->ipath_eep_hrs;
967 if (new_hrs > 0xFFFF)
968 new_hrs = 0xFFFF;
969 dd->ipath_eep_hrs = new_hrs;
970 if ((new_hrs & 0xFF) != ifp->if_powerhour[0]) {
971 ifp->if_powerhour[0] = new_hrs & 0xFF;
972 hi_water = offsetof(struct ipath_flash, if_powerhour);
973 }
974 if ((new_hrs >> 8) != ifp->if_powerhour[1]) {
975 ifp->if_powerhour[1] = new_hrs >> 8;
976 hi_water = offsetof(struct ipath_flash, if_powerhour)
977 + 1;
978 }
979 }
980 /*
981 * There is a tiny possibility that we could somehow fail to write
982 * the EEPROM after updating our shadows, but problems from holding
983 * the spinlock too long are a much bigger issue.
984 */
985 spin_unlock_irqrestore(&dd->ipath_eep_st_lock, flags);
986 if (hi_water) {
987		/* we made some change to the data, update cksum and write */
988 csum = flash_csum(ifp, 1);
989 ret = ipath_eeprom_internal_write(dd, 0, buf, hi_water + 1);
990 }
991 mutex_unlock(&dd->ipath_eep_lock);
992 if (ret)
993 ipath_dev_err(dd, "Failed updating EEPROM\n");
994
995free_bail:
996 vfree(buf);
997bail:
998 return ret;
999
1000}
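A standalone sketch of the active-time bookkeeping above, with hypothetical values: whole hours move from the running seconds counter into the saturating 16-bit EEPROM count, and the leftover seconds stay behind so the next hour is still counted accurately.

/*
 * Illustrative only: the active-time bookkeeping, in plain C.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t active_seconds = 7260;		/* hypothetical: 2h 1min */
	uint32_t eep_hrs = 10;			/* hours already in EEPROM */

	if (active_seconds >= 3600) {
		uint32_t new_hrs = active_seconds / 3600;

		active_seconds -= new_hrs * 3600;	/* keep the remainder */
		new_hrs += eep_hrs;
		if (new_hrs > 0xFFFF)
			new_hrs = 0xFFFF;		/* 16-bit saturation */
		eep_hrs = new_hrs;
		printf("powerhour bytes: lo 0x%02X hi 0x%02X, %u s remain\n",
		       (unsigned)(eep_hrs & 0xFF), (unsigned)(eep_hrs >> 8),
		       (unsigned)active_seconds);
	}
	return 0;
}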
1001
1002/**
1003 * ipath_inc_eeprom_err - increment one of the four error counters
1004 * that are logged to EEPROM.
1005 * @dd: the infinipath device
1006 * @eidx: 0..3, the counter to increment
1007 * @incr: how much to add
1008 *
1009 * Each counter is 8-bits, and saturates at 255 (0xFF). They
1010 * are copied to the EEPROM (aka flash) whenever ipath_update_eeprom_log()
1011 * is called, but it can only be called in a context that allows sleep.
1012 * This function can be called even at interrupt level.
1013 */
1014
1015void ipath_inc_eeprom_err(struct ipath_devdata *dd, u32 eidx, u32 incr)
1016{
1017 uint new_val;
1018 unsigned long flags;
1019
1020 spin_lock_irqsave(&dd->ipath_eep_st_lock, flags);
1021 new_val = dd->ipath_eep_st_new_errs[eidx] + incr;
1022 if (new_val > 255)
1023 new_val = 255;
1024 dd->ipath_eep_st_new_errs[eidx] = new_val;
1025 spin_unlock_irqrestore(&dd->ipath_eep_st_lock, flags);
1026 return;
1027}
1028
1029static int ipath_tempsense_internal_read(struct ipath_devdata *dd, u8 regnum)
1030{
1031 int ret;
1032 struct i2c_chain_desc *icd;
1033
1034 ret = -ENOENT;
1035
1036 icd = ipath_i2c_type(dd);
1037 if (!icd)
1038 goto bail;
1039
1040 if (icd->temp_dev == IPATH_NO_DEV) {
1041 /* tempsense only exists on new, real-I2C boards */
1042 ret = -ENXIO;
1043 goto bail;
1044 }
1045
1046 if (i2c_startcmd(dd, icd->temp_dev | WRITE_CMD)) {
1047 ipath_dbg("Failed tempsense startcmd\n");
1048 stop_cmd(dd);
1049 ret = -ENXIO;
1050 goto bail;
1051 }
1052 ret = wr_byte(dd, regnum);
1053 stop_cmd(dd);
1054 if (ret) {
1055 ipath_dev_err(dd, "Failed tempsense WR command %02X\n",
1056 regnum);
1057 ret = -ENXIO;
1058 goto bail;
1059 }
1060 if (i2c_startcmd(dd, icd->temp_dev | READ_CMD)) {
1061 ipath_dbg("Failed tempsense RD startcmd\n");
1062 stop_cmd(dd);
1063 ret = -ENXIO;
1064 goto bail;
1065 }
1066 /*
1067 * We can only sensibly clock out one byte per command
1068 */
1069 ret = rd_byte(dd);
1070 stop_cmd(dd);
1071
1072bail:
1073 return ret;
1074}
1075
1076#define VALID_TS_RD_REG_MASK 0xBF
1077
1078/**
1079 * ipath_tempsense_read - read register of temp sensor via I2C
1080 * @dd: the infinipath device
1081 * @regnum: register to read from
1082 *
1083 * returns reg contents (0..255) or < 0 for error
1084 */
1085int ipath_tempsense_read(struct ipath_devdata *dd, u8 regnum)
1086{
1087 int ret;
1088
1089 if (regnum > 7)
1090 return -EINVAL;
1091
1092 /* return a bogus value for (the one) register we do not have */
1093 if (!((1 << regnum) & VALID_TS_RD_REG_MASK))
1094 return 0;
1095
1096 ret = mutex_lock_interruptible(&dd->ipath_eep_lock);
1097 if (!ret) {
1098 ret = ipath_tempsense_internal_read(dd, regnum);
1099 mutex_unlock(&dd->ipath_eep_lock);
1100 }
1101
1102 /*
1103 * There are three possibilities here:
1104 * ret is actual value (0..255)
1105 * ret is -ENXIO or -EINVAL from code in this file
1106 * ret is -EINTR from mutex_lock_interruptible.
1107 */
1108 return ret;
1109}
1110
1111static int ipath_tempsense_internal_write(struct ipath_devdata *dd,
1112 u8 regnum, u8 data)
1113{
1114 int ret = -ENOENT;
1115 struct i2c_chain_desc *icd;
1116
1117 icd = ipath_i2c_type(dd);
1118 if (!icd)
1119 goto bail;
1120
1121 if (icd->temp_dev == IPATH_NO_DEV) {
1122 /* tempsense only exists on new, real-I2C boards */
1123 ret = -ENXIO;
1124 goto bail;
1125 }
1126 if (i2c_startcmd(dd, icd->temp_dev | WRITE_CMD)) {
1127 ipath_dbg("Failed tempsense startcmd\n");
1128 stop_cmd(dd);
1129 ret = -ENXIO;
1130 goto bail;
1131 }
1132 ret = wr_byte(dd, regnum);
1133 if (ret) {
1134 stop_cmd(dd);
1135 ipath_dev_err(dd, "Failed to write tempsense command %02X\n",
1136 regnum);
1137 ret = -ENXIO;
1138 goto bail;
1139 }
1140 ret = wr_byte(dd, data);
1141 stop_cmd(dd);
1142 ret = i2c_startcmd(dd, icd->temp_dev | READ_CMD);
1143 if (ret) {
1144 ipath_dev_err(dd, "Failed tempsense data wrt to %02X\n",
1145 regnum);
1146 ret = -ENXIO;
1147 }
1148
1149bail:
1150 return ret;
1151}
1152
1153#define VALID_TS_WR_REG_MASK ((1 << 9) | (1 << 0xB) | (1 << 0xD))
1154
1155/**
1156 * ipath_tempsense_write - write register of temp sensor via I2C
1157 * @dd: the infinipath device
1158 * @regnum: register to write
1159 * @data: data to write
1160 *
1161 * returns 0 for success or < 0 for error
1162 */
1163int ipath_tempsense_write(struct ipath_devdata *dd, u8 regnum, u8 data)
1164{
1165 int ret;
1166
1167 if (regnum > 15 || !((1 << regnum) & VALID_TS_WR_REG_MASK))
1168 return -EINVAL;
1169
1170 ret = mutex_lock_interruptible(&dd->ipath_eep_lock);
1171 if (!ret) {
1172 ret = ipath_tempsense_internal_write(dd, regnum, data);
1173 mutex_unlock(&dd->ipath_eep_lock);
1174 }
1175
1176 /*
1177 * There are three possibilities here:
1178 * ret is 0 for success
1179 * ret is -ENXIO or -EINVAL from code in this file
1180 * ret is -EINTR from mutex_lock_interruptible.
1181 */
1182 return ret;
1183}
diff --git a/drivers/staging/rdma/ipath/ipath_file_ops.c b/drivers/staging/rdma/ipath/ipath_file_ops.c
deleted file mode 100644
index 6187b848b3ca..000000000000
--- a/drivers/staging/rdma/ipath/ipath_file_ops.c
+++ /dev/null
@@ -1,2619 +0,0 @@
1/*
2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/pci.h>
35#include <linux/poll.h>
36#include <linux/cdev.h>
37#include <linux/swap.h>
38#include <linux/export.h>
39#include <linux/vmalloc.h>
40#include <linux/slab.h>
41#include <linux/highmem.h>
42#include <linux/io.h>
43#include <linux/jiffies.h>
44#include <linux/cpu.h>
45#include <linux/uio.h>
46#include <asm/pgtable.h>
47
48#include "ipath_kernel.h"
49#include "ipath_common.h"
50#include "ipath_user_sdma.h"
51
52static int ipath_open(struct inode *, struct file *);
53static int ipath_close(struct inode *, struct file *);
54static ssize_t ipath_write(struct file *, const char __user *, size_t,
55 loff_t *);
56static ssize_t ipath_write_iter(struct kiocb *, struct iov_iter *from);
57static unsigned int ipath_poll(struct file *, struct poll_table_struct *);
58static int ipath_mmap(struct file *, struct vm_area_struct *);
59
60/*
61 * This is really, really weird shit - write() and writev() here
62 * have completely unrelated semantics. Sucky userland ABI,
63 * film at 11.
64 */
65static const struct file_operations ipath_file_ops = {
66 .owner = THIS_MODULE,
67 .write = ipath_write,
68 .write_iter = ipath_write_iter,
69 .open = ipath_open,
70 .release = ipath_close,
71 .poll = ipath_poll,
72 .mmap = ipath_mmap,
73 .llseek = noop_llseek,
74};
75
76/*
77 * Convert kernel virtual addresses to physical addresses so they don't
78 * potentially conflict with the chip addresses used as mmap offsets.
79 * It doesn't really matter what mmap offset we use as long as we can
80 * interpret it correctly.
81 */
82static u64 cvt_kvaddr(void *p)
83{
84 struct page *page;
85 u64 paddr = 0;
86
87 page = vmalloc_to_page(p);
88 if (page)
89 paddr = page_to_pfn(page) << PAGE_SHIFT;
90
91 return paddr;
92}
93
94static int ipath_get_base_info(struct file *fp,
95 void __user *ubase, size_t ubase_size)
96{
97 struct ipath_portdata *pd = port_fp(fp);
98 int ret = 0;
99 struct ipath_base_info *kinfo = NULL;
100 struct ipath_devdata *dd = pd->port_dd;
101 unsigned subport_cnt;
102 int shared, master;
103 size_t sz;
104
105 subport_cnt = pd->port_subport_cnt;
106 if (!subport_cnt) {
107 shared = 0;
108 master = 0;
109 subport_cnt = 1;
110 } else {
111 shared = 1;
112 master = !subport_fp(fp);
113 }
114
115 sz = sizeof(*kinfo);
116 /* If port sharing is not requested, allow the old size structure */
117 if (!shared)
118 sz -= 7 * sizeof(u64);
119 if (ubase_size < sz) {
120 ipath_cdbg(PROC,
121 "Base size %zu, need %zu (version mismatch?)\n",
122 ubase_size, sz);
123 ret = -EINVAL;
124 goto bail;
125 }
126
127 kinfo = kzalloc(sizeof(*kinfo), GFP_KERNEL);
128 if (kinfo == NULL) {
129 ret = -ENOMEM;
130 goto bail;
131 }
132
133 ret = dd->ipath_f_get_base_info(pd, kinfo);
134 if (ret < 0)
135 goto bail;
136
137 kinfo->spi_rcvhdr_cnt = dd->ipath_rcvhdrcnt;
138 kinfo->spi_rcvhdrent_size = dd->ipath_rcvhdrentsize;
139 kinfo->spi_tidegrcnt = dd->ipath_rcvegrcnt;
140 kinfo->spi_rcv_egrbufsize = dd->ipath_rcvegrbufsize;
141 /*
142 * have to mmap whole thing
143 */
144 kinfo->spi_rcv_egrbuftotlen =
145 pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size;
146 kinfo->spi_rcv_egrperchunk = pd->port_rcvegrbufs_perchunk;
147 kinfo->spi_rcv_egrchunksize = kinfo->spi_rcv_egrbuftotlen /
148 pd->port_rcvegrbuf_chunks;
149 kinfo->spi_tidcnt = dd->ipath_rcvtidcnt / subport_cnt;
150 if (master)
151 kinfo->spi_tidcnt += dd->ipath_rcvtidcnt % subport_cnt;
152 /*
153 * for this use, this may be ipath_cfgports summed over all chips that
154 * are configured and present
155 */
156 kinfo->spi_nports = dd->ipath_cfgports;
157 /* unit (chip/board) our port is on */
158 kinfo->spi_unit = dd->ipath_unit;
159 /* for now, only a single page */
160 kinfo->spi_tid_maxsize = PAGE_SIZE;
161
162 /*
163 * Doing this per port, and based on the skip value, etc. This has
164 * to be the actual buffer size, since the protocol code treats it
165 * as an array.
166 *
167 * These have to be set to user addresses in the user code via mmap.
168 * These values are used on return to user code for the mmap target
169 * addresses only. For 32 bit, same 44 bit address problem, so use
170 * the physical address, not virtual. Before 2.6.11, using the
171 * page_address() macro worked, but in 2.6.11, even that returns the
172 * full 64 bit address (upper bits all 1's). So far, using the
173 * physical addresses (or chip offsets, for chip mapping) works, but
174 * no doubt some future kernel release will change that, and we'll be
175 * on to yet another method of dealing with this.
176 */
177 kinfo->spi_rcvhdr_base = (u64) pd->port_rcvhdrq_phys;
178 kinfo->spi_rcvhdr_tailaddr = (u64) pd->port_rcvhdrqtailaddr_phys;
179 kinfo->spi_rcv_egrbufs = (u64) pd->port_rcvegr_phys;
180 kinfo->spi_pioavailaddr = (u64) dd->ipath_pioavailregs_phys;
181 kinfo->spi_status = (u64) kinfo->spi_pioavailaddr +
182 (void *) dd->ipath_statusp -
183 (void *) dd->ipath_pioavailregs_dma;
184 if (!shared) {
185 kinfo->spi_piocnt = pd->port_piocnt;
186 kinfo->spi_piobufbase = (u64) pd->port_piobufs;
187 kinfo->__spi_uregbase = (u64) dd->ipath_uregbase +
188 dd->ipath_ureg_align * pd->port_port;
189 } else if (master) {
190 kinfo->spi_piocnt = (pd->port_piocnt / subport_cnt) +
191 (pd->port_piocnt % subport_cnt);
192 /* Master's PIO buffers are after all the slave's */
193 kinfo->spi_piobufbase = (u64) pd->port_piobufs +
194 dd->ipath_palign *
195 (pd->port_piocnt - kinfo->spi_piocnt);
196 } else {
197 unsigned slave = subport_fp(fp) - 1;
198
199 kinfo->spi_piocnt = pd->port_piocnt / subport_cnt;
200 kinfo->spi_piobufbase = (u64) pd->port_piobufs +
201 dd->ipath_palign * kinfo->spi_piocnt * slave;
202 }
203
204 if (shared) {
205 kinfo->spi_port_uregbase = (u64) dd->ipath_uregbase +
206 dd->ipath_ureg_align * pd->port_port;
207 kinfo->spi_port_rcvegrbuf = kinfo->spi_rcv_egrbufs;
208 kinfo->spi_port_rcvhdr_base = kinfo->spi_rcvhdr_base;
209 kinfo->spi_port_rcvhdr_tailaddr = kinfo->spi_rcvhdr_tailaddr;
210
211 kinfo->__spi_uregbase = cvt_kvaddr(pd->subport_uregbase +
212 PAGE_SIZE * subport_fp(fp));
213
214 kinfo->spi_rcvhdr_base = cvt_kvaddr(pd->subport_rcvhdr_base +
215 pd->port_rcvhdrq_size * subport_fp(fp));
216 kinfo->spi_rcvhdr_tailaddr = 0;
217 kinfo->spi_rcv_egrbufs = cvt_kvaddr(pd->subport_rcvegrbuf +
218 pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size *
219 subport_fp(fp));
220
221 kinfo->spi_subport_uregbase =
222 cvt_kvaddr(pd->subport_uregbase);
223 kinfo->spi_subport_rcvegrbuf =
224 cvt_kvaddr(pd->subport_rcvegrbuf);
225 kinfo->spi_subport_rcvhdr_base =
226 cvt_kvaddr(pd->subport_rcvhdr_base);
227 ipath_cdbg(PROC, "port %u flags %x %llx %llx %llx\n",
228 kinfo->spi_port, kinfo->spi_runtime_flags,
229 (unsigned long long) kinfo->spi_subport_uregbase,
230 (unsigned long long) kinfo->spi_subport_rcvegrbuf,
231 (unsigned long long) kinfo->spi_subport_rcvhdr_base);
232 }
233
234 /*
235 * All user buffers are 2KB buffers. If we ever support
236 * giving 4KB buffers to user processes, this will need some
237 * work.
238 */
239 kinfo->spi_pioindex = (kinfo->spi_piobufbase -
240 (dd->ipath_piobufbase & 0xffffffff)) / dd->ipath_palign;
241 kinfo->spi_pioalign = dd->ipath_palign;
242
243 kinfo->spi_qpair = IPATH_KD_QP;
244 /*
245 * user mode PIO buffers are always 2KB, even when 4KB can
246 * be received, and sent via the kernel; this is ibmaxlen
247 * for 2K MTU.
248 */
249 kinfo->spi_piosize = dd->ipath_piosize2k - 2 * sizeof(u32);
250 kinfo->spi_mtu = dd->ipath_ibmaxlen; /* maxlen, not ibmtu */
251 kinfo->spi_port = pd->port_port;
252 kinfo->spi_subport = subport_fp(fp);
253 kinfo->spi_sw_version = IPATH_KERN_SWVERSION;
254 kinfo->spi_hw_version = dd->ipath_revision;
255
256 if (master) {
257 kinfo->spi_runtime_flags |= IPATH_RUNTIME_MASTER;
258 }
259
260 sz = (ubase_size < sizeof(*kinfo)) ? ubase_size : sizeof(*kinfo);
261 if (copy_to_user(ubase, kinfo, sz))
262 ret = -EFAULT;
263
264bail:
265 kfree(kinfo);
266 return ret;
267}
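A standalone sketch of the PIO buffer split computed above for shared ports (counts and alignment are hypothetical): slaves each get piocnt/subport_cnt buffers, the master also takes the remainder, and the master's buffers sit after all the slaves'.

/*
 * Illustrative only: master/slave PIO buffer accounting for a shared port.
 */
#include <stdio.h>

int main(void)
{
	unsigned piocnt = 34, subports = 4, palign = 2048;	/* hypothetical */
	unsigned slave_cnt = piocnt / subports;			/* 8 each */
	unsigned master_cnt = slave_cnt + piocnt % subports;	/* 8 + 2 */
	unsigned master_off = palign * (piocnt - master_cnt);	/* after slaves */

	printf("slaves get %u, master gets %u at offset 0x%x\n",
	       slave_cnt, master_cnt, master_off);
	return 0;
}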
268
269/**
270 * ipath_tid_update - update a port TID
271 * @pd: the port
272 * @fp: the ipath device file
273 * @ti: the TID information
274 *
275 * The new implementation as of Oct 2004 is that the driver assigns
276 * the tid and returns it to the caller. To make it easier to
277 * catch bugs, and to reduce search time, we keep a cursor for
278 * each port, walking the shadow tid array to find one that's not
279 * in use.
280 *
281 * For now, if we can't allocate the full list, we fail, although
282 * in the long run, we'll allocate as many as we can, and the
283 * caller will deal with that by trying the remaining pages later.
284 * That means that when we fail, we have to mark the tids as not in
285 * use again, in our shadow copy.
286 *
287 * It's up to the caller to free the tids when they are done.
288 * We'll unlock the pages as they free them.
289 *
290 * Also, right now we are locking one page at a time, but since
291 * the intended use of this routine is for a single group of
292 * virtually contiguous pages, that should change to improve
293 * performance.
294 */
295static int ipath_tid_update(struct ipath_portdata *pd, struct file *fp,
296 const struct ipath_tid_info *ti)
297{
298 int ret = 0, ntids;
299 u32 tid, porttid, cnt, i, tidcnt, tidoff;
300 u16 *tidlist;
301 struct ipath_devdata *dd = pd->port_dd;
302 u64 physaddr;
303 unsigned long vaddr;
304 u64 __iomem *tidbase;
305 unsigned long tidmap[8];
306 struct page **pagep = NULL;
307 unsigned subport = subport_fp(fp);
308
309 if (!dd->ipath_pageshadow) {
310 ret = -ENOMEM;
311 goto done;
312 }
313
314 cnt = ti->tidcnt;
315 if (!cnt) {
316 ipath_dbg("After copyin, tidcnt 0, tidlist %llx\n",
317 (unsigned long long) ti->tidlist);
318 /*
319 * Should we treat this as success? Likely a bug
320 */
321 ret = -EFAULT;
322 goto done;
323 }
324 porttid = pd->port_port * dd->ipath_rcvtidcnt;
325 if (!pd->port_subport_cnt) {
326 tidcnt = dd->ipath_rcvtidcnt;
327 tid = pd->port_tidcursor;
328 tidoff = 0;
329 } else if (!subport) {
330 tidcnt = (dd->ipath_rcvtidcnt / pd->port_subport_cnt) +
331 (dd->ipath_rcvtidcnt % pd->port_subport_cnt);
332 tidoff = dd->ipath_rcvtidcnt - tidcnt;
333 porttid += tidoff;
334 tid = tidcursor_fp(fp);
335 } else {
336 tidcnt = dd->ipath_rcvtidcnt / pd->port_subport_cnt;
337 tidoff = tidcnt * (subport - 1);
338 porttid += tidoff;
339 tid = tidcursor_fp(fp);
340 }
341 if (cnt > tidcnt) {
342 /* make sure it all fits in port_tid_pg_list */
343 dev_info(&dd->pcidev->dev, "Process tried to allocate %u "
344 "TIDs, only trying max (%u)\n", cnt, tidcnt);
345 cnt = tidcnt;
346 }
347 pagep = &((struct page **) pd->port_tid_pg_list)[tidoff];
348 tidlist = &((u16 *) &pagep[dd->ipath_rcvtidcnt])[tidoff];
349
350 memset(tidmap, 0, sizeof(tidmap));
351 /* before decrement; chip actual # */
352 ntids = tidcnt;
353 tidbase = (u64 __iomem *) (((char __iomem *) dd->ipath_kregbase) +
354 dd->ipath_rcvtidbase +
355 porttid * sizeof(*tidbase));
356
357 ipath_cdbg(VERBOSE, "Port%u %u tids, cursor %u, tidbase %p\n",
358 pd->port_port, cnt, tid, tidbase);
359
360 /* virtual address of first page in transfer */
361 vaddr = ti->tidvaddr;
362 if (!access_ok(VERIFY_WRITE, (void __user *) vaddr,
363 cnt * PAGE_SIZE)) {
364 ipath_dbg("Fail vaddr %p, %u pages, !access_ok\n",
365 (void *)vaddr, cnt);
366 ret = -EFAULT;
367 goto done;
368 }
369 ret = ipath_get_user_pages(vaddr, cnt, pagep);
370 if (ret) {
371 if (ret == -EBUSY) {
372 ipath_dbg("Failed to lock addr %p, %u pages "
373 "(already locked)\n",
374 (void *) vaddr, cnt);
375 /*
376 * for now, continue, and see what happens but with
377 * the new implementation, this should never happen,
378 * unless perhaps the user has mpin'ed the pages
379 * themselves (something we need to test)
380 */
381 ret = 0;
382 } else {
383 dev_info(&dd->pcidev->dev,
384 "Failed to lock addr %p, %u pages: "
385 "errno %d\n", (void *) vaddr, cnt, -ret);
386 goto done;
387 }
388 }
389 for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
390 for (; ntids--; tid++) {
391 if (tid == tidcnt)
392 tid = 0;
393 if (!dd->ipath_pageshadow[porttid + tid])
394 break;
395 }
396 if (ntids < 0) {
397 /*
398 * oops, wrapped all the way through their TIDs,
399 * and didn't have enough free; see comments at
400 * start of routine
401 */
402 ipath_dbg("Not enough free TIDs for %u pages "
403 "(index %d), failing\n", cnt, i);
404 i--; /* last tidlist[i] not filled in */
405 ret = -ENOMEM;
406 break;
407 }
408 tidlist[i] = tid + tidoff;
409 ipath_cdbg(VERBOSE, "Updating idx %u to TID %u, "
410 "vaddr %lx\n", i, tid + tidoff, vaddr);
411 /* we "know" system pages and TID pages are same size */
412 dd->ipath_pageshadow[porttid + tid] = pagep[i];
413 dd->ipath_physshadow[porttid + tid] = ipath_map_page(
414 dd->pcidev, pagep[i], 0, PAGE_SIZE,
415 PCI_DMA_FROMDEVICE);
416 /*
417 * don't need atomic or it's overhead
418 */
419 __set_bit(tid, tidmap);
420 physaddr = dd->ipath_physshadow[porttid + tid];
421 ipath_stats.sps_pagelocks++;
422 ipath_cdbg(VERBOSE,
423 "TID %u, vaddr %lx, physaddr %llx pgp %p\n",
424 tid, vaddr, (unsigned long long) physaddr,
425 pagep[i]);
426 dd->ipath_f_put_tid(dd, &tidbase[tid], RCVHQ_RCV_TYPE_EXPECTED,
427 physaddr);
428 /*
429 * don't check this tid in ipath_portshadow, since we
430 * just filled it in; start with the next one.
431 */
432 tid++;
433 }
434
435 if (ret) {
436 u32 limit;
437 cleanup:
438 /* jump here if copy out of updated info failed... */
439 ipath_dbg("After failure (ret=%d), undo %d of %d entries\n",
440 -ret, i, cnt);
441 /* same code that's in ipath_free_tid() */
442 limit = sizeof(tidmap) * BITS_PER_BYTE;
443 if (limit > tidcnt)
444 /* just in case size changes in future */
445 limit = tidcnt;
446 tid = find_first_bit((const unsigned long *)tidmap, limit);
447 for (; tid < limit; tid++) {
448 if (!test_bit(tid, tidmap))
449 continue;
450 if (dd->ipath_pageshadow[porttid + tid]) {
451 ipath_cdbg(VERBOSE, "Freeing TID %u\n",
452 tid);
453 dd->ipath_f_put_tid(dd, &tidbase[tid],
454 RCVHQ_RCV_TYPE_EXPECTED,
455 dd->ipath_tidinvalid);
456 pci_unmap_page(dd->pcidev,
457 dd->ipath_physshadow[porttid + tid],
458 PAGE_SIZE, PCI_DMA_FROMDEVICE);
459 dd->ipath_pageshadow[porttid + tid] = NULL;
460 ipath_stats.sps_pageunlocks++;
461 }
462 }
463 ipath_release_user_pages(pagep, cnt);
464 } else {
465 /*
466 * Copy the updated array, with ipath_tid's filled in, back
467 * to user. Since we did the copy in already, this "should
468 * never fail" If it does, we have to clean up...
469 */
470 if (copy_to_user((void __user *)
471 (unsigned long) ti->tidlist,
472 tidlist, cnt * sizeof(*tidlist))) {
473 ret = -EFAULT;
474 goto cleanup;
475 }
476 if (copy_to_user((void __user *) (unsigned long) ti->tidmap,
477 tidmap, sizeof tidmap)) {
478 ret = -EFAULT;
479 goto cleanup;
480 }
481 if (tid == tidcnt)
482 tid = 0;
483 if (!pd->port_subport_cnt)
484 pd->port_tidcursor = tid;
485 else
486 tidcursor_fp(fp) = tid;
487 }
488
489done:
490 if (ret)
491 ipath_dbg("Failed to map %u TID pages, failing with %d\n",
492 ti->tidcnt, -ret);
493 return ret;
494}
495
496/**
497 * ipath_tid_free - free a port TID
498 * @pd: the port
499 * @subport: the subport
500 * @ti: the TID info
501 *
502 * right now we are unlocking one page at a time, but since
503 * the intended use of this routine is for a single group of
504 * virtually contiguous pages, that should change to improve
505 * performance. We check that the TID is in range for this port
506 * but otherwise don't check validity; if user has an error and
507 * frees the wrong tid, it's only their own data that can thereby
508 * be corrupted. We do check that the TID was in use, for sanity.
509 * We always use our idea of the saved address, not the address that
510 * they pass in to us.
511 */
512
513static int ipath_tid_free(struct ipath_portdata *pd, unsigned subport,
514 const struct ipath_tid_info *ti)
515{
516 int ret = 0;
517 u32 tid, porttid, cnt, limit, tidcnt;
518 struct ipath_devdata *dd = pd->port_dd;
519 u64 __iomem *tidbase;
520 unsigned long tidmap[8];
521
522 if (!dd->ipath_pageshadow) {
523 ret = -ENOMEM;
524 goto done;
525 }
526
527 if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap,
528 sizeof tidmap)) {
529 ret = -EFAULT;
530 goto done;
531 }
532
533 porttid = pd->port_port * dd->ipath_rcvtidcnt;
534 if (!pd->port_subport_cnt)
535 tidcnt = dd->ipath_rcvtidcnt;
536 else if (!subport) {
537 tidcnt = (dd->ipath_rcvtidcnt / pd->port_subport_cnt) +
538 (dd->ipath_rcvtidcnt % pd->port_subport_cnt);
539 porttid += dd->ipath_rcvtidcnt - tidcnt;
540 } else {
541 tidcnt = dd->ipath_rcvtidcnt / pd->port_subport_cnt;
542 porttid += tidcnt * (subport - 1);
543 }
544 tidbase = (u64 __iomem *) ((char __iomem *)(dd->ipath_kregbase) +
545 dd->ipath_rcvtidbase +
546 porttid * sizeof(*tidbase));
547
548 limit = sizeof(tidmap) * BITS_PER_BYTE;
549 if (limit > tidcnt)
550 /* just in case size changes in future */
551 limit = tidcnt;
552 tid = find_first_bit(tidmap, limit);
553 ipath_cdbg(VERBOSE, "Port%u free %u tids; first bit (max=%d) "
554 "set is %d, porttid %u\n", pd->port_port, ti->tidcnt,
555 limit, tid, porttid);
556 for (cnt = 0; tid < limit; tid++) {
557 /*
558 * small optimization; if we detect a run of 3 or so without
559 * any set, use find_first_bit again. That's mainly to
560 * accelerate the case where we wrapped, so we have some at
561 * the beginning, and some at the end, and a big gap
562 * in the middle.
563 */
564 if (!test_bit(tid, tidmap))
565 continue;
566 cnt++;
567 if (dd->ipath_pageshadow[porttid + tid]) {
568 struct page *p;
569 p = dd->ipath_pageshadow[porttid + tid];
570 dd->ipath_pageshadow[porttid + tid] = NULL;
571 ipath_cdbg(VERBOSE, "PID %u freeing TID %u\n",
572 pid_nr(pd->port_pid), tid);
573 dd->ipath_f_put_tid(dd, &tidbase[tid],
574 RCVHQ_RCV_TYPE_EXPECTED,
575 dd->ipath_tidinvalid);
576 pci_unmap_page(dd->pcidev,
577 dd->ipath_physshadow[porttid + tid],
578 PAGE_SIZE, PCI_DMA_FROMDEVICE);
579 ipath_release_user_pages(&p, 1);
580 ipath_stats.sps_pageunlocks++;
581 } else
582 ipath_dbg("Unused tid %u, ignoring\n", tid);
583 }
584 if (cnt != ti->tidcnt)
585 ipath_dbg("passed in tidcnt %d, only %d bits set in map\n",
586 ti->tidcnt, cnt);
587done:
588 if (ret)
589 ipath_dbg("Failed to unmap %u TID pages, failing with %d\n",
590 ti->tidcnt, -ret);
591 return ret;
592}
593
594/**
595 * ipath_set_part_key - set a partition key
596 * @pd: the port
597 * @key: the key
598 *
599 * We can have up to 4 active at a time (other than the default, which is
600 * always allowed). This is somewhat tricky, since multiple ports may set
601 * the same key, so we reference count them, and clean up at exit. All 4
602 * partition keys are packed into a single infinipath register. It's an
603 * error for a process to set the same pkey multiple times. We provide no
604 * mechanism to de-allocate a pkey at this time; we may eventually need to
605 * do that. I've used the atomic operations, and no locking, and only make
606 * a single pass through what's available. This should be more than
607 * adequate for some time. I'll think about spinlocks or the like if and as
608 * it's necessary.
609 */
610static int ipath_set_part_key(struct ipath_portdata *pd, u16 key)
611{
612 struct ipath_devdata *dd = pd->port_dd;
613 int i, any = 0, pidx = -1;
614 u16 lkey = key & 0x7FFF;
615 int ret;
616
617 if (lkey == (IPATH_DEFAULT_P_KEY & 0x7FFF)) {
618 /* nothing to do; this key always valid */
619 ret = 0;
620 goto bail;
621 }
622
623 ipath_cdbg(VERBOSE, "p%u try to set pkey %hx, current keys "
624 "%hx:%x %hx:%x %hx:%x %hx:%x\n",
625 pd->port_port, key, dd->ipath_pkeys[0],
626 atomic_read(&dd->ipath_pkeyrefs[0]), dd->ipath_pkeys[1],
627 atomic_read(&dd->ipath_pkeyrefs[1]), dd->ipath_pkeys[2],
628 atomic_read(&dd->ipath_pkeyrefs[2]), dd->ipath_pkeys[3],
629 atomic_read(&dd->ipath_pkeyrefs[3]));
630
631 if (!lkey) {
632 ipath_cdbg(PROC, "p%u tries to set key 0, not allowed\n",
633 pd->port_port);
634 ret = -EINVAL;
635 goto bail;
636 }
637
638 /*
639 * Set the full membership bit, because it has to be
640 * set in the register or the packet, and it seems
641 * cleaner to set in the register than to force all
642 * callers to set it. (see bug 4331)
643 */
644 key |= 0x8000;
645
646 for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
647 if (!pd->port_pkeys[i] && pidx == -1)
648 pidx = i;
649 if (pd->port_pkeys[i] == key) {
650 ipath_cdbg(VERBOSE, "p%u tries to set same pkey "
651 "(%x) more than once\n",
652 pd->port_port, key);
653 ret = -EEXIST;
654 goto bail;
655 }
656 }
657 if (pidx == -1) {
658 ipath_dbg("All pkeys for port %u already in use, "
659 "can't set %x\n", pd->port_port, key);
660 ret = -EBUSY;
661 goto bail;
662 }
663 for (any = i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
664 if (!dd->ipath_pkeys[i]) {
665 any++;
666 continue;
667 }
668 if (dd->ipath_pkeys[i] == key) {
669 atomic_t *pkrefs = &dd->ipath_pkeyrefs[i];
670
671 if (atomic_inc_return(pkrefs) > 1) {
672 pd->port_pkeys[pidx] = key;
673 ipath_cdbg(VERBOSE, "p%u set key %x "
674 "matches #%d, count now %d\n",
675 pd->port_port, key, i,
676 atomic_read(pkrefs));
677 ret = 0;
678 goto bail;
679 } else {
680 /*
681 * lost race, decrement count, catch below
682 */
683 atomic_dec(pkrefs);
684 ipath_cdbg(VERBOSE, "Lost race, count was "
685 "0, after dec, it's %d\n",
686 atomic_read(pkrefs));
687 any++;
688 }
689 }
690 if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
691 /*
692 * It makes no sense to have both the limited and
693 * full membership PKEY set at the same time since
694 * the unlimited one will disable the limited one.
695 */
696 ret = -EEXIST;
697 goto bail;
698 }
699 }
700 if (!any) {
701 ipath_dbg("port %u, all pkeys already in use, "
702 "can't set %x\n", pd->port_port, key);
703 ret = -EBUSY;
704 goto bail;
705 }
706 for (any = i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
707 if (!dd->ipath_pkeys[i] &&
708 atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
709 u64 pkey;
710
711 /* for ipathstats, etc. */
712 ipath_stats.sps_pkeys[i] = lkey;
713 pd->port_pkeys[pidx] = dd->ipath_pkeys[i] = key;
714 pkey =
715 (u64) dd->ipath_pkeys[0] |
716 ((u64) dd->ipath_pkeys[1] << 16) |
717 ((u64) dd->ipath_pkeys[2] << 32) |
718 ((u64) dd->ipath_pkeys[3] << 48);
719 ipath_cdbg(PROC, "p%u set key %x in #%d, "
720 "portidx %d, new pkey reg %llx\n",
721 pd->port_port, key, i, pidx,
722 (unsigned long long) pkey);
723 ipath_write_kreg(
724 dd, dd->ipath_kregs->kr_partitionkey, pkey);
725
726 ret = 0;
727 goto bail;
728 }
729 }
730 ipath_dbg("port %u, all pkeys already in use 2nd pass, "
731 "can't set %x\n", pd->port_port, key);
732 ret = -EBUSY;
733
734bail:
735 return ret;
736}
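A small standalone illustration of how the four 16-bit partition keys are packed into the single 64-bit kr_partitionkey value written above (key values hypothetical).

/* Illustrative only: packing four 16-bit pkeys into one 64-bit register. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t pkeys[4] = { 0xFFFF, 0x8001, 0, 0 };	/* hypothetical */
	uint64_t reg = (uint64_t)pkeys[0] |
		       ((uint64_t)pkeys[1] << 16) |
		       ((uint64_t)pkeys[2] << 32) |
		       ((uint64_t)pkeys[3] << 48);

	printf("partitionkey register = 0x%016llx\n",
	       (unsigned long long)reg);
	return 0;
}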
737
738/**
739 * ipath_manage_rcvq - manage a port's receive queue
740 * @pd: the port
741 * @subport: the subport
742 * @start_stop: action to carry out
743 *
744 * start_stop == 0 disables receive on the port, for use in queue
745 * overflow conditions. start_stop==1 re-enables, to be used to
746 * re-init the software copy of the head register
747 */
748static int ipath_manage_rcvq(struct ipath_portdata *pd, unsigned subport,
749 int start_stop)
750{
751 struct ipath_devdata *dd = pd->port_dd;
752
753 ipath_cdbg(PROC, "%sabling rcv for unit %u port %u:%u\n",
754 start_stop ? "en" : "dis", dd->ipath_unit,
755 pd->port_port, subport);
756 if (subport)
757 goto bail;
758 /* atomically clear receive enable port. */
759 if (start_stop) {
760 /*
761 * On enable, force in-memory copy of the tail register to
762 * 0, so that protocol code doesn't have to worry about
763 * whether or not the chip has yet updated the in-memory
764 * copy or not on return from the system call. The chip
765 * always resets its tail register back to 0 on a
766 * transition from disabled to enabled. This could cause a
767 * problem if software was broken, and did the enable w/o
768 * the disable, but eventually the in-memory copy will be
769 * updated and correct itself, even in the face of software
770 * bugs.
771 */
772 if (pd->port_rcvhdrtail_kvaddr)
773 ipath_clear_rcvhdrtail(pd);
774 set_bit(dd->ipath_r_portenable_shift + pd->port_port,
775 &dd->ipath_rcvctrl);
776 } else
777 clear_bit(dd->ipath_r_portenable_shift + pd->port_port,
778 &dd->ipath_rcvctrl);
779 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
780 dd->ipath_rcvctrl);
781 /* now be sure chip saw it before we return */
782 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
783 if (start_stop) {
784 /*
785 * And try to be sure that tail reg update has happened too.
786 * This should in theory interlock with the RXE changes to
787 * the tail register. Don't assign it to the tail register
788 * in memory copy, since we could overwrite an update by the
789 * chip if we did.
790 */
791 ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);
792 }
793 /* always; new head should be equal to new tail; see above */
794bail:
795 return 0;
796}
797
798static void ipath_clean_part_key(struct ipath_portdata *pd,
799 struct ipath_devdata *dd)
800{
801 int i, j, pchanged = 0;
802 u64 oldpkey;
803
804 /* for debugging only */
805 oldpkey = (u64) dd->ipath_pkeys[0] |
806 ((u64) dd->ipath_pkeys[1] << 16) |
807 ((u64) dd->ipath_pkeys[2] << 32) |
808 ((u64) dd->ipath_pkeys[3] << 48);
809
810 for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
811 if (!pd->port_pkeys[i])
812 continue;
813 ipath_cdbg(VERBOSE, "look for key[%d] %hx in pkeys\n", i,
814 pd->port_pkeys[i]);
815 for (j = 0; j < ARRAY_SIZE(dd->ipath_pkeys); j++) {
816 /* check for match independent of the global bit */
817 if ((dd->ipath_pkeys[j] & 0x7fff) !=
818 (pd->port_pkeys[i] & 0x7fff))
819 continue;
820 if (atomic_dec_and_test(&dd->ipath_pkeyrefs[j])) {
821 ipath_cdbg(VERBOSE, "p%u clear key "
822 "%x matches #%d\n",
823 pd->port_port,
824 pd->port_pkeys[i], j);
825 ipath_stats.sps_pkeys[j] =
826 dd->ipath_pkeys[j] = 0;
827 pchanged++;
828 } else {
829 ipath_cdbg(VERBOSE, "p%u key %x matches #%d, "
830 "but ref still %d\n", pd->port_port,
831 pd->port_pkeys[i], j,
832 atomic_read(&dd->ipath_pkeyrefs[j]));
833 break;
834 }
835 }
836 pd->port_pkeys[i] = 0;
837 }
838 if (pchanged) {
839 u64 pkey = (u64) dd->ipath_pkeys[0] |
840 ((u64) dd->ipath_pkeys[1] << 16) |
841 ((u64) dd->ipath_pkeys[2] << 32) |
842 ((u64) dd->ipath_pkeys[3] << 48);
843 ipath_cdbg(VERBOSE, "p%u old pkey reg %llx, "
844 "new pkey reg %llx\n", pd->port_port,
845 (unsigned long long) oldpkey,
846 (unsigned long long) pkey);
847 ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
848 pkey);
849 }
850}
851
852/*
853 * Initialize the port data with the receive buffer sizes
854 * so this can be done while the master port is locked.
855 * Otherwise, there is a race with a slave opening the port
856 * and seeing these fields uninitialized.
857 */
858static void init_user_egr_sizes(struct ipath_portdata *pd)
859{
860 struct ipath_devdata *dd = pd->port_dd;
861 unsigned egrperchunk, egrcnt, size;
862
863 /*
864 * to avoid wasting a lot of memory, we allocate 32KB chunks of
865 * physically contiguous memory, advance through it until used up
866 * and then allocate more. Of course, we need memory to store those
867 * extra pointers, now. Started out with 256KB, but under heavy
868 * memory pressure (creating large files and then copying them over
869 * NFS while doing lots of MPI jobs), we hit some allocation
870 * failures, even though we can sleep... (2.6.10) Still get
871 * failures at 64K. 32K is the lowest we can go without wasting
872 * additional memory.
873 */
874 size = 0x8000;
875 egrperchunk = size / dd->ipath_rcvegrbufsize;
876 egrcnt = dd->ipath_rcvegrcnt;
877 pd->port_rcvegrbuf_chunks = (egrcnt + egrperchunk - 1) / egrperchunk;
878 pd->port_rcvegrbufs_perchunk = egrperchunk;
879 pd->port_rcvegrbuf_size = size;
880}
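/*
 * Worked example of the sizing above (illustrative numbers only, not
 * taken from any particular chip): with ipath_rcvegrbufsize == 2048 and
 * ipath_rcvegrcnt == 64, egrperchunk = 0x8000 / 2048 = 16 buffers per
 * 32KB chunk and port_rcvegrbuf_chunks = (64 + 16 - 1) / 16 = 4, i.e.
 * four separate 32KB allocations rather than one 128KB one.
 */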
881
882/**
883 * ipath_create_user_egr - allocate eager TID buffers
884 * @pd: the port to allocate TID buffers for
885 *
886 * This routine is now quite different for user and kernel, because
887 * the kernel uses skb's for the accelerated network performance.
888 * This is the user port version.
889 *
890 * Allocate the eager TID buffers and program them into infinipath.
891 * They are no longer completely contiguous; we do multiple allocation
892 * calls.
893 */
894static int ipath_create_user_egr(struct ipath_portdata *pd)
895{
896 struct ipath_devdata *dd = pd->port_dd;
897 unsigned e, egrcnt, egrperchunk, chunk, egrsize, egroff;
898 size_t size;
899 int ret;
900 gfp_t gfp_flags;
901
902 /*
903 * GFP_USER, but without GFP_FS, so buffer cache can be
904 * coalesced (we hope); otherwise, even at order 4,
905 * heavy filesystem activity makes these fail, and we can
906 * use compound pages.
907 */
908 gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;
909
910 egrcnt = dd->ipath_rcvegrcnt;
911 /* TID number offset for this port */
912 egroff = (pd->port_port - 1) * egrcnt + dd->ipath_p0_rcvegrcnt;
913 egrsize = dd->ipath_rcvegrbufsize;
914 ipath_cdbg(VERBOSE, "Allocating %d egr buffers, at egrtid "
915 "offset %x, egrsize %u\n", egrcnt, egroff, egrsize);
916
917 chunk = pd->port_rcvegrbuf_chunks;
918 egrperchunk = pd->port_rcvegrbufs_perchunk;
919 size = pd->port_rcvegrbuf_size;
920 pd->port_rcvegrbuf = kmalloc_array(chunk, sizeof(pd->port_rcvegrbuf[0]),
921 GFP_KERNEL);
922 if (!pd->port_rcvegrbuf) {
923 ret = -ENOMEM;
924 goto bail;
925 }
926 pd->port_rcvegrbuf_phys =
927 kmalloc_array(chunk, sizeof(pd->port_rcvegrbuf_phys[0]),
928 GFP_KERNEL);
929 if (!pd->port_rcvegrbuf_phys) {
930 ret = -ENOMEM;
931 goto bail_rcvegrbuf;
932 }
933 for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) {
934
935 pd->port_rcvegrbuf[e] = dma_alloc_coherent(
936 &dd->pcidev->dev, size, &pd->port_rcvegrbuf_phys[e],
937 gfp_flags);
938
939 if (!pd->port_rcvegrbuf[e]) {
940 ret = -ENOMEM;
941 goto bail_rcvegrbuf_phys;
942 }
943 }
944
945 pd->port_rcvegr_phys = pd->port_rcvegrbuf_phys[0];
946
947 for (e = chunk = 0; chunk < pd->port_rcvegrbuf_chunks; chunk++) {
948 dma_addr_t pa = pd->port_rcvegrbuf_phys[chunk];
949 unsigned i;
950
951 for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) {
952 dd->ipath_f_put_tid(dd, e + egroff +
953 (u64 __iomem *)
954 ((char __iomem *)
955 dd->ipath_kregbase +
956 dd->ipath_rcvegrbase),
957 RCVHQ_RCV_TYPE_EAGER, pa);
958 pa += egrsize;
959 }
960 cond_resched(); /* don't hog the cpu */
961 }
962
963 ret = 0;
964 goto bail;
965
966bail_rcvegrbuf_phys:
967 for (e = 0; e < pd->port_rcvegrbuf_chunks &&
968 pd->port_rcvegrbuf[e]; e++) {
969 dma_free_coherent(&dd->pcidev->dev, size,
970 pd->port_rcvegrbuf[e],
971 pd->port_rcvegrbuf_phys[e]);
972
973 }
974 kfree(pd->port_rcvegrbuf_phys);
975 pd->port_rcvegrbuf_phys = NULL;
976bail_rcvegrbuf:
977 kfree(pd->port_rcvegrbuf);
978 pd->port_rcvegrbuf = NULL;
979bail:
980 return ret;
981}
982
983
984/* common code for the mappings on dma_alloc_coherent mem */
985static int ipath_mmap_mem(struct vm_area_struct *vma,
986 struct ipath_portdata *pd, unsigned len, int write_ok,
987 void *kvaddr, char *what)
988{
989 struct ipath_devdata *dd = pd->port_dd;
990 unsigned long pfn;
991 int ret;
992
993 if ((vma->vm_end - vma->vm_start) > len) {
994 dev_info(&dd->pcidev->dev,
995 "FAIL on %s: len %lx > %x\n", what,
996 vma->vm_end - vma->vm_start, len);
997 ret = -EFAULT;
998 goto bail;
999 }
1000
1001 if (!write_ok) {
1002 if (vma->vm_flags & VM_WRITE) {
1003 dev_info(&dd->pcidev->dev,
1004 "%s must be mapped readonly\n", what);
1005 ret = -EPERM;
1006 goto bail;
1007 }
1008
1009 /* don't allow them to later change with mprotect */
1010 vma->vm_flags &= ~VM_MAYWRITE;
1011 }
1012
1013 pfn = virt_to_phys(kvaddr) >> PAGE_SHIFT;
1014 ret = remap_pfn_range(vma, vma->vm_start, pfn,
1015 len, vma->vm_page_prot);
1016 if (ret)
1017 dev_info(&dd->pcidev->dev, "%s port%u mmap of %lx, %x "
1018 "bytes r%c failed: %d\n", what, pd->port_port,
1019 pfn, len, write_ok?'w':'o', ret);
1020 else
1021 ipath_cdbg(VERBOSE, "%s port%u mmaped %lx, %x bytes "
1022 "r%c\n", what, pd->port_port, pfn, len,
1023 write_ok?'w':'o');
1024bail:
1025 return ret;
1026}
1027
1028static int mmap_ureg(struct vm_area_struct *vma, struct ipath_devdata *dd,
1029 u64 ureg)
1030{
1031 unsigned long phys;
1032 int ret;
1033
1034 /*
1035 * This is real hardware, so use io_remap. This is the mechanism
1036 * for the user process to update the head registers for their port
1037 * in the chip.
1038 */
1039 if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
1040 dev_info(&dd->pcidev->dev, "FAIL mmap userreg: reqlen "
1041 "%lx > PAGE\n", vma->vm_end - vma->vm_start);
1042 ret = -EFAULT;
1043 } else {
1044 phys = dd->ipath_physaddr + ureg;
1045 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1046
1047 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
1048 ret = io_remap_pfn_range(vma, vma->vm_start,
1049 phys >> PAGE_SHIFT,
1050 vma->vm_end - vma->vm_start,
1051 vma->vm_page_prot);
1052 }
1053 return ret;
1054}
1055
1056static int mmap_piobufs(struct vm_area_struct *vma,
1057 struct ipath_devdata *dd,
1058 struct ipath_portdata *pd,
1059 unsigned piobufs, unsigned piocnt)
1060{
1061 unsigned long phys;
1062 int ret;
1063
1064 /*
1065 * When we map the PIO buffers in the chip, we want to map them as
1066 * writeonly, no read possible. This prevents access to previous
1067 * process data, and catches users who might try to read the i/o
1068 * space due to a bug.
1069 */
1070 if ((vma->vm_end - vma->vm_start) > (piocnt * dd->ipath_palign)) {
1071 dev_info(&dd->pcidev->dev, "FAIL mmap piobufs: "
1072 "reqlen %lx > PAGE\n",
1073 vma->vm_end - vma->vm_start);
1074 ret = -EINVAL;
1075 goto bail;
1076 }
1077
1078 phys = dd->ipath_physaddr + piobufs;
1079
1080#if defined(__powerpc__)
1081 /* There isn't a generic way to specify writethrough mappings */
1082 pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
1083 pgprot_val(vma->vm_page_prot) |= _PAGE_WRITETHRU;
1084 pgprot_val(vma->vm_page_prot) &= ~_PAGE_GUARDED;
1085#endif
1086
1087 /*
1088 * don't allow them to later change to readable with mprotect (for when
1089 * not initially mapped readable, as is normally the case)
1090 */
1091 vma->vm_flags &= ~VM_MAYREAD;
1092 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
1093
1094 ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
1095 vma->vm_end - vma->vm_start,
1096 vma->vm_page_prot);
1097bail:
1098 return ret;
1099}
1100
1101static int mmap_rcvegrbufs(struct vm_area_struct *vma,
1102 struct ipath_portdata *pd)
1103{
1104 struct ipath_devdata *dd = pd->port_dd;
1105 unsigned long start, size;
1106 size_t total_size, i;
1107 unsigned long pfn;
1108 int ret;
1109
1110 size = pd->port_rcvegrbuf_size;
1111 total_size = pd->port_rcvegrbuf_chunks * size;
1112 if ((vma->vm_end - vma->vm_start) > total_size) {
1113 dev_info(&dd->pcidev->dev, "FAIL on egr bufs: "
1114 "reqlen %lx > actual %lx\n",
1115 vma->vm_end - vma->vm_start,
1116 (unsigned long) total_size);
1117 ret = -EINVAL;
1118 goto bail;
1119 }
1120
1121 if (vma->vm_flags & VM_WRITE) {
1122 dev_info(&dd->pcidev->dev, "Can't map eager buffers as "
1123 "writable (flags=%lx)\n", vma->vm_flags);
1124 ret = -EPERM;
1125 goto bail;
1126 }
1127 /* don't allow them to later change to writeable with mprotect */
1128 vma->vm_flags &= ~VM_MAYWRITE;
1129
1130 start = vma->vm_start;
1131
1132 for (i = 0; i < pd->port_rcvegrbuf_chunks; i++, start += size) {
1133 pfn = virt_to_phys(pd->port_rcvegrbuf[i]) >> PAGE_SHIFT;
1134 ret = remap_pfn_range(vma, start, pfn, size,
1135 vma->vm_page_prot);
1136 if (ret < 0)
1137 goto bail;
1138 }
1139 ret = 0;
1140
1141bail:
1142 return ret;
1143}
1144
1145/*
1146 * ipath_file_vma_fault - handle a VMA page fault.
1147 */
1148static int ipath_file_vma_fault(struct vm_area_struct *vma,
1149 struct vm_fault *vmf)
1150{
1151 struct page *page;
1152
1153 page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
1154 if (!page)
1155 return VM_FAULT_SIGBUS;
1156 get_page(page);
1157 vmf->page = page;
1158
1159 return 0;
1160}
1161
1162static const struct vm_operations_struct ipath_file_vm_ops = {
1163 .fault = ipath_file_vma_fault,
1164};
1165
1166static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
1167 struct ipath_portdata *pd, unsigned subport)
1168{
1169 unsigned long len;
1170 struct ipath_devdata *dd;
1171 void *addr;
1172 size_t size;
1173 int ret = 0;
1174
1175 /* If the port is not shared, all addresses should be physical */
1176 if (!pd->port_subport_cnt)
1177 goto bail;
1178
1179 dd = pd->port_dd;
1180 size = pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size;
1181
1182 /*
1183 * Each process has all the subport uregbase, rcvhdrq, and
1184 * rcvegrbufs mmapped - as an array for all the processes,
1185 * and also separately for this process.
1186 */
1187 if (pgaddr == cvt_kvaddr(pd->subport_uregbase)) {
1188 addr = pd->subport_uregbase;
1189 size = PAGE_SIZE * pd->port_subport_cnt;
1190 } else if (pgaddr == cvt_kvaddr(pd->subport_rcvhdr_base)) {
1191 addr = pd->subport_rcvhdr_base;
1192 size = pd->port_rcvhdrq_size * pd->port_subport_cnt;
1193 } else if (pgaddr == cvt_kvaddr(pd->subport_rcvegrbuf)) {
1194 addr = pd->subport_rcvegrbuf;
1195 size *= pd->port_subport_cnt;
1196 } else if (pgaddr == cvt_kvaddr(pd->subport_uregbase +
1197 PAGE_SIZE * subport)) {
1198 addr = pd->subport_uregbase + PAGE_SIZE * subport;
1199 size = PAGE_SIZE;
1200 } else if (pgaddr == cvt_kvaddr(pd->subport_rcvhdr_base +
1201 pd->port_rcvhdrq_size * subport)) {
1202 addr = pd->subport_rcvhdr_base +
1203 pd->port_rcvhdrq_size * subport;
1204 size = pd->port_rcvhdrq_size;
1205 } else if (pgaddr == cvt_kvaddr(pd->subport_rcvegrbuf +
1206 size * subport)) {
1207 addr = pd->subport_rcvegrbuf + size * subport;
1208 /* rcvegrbufs are read-only on the slave */
1209 if (vma->vm_flags & VM_WRITE) {
1210 dev_info(&dd->pcidev->dev,
1211 "Can't map eager buffers as "
1212 "writable (flags=%lx)\n", vma->vm_flags);
1213 ret = -EPERM;
1214 goto bail;
1215 }
1216 /*
1217 * Don't allow permission to later change to writeable
1218 * with mprotect.
1219 */
1220 vma->vm_flags &= ~VM_MAYWRITE;
1221 } else {
1222 goto bail;
1223 }
1224 len = vma->vm_end - vma->vm_start;
1225 if (len > size) {
1226 ipath_cdbg(MM, "FAIL: reqlen %lx > %zx\n", len, size);
1227 ret = -EINVAL;
1228 goto bail;
1229 }
1230
1231 vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
1232 vma->vm_ops = &ipath_file_vm_ops;
1233 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
1234 ret = 1;
1235
1236bail:
1237 return ret;
1238}
1239
1240/**
1241 * ipath_mmap - mmap various structures into user space
1242 * @fp: the file pointer
1243 * @vma: the VM area
1244 *
1245 * We use this to have a shared buffer between the kernel and the user code
1246 * for the rcvhdr queue, egr buffers, and the per-port user regs and pio
1247 * buffers in the chip. We have the open and close entries so we can bump
1248 * the ref count and keep the driver from being unloaded while still mapped.
1249 */
1250static int ipath_mmap(struct file *fp, struct vm_area_struct *vma)
1251{
1252 struct ipath_portdata *pd;
1253 struct ipath_devdata *dd;
1254 u64 pgaddr, ureg;
1255 unsigned piobufs, piocnt;
1256 int ret;
1257
1258 pd = port_fp(fp);
1259 if (!pd) {
1260 ret = -EINVAL;
1261 goto bail;
1262 }
1263 dd = pd->port_dd;
1264
1265 /*
1266	 * This maps the shared buffers set up by ipath_do_user_init() into
1267	 * the user process. The address referred to by vm_pgoff is the
1268 * file offset passed via mmap(). For shared ports, this is the
1269 * kernel vmalloc() address of the pages to share with the master.
1270 * For non-shared or master ports, this is a physical address.
1271 * We only do one mmap for each space mapped.
1272 */
1273 pgaddr = vma->vm_pgoff << PAGE_SHIFT;
1274
1275 /*
1276 * Check for 0 in case one of the allocations failed, but user
1277 * called mmap anyway.
1278 */
1279 if (!pgaddr) {
1280 ret = -EINVAL;
1281 goto bail;
1282 }
1283
1284 ipath_cdbg(MM, "pgaddr %llx vm_start=%lx len %lx port %u:%u:%u\n",
1285 (unsigned long long) pgaddr, vma->vm_start,
1286 vma->vm_end - vma->vm_start, dd->ipath_unit,
1287 pd->port_port, subport_fp(fp));
1288
1289 /*
1290 * Physical addresses must fit in 40 bits for our hardware.
1291	 * Check for kernel virtual addresses first; anything else must
1292 * match a HW or memory address.
1293 */
1294 ret = mmap_kvaddr(vma, pgaddr, pd, subport_fp(fp));
1295 if (ret) {
1296 if (ret > 0)
1297 ret = 0;
1298 goto bail;
1299 }
1300
1301 ureg = dd->ipath_uregbase + dd->ipath_ureg_align * pd->port_port;
1302 if (!pd->port_subport_cnt) {
1303 /* port is not shared */
1304 piocnt = pd->port_piocnt;
1305 piobufs = pd->port_piobufs;
1306 } else if (!subport_fp(fp)) {
1307 /* caller is the master */
1308 piocnt = (pd->port_piocnt / pd->port_subport_cnt) +
1309 (pd->port_piocnt % pd->port_subport_cnt);
1310 piobufs = pd->port_piobufs +
1311 dd->ipath_palign * (pd->port_piocnt - piocnt);
1312 } else {
1313 unsigned slave = subport_fp(fp) - 1;
1314
1315 /* caller is a slave */
1316 piocnt = pd->port_piocnt / pd->port_subport_cnt;
1317 piobufs = pd->port_piobufs + dd->ipath_palign * piocnt * slave;
1318 }
1319
1320 if (pgaddr == ureg)
1321 ret = mmap_ureg(vma, dd, ureg);
1322 else if (pgaddr == piobufs)
1323 ret = mmap_piobufs(vma, dd, pd, piobufs, piocnt);
1324 else if (pgaddr == dd->ipath_pioavailregs_phys)
1325 /* in-memory copy of pioavail registers */
1326 ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0,
1327 (void *) dd->ipath_pioavailregs_dma,
1328 "pioavail registers");
1329 else if (pgaddr == pd->port_rcvegr_phys)
1330 ret = mmap_rcvegrbufs(vma, pd);
1331 else if (pgaddr == (u64) pd->port_rcvhdrq_phys)
1332 /*
1333 * The rcvhdrq itself; readonly except on HT (so have
1334 * to allow writable mapping), multiple pages, contiguous
1335 * from an i/o perspective.
1336 */
1337 ret = ipath_mmap_mem(vma, pd, pd->port_rcvhdrq_size, 1,
1338 pd->port_rcvhdrq,
1339 "rcvhdrq");
1340 else if (pgaddr == (u64) pd->port_rcvhdrqtailaddr_phys)
1341 /* in-memory copy of rcvhdrq tail register */
1342 ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0,
1343 pd->port_rcvhdrtail_kvaddr,
1344 "rcvhdrq tail");
1345 else
1346 ret = -EINVAL;
1347
1348 vma->vm_private_data = NULL;
1349
1350 if (ret < 0)
1351 dev_info(&dd->pcidev->dev,
1352 "Failure %d on off %llx len %lx\n",
1353 -ret, (unsigned long long)pgaddr,
1354 vma->vm_end - vma->vm_start);
1355bail:
1356 return ret;
1357}
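/*
 * User-side sketch of how the offsets above are consumed (a minimal
 * illustration, not the actual library code; the base-info field name
 * spi_uregbase is an assumption about the userspace ABI, not taken from
 * this file): the library issues IPATH_CMD_USER_INIT, reads back the
 * base info, and hands each advertised address in as an mmap() offset:
 *
 *	regs = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE,
 *		    MAP_SHARED, fd, base_info.spi_uregbase);
 *
 * The offset is thus not a file position but a token matched against
 * pgaddr in the dispatch above; an unrecognized offset fails with
 * -EINVAL.
 */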
1358
1359static unsigned ipath_poll_hdrqfull(struct ipath_portdata *pd)
1360{
1361 unsigned pollflag = 0;
1362
1363 if ((pd->poll_type & IPATH_POLL_TYPE_OVERFLOW) &&
1364 pd->port_hdrqfull != pd->port_hdrqfull_poll) {
1365 pollflag |= POLLIN | POLLRDNORM;
1366 pd->port_hdrqfull_poll = pd->port_hdrqfull;
1367 }
1368
1369 return pollflag;
1370}
1371
1372static unsigned int ipath_poll_urgent(struct ipath_portdata *pd,
1373 struct file *fp,
1374 struct poll_table_struct *pt)
1375{
1376 unsigned pollflag = 0;
1377 struct ipath_devdata *dd;
1378
1379 dd = pd->port_dd;
1380
1381 /* variable access in ipath_poll_hdrqfull() needs this */
1382 rmb();
1383 pollflag = ipath_poll_hdrqfull(pd);
1384
1385 if (pd->port_urgent != pd->port_urgent_poll) {
1386 pollflag |= POLLIN | POLLRDNORM;
1387 pd->port_urgent_poll = pd->port_urgent;
1388 }
1389
1390 if (!pollflag) {
1391 /* this saves a spin_lock/unlock in interrupt handler... */
1392 set_bit(IPATH_PORT_WAITING_URG, &pd->port_flag);
1393		/* flush waiting flag so we don't miss an event... */
1394 wmb();
1395 poll_wait(fp, &pd->port_wait, pt);
1396 }
1397
1398 return pollflag;
1399}
1400
1401static unsigned int ipath_poll_next(struct ipath_portdata *pd,
1402 struct file *fp,
1403 struct poll_table_struct *pt)
1404{
1405 u32 head;
1406 u32 tail;
1407 unsigned pollflag = 0;
1408 struct ipath_devdata *dd;
1409
1410 dd = pd->port_dd;
1411
1412 /* variable access in ipath_poll_hdrqfull() needs this */
1413 rmb();
1414 pollflag = ipath_poll_hdrqfull(pd);
1415
1416 head = ipath_read_ureg32(dd, ur_rcvhdrhead, pd->port_port);
1417 if (pd->port_rcvhdrtail_kvaddr)
1418 tail = ipath_get_rcvhdrtail(pd);
1419 else
1420 tail = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);
1421
1422 if (head != tail)
1423 pollflag |= POLLIN | POLLRDNORM;
1424 else {
1425 /* this saves a spin_lock/unlock in interrupt handler */
1426 set_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag);
1427 /* flush waiting flag so we don't miss an event */
1428 wmb();
1429
1430 set_bit(pd->port_port + dd->ipath_r_intravail_shift,
1431 &dd->ipath_rcvctrl);
1432
1433 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
1434 dd->ipath_rcvctrl);
1435
1436 if (dd->ipath_rhdrhead_intr_off) /* arm rcv interrupt */
1437 ipath_write_ureg(dd, ur_rcvhdrhead,
1438 dd->ipath_rhdrhead_intr_off | head,
1439 pd->port_port);
1440
1441 poll_wait(fp, &pd->port_wait, pt);
1442 }
1443
1444 return pollflag;
1445}
1446
1447static unsigned int ipath_poll(struct file *fp,
1448 struct poll_table_struct *pt)
1449{
1450 struct ipath_portdata *pd;
1451 unsigned pollflag;
1452
1453 pd = port_fp(fp);
1454 if (!pd)
1455 pollflag = 0;
1456 else if (pd->poll_type & IPATH_POLL_TYPE_URGENT)
1457 pollflag = ipath_poll_urgent(pd, fp, pt);
1458 else
1459 pollflag = ipath_poll_next(pd, fp, pt);
1460
1461 return pollflag;
1462}
1463
1464static int ipath_supports_subports(int user_swmajor, int user_swminor)
1465{
1466 /* no subport implementation prior to software version 1.3 */
1467 return (user_swmajor > 1) || (user_swminor >= 3);
1468}
1469
1470static int ipath_compatible_subports(int user_swmajor, int user_swminor)
1471{
1472 /* this code is written long-hand for clarity */
1473 if (IPATH_USER_SWMAJOR != user_swmajor) {
1474 /* no promise of compatibility if major mismatch */
1475 return 0;
1476 }
1477 if (IPATH_USER_SWMAJOR == 1) {
1478 switch (IPATH_USER_SWMINOR) {
1479 case 0:
1480 case 1:
1481 case 2:
1482 /* no subport implementation so cannot be compatible */
1483 return 0;
1484 case 3:
1485 /* 3 is only compatible with itself */
1486 return user_swminor == 3;
1487 default:
1488 /* >= 4 are compatible (or are expected to be) */
1489 return user_swminor >= 4;
1490 }
1491 }
1492 /* make no promises yet for future major versions */
1493 return 0;
1494}
1495
1496static int init_subports(struct ipath_devdata *dd,
1497 struct ipath_portdata *pd,
1498 const struct ipath_user_info *uinfo)
1499{
1500 int ret = 0;
1501 unsigned num_subports;
1502 size_t size;
1503
1504 /*
1505 * If the user is requesting zero subports,
1506 * skip the subport allocation.
1507 */
1508 if (uinfo->spu_subport_cnt <= 0)
1509 goto bail;
1510
1511 /* Self-consistency check for ipath_compatible_subports() */
1512 if (ipath_supports_subports(IPATH_USER_SWMAJOR, IPATH_USER_SWMINOR) &&
1513 !ipath_compatible_subports(IPATH_USER_SWMAJOR,
1514 IPATH_USER_SWMINOR)) {
1515 dev_info(&dd->pcidev->dev,
1516 "Inconsistent ipath_compatible_subports()\n");
1517 goto bail;
1518 }
1519
1520 /* Check for subport compatibility */
1521 if (!ipath_compatible_subports(uinfo->spu_userversion >> 16,
1522 uinfo->spu_userversion & 0xffff)) {
1523 dev_info(&dd->pcidev->dev,
1524 "Mismatched user version (%d.%d) and driver "
1525 "version (%d.%d) while port sharing. Ensure "
1526 "that driver and library are from the same "
1527 "release.\n",
1528 (int) (uinfo->spu_userversion >> 16),
1529 (int) (uinfo->spu_userversion & 0xffff),
1530 IPATH_USER_SWMAJOR,
1531 IPATH_USER_SWMINOR);
1532 goto bail;
1533 }
1534 if (uinfo->spu_subport_cnt > INFINIPATH_MAX_SUBPORT) {
1535 ret = -EINVAL;
1536 goto bail;
1537 }
1538
1539 num_subports = uinfo->spu_subport_cnt;
1540 pd->subport_uregbase = vzalloc(PAGE_SIZE * num_subports);
1541 if (!pd->subport_uregbase) {
1542 ret = -ENOMEM;
1543 goto bail;
1544 }
1545 /* Note: pd->port_rcvhdrq_size isn't initialized yet. */
1546 size = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
1547 sizeof(u32), PAGE_SIZE) * num_subports;
1548 pd->subport_rcvhdr_base = vzalloc(size);
1549 if (!pd->subport_rcvhdr_base) {
1550 ret = -ENOMEM;
1551 goto bail_ureg;
1552 }
1553
1554 pd->subport_rcvegrbuf = vzalloc(pd->port_rcvegrbuf_chunks *
1555 pd->port_rcvegrbuf_size *
1556 num_subports);
1557 if (!pd->subport_rcvegrbuf) {
1558 ret = -ENOMEM;
1559 goto bail_rhdr;
1560 }
1561
1562 pd->port_subport_cnt = uinfo->spu_subport_cnt;
1563 pd->port_subport_id = uinfo->spu_subport_id;
1564 pd->active_slaves = 1;
1565 set_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag);
1566 goto bail;
1567
1568bail_rhdr:
1569 vfree(pd->subport_rcvhdr_base);
1570bail_ureg:
1571 vfree(pd->subport_uregbase);
1572 pd->subport_uregbase = NULL;
1573bail:
1574 return ret;
1575}
1576
1577static int try_alloc_port(struct ipath_devdata *dd, int port,
1578 struct file *fp,
1579 const struct ipath_user_info *uinfo)
1580{
1581 struct ipath_portdata *pd;
1582 int ret;
1583
1584 if (!(pd = dd->ipath_pd[port])) {
1585 void *ptmp;
1586
1587 pd = kzalloc(sizeof(struct ipath_portdata), GFP_KERNEL);
1588
1589 /*
1590 * Allocate memory for use in ipath_tid_update() just once
1591 * at open, not per call. Reduces cost of expected send
1592 * setup.
1593 */
1594 ptmp = kmalloc(dd->ipath_rcvtidcnt * sizeof(u16) +
1595 dd->ipath_rcvtidcnt * sizeof(struct page **),
1596 GFP_KERNEL);
1597 if (!pd || !ptmp) {
1598 ipath_dev_err(dd, "Unable to allocate portdata "
1599 "memory, failing open\n");
1600 ret = -ENOMEM;
1601 kfree(pd);
1602 kfree(ptmp);
1603 goto bail;
1604 }
1605 dd->ipath_pd[port] = pd;
1606 dd->ipath_pd[port]->port_port = port;
1607 dd->ipath_pd[port]->port_dd = dd;
1608 dd->ipath_pd[port]->port_tid_pg_list = ptmp;
1609 init_waitqueue_head(&dd->ipath_pd[port]->port_wait);
1610 }
1611 if (!pd->port_cnt) {
1612 pd->userversion = uinfo->spu_userversion;
1613 init_user_egr_sizes(pd);
1614 if ((ret = init_subports(dd, pd, uinfo)) != 0)
1615 goto bail;
1616 ipath_cdbg(PROC, "%s[%u] opened unit:port %u:%u\n",
1617 current->comm, current->pid, dd->ipath_unit,
1618 port);
1619 pd->port_cnt = 1;
1620 port_fp(fp) = pd;
1621 pd->port_pid = get_pid(task_pid(current));
1622 strlcpy(pd->port_comm, current->comm, sizeof(pd->port_comm));
1623 ipath_stats.sps_ports++;
1624 ret = 0;
1625 } else
1626 ret = -EBUSY;
1627
1628bail:
1629 return ret;
1630}
1631
1632static inline int usable(struct ipath_devdata *dd)
1633{
1634 return dd &&
1635 (dd->ipath_flags & IPATH_PRESENT) &&
1636 dd->ipath_kregbase &&
1637 dd->ipath_lid &&
1638 !(dd->ipath_flags & (IPATH_LINKDOWN | IPATH_DISABLED
1639 | IPATH_LINKUNK));
1640}
1641
1642static int find_free_port(int unit, struct file *fp,
1643 const struct ipath_user_info *uinfo)
1644{
1645 struct ipath_devdata *dd = ipath_lookup(unit);
1646 int ret, i;
1647
1648 if (!dd) {
1649 ret = -ENODEV;
1650 goto bail;
1651 }
1652
1653 if (!usable(dd)) {
1654 ret = -ENETDOWN;
1655 goto bail;
1656 }
1657
1658 for (i = 1; i < dd->ipath_cfgports; i++) {
1659 ret = try_alloc_port(dd, i, fp, uinfo);
1660 if (ret != -EBUSY)
1661 goto bail;
1662 }
1663 ret = -EBUSY;
1664
1665bail:
1666 return ret;
1667}
1668
1669static int find_best_unit(struct file *fp,
1670 const struct ipath_user_info *uinfo)
1671{
1672 int ret = 0, i, prefunit = -1, devmax;
1673 int maxofallports, npresent, nup;
1674 int ndev;
1675
1676 devmax = ipath_count_units(&npresent, &nup, &maxofallports);
1677
1678 /*
1679 * This code is present to allow a knowledgeable person to
1680 * specify the layout of processes to processors before opening
1681 * this driver, and then we'll assign the process to the "closest"
1682 * InfiniPath chip to that processor (we assume reasonable connectivity,
1683 * for now). This code assumes that if affinity has been set
1684	 * before this point, at most one cpu is set; for now this
1685 * is reasonable. I check for both cpumask_empty() and cpumask_full(),
1686 * in case some kernel variant sets none of the bits when no
1687 * affinity is set. 2.6.11 and 12 kernels have all present
1688 * cpus set. Some day we'll have to fix it up further to handle
1689 * a cpu subset. This algorithm fails for two HT chips connected
1690 * in tunnel fashion. Eventually this needs real topology
1691 * information. There may be some issues with dual core numbering
1692 * as well. This needs more work prior to release.
1693 */
1694 if (!cpumask_empty(tsk_cpus_allowed(current)) &&
1695 !cpumask_full(tsk_cpus_allowed(current))) {
1696 int ncpus = num_online_cpus(), curcpu = -1, nset = 0;
1697 get_online_cpus();
1698 for_each_online_cpu(i)
1699 if (cpumask_test_cpu(i, tsk_cpus_allowed(current))) {
1700 ipath_cdbg(PROC, "%s[%u] affinity set for "
1701 "cpu %d/%d\n", current->comm,
1702 current->pid, i, ncpus);
1703 curcpu = i;
1704 nset++;
1705 }
1706 put_online_cpus();
1707 if (curcpu != -1 && nset != ncpus) {
1708 if (npresent) {
1709 prefunit = curcpu / (ncpus / npresent);
1710 ipath_cdbg(PROC,"%s[%u] %d chips, %d cpus, "
1711 "%d cpus/chip, select unit %d\n",
1712 current->comm, current->pid,
1713 npresent, ncpus, ncpus / npresent,
1714 prefunit);
1715 }
1716 }
1717 }
1718
1719 /*
1720 * user ports start at 1, kernel port is 0
1721 * For now, we do round-robin access across all chips
1722 */
1723
1724 if (prefunit != -1)
1725 devmax = prefunit + 1;
1726recheck:
1727 for (i = 1; i < maxofallports; i++) {
1728 for (ndev = prefunit != -1 ? prefunit : 0; ndev < devmax;
1729 ndev++) {
1730 struct ipath_devdata *dd = ipath_lookup(ndev);
1731
1732 if (!usable(dd))
1733 continue; /* can't use this unit */
1734 if (i >= dd->ipath_cfgports)
1735 /*
1736 * Maxed out on users of this unit. Try
1737 * next.
1738 */
1739 continue;
1740 ret = try_alloc_port(dd, i, fp, uinfo);
1741 if (!ret)
1742 goto done;
1743 }
1744 }
1745
1746 if (npresent) {
1747 if (nup == 0) {
1748 ret = -ENETDOWN;
1749 ipath_dbg("No ports available (none initialized "
1750 "and ready)\n");
1751 } else {
1752 if (prefunit > 0) {
1753 /* if started above 0, retry from 0 */
1754 ipath_cdbg(PROC,
1755 "%s[%u] no ports on prefunit "
1756 "%d, clear and re-check\n",
1757 current->comm, current->pid,
1758 prefunit);
1759 devmax = ipath_count_units(NULL, NULL,
1760 NULL);
1761 prefunit = -1;
1762 goto recheck;
1763 }
1764 ret = -EBUSY;
1765 ipath_dbg("No ports available\n");
1766 }
1767 } else {
1768 ret = -ENXIO;
1769 ipath_dbg("No boards found\n");
1770 }
1771
1772done:
1773 return ret;
1774}
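/*
 * Worked example of the prefunit arithmetic above (illustrative numbers
 * only): with 8 online cpus, npresent == 2 chips and affinity pinned to
 * cpu 5, prefunit = 5 / (8 / 2) = 1, so unit 1 is tried first; if all
 * of its ports are busy, the recheck path clears prefunit and scans
 * every unit from 0.
 */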
1775
1776static int find_shared_port(struct file *fp,
1777 const struct ipath_user_info *uinfo)
1778{
1779 int devmax, ndev, i;
1780 int ret = 0;
1781
1782 devmax = ipath_count_units(NULL, NULL, NULL);
1783
1784 for (ndev = 0; ndev < devmax; ndev++) {
1785 struct ipath_devdata *dd = ipath_lookup(ndev);
1786
1787 if (!usable(dd))
1788 continue;
1789 for (i = 1; i < dd->ipath_cfgports; i++) {
1790 struct ipath_portdata *pd = dd->ipath_pd[i];
1791
1792 /* Skip ports which are not yet open */
1793 if (!pd || !pd->port_cnt)
1794 continue;
1795 /* Skip port if it doesn't match the requested one */
1796 if (pd->port_subport_id != uinfo->spu_subport_id)
1797 continue;
1798 /* Verify the sharing process matches the master */
1799 if (pd->port_subport_cnt != uinfo->spu_subport_cnt ||
1800 pd->userversion != uinfo->spu_userversion ||
1801 pd->port_cnt >= pd->port_subport_cnt) {
1802 ret = -EINVAL;
1803 goto done;
1804 }
1805 port_fp(fp) = pd;
1806 subport_fp(fp) = pd->port_cnt++;
1807 pd->port_subpid[subport_fp(fp)] =
1808 get_pid(task_pid(current));
1809 tidcursor_fp(fp) = 0;
1810 pd->active_slaves |= 1 << subport_fp(fp);
1811 ipath_cdbg(PROC,
1812 "%s[%u] %u sharing %s[%u] unit:port %u:%u\n",
1813 current->comm, current->pid,
1814 subport_fp(fp),
1815 pd->port_comm, pid_nr(pd->port_pid),
1816 dd->ipath_unit, pd->port_port);
1817 ret = 1;
1818 goto done;
1819 }
1820 }
1821
1822done:
1823 return ret;
1824}
1825
1826static int ipath_open(struct inode *in, struct file *fp)
1827{
1828 /* The real work is performed later in ipath_assign_port() */
1829 fp->private_data = kzalloc(sizeof(struct ipath_filedata), GFP_KERNEL);
1830 return fp->private_data ? 0 : -ENOMEM;
1831}
1832
1833/* Get port early, so we can set affinity prior to memory allocation */
1834static int ipath_assign_port(struct file *fp,
1835 const struct ipath_user_info *uinfo)
1836{
1837 int ret;
1838 int i_minor;
1839 unsigned swmajor, swminor;
1840
1841 /* Check to be sure we haven't already initialized this file */
1842 if (port_fp(fp)) {
1843 ret = -EINVAL;
1844 goto done;
1845 }
1846
1847 /* for now, if major version is different, bail */
1848 swmajor = uinfo->spu_userversion >> 16;
1849 if (swmajor != IPATH_USER_SWMAJOR) {
1850 ipath_dbg("User major version %d not same as driver "
1851 "major %d\n", uinfo->spu_userversion >> 16,
1852 IPATH_USER_SWMAJOR);
1853 ret = -ENODEV;
1854 goto done;
1855 }
1856
1857 swminor = uinfo->spu_userversion & 0xffff;
1858 if (swminor != IPATH_USER_SWMINOR)
1859 ipath_dbg("User minor version %d not same as driver "
1860 "minor %d\n", swminor, IPATH_USER_SWMINOR);
1861
1862 mutex_lock(&ipath_mutex);
1863
1864 if (ipath_compatible_subports(swmajor, swminor) &&
1865 uinfo->spu_subport_cnt &&
1866 (ret = find_shared_port(fp, uinfo))) {
1867 if (ret > 0)
1868 ret = 0;
1869 goto done_chk_sdma;
1870 }
1871
1872 i_minor = iminor(file_inode(fp)) - IPATH_USER_MINOR_BASE;
1873 ipath_cdbg(VERBOSE, "open on dev %lx (minor %d)\n",
1874 (long)file_inode(fp)->i_rdev, i_minor);
1875
1876 if (i_minor)
1877 ret = find_free_port(i_minor - 1, fp, uinfo);
1878 else
1879 ret = find_best_unit(fp, uinfo);
1880
1881done_chk_sdma:
1882 if (!ret) {
1883 struct ipath_filedata *fd = fp->private_data;
1884 const struct ipath_portdata *pd = fd->pd;
1885 const struct ipath_devdata *dd = pd->port_dd;
1886
1887 fd->pq = ipath_user_sdma_queue_create(&dd->pcidev->dev,
1888 dd->ipath_unit,
1889 pd->port_port,
1890 fd->subport);
1891
1892 if (!fd->pq)
1893 ret = -ENOMEM;
1894 }
1895
1896 mutex_unlock(&ipath_mutex);
1897
1898done:
1899 return ret;
1900}
1901
1902
1903static int ipath_do_user_init(struct file *fp,
1904 const struct ipath_user_info *uinfo)
1905{
1906 int ret;
1907 struct ipath_portdata *pd = port_fp(fp);
1908 struct ipath_devdata *dd;
1909 u32 head32;
1910
1911 /* Subports don't need to initialize anything since master did it. */
1912 if (subport_fp(fp)) {
1913 ret = wait_event_interruptible(pd->port_wait,
1914 !test_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag));
1915 goto done;
1916 }
1917
1918 dd = pd->port_dd;
1919
1920 if (uinfo->spu_rcvhdrsize) {
1921 ret = ipath_setrcvhdrsize(dd, uinfo->spu_rcvhdrsize);
1922 if (ret)
1923 goto done;
1924 }
1925
1926 /* for now we do nothing with rcvhdrcnt: uinfo->spu_rcvhdrcnt */
1927
1928 /* some ports may get extra buffers, calculate that here */
1929 if (pd->port_port <= dd->ipath_ports_extrabuf)
1930 pd->port_piocnt = dd->ipath_pbufsport + 1;
1931 else
1932 pd->port_piocnt = dd->ipath_pbufsport;
1933
1934 /* for right now, kernel piobufs are at end, so port 1 is at 0 */
1935 if (pd->port_port <= dd->ipath_ports_extrabuf)
1936 pd->port_pio_base = (dd->ipath_pbufsport + 1)
1937 * (pd->port_port - 1);
1938 else
1939 pd->port_pio_base = dd->ipath_ports_extrabuf +
1940 dd->ipath_pbufsport * (pd->port_port - 1);
1941 pd->port_piobufs = dd->ipath_piobufbase +
1942 pd->port_pio_base * dd->ipath_palign;
1943 ipath_cdbg(VERBOSE, "piobuf base for port %u is 0x%x, piocnt %u,"
1944 " first pio %u\n", pd->port_port, pd->port_piobufs,
1945 pd->port_piocnt, pd->port_pio_base);
1946 ipath_chg_pioavailkernel(dd, pd->port_pio_base, pd->port_piocnt, 0);
1947
1948 /*
1949 * Now allocate the rcvhdr Q and eager TIDs; skip the TID
1950	 * array for the time being. If pd->port_port > chip-supported,
1951	 * we will someday need extra handling here, overflowing
1952	 * through port 0.
1953 */
1954 ret = ipath_create_rcvhdrq(dd, pd);
1955 if (!ret)
1956 ret = ipath_create_user_egr(pd);
1957 if (ret)
1958 goto done;
1959
1960 /*
1961 * set the eager head register for this port to the current values
1962 * of the tail pointers, since we don't know if they were
1963 * updated on last use of the port.
1964 */
1965 head32 = ipath_read_ureg32(dd, ur_rcvegrindextail, pd->port_port);
1966 ipath_write_ureg(dd, ur_rcvegrindexhead, head32, pd->port_port);
1967 pd->port_lastrcvhdrqtail = -1;
1968 ipath_cdbg(VERBOSE, "Wrote port%d egrhead %x from tail regs\n",
1969 pd->port_port, head32);
1970 pd->port_tidcursor = 0; /* start at beginning after open */
1971
1972 /* initialize poll variables... */
1973 pd->port_urgent = 0;
1974 pd->port_urgent_poll = 0;
1975 pd->port_hdrqfull_poll = pd->port_hdrqfull;
1976
1977 /*
1978 * Now enable the port for receive.
1979	 * Some chips are set to DMA the tail register to memory
1980	 * when it changes (and when the update bit transitions from
1981	 * 0 to 1); for those chips, we turn it off and then back on.
1982 * This will (very briefly) affect any other open ports, but the
1983 * duration is very short, and therefore isn't an issue. We
1984 * explicitly set the in-memory tail copy to 0 beforehand, so we
1985 * don't have to wait to be sure the DMA update has happened
1986 * (chip resets head/tail to 0 on transition to enable).
1987 */
1988 set_bit(dd->ipath_r_portenable_shift + pd->port_port,
1989 &dd->ipath_rcvctrl);
1990 if (!(dd->ipath_flags & IPATH_NODMA_RTAIL)) {
1991 if (pd->port_rcvhdrtail_kvaddr)
1992 ipath_clear_rcvhdrtail(pd);
1993 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
1994 dd->ipath_rcvctrl &
1995 ~(1ULL << dd->ipath_r_tailupd_shift));
1996 }
1997 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
1998 dd->ipath_rcvctrl);
1999 /* Notify any waiting slaves */
2000 if (pd->port_subport_cnt) {
2001 clear_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag);
2002 wake_up(&pd->port_wait);
2003 }
2004done:
2005 return ret;
2006}
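/*
 * Worked example of the PIO buffer carve-up above (illustrative numbers
 * only): with ipath_pbufsport == 32 and ipath_ports_extrabuf == 2,
 * ports 1 and 2 each get 33 buffers at pio bases 0 and 33, while port 3
 * gets 32 buffers at base 2 + 32 * (3 - 1) = 66, immediately after
 * port 2's last buffer (33 + 33 - 1 = 65).
 */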
2007
2008/**
2009 * unlock_expected_tids - unlock any expected TID entries the port still had in use
2010 * @pd: port
2011 *
2012 * We don't actually update the chip here, because we do a bulk update
2013 * below, using ipath_f_clear_tids.
2014 */
2015static void unlock_expected_tids(struct ipath_portdata *pd)
2016{
2017 struct ipath_devdata *dd = pd->port_dd;
2018 int port_tidbase = pd->port_port * dd->ipath_rcvtidcnt;
2019 int i, cnt = 0, maxtid = port_tidbase + dd->ipath_rcvtidcnt;
2020
2021 ipath_cdbg(VERBOSE, "Port %u unlocking any locked expTID pages\n",
2022 pd->port_port);
2023 for (i = port_tidbase; i < maxtid; i++) {
2024 struct page *ps = dd->ipath_pageshadow[i];
2025
2026 if (!ps)
2027 continue;
2028
2029 dd->ipath_pageshadow[i] = NULL;
2030 pci_unmap_page(dd->pcidev, dd->ipath_physshadow[i],
2031 PAGE_SIZE, PCI_DMA_FROMDEVICE);
2032 ipath_release_user_pages_on_close(&ps, 1);
2033 cnt++;
2034 ipath_stats.sps_pageunlocks++;
2035 }
2036 if (cnt)
2037 ipath_cdbg(VERBOSE, "Port %u locked %u expTID entries\n",
2038 pd->port_port, cnt);
2039
2040 if (ipath_stats.sps_pagelocks || ipath_stats.sps_pageunlocks)
2041 ipath_cdbg(VERBOSE, "%llu pages locked, %llu unlocked\n",
2042 (unsigned long long) ipath_stats.sps_pagelocks,
2043 (unsigned long long)
2044 ipath_stats.sps_pageunlocks);
2045}
2046
2047static int ipath_close(struct inode *in, struct file *fp)
2048{
2049 struct ipath_filedata *fd;
2050 struct ipath_portdata *pd;
2051 struct ipath_devdata *dd;
2052 unsigned long flags;
2053 unsigned port;
2054 struct pid *pid;
2055
2056 ipath_cdbg(VERBOSE, "close on dev %lx, private data %p\n",
2057 (long)in->i_rdev, fp->private_data);
2058
2059 mutex_lock(&ipath_mutex);
2060
2061 fd = fp->private_data;
2062 fp->private_data = NULL;
2063 pd = fd->pd;
2064 if (!pd) {
2065 mutex_unlock(&ipath_mutex);
2066 goto bail;
2067 }
2068
2069 dd = pd->port_dd;
2070
2071 /* drain user sdma queue */
2072 ipath_user_sdma_queue_drain(dd, fd->pq);
2073 ipath_user_sdma_queue_destroy(fd->pq);
2074
2075 if (--pd->port_cnt) {
2076 /*
2077 * XXX If the master closes the port before the slave(s),
2078 * revoke the mmap for the eager receive queue so
2079 * the slave(s) don't wait for receive data forever.
2080 */
2081 pd->active_slaves &= ~(1 << fd->subport);
2082 put_pid(pd->port_subpid[fd->subport]);
2083 pd->port_subpid[fd->subport] = NULL;
2084 mutex_unlock(&ipath_mutex);
2085 goto bail;
2086 }
2087 /* early; no interrupt users after this */
2088 spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
2089 port = pd->port_port;
2090 dd->ipath_pd[port] = NULL;
2091 pid = pd->port_pid;
2092 pd->port_pid = NULL;
2093 spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
2094
2095 if (pd->port_rcvwait_to || pd->port_piowait_to
2096 || pd->port_rcvnowait || pd->port_pionowait) {
2097 ipath_cdbg(VERBOSE, "port%u, %u rcv, %u pio wait timeo; "
2098 "%u rcv %u, pio already\n",
2099 pd->port_port, pd->port_rcvwait_to,
2100 pd->port_piowait_to, pd->port_rcvnowait,
2101 pd->port_pionowait);
2102 pd->port_rcvwait_to = pd->port_piowait_to =
2103 pd->port_rcvnowait = pd->port_pionowait = 0;
2104 }
2105 if (pd->port_flag) {
2106 ipath_cdbg(PROC, "port %u port_flag set: 0x%lx\n",
2107 pd->port_port, pd->port_flag);
2108 pd->port_flag = 0;
2109 }
2110
2111 if (dd->ipath_kregbase) {
2112 /* atomically clear receive enable port and intr avail. */
2113 clear_bit(dd->ipath_r_portenable_shift + port,
2114 &dd->ipath_rcvctrl);
2115 clear_bit(pd->port_port + dd->ipath_r_intravail_shift,
2116 &dd->ipath_rcvctrl);
2117 ipath_write_kreg( dd, dd->ipath_kregs->kr_rcvctrl,
2118 dd->ipath_rcvctrl);
2119 /* and read back from chip to be sure that nothing
2120 * else is in flight when we do the rest */
2121 (void)ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
2122
2123 /* clean up the pkeys for this port user */
2124 ipath_clean_part_key(pd, dd);
2125 /*
2126 * be paranoid, and never write 0's to these, just use an
2127 * unused part of the port 0 tail page. Of course,
2128 * rcvhdraddr points to a large chunk of memory, so this
2129 * could still trash things, but at least it won't trash
2130 * page 0, and by disabling the port, it should stop "soon",
2131		 * even if a packet or two is already in flight after we
2132 * disabled the port.
2133 */
2134 ipath_write_kreg_port(dd,
2135 dd->ipath_kregs->kr_rcvhdrtailaddr, port,
2136 dd->ipath_dummy_hdrq_phys);
2137 ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr,
2138 pd->port_port, dd->ipath_dummy_hdrq_phys);
2139
2140 ipath_disarm_piobufs(dd, pd->port_pio_base, pd->port_piocnt);
2141 ipath_chg_pioavailkernel(dd, pd->port_pio_base,
2142 pd->port_piocnt, 1);
2143
2144 dd->ipath_f_clear_tids(dd, pd->port_port);
2145
2146 if (dd->ipath_pageshadow)
2147 unlock_expected_tids(pd);
2148 ipath_stats.sps_ports--;
2149 ipath_cdbg(PROC, "%s[%u] closed port %u:%u\n",
2150 pd->port_comm, pid_nr(pid),
2151 dd->ipath_unit, port);
2152 }
2153
2154 put_pid(pid);
2155 mutex_unlock(&ipath_mutex);
2156 ipath_free_pddata(dd, pd); /* after releasing the mutex */
2157
2158bail:
2159 kfree(fd);
2160 return 0;
2161}
2162
2163static int ipath_port_info(struct ipath_portdata *pd, u16 subport,
2164 struct ipath_port_info __user *uinfo)
2165{
2166 struct ipath_port_info info;
2167 int nup;
2168 int ret;
2169 size_t sz;
2170
2171 (void) ipath_count_units(NULL, &nup, NULL);
2172 info.num_active = nup;
2173 info.unit = pd->port_dd->ipath_unit;
2174 info.port = pd->port_port;
2175 info.subport = subport;
2176 /* Don't return new fields if old library opened the port. */
2177 if (ipath_supports_subports(pd->userversion >> 16,
2178 pd->userversion & 0xffff)) {
2179 /* Number of user ports available for this device. */
2180 info.num_ports = pd->port_dd->ipath_cfgports - 1;
2181 info.num_subports = pd->port_subport_cnt;
2182 sz = sizeof(info);
2183 } else
2184 sz = sizeof(info) - 2 * sizeof(u16);
2185
2186 if (copy_to_user(uinfo, &info, sz)) {
2187 ret = -EFAULT;
2188 goto bail;
2189 }
2190 ret = 0;
2191
2192bail:
2193 return ret;
2194}
2195
2196static int ipath_get_slave_info(struct ipath_portdata *pd,
2197 void __user *slave_mask_addr)
2198{
2199 int ret = 0;
2200
2201 if (copy_to_user(slave_mask_addr, &pd->active_slaves, sizeof(u32)))
2202 ret = -EFAULT;
2203 return ret;
2204}
2205
2206static int ipath_sdma_get_inflight(struct ipath_user_sdma_queue *pq,
2207 u32 __user *inflightp)
2208{
2209 const u32 val = ipath_user_sdma_inflight_counter(pq);
2210
2211 if (put_user(val, inflightp))
2212 return -EFAULT;
2213
2214 return 0;
2215}
2216
2217static int ipath_sdma_get_complete(struct ipath_devdata *dd,
2218 struct ipath_user_sdma_queue *pq,
2219 u32 __user *completep)
2220{
2221 u32 val;
2222 int err;
2223
2224 err = ipath_user_sdma_make_progress(dd, pq);
2225 if (err < 0)
2226 return err;
2227
2228 val = ipath_user_sdma_complete_counter(pq);
2229 if (put_user(val, completep))
2230 return -EFAULT;
2231
2232 return 0;
2233}
2234
2235static ssize_t ipath_write(struct file *fp, const char __user *data,
2236 size_t count, loff_t *off)
2237{
2238 const struct ipath_cmd __user *ucmd;
2239 struct ipath_portdata *pd;
2240 const void __user *src;
2241 size_t consumed, copy;
2242 struct ipath_cmd cmd;
2243 ssize_t ret = 0;
2244 void *dest;
2245
2246 if (count < sizeof(cmd.type)) {
2247 ret = -EINVAL;
2248 goto bail;
2249 }
2250
2251 ucmd = (const struct ipath_cmd __user *) data;
2252
2253 if (copy_from_user(&cmd.type, &ucmd->type, sizeof(cmd.type))) {
2254 ret = -EFAULT;
2255 goto bail;
2256 }
2257
2258 consumed = sizeof(cmd.type);
2259
2260 switch (cmd.type) {
2261 case IPATH_CMD_ASSIGN_PORT:
2262 case __IPATH_CMD_USER_INIT:
2263 case IPATH_CMD_USER_INIT:
2264 copy = sizeof(cmd.cmd.user_info);
2265 dest = &cmd.cmd.user_info;
2266 src = &ucmd->cmd.user_info;
2267 break;
2268 case IPATH_CMD_RECV_CTRL:
2269 copy = sizeof(cmd.cmd.recv_ctrl);
2270 dest = &cmd.cmd.recv_ctrl;
2271 src = &ucmd->cmd.recv_ctrl;
2272 break;
2273 case IPATH_CMD_PORT_INFO:
2274 copy = sizeof(cmd.cmd.port_info);
2275 dest = &cmd.cmd.port_info;
2276 src = &ucmd->cmd.port_info;
2277 break;
2278 case IPATH_CMD_TID_UPDATE:
2279 case IPATH_CMD_TID_FREE:
2280 copy = sizeof(cmd.cmd.tid_info);
2281 dest = &cmd.cmd.tid_info;
2282 src = &ucmd->cmd.tid_info;
2283 break;
2284 case IPATH_CMD_SET_PART_KEY:
2285 copy = sizeof(cmd.cmd.part_key);
2286 dest = &cmd.cmd.part_key;
2287 src = &ucmd->cmd.part_key;
2288 break;
2289 case __IPATH_CMD_SLAVE_INFO:
2290 copy = sizeof(cmd.cmd.slave_mask_addr);
2291 dest = &cmd.cmd.slave_mask_addr;
2292 src = &ucmd->cmd.slave_mask_addr;
2293 break;
2294 case IPATH_CMD_PIOAVAILUPD: // force an update of PIOAvail reg
2295 copy = 0;
2296 src = NULL;
2297 dest = NULL;
2298 break;
2299 case IPATH_CMD_POLL_TYPE:
2300 copy = sizeof(cmd.cmd.poll_type);
2301 dest = &cmd.cmd.poll_type;
2302 src = &ucmd->cmd.poll_type;
2303 break;
2304 case IPATH_CMD_ARMLAUNCH_CTRL:
2305 copy = sizeof(cmd.cmd.armlaunch_ctrl);
2306 dest = &cmd.cmd.armlaunch_ctrl;
2307 src = &ucmd->cmd.armlaunch_ctrl;
2308 break;
2309 case IPATH_CMD_SDMA_INFLIGHT:
2310 copy = sizeof(cmd.cmd.sdma_inflight);
2311 dest = &cmd.cmd.sdma_inflight;
2312 src = &ucmd->cmd.sdma_inflight;
2313 break;
2314 case IPATH_CMD_SDMA_COMPLETE:
2315 copy = sizeof(cmd.cmd.sdma_complete);
2316 dest = &cmd.cmd.sdma_complete;
2317 src = &ucmd->cmd.sdma_complete;
2318 break;
2319 default:
2320 ret = -EINVAL;
2321 goto bail;
2322 }
2323
2324 if (copy) {
2325 if ((count - consumed) < copy) {
2326 ret = -EINVAL;
2327 goto bail;
2328 }
2329
2330 if (copy_from_user(dest, src, copy)) {
2331 ret = -EFAULT;
2332 goto bail;
2333 }
2334
2335 consumed += copy;
2336 }
2337
2338 pd = port_fp(fp);
2339 if (!pd && cmd.type != __IPATH_CMD_USER_INIT &&
2340 cmd.type != IPATH_CMD_ASSIGN_PORT) {
2341 ret = -EINVAL;
2342 goto bail;
2343 }
2344
2345 switch (cmd.type) {
2346 case IPATH_CMD_ASSIGN_PORT:
2347 ret = ipath_assign_port(fp, &cmd.cmd.user_info);
2348 if (ret)
2349 goto bail;
2350 break;
2351 case __IPATH_CMD_USER_INIT:
2352 /* backwards compatibility, get port first */
2353 ret = ipath_assign_port(fp, &cmd.cmd.user_info);
2354 if (ret)
2355 goto bail;
2356 /* and fall through to current version. */
2357 case IPATH_CMD_USER_INIT:
2358 ret = ipath_do_user_init(fp, &cmd.cmd.user_info);
2359 if (ret)
2360 goto bail;
2361 ret = ipath_get_base_info(
2362 fp, (void __user *) (unsigned long)
2363 cmd.cmd.user_info.spu_base_info,
2364 cmd.cmd.user_info.spu_base_info_size);
2365 break;
2366 case IPATH_CMD_RECV_CTRL:
2367 ret = ipath_manage_rcvq(pd, subport_fp(fp), cmd.cmd.recv_ctrl);
2368 break;
2369 case IPATH_CMD_PORT_INFO:
2370 ret = ipath_port_info(pd, subport_fp(fp),
2371 (struct ipath_port_info __user *)
2372 (unsigned long) cmd.cmd.port_info);
2373 break;
2374 case IPATH_CMD_TID_UPDATE:
2375 ret = ipath_tid_update(pd, fp, &cmd.cmd.tid_info);
2376 break;
2377 case IPATH_CMD_TID_FREE:
2378 ret = ipath_tid_free(pd, subport_fp(fp), &cmd.cmd.tid_info);
2379 break;
2380 case IPATH_CMD_SET_PART_KEY:
2381 ret = ipath_set_part_key(pd, cmd.cmd.part_key);
2382 break;
2383 case __IPATH_CMD_SLAVE_INFO:
2384 ret = ipath_get_slave_info(pd,
2385 (void __user *) (unsigned long)
2386 cmd.cmd.slave_mask_addr);
2387 break;
2388 case IPATH_CMD_PIOAVAILUPD:
2389 ipath_force_pio_avail_update(pd->port_dd);
2390 break;
2391 case IPATH_CMD_POLL_TYPE:
2392 pd->poll_type = cmd.cmd.poll_type;
2393 break;
2394 case IPATH_CMD_ARMLAUNCH_CTRL:
2395 if (cmd.cmd.armlaunch_ctrl)
2396 ipath_enable_armlaunch(pd->port_dd);
2397 else
2398 ipath_disable_armlaunch(pd->port_dd);
2399 break;
2400 case IPATH_CMD_SDMA_INFLIGHT:
2401 ret = ipath_sdma_get_inflight(user_sdma_queue_fp(fp),
2402 (u32 __user *) (unsigned long)
2403 cmd.cmd.sdma_inflight);
2404 break;
2405 case IPATH_CMD_SDMA_COMPLETE:
2406 ret = ipath_sdma_get_complete(pd->port_dd,
2407 user_sdma_queue_fp(fp),
2408 (u32 __user *) (unsigned long)
2409 cmd.cmd.sdma_complete);
2410 break;
2411 }
2412
2413 if (ret >= 0)
2414 ret = consumed;
2415
2416bail:
2417 return ret;
2418}
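/*
 * User-side sketch of the command framing this parser expects (a
 * minimal illustration, not the real library code): the caller writes a
 * struct ipath_cmd whose fixed 'type' field selects which union member
 * the driver copies in:
 *
 *	struct ipath_cmd c = { .type = IPATH_CMD_RECV_CTRL };
 *	c.cmd.recv_ctrl = 1;		(re-enable receive)
 *	ssize_t n = write(fd, &c, sizeof(c));
 *
 * On success the return value is the number of bytes actually consumed
 * (the type plus the selected payload), which may be less than the
 * count passed to write().
 */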
2419
2420static ssize_t ipath_write_iter(struct kiocb *iocb, struct iov_iter *from)
2421{
2422 struct file *filp = iocb->ki_filp;
2423 struct ipath_filedata *fp = filp->private_data;
2424 struct ipath_portdata *pd = port_fp(filp);
2425 struct ipath_user_sdma_queue *pq = fp->pq;
2426
2427 if (!iter_is_iovec(from) || !from->nr_segs)
2428 return -EINVAL;
2429
2430 return ipath_user_sdma_writev(pd->port_dd, pq, from->iov, from->nr_segs);
2431}
2432
2433static struct class *ipath_class;
2434
2435static int init_cdev(int minor, char *name, const struct file_operations *fops,
2436 struct cdev **cdevp, struct device **devp)
2437{
2438 const dev_t dev = MKDEV(IPATH_MAJOR, minor);
2439 struct cdev *cdev = NULL;
2440 struct device *device = NULL;
2441 int ret;
2442
2443 cdev = cdev_alloc();
2444 if (!cdev) {
2445 printk(KERN_ERR IPATH_DRV_NAME
2446 ": Could not allocate cdev for minor %d, %s\n",
2447 minor, name);
2448 ret = -ENOMEM;
2449 goto done;
2450 }
2451
2452 cdev->owner = THIS_MODULE;
2453 cdev->ops = fops;
2454 kobject_set_name(&cdev->kobj, name);
2455
2456 ret = cdev_add(cdev, dev, 1);
2457 if (ret < 0) {
2458 printk(KERN_ERR IPATH_DRV_NAME
2459 ": Could not add cdev for minor %d, %s (err %d)\n",
2460 minor, name, -ret);
2461 goto err_cdev;
2462 }
2463
2464 device = device_create(ipath_class, NULL, dev, NULL, name);
2465
2466 if (IS_ERR(device)) {
2467 ret = PTR_ERR(device);
2468 printk(KERN_ERR IPATH_DRV_NAME ": Could not create "
2469 "device for minor %d, %s (err %d)\n",
2470 minor, name, -ret);
2471 goto err_cdev;
2472 }
2473
2474 goto done;
2475
2476err_cdev:
2477 cdev_del(cdev);
2478 cdev = NULL;
2479
2480done:
2481 if (ret >= 0) {
2482 *cdevp = cdev;
2483 *devp = device;
2484 } else {
2485 *cdevp = NULL;
2486 *devp = NULL;
2487 }
2488
2489 return ret;
2490}
2491
2492int ipath_cdev_init(int minor, char *name, const struct file_operations *fops,
2493 struct cdev **cdevp, struct device **devp)
2494{
2495 return init_cdev(minor, name, fops, cdevp, devp);
2496}
2497
2498static void cleanup_cdev(struct cdev **cdevp,
2499 struct device **devp)
2500{
2501 struct device *dev = *devp;
2502
2503 if (dev) {
2504 device_unregister(dev);
2505 *devp = NULL;
2506 }
2507
2508 if (*cdevp) {
2509 cdev_del(*cdevp);
2510 *cdevp = NULL;
2511 }
2512}
2513
2514void ipath_cdev_cleanup(struct cdev **cdevp,
2515 struct device **devp)
2516{
2517 cleanup_cdev(cdevp, devp);
2518}
2519
2520static struct cdev *wildcard_cdev;
2521static struct device *wildcard_dev;
2522
2523static const dev_t dev = MKDEV(IPATH_MAJOR, 0);
2524
2525static int user_init(void)
2526{
2527 int ret;
2528
2529 ret = register_chrdev_region(dev, IPATH_NMINORS, IPATH_DRV_NAME);
2530 if (ret < 0) {
2531 printk(KERN_ERR IPATH_DRV_NAME ": Could not register "
2532 "chrdev region (err %d)\n", -ret);
2533 goto done;
2534 }
2535
2536 ipath_class = class_create(THIS_MODULE, IPATH_DRV_NAME);
2537
2538 if (IS_ERR(ipath_class)) {
2539 ret = PTR_ERR(ipath_class);
2540 printk(KERN_ERR IPATH_DRV_NAME ": Could not create "
2541 "device class (err %d)\n", -ret);
2542 goto bail;
2543 }
2544
2545 goto done;
2546bail:
2547 unregister_chrdev_region(dev, IPATH_NMINORS);
2548done:
2549 return ret;
2550}
2551
2552static void user_cleanup(void)
2553{
2554 if (ipath_class) {
2555 class_destroy(ipath_class);
2556 ipath_class = NULL;
2557 }
2558
2559 unregister_chrdev_region(dev, IPATH_NMINORS);
2560}
2561
2562static atomic_t user_count = ATOMIC_INIT(0);
2563static atomic_t user_setup = ATOMIC_INIT(0);
2564
2565int ipath_user_add(struct ipath_devdata *dd)
2566{
2567 char name[10];
2568 int ret;
2569
2570 if (atomic_inc_return(&user_count) == 1) {
2571 ret = user_init();
2572 if (ret < 0) {
2573 ipath_dev_err(dd, "Unable to set up user support: "
2574 "error %d\n", -ret);
2575 goto bail;
2576 }
2577 ret = init_cdev(0, "ipath", &ipath_file_ops, &wildcard_cdev,
2578 &wildcard_dev);
2579 if (ret < 0) {
2580 ipath_dev_err(dd, "Could not create wildcard "
2581 "minor: error %d\n", -ret);
2582 goto bail_user;
2583 }
2584
2585 atomic_set(&user_setup, 1);
2586 }
2587
2588 snprintf(name, sizeof(name), "ipath%d", dd->ipath_unit);
2589
2590 ret = init_cdev(dd->ipath_unit + 1, name, &ipath_file_ops,
2591 &dd->user_cdev, &dd->user_dev);
2592 if (ret < 0)
2593 ipath_dev_err(dd, "Could not create user minor %d, %s\n",
2594 dd->ipath_unit + 1, name);
2595
2596 goto bail;
2597
2598bail_user:
2599 user_cleanup();
2600bail:
2601 return ret;
2602}
2603
2604void ipath_user_remove(struct ipath_devdata *dd)
2605{
2606 cleanup_cdev(&dd->user_cdev, &dd->user_dev);
2607
2608 if (atomic_dec_return(&user_count) == 0) {
2609 if (atomic_read(&user_setup) == 0)
2610 goto bail;
2611
2612 cleanup_cdev(&wildcard_cdev, &wildcard_dev);
2613 user_cleanup();
2614
2615 atomic_set(&user_setup, 0);
2616 }
2617bail:
2618 return;
2619}
diff --git a/drivers/staging/rdma/ipath/ipath_fs.c b/drivers/staging/rdma/ipath/ipath_fs.c
deleted file mode 100644
index 476fcdf05acb..000000000000
--- a/drivers/staging/rdma/ipath/ipath_fs.c
+++ /dev/null
@@ -1,415 +0,0 @@
1/*
2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/module.h>
35#include <linux/fs.h>
36#include <linux/mount.h>
37#include <linux/pagemap.h>
38#include <linux/init.h>
39#include <linux/namei.h>
40#include <linux/slab.h>
41
42#include "ipath_kernel.h"
43
44#define IPATHFS_MAGIC 0x726a77
45
46static struct super_block *ipath_super;
47
48static int ipathfs_mknod(struct inode *dir, struct dentry *dentry,
49 umode_t mode, const struct file_operations *fops,
50 void *data)
51{
52 int error;
53 struct inode *inode = new_inode(dir->i_sb);
54
55 if (!inode) {
56 error = -EPERM;
57 goto bail;
58 }
59
60 inode->i_ino = get_next_ino();
61 inode->i_mode = mode;
62 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
63 inode->i_private = data;
64 if (S_ISDIR(mode)) {
65 inode->i_op = &simple_dir_inode_operations;
66 inc_nlink(inode);
67 inc_nlink(dir);
68 }
69
70 inode->i_fop = fops;
71
72 d_instantiate(dentry, inode);
73 error = 0;
74
75bail:
76 return error;
77}
78
79static int create_file(const char *name, umode_t mode,
80 struct dentry *parent, struct dentry **dentry,
81 const struct file_operations *fops, void *data)
82{
83 int error;
84
85 inode_lock(d_inode(parent));
86 *dentry = lookup_one_len(name, parent, strlen(name));
87 if (!IS_ERR(*dentry))
88 error = ipathfs_mknod(d_inode(parent), *dentry,
89 mode, fops, data);
90 else
91 error = PTR_ERR(*dentry);
92 inode_unlock(d_inode(parent));
93
94 return error;
95}
96
97static ssize_t atomic_stats_read(struct file *file, char __user *buf,
98 size_t count, loff_t *ppos)
99{
100 return simple_read_from_buffer(buf, count, ppos, &ipath_stats,
101 sizeof ipath_stats);
102}
103
104static const struct file_operations atomic_stats_ops = {
105 .read = atomic_stats_read,
106 .llseek = default_llseek,
107};
108
109static ssize_t atomic_counters_read(struct file *file, char __user *buf,
110 size_t count, loff_t *ppos)
111{
112 struct infinipath_counters counters;
113 struct ipath_devdata *dd;
114
115 dd = file_inode(file)->i_private;
116 dd->ipath_f_read_counters(dd, &counters);
117
118 return simple_read_from_buffer(buf, count, ppos, &counters,
119 sizeof counters);
120}
121
122static const struct file_operations atomic_counters_ops = {
123 .read = atomic_counters_read,
124 .llseek = default_llseek,
125};
126
127static ssize_t flash_read(struct file *file, char __user *buf,
128 size_t count, loff_t *ppos)
129{
130 struct ipath_devdata *dd;
131 ssize_t ret;
132 loff_t pos;
133 char *tmp;
134
135 pos = *ppos;
136
137	if (pos < 0) {
138 ret = -EINVAL;
139 goto bail;
140 }
141
142 if (pos >= sizeof(struct ipath_flash)) {
143 ret = 0;
144 goto bail;
145 }
146
147 if (count > sizeof(struct ipath_flash) - pos)
148 count = sizeof(struct ipath_flash) - pos;
149
150 tmp = kmalloc(count, GFP_KERNEL);
151 if (!tmp) {
152 ret = -ENOMEM;
153 goto bail;
154 }
155
156 dd = file_inode(file)->i_private;
157 if (ipath_eeprom_read(dd, pos, tmp, count)) {
158 ipath_dev_err(dd, "failed to read from flash\n");
159 ret = -ENXIO;
160 goto bail_tmp;
161 }
162
163 if (copy_to_user(buf, tmp, count)) {
164 ret = -EFAULT;
165 goto bail_tmp;
166 }
167
168 *ppos = pos + count;
169 ret = count;
170
171bail_tmp:
172 kfree(tmp);
173
174bail:
175 return ret;
176}
177
178static ssize_t flash_write(struct file *file, const char __user *buf,
179 size_t count, loff_t *ppos)
180{
181 struct ipath_devdata *dd;
182 ssize_t ret;
183 loff_t pos;
184 char *tmp;
185
186 pos = *ppos;
187
188 if (pos != 0) {
189 ret = -EINVAL;
190 goto bail;
191 }
192
193 if (count != sizeof(struct ipath_flash)) {
194 ret = -EINVAL;
195 goto bail;
196 }
197
198 tmp = memdup_user(buf, count);
199 if (IS_ERR(tmp))
200 return PTR_ERR(tmp);
201
202 dd = file_inode(file)->i_private;
203 if (ipath_eeprom_write(dd, pos, tmp, count)) {
204 ret = -ENXIO;
205 ipath_dev_err(dd, "failed to write to flash\n");
206 goto bail_tmp;
207 }
208
209 *ppos = pos + count;
210 ret = count;
211
212bail_tmp:
213 kfree(tmp);
214
215bail:
216 return ret;
217}
218
219static const struct file_operations flash_ops = {
220 .read = flash_read,
221 .write = flash_write,
222 .llseek = default_llseek,
223};
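
/*
 * Illustrative user-space sketch, not part of the original driver:
 * flash_write() above only accepts a single write of exactly
 * sizeof(struct ipath_flash) at offset 0, so an updater has to rewrite
 * the whole image.  Without knowing the struct layout, one way to honour
 * that is to read the file to EOF first and write back the same number
 * of bytes.  The path "/ipathfs/00/flash" is an assumption for the
 * example (it depends on where ipathfs is mounted and on the unit number).
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	char buf[8192];		/* comfortably larger than the flash image */
	ssize_t len, written;
	int fd = open("/ipathfs/00/flash", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* flash_read() stops at sizeof(struct ipath_flash), so this read
	 * returns the full image size. */
	len = read(fd, buf, sizeof(buf));
	if (len <= 0) {
		perror("read");
		close(fd);
		return 1;
	}
	/* ... modify buf[0..len-1] here ... */
	written = pwrite(fd, buf, len, 0);	/* must be the whole image at offset 0 */
	printf("flash image is %zd bytes, wrote %zd\n", len, written);
	close(fd);
	return 0;
}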
224
225static int create_device_files(struct super_block *sb,
226 struct ipath_devdata *dd)
227{
228 struct dentry *dir, *tmp;
229 char unit[10];
230 int ret;
231
232 snprintf(unit, sizeof unit, "%02d", dd->ipath_unit);
233 ret = create_file(unit, S_IFDIR|S_IRUGO|S_IXUGO, sb->s_root, &dir,
234 &simple_dir_operations, dd);
235 if (ret) {
236 printk(KERN_ERR "create_file(%s) failed: %d\n", unit, ret);
237 goto bail;
238 }
239
240 ret = create_file("atomic_counters", S_IFREG|S_IRUGO, dir, &tmp,
241 &atomic_counters_ops, dd);
242 if (ret) {
243 printk(KERN_ERR "create_file(%s/atomic_counters) "
244 "failed: %d\n", unit, ret);
245 goto bail;
246 }
247
248 ret = create_file("flash", S_IFREG|S_IWUSR|S_IRUGO, dir, &tmp,
249 &flash_ops, dd);
250 if (ret) {
251 printk(KERN_ERR "create_file(%s/flash) "
252 "failed: %d\n", unit, ret);
253 goto bail;
254 }
255
256bail:
257 return ret;
258}
259
260static int remove_file(struct dentry *parent, char *name)
261{
262 struct dentry *tmp;
263 int ret;
264
265 tmp = lookup_one_len(name, parent, strlen(name));
266
267 if (IS_ERR(tmp)) {
268 ret = PTR_ERR(tmp);
269 goto bail;
270 }
271
272 spin_lock(&tmp->d_lock);
273 if (simple_positive(tmp)) {
274 dget_dlock(tmp);
275 __d_drop(tmp);
276 spin_unlock(&tmp->d_lock);
277 simple_unlink(d_inode(parent), tmp);
278 } else
279 spin_unlock(&tmp->d_lock);
280
281 ret = 0;
282bail:
283 /*
284 * We don't expect clients to care about the return value, but
285 * it's there if they need it.
286 */
287 return ret;
288}
289
290static int remove_device_files(struct super_block *sb,
291 struct ipath_devdata *dd)
292{
293 struct dentry *dir, *root;
294 char unit[10];
295 int ret;
296
297 root = dget(sb->s_root);
298 inode_lock(d_inode(root));
299 snprintf(unit, sizeof unit, "%02d", dd->ipath_unit);
300 dir = lookup_one_len(unit, root, strlen(unit));
301
302 if (IS_ERR(dir)) {
303 ret = PTR_ERR(dir);
304 printk(KERN_ERR "Lookup of %s failed\n", unit);
305 goto bail;
306 }
307
308 remove_file(dir, "flash");
309 remove_file(dir, "atomic_counters");
310 d_delete(dir);
311 ret = simple_rmdir(d_inode(root), dir);
312
313bail:
314 inode_unlock(d_inode(root));
315 dput(root);
316 return ret;
317}
318
319static int ipathfs_fill_super(struct super_block *sb, void *data,
320 int silent)
321{
322 struct ipath_devdata *dd, *tmp;
323 unsigned long flags;
324 int ret;
325
326 static struct tree_descr files[] = {
327 [2] = {"atomic_stats", &atomic_stats_ops, S_IRUGO},
328 {""},
329 };
330
331 ret = simple_fill_super(sb, IPATHFS_MAGIC, files);
332 if (ret) {
333 printk(KERN_ERR "simple_fill_super failed: %d\n", ret);
334 goto bail;
335 }
336
337 spin_lock_irqsave(&ipath_devs_lock, flags);
338
339 list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
340 spin_unlock_irqrestore(&ipath_devs_lock, flags);
341 ret = create_device_files(sb, dd);
342 if (ret)
343 goto bail;
344 spin_lock_irqsave(&ipath_devs_lock, flags);
345 }
346
347 spin_unlock_irqrestore(&ipath_devs_lock, flags);
348
349bail:
350 return ret;
351}
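
/*
 * Illustrative sketch, not part of the original driver: after
 * ipathfs_fill_super() and create_device_files() run, the filesystem
 * contains roughly
 *
 *   <mountpoint>/atomic_stats            ipath_stats, raw binary
 *   <mountpoint>/<unit>/atomic_counters  struct infinipath_counters, raw binary
 *   <mountpoint>/<unit>/flash            struct ipath_flash, read/write
 *
 * The minimal reader below just dumps the size of one such file; the
 * mount point "/ipathfs" and unit "00" are assumptions for the example.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/ipathfs/00/atomic_counters", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* atomic_counters_read() copies the whole counters struct via
	 * simple_read_from_buffer(), so one read returns it all. */
	n = read(fd, buf, sizeof(buf));
	if (n > 0)
		printf("read %zd bytes of counter data\n", n);
	close(fd);
	return 0;
}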
352
353static struct dentry *ipathfs_mount(struct file_system_type *fs_type,
354 int flags, const char *dev_name, void *data)
355{
356 struct dentry *ret;
357 ret = mount_single(fs_type, flags, data, ipathfs_fill_super);
358 if (!IS_ERR(ret))
359 ipath_super = ret->d_sb;
360 return ret;
361}
362
363static void ipathfs_kill_super(struct super_block *s)
364{
365 kill_litter_super(s);
366 ipath_super = NULL;
367}
368
369int ipathfs_add_device(struct ipath_devdata *dd)
370{
371 int ret;
372
373 if (ipath_super == NULL) {
374 ret = 0;
375 goto bail;
376 }
377
378 ret = create_device_files(ipath_super, dd);
379
380bail:
381 return ret;
382}
383
384int ipathfs_remove_device(struct ipath_devdata *dd)
385{
386 int ret;
387
388 if (ipath_super == NULL) {
389 ret = 0;
390 goto bail;
391 }
392
393 ret = remove_device_files(ipath_super, dd);
394
395bail:
396 return ret;
397}
398
399static struct file_system_type ipathfs_fs_type = {
400 .owner = THIS_MODULE,
401 .name = "ipathfs",
402 .mount = ipathfs_mount,
403 .kill_sb = ipathfs_kill_super,
404};
405MODULE_ALIAS_FS("ipathfs");
406
407int __init ipath_init_ipathfs(void)
408{
409 return register_filesystem(&ipathfs_fs_type);
410}
411
412void __exit ipath_exit_ipathfs(void)
413{
414 unregister_filesystem(&ipathfs_fs_type);
415}
diff --git a/drivers/staging/rdma/ipath/ipath_iba6110.c b/drivers/staging/rdma/ipath/ipath_iba6110.c
deleted file mode 100644
index 5f13572a5e24..000000000000
--- a/drivers/staging/rdma/ipath/ipath_iba6110.c
+++ /dev/null
@@ -1,1939 +0,0 @@
1/*
2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34/*
35 * This file contains all of the code that is specific to the InfiniPath
36 * HT chip.
37 */
38
39#include <linux/vmalloc.h>
40#include <linux/pci.h>
41#include <linux/delay.h>
42#include <linux/htirq.h>
43#include <rdma/ib_verbs.h>
44
45#include "ipath_kernel.h"
46#include "ipath_registers.h"
47
48static void ipath_setup_ht_setextled(struct ipath_devdata *, u64, u64);
49
50
51/*
52 * This lists the InfiniPath registers, in the actual chip layout.
53 * This structure should never be directly accessed.
54 *
55 * The names are in InterCap form because they're taken straight from
56 * the chip specification. Since they're only used in this file, they
57 * don't pollute the rest of the source.
58*/
59
60struct _infinipath_do_not_use_kernel_regs {
61 unsigned long long Revision;
62 unsigned long long Control;
63 unsigned long long PageAlign;
64 unsigned long long PortCnt;
65 unsigned long long DebugPortSelect;
66 unsigned long long DebugPort;
67 unsigned long long SendRegBase;
68 unsigned long long UserRegBase;
69 unsigned long long CounterRegBase;
70 unsigned long long Scratch;
71 unsigned long long ReservedMisc1;
72 unsigned long long InterruptConfig;
73 unsigned long long IntBlocked;
74 unsigned long long IntMask;
75 unsigned long long IntStatus;
76 unsigned long long IntClear;
77 unsigned long long ErrorMask;
78 unsigned long long ErrorStatus;
79 unsigned long long ErrorClear;
80 unsigned long long HwErrMask;
81 unsigned long long HwErrStatus;
82 unsigned long long HwErrClear;
83 unsigned long long HwDiagCtrl;
84 unsigned long long MDIO;
85 unsigned long long IBCStatus;
86 unsigned long long IBCCtrl;
87 unsigned long long ExtStatus;
88 unsigned long long ExtCtrl;
89 unsigned long long GPIOOut;
90 unsigned long long GPIOMask;
91 unsigned long long GPIOStatus;
92 unsigned long long GPIOClear;
93 unsigned long long RcvCtrl;
94 unsigned long long RcvBTHQP;
95 unsigned long long RcvHdrSize;
96 unsigned long long RcvHdrCnt;
97 unsigned long long RcvHdrEntSize;
98 unsigned long long RcvTIDBase;
99 unsigned long long RcvTIDCnt;
100 unsigned long long RcvEgrBase;
101 unsigned long long RcvEgrCnt;
102 unsigned long long RcvBufBase;
103 unsigned long long RcvBufSize;
104 unsigned long long RxIntMemBase;
105 unsigned long long RxIntMemSize;
106 unsigned long long RcvPartitionKey;
107 unsigned long long ReservedRcv[10];
108 unsigned long long SendCtrl;
109 unsigned long long SendPIOBufBase;
110 unsigned long long SendPIOSize;
111 unsigned long long SendPIOBufCnt;
112 unsigned long long SendPIOAvailAddr;
113 unsigned long long TxIntMemBase;
114 unsigned long long TxIntMemSize;
115 unsigned long long ReservedSend[9];
116 unsigned long long SendBufferError;
117 unsigned long long SendBufferErrorCONT1;
118 unsigned long long SendBufferErrorCONT2;
119 unsigned long long SendBufferErrorCONT3;
120 unsigned long long ReservedSBE[4];
121 unsigned long long RcvHdrAddr0;
122 unsigned long long RcvHdrAddr1;
123 unsigned long long RcvHdrAddr2;
124 unsigned long long RcvHdrAddr3;
125 unsigned long long RcvHdrAddr4;
126 unsigned long long RcvHdrAddr5;
127 unsigned long long RcvHdrAddr6;
128 unsigned long long RcvHdrAddr7;
129 unsigned long long RcvHdrAddr8;
130 unsigned long long ReservedRHA[7];
131 unsigned long long RcvHdrTailAddr0;
132 unsigned long long RcvHdrTailAddr1;
133 unsigned long long RcvHdrTailAddr2;
134 unsigned long long RcvHdrTailAddr3;
135 unsigned long long RcvHdrTailAddr4;
136 unsigned long long RcvHdrTailAddr5;
137 unsigned long long RcvHdrTailAddr6;
138 unsigned long long RcvHdrTailAddr7;
139 unsigned long long RcvHdrTailAddr8;
140 unsigned long long ReservedRHTA[7];
141 unsigned long long Sync; /* Software only */
142 unsigned long long Dump; /* Software only */
143 unsigned long long SimVer; /* Software only */
144 unsigned long long ReservedSW[5];
145 unsigned long long SerdesConfig0;
146 unsigned long long SerdesConfig1;
147 unsigned long long SerdesStatus;
148 unsigned long long XGXSConfig;
149 unsigned long long ReservedSW2[4];
150};
151
152struct _infinipath_do_not_use_counters {
153 __u64 LBIntCnt;
154 __u64 LBFlowStallCnt;
155 __u64 Reserved1;
156 __u64 TxUnsupVLErrCnt;
157 __u64 TxDataPktCnt;
158 __u64 TxFlowPktCnt;
159 __u64 TxDwordCnt;
160 __u64 TxLenErrCnt;
161 __u64 TxMaxMinLenErrCnt;
162 __u64 TxUnderrunCnt;
163 __u64 TxFlowStallCnt;
164 __u64 TxDroppedPktCnt;
165 __u64 RxDroppedPktCnt;
166 __u64 RxDataPktCnt;
167 __u64 RxFlowPktCnt;
168 __u64 RxDwordCnt;
169 __u64 RxLenErrCnt;
170 __u64 RxMaxMinLenErrCnt;
171 __u64 RxICRCErrCnt;
172 __u64 RxVCRCErrCnt;
173 __u64 RxFlowCtrlErrCnt;
174 __u64 RxBadFormatCnt;
175 __u64 RxLinkProblemCnt;
176 __u64 RxEBPCnt;
177 __u64 RxLPCRCErrCnt;
178 __u64 RxBufOvflCnt;
179 __u64 RxTIDFullErrCnt;
180 __u64 RxTIDValidErrCnt;
181 __u64 RxPKeyMismatchCnt;
182 __u64 RxP0HdrEgrOvflCnt;
183 __u64 RxP1HdrEgrOvflCnt;
184 __u64 RxP2HdrEgrOvflCnt;
185 __u64 RxP3HdrEgrOvflCnt;
186 __u64 RxP4HdrEgrOvflCnt;
187 __u64 RxP5HdrEgrOvflCnt;
188 __u64 RxP6HdrEgrOvflCnt;
189 __u64 RxP7HdrEgrOvflCnt;
190 __u64 RxP8HdrEgrOvflCnt;
191 __u64 Reserved6;
192 __u64 Reserved7;
193 __u64 IBStatusChangeCnt;
194 __u64 IBLinkErrRecoveryCnt;
195 __u64 IBLinkDownedCnt;
196 __u64 IBSymbolErrCnt;
197};
198
199#define IPATH_KREG_OFFSET(field) (offsetof( \
200 struct _infinipath_do_not_use_kernel_regs, field) / sizeof(u64))
201#define IPATH_CREG_OFFSET(field) (offsetof( \
202 struct _infinipath_do_not_use_counters, field) / sizeof(u64))
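
/*
 * Illustrative sketch, not from the original source: the two macros
 * above turn a field of the layout structs into a 64-bit register
 * index, i.e. the byte offset divided by sizeof(u64).  A standalone
 * demonstration with a small stand-in struct (not the real chip layout):
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct demo_regs {
	uint64_t Revision;	/* index 0 */
	uint64_t Control;	/* index 1 */
	uint64_t PageAlign;	/* index 2 */
};

#define DEMO_KREG_OFFSET(field) \
	(offsetof(struct demo_regs, field) / sizeof(uint64_t))

int main(void)
{
	/* prints 0 1 2: the same arithmetic IPATH_KREG_OFFSET() performs */
	printf("Revision=%zu Control=%zu PageAlign=%zu\n",
	       DEMO_KREG_OFFSET(Revision),
	       DEMO_KREG_OFFSET(Control),
	       DEMO_KREG_OFFSET(PageAlign));
	return 0;
}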
203
204static const struct ipath_kregs ipath_ht_kregs = {
205 .kr_control = IPATH_KREG_OFFSET(Control),
206 .kr_counterregbase = IPATH_KREG_OFFSET(CounterRegBase),
207 .kr_debugport = IPATH_KREG_OFFSET(DebugPort),
208 .kr_debugportselect = IPATH_KREG_OFFSET(DebugPortSelect),
209 .kr_errorclear = IPATH_KREG_OFFSET(ErrorClear),
210 .kr_errormask = IPATH_KREG_OFFSET(ErrorMask),
211 .kr_errorstatus = IPATH_KREG_OFFSET(ErrorStatus),
212 .kr_extctrl = IPATH_KREG_OFFSET(ExtCtrl),
213 .kr_extstatus = IPATH_KREG_OFFSET(ExtStatus),
214 .kr_gpio_clear = IPATH_KREG_OFFSET(GPIOClear),
215 .kr_gpio_mask = IPATH_KREG_OFFSET(GPIOMask),
216 .kr_gpio_out = IPATH_KREG_OFFSET(GPIOOut),
217 .kr_gpio_status = IPATH_KREG_OFFSET(GPIOStatus),
218 .kr_hwdiagctrl = IPATH_KREG_OFFSET(HwDiagCtrl),
219 .kr_hwerrclear = IPATH_KREG_OFFSET(HwErrClear),
220 .kr_hwerrmask = IPATH_KREG_OFFSET(HwErrMask),
221 .kr_hwerrstatus = IPATH_KREG_OFFSET(HwErrStatus),
222 .kr_ibcctrl = IPATH_KREG_OFFSET(IBCCtrl),
223 .kr_ibcstatus = IPATH_KREG_OFFSET(IBCStatus),
224 .kr_intblocked = IPATH_KREG_OFFSET(IntBlocked),
225 .kr_intclear = IPATH_KREG_OFFSET(IntClear),
226 .kr_interruptconfig = IPATH_KREG_OFFSET(InterruptConfig),
227 .kr_intmask = IPATH_KREG_OFFSET(IntMask),
228 .kr_intstatus = IPATH_KREG_OFFSET(IntStatus),
229 .kr_mdio = IPATH_KREG_OFFSET(MDIO),
230 .kr_pagealign = IPATH_KREG_OFFSET(PageAlign),
231 .kr_partitionkey = IPATH_KREG_OFFSET(RcvPartitionKey),
232 .kr_portcnt = IPATH_KREG_OFFSET(PortCnt),
233 .kr_rcvbthqp = IPATH_KREG_OFFSET(RcvBTHQP),
234 .kr_rcvbufbase = IPATH_KREG_OFFSET(RcvBufBase),
235 .kr_rcvbufsize = IPATH_KREG_OFFSET(RcvBufSize),
236 .kr_rcvctrl = IPATH_KREG_OFFSET(RcvCtrl),
237 .kr_rcvegrbase = IPATH_KREG_OFFSET(RcvEgrBase),
238 .kr_rcvegrcnt = IPATH_KREG_OFFSET(RcvEgrCnt),
239 .kr_rcvhdrcnt = IPATH_KREG_OFFSET(RcvHdrCnt),
240 .kr_rcvhdrentsize = IPATH_KREG_OFFSET(RcvHdrEntSize),
241 .kr_rcvhdrsize = IPATH_KREG_OFFSET(RcvHdrSize),
242 .kr_rcvintmembase = IPATH_KREG_OFFSET(RxIntMemBase),
243 .kr_rcvintmemsize = IPATH_KREG_OFFSET(RxIntMemSize),
244 .kr_rcvtidbase = IPATH_KREG_OFFSET(RcvTIDBase),
245 .kr_rcvtidcnt = IPATH_KREG_OFFSET(RcvTIDCnt),
246 .kr_revision = IPATH_KREG_OFFSET(Revision),
247 .kr_scratch = IPATH_KREG_OFFSET(Scratch),
248 .kr_sendbuffererror = IPATH_KREG_OFFSET(SendBufferError),
249 .kr_sendctrl = IPATH_KREG_OFFSET(SendCtrl),
250 .kr_sendpioavailaddr = IPATH_KREG_OFFSET(SendPIOAvailAddr),
251 .kr_sendpiobufbase = IPATH_KREG_OFFSET(SendPIOBufBase),
252 .kr_sendpiobufcnt = IPATH_KREG_OFFSET(SendPIOBufCnt),
253 .kr_sendpiosize = IPATH_KREG_OFFSET(SendPIOSize),
254 .kr_sendregbase = IPATH_KREG_OFFSET(SendRegBase),
255 .kr_txintmembase = IPATH_KREG_OFFSET(TxIntMemBase),
256 .kr_txintmemsize = IPATH_KREG_OFFSET(TxIntMemSize),
257 .kr_userregbase = IPATH_KREG_OFFSET(UserRegBase),
258 .kr_serdesconfig0 = IPATH_KREG_OFFSET(SerdesConfig0),
259 .kr_serdesconfig1 = IPATH_KREG_OFFSET(SerdesConfig1),
260 .kr_serdesstatus = IPATH_KREG_OFFSET(SerdesStatus),
261 .kr_xgxsconfig = IPATH_KREG_OFFSET(XGXSConfig),
262 /*
263 * These should not be used directly via ipath_write_kreg64(),
264 * use them with ipath_write_kreg64_port().
265 */
266 .kr_rcvhdraddr = IPATH_KREG_OFFSET(RcvHdrAddr0),
267 .kr_rcvhdrtailaddr = IPATH_KREG_OFFSET(RcvHdrTailAddr0)
268};
269
270static const struct ipath_cregs ipath_ht_cregs = {
271 .cr_badformatcnt = IPATH_CREG_OFFSET(RxBadFormatCnt),
272 .cr_erricrccnt = IPATH_CREG_OFFSET(RxICRCErrCnt),
273 .cr_errlinkcnt = IPATH_CREG_OFFSET(RxLinkProblemCnt),
274 .cr_errlpcrccnt = IPATH_CREG_OFFSET(RxLPCRCErrCnt),
275 .cr_errpkey = IPATH_CREG_OFFSET(RxPKeyMismatchCnt),
276 .cr_errrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowCtrlErrCnt),
277 .cr_err_rlencnt = IPATH_CREG_OFFSET(RxLenErrCnt),
278 .cr_errslencnt = IPATH_CREG_OFFSET(TxLenErrCnt),
279 .cr_errtidfull = IPATH_CREG_OFFSET(RxTIDFullErrCnt),
280 .cr_errtidvalid = IPATH_CREG_OFFSET(RxTIDValidErrCnt),
281 .cr_errvcrccnt = IPATH_CREG_OFFSET(RxVCRCErrCnt),
282 .cr_ibstatuschange = IPATH_CREG_OFFSET(IBStatusChangeCnt),
283 /* calc from Reg_CounterRegBase + offset */
284 .cr_intcnt = IPATH_CREG_OFFSET(LBIntCnt),
285 .cr_invalidrlencnt = IPATH_CREG_OFFSET(RxMaxMinLenErrCnt),
286 .cr_invalidslencnt = IPATH_CREG_OFFSET(TxMaxMinLenErrCnt),
287 .cr_lbflowstallcnt = IPATH_CREG_OFFSET(LBFlowStallCnt),
288 .cr_pktrcvcnt = IPATH_CREG_OFFSET(RxDataPktCnt),
289 .cr_pktrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowPktCnt),
290 .cr_pktsendcnt = IPATH_CREG_OFFSET(TxDataPktCnt),
291 .cr_pktsendflowcnt = IPATH_CREG_OFFSET(TxFlowPktCnt),
292 .cr_portovflcnt = IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt),
293 .cr_rcvebpcnt = IPATH_CREG_OFFSET(RxEBPCnt),
294 .cr_rcvovflcnt = IPATH_CREG_OFFSET(RxBufOvflCnt),
295 .cr_senddropped = IPATH_CREG_OFFSET(TxDroppedPktCnt),
296 .cr_sendstallcnt = IPATH_CREG_OFFSET(TxFlowStallCnt),
297 .cr_sendunderruncnt = IPATH_CREG_OFFSET(TxUnderrunCnt),
298 .cr_wordrcvcnt = IPATH_CREG_OFFSET(RxDwordCnt),
299 .cr_wordsendcnt = IPATH_CREG_OFFSET(TxDwordCnt),
300 .cr_unsupvlcnt = IPATH_CREG_OFFSET(TxUnsupVLErrCnt),
301 .cr_rxdroppktcnt = IPATH_CREG_OFFSET(RxDroppedPktCnt),
302 .cr_iblinkerrrecovcnt = IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt),
303 .cr_iblinkdowncnt = IPATH_CREG_OFFSET(IBLinkDownedCnt),
304 .cr_ibsymbolerrcnt = IPATH_CREG_OFFSET(IBSymbolErrCnt)
305};
306
307/* kr_intstatus, kr_intclear, kr_intmask bits */
308#define INFINIPATH_I_RCVURG_MASK ((1U<<9)-1)
309#define INFINIPATH_I_RCVURG_SHIFT 0
310#define INFINIPATH_I_RCVAVAIL_MASK ((1U<<9)-1)
311#define INFINIPATH_I_RCVAVAIL_SHIFT 12
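
/*
 * Illustrative sketch, not from the original source: the mask/shift
 * pairs above describe per-port bitmaps packed into the 64-bit
 * kr_intstatus value, and the usual extraction is
 * (status >> shift) & mask.  Self-contained example using the same
 * constants:
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_I_RCVURG_MASK	((1U << 9) - 1)
#define DEMO_I_RCVURG_SHIFT	0
#define DEMO_I_RCVAVAIL_MASK	((1U << 9) - 1)
#define DEMO_I_RCVAVAIL_SHIFT	12

int main(void)
{
	/* pretend ports 0 and 3 have packets available and port 1 is urgent */
	uint64_t intstatus = ((uint64_t)0x9 << DEMO_I_RCVAVAIL_SHIFT) |
			     ((uint64_t)0x2 << DEMO_I_RCVURG_SHIFT);
	uint32_t avail = (intstatus >> DEMO_I_RCVAVAIL_SHIFT) & DEMO_I_RCVAVAIL_MASK;
	uint32_t urg = (intstatus >> DEMO_I_RCVURG_SHIFT) & DEMO_I_RCVURG_MASK;

	printf("rcvavail port bitmap 0x%x, rcvurg port bitmap 0x%x\n", avail, urg);
	return 0;
}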
312
313/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
314#define INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT 0
315#define INFINIPATH_HWE_HTCMEMPARITYERR_MASK 0x3FFFFFULL
316#define INFINIPATH_HWE_HTCLNKABYTE0CRCERR 0x0000000000800000ULL
317#define INFINIPATH_HWE_HTCLNKABYTE1CRCERR 0x0000000001000000ULL
318#define INFINIPATH_HWE_HTCLNKBBYTE0CRCERR 0x0000000002000000ULL
319#define INFINIPATH_HWE_HTCLNKBBYTE1CRCERR 0x0000000004000000ULL
320#define INFINIPATH_HWE_HTCMISCERR4 0x0000000008000000ULL
321#define INFINIPATH_HWE_HTCMISCERR5 0x0000000010000000ULL
322#define INFINIPATH_HWE_HTCMISCERR6 0x0000000020000000ULL
323#define INFINIPATH_HWE_HTCMISCERR7 0x0000000040000000ULL
324#define INFINIPATH_HWE_HTCBUSTREQPARITYERR 0x0000000080000000ULL
325#define INFINIPATH_HWE_HTCBUSTRESPPARITYERR 0x0000000100000000ULL
326#define INFINIPATH_HWE_HTCBUSIREQPARITYERR 0x0000000200000000ULL
327#define INFINIPATH_HWE_COREPLL_FBSLIP 0x0080000000000000ULL
328#define INFINIPATH_HWE_COREPLL_RFSLIP 0x0100000000000000ULL
329#define INFINIPATH_HWE_HTBPLL_FBSLIP 0x0200000000000000ULL
330#define INFINIPATH_HWE_HTBPLL_RFSLIP 0x0400000000000000ULL
331#define INFINIPATH_HWE_HTAPLL_FBSLIP 0x0800000000000000ULL
332#define INFINIPATH_HWE_HTAPLL_RFSLIP 0x1000000000000000ULL
333#define INFINIPATH_HWE_SERDESPLLFAILED 0x2000000000000000ULL
334
335#define IBA6110_IBCS_LINKTRAININGSTATE_MASK 0xf
336#define IBA6110_IBCS_LINKSTATE_SHIFT 4
337
338/* kr_extstatus bits */
339#define INFINIPATH_EXTS_FREQSEL 0x2
340#define INFINIPATH_EXTS_SERDESSEL 0x4
341#define INFINIPATH_EXTS_MEMBIST_ENDTEST 0x0000000000004000
342#define INFINIPATH_EXTS_MEMBIST_CORRECT 0x0000000000008000
343
344
345/* TID entries (memory), HT-only */
346#define INFINIPATH_RT_ADDR_MASK 0xFFFFFFFFFFULL /* 40 bits valid */
347#define INFINIPATH_RT_VALID 0x8000000000000000ULL
348#define INFINIPATH_RT_ADDR_SHIFT 0
349#define INFINIPATH_RT_BUFSIZE_MASK 0x3FFFULL
350#define INFINIPATH_RT_BUFSIZE_SHIFT 48
351
352#define INFINIPATH_R_INTRAVAIL_SHIFT 16
353#define INFINIPATH_R_TAILUPD_SHIFT 31
354
355/* kr_xgxsconfig bits */
356#define INFINIPATH_XGXS_RESET 0x7ULL
357
358/*
359 * masks and bits that are different in different chips, or present only
360 * in one
361 */
362static const ipath_err_t infinipath_hwe_htcmemparityerr_mask =
363 INFINIPATH_HWE_HTCMEMPARITYERR_MASK;
364static const ipath_err_t infinipath_hwe_htcmemparityerr_shift =
365 INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT;
366
367static const ipath_err_t infinipath_hwe_htclnkabyte0crcerr =
368 INFINIPATH_HWE_HTCLNKABYTE0CRCERR;
369static const ipath_err_t infinipath_hwe_htclnkabyte1crcerr =
370 INFINIPATH_HWE_HTCLNKABYTE1CRCERR;
371static const ipath_err_t infinipath_hwe_htclnkbbyte0crcerr =
372 INFINIPATH_HWE_HTCLNKBBYTE0CRCERR;
373static const ipath_err_t infinipath_hwe_htclnkbbyte1crcerr =
374 INFINIPATH_HWE_HTCLNKBBYTE1CRCERR;
375
376#define _IPATH_GPIO_SDA_NUM 1
377#define _IPATH_GPIO_SCL_NUM 0
378
379#define IPATH_GPIO_SDA \
380 (1ULL << (_IPATH_GPIO_SDA_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
381#define IPATH_GPIO_SCL \
382 (1ULL << (_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
383
384/* keep the code below somewhat more readable; not used elsewhere */
385#define _IPATH_HTLINK0_CRCBITS (infinipath_hwe_htclnkabyte0crcerr | \
386 infinipath_hwe_htclnkabyte1crcerr)
387#define _IPATH_HTLINK1_CRCBITS (infinipath_hwe_htclnkbbyte0crcerr | \
388 infinipath_hwe_htclnkbbyte1crcerr)
389#define _IPATH_HTLANE0_CRCBITS (infinipath_hwe_htclnkabyte0crcerr | \
390 infinipath_hwe_htclnkbbyte0crcerr)
391#define _IPATH_HTLANE1_CRCBITS (infinipath_hwe_htclnkabyte1crcerr | \
392 infinipath_hwe_htclnkbbyte1crcerr)
393
394static void hwerr_crcbits(struct ipath_devdata *dd, ipath_err_t hwerrs,
395 char *msg, size_t msgl)
396{
397 char bitsmsg[64];
398 ipath_err_t crcbits = hwerrs &
399 (_IPATH_HTLINK0_CRCBITS | _IPATH_HTLINK1_CRCBITS);
400 /* don't check lane 1 of link A if HT0 is 8-bit */
401 if (dd->ipath_flags & IPATH_8BIT_IN_HT0)
402 crcbits &= ~infinipath_hwe_htclnkabyte1crcerr;
403 /* don't check lane 1 of link B if HT1 is 8-bit */
404 if (dd->ipath_flags & IPATH_8BIT_IN_HT1)
405 crcbits &= ~infinipath_hwe_htclnkbbyte1crcerr;
406 /*
407 * We will eventually want to ignore link errors on a link
408 * that is not in use, if any. For now, complain about both.
409 */
410 if (crcbits) {
411 u16 ctrl0, ctrl1;
412 snprintf(bitsmsg, sizeof bitsmsg,
413 "[HT%s lane %s CRC (%llx); powercycle to completely clear]",
414 !(crcbits & _IPATH_HTLINK1_CRCBITS) ?
415 "0 (A)" : (!(crcbits & _IPATH_HTLINK0_CRCBITS)
416 ? "1 (B)" : "0+1 (A+B)"),
417 !(crcbits & _IPATH_HTLANE1_CRCBITS) ? "0"
418 : (!(crcbits & _IPATH_HTLANE0_CRCBITS) ? "1" :
419 "0+1"), (unsigned long long) crcbits);
420 strlcat(msg, bitsmsg, msgl);
421
422 /*
423 * print extra info for debugging. slave/primary
424 * config word 4, 8 (link control 0, 1)
425 */
426
427 if (pci_read_config_word(dd->pcidev,
428 dd->ipath_ht_slave_off + 0x4,
429 &ctrl0))
430 dev_info(&dd->pcidev->dev, "Couldn't read "
431 "linkctrl0 of slave/primary "
432 "config block\n");
433 else if (!(ctrl0 & 1 << 6))
434 /* not if EOC bit set */
435 ipath_dbg("HT linkctrl0 0x%x%s%s\n", ctrl0,
436 ((ctrl0 >> 8) & 7) ? " CRC" : "",
437 ((ctrl0 >> 4) & 1) ? "linkfail" :
438 "");
439 if (pci_read_config_word(dd->pcidev,
440 dd->ipath_ht_slave_off + 0x8,
441 &ctrl1))
442 dev_info(&dd->pcidev->dev, "Couldn't read "
443 "linkctrl1 of slave/primary "
444 "config block\n");
445 else if (!(ctrl1 & 1 << 6))
446 /* not if EOC bit set */
447 ipath_dbg("HT linkctrl1 0x%x%s%s\n", ctrl1,
448 ((ctrl1 >> 8) & 7) ? " CRC" : "",
449 ((ctrl1 >> 4) & 1) ? "linkfail" :
450 "");
451
452 /* disable until driver reloaded */
453 dd->ipath_hwerrmask &= ~crcbits;
454 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
455 dd->ipath_hwerrmask);
456 ipath_dbg("HT crc errs: %s\n", msg);
457 } else
458 ipath_dbg("ignoring HT crc errors 0x%llx, "
459 "not in use\n", (unsigned long long)
460 (hwerrs & (_IPATH_HTLINK0_CRCBITS |
461 _IPATH_HTLINK1_CRCBITS)));
462}
463
464/* 6110 specific hardware errors... */
465static const struct ipath_hwerror_msgs ipath_6110_hwerror_msgs[] = {
466 INFINIPATH_HWE_MSG(HTCBUSIREQPARITYERR, "HTC Ireq Parity"),
467 INFINIPATH_HWE_MSG(HTCBUSTREQPARITYERR, "HTC Treq Parity"),
468 INFINIPATH_HWE_MSG(HTCBUSTRESPPARITYERR, "HTC Tresp Parity"),
469 INFINIPATH_HWE_MSG(HTCMISCERR5, "HT core Misc5"),
470 INFINIPATH_HWE_MSG(HTCMISCERR6, "HT core Misc6"),
471 INFINIPATH_HWE_MSG(HTCMISCERR7, "HT core Misc7"),
472 INFINIPATH_HWE_MSG(RXDSYNCMEMPARITYERR, "Rx Dsync"),
473 INFINIPATH_HWE_MSG(SERDESPLLFAILED, "SerDes PLL"),
474};
475
476#define TXE_PIO_PARITY ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | \
477 INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) \
478 << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)
479#define RXE_EAGER_PARITY (INFINIPATH_HWE_RXEMEMPARITYERR_EAGERTID \
480 << INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT)
481
482static void ipath_ht_txe_recover(struct ipath_devdata *dd)
483{
484 ++ipath_stats.sps_txeparity;
485 dev_info(&dd->pcidev->dev,
486 "Recovering from TXE PIO parity error\n");
487}
488
489
490/**
491 * ipath_ht_handle_hwerrors - display hardware errors.
492 * @dd: the infinipath device
493 * @msg: the output buffer
494 * @msgl: the size of the output buffer
495 *
496 * Most hardware errors are catastrophic, but for right now,
497 * we'll print them and continue. We reuse the same message
498 * buffer as ipath_handle_errors() to avoid excessive stack
499 * usage.
500 */
501static void ipath_ht_handle_hwerrors(struct ipath_devdata *dd, char *msg,
502 size_t msgl)
503{
504 ipath_err_t hwerrs;
505 u32 bits, ctrl;
506 int isfatal = 0;
507 char bitsmsg[64];
508 int log_idx;
509
510 hwerrs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus);
511
512 if (!hwerrs) {
513 ipath_cdbg(VERBOSE, "Called but no hardware errors set\n");
514 /*
515 * Better than printing confusing messages; this seems to be
516 * related to clearing the CRC error, or the PLL error, during
517 * init.
518 */
519 goto bail;
520 } else if (hwerrs == -1LL) {
521 ipath_dev_err(dd, "Read of hardware error status failed "
522 "(all bits set); ignoring\n");
523 goto bail;
524 }
525 ipath_stats.sps_hwerrs++;
526
527 /* Always clear the error status register, except MEMBISTFAIL,
528 * regardless of whether we continue or stop using the chip.
529 * We want that set so we know it failed, even across driver reload.
530 * We'll still ignore it in the hwerrmask. We do this partly for
531 * diagnostics, but also for support */
532 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
533 hwerrs&~INFINIPATH_HWE_MEMBISTFAILED);
534
535 hwerrs &= dd->ipath_hwerrmask;
536
537 /* We log some errors to EEPROM, check if we have any of those. */
538 for (log_idx = 0; log_idx < IPATH_EEP_LOG_CNT; ++log_idx)
539 if (hwerrs & dd->ipath_eep_st_masks[log_idx].hwerrs_to_log)
540 ipath_inc_eeprom_err(dd, log_idx, 1);
541
542 /*
543 * Make sure we get this much out, unless told to be quiet, or
544 * unless it's a parity error we may recover from, or it occurred
545 * within the last 5 seconds.
546 */
547 if ((hwerrs & ~(dd->ipath_lasthwerror | TXE_PIO_PARITY |
548 RXE_EAGER_PARITY)) ||
549 (ipath_debug & __IPATH_VERBDBG))
550 dev_info(&dd->pcidev->dev, "Hardware error: hwerr=0x%llx "
551 "(cleared)\n", (unsigned long long) hwerrs);
552 dd->ipath_lasthwerror |= hwerrs;
553
554 if (hwerrs & ~dd->ipath_hwe_bitsextant)
555 ipath_dev_err(dd, "hwerror interrupt with unknown errors "
556 "%llx set\n", (unsigned long long)
557 (hwerrs & ~dd->ipath_hwe_bitsextant));
558
559 ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control);
560 if ((ctrl & INFINIPATH_C_FREEZEMODE) && !ipath_diag_inuse) {
561 /*
562 * parity errors in send memory are recoverable,
563 * just cancel the send (if indicated in sendbuffererror),
564 * count the occurrence, unfreeze (if no other handled
565 * hardware error bits are set), and continue. They can
566 * occur if a processor speculative read is done to the PIO
567 * buffer while we are sending a packet, for example.
568 */
569 if (hwerrs & TXE_PIO_PARITY) {
570 ipath_ht_txe_recover(dd);
571 hwerrs &= ~TXE_PIO_PARITY;
572 }
573
574 if (!hwerrs) {
575 ipath_dbg("Clearing freezemode on ignored or "
576 "recovered hardware error\n");
577 ipath_clear_freeze(dd);
578 }
579 }
580
581 *msg = '\0';
582
583 /*
584 * We may someday want to decode which bits belong to which
585 * functional area for parity errors, etc.
586 */
587 if (hwerrs & (infinipath_hwe_htcmemparityerr_mask
588 << INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT)) {
589 bits = (u32) ((hwerrs >>
590 INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT) &
591 INFINIPATH_HWE_HTCMEMPARITYERR_MASK);
592 snprintf(bitsmsg, sizeof bitsmsg, "[HTC Parity Errs %x] ",
593 bits);
594 strlcat(msg, bitsmsg, msgl);
595 }
596
597 ipath_format_hwerrors(hwerrs,
598 ipath_6110_hwerror_msgs,
599 ARRAY_SIZE(ipath_6110_hwerror_msgs),
600 msg, msgl);
601
602 if (hwerrs & (_IPATH_HTLINK0_CRCBITS | _IPATH_HTLINK1_CRCBITS))
603 hwerr_crcbits(dd, hwerrs, msg, msgl);
604
605 if (hwerrs & INFINIPATH_HWE_MEMBISTFAILED) {
606 strlcat(msg, "[Memory BIST test failed, InfiniPath hardware unusable]",
607 msgl);
608 /* ignore from now on, so disable until driver reloaded */
609 dd->ipath_hwerrmask &= ~INFINIPATH_HWE_MEMBISTFAILED;
610 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
611 dd->ipath_hwerrmask);
612 }
613#define _IPATH_PLL_FAIL (INFINIPATH_HWE_COREPLL_FBSLIP | \
614 INFINIPATH_HWE_COREPLL_RFSLIP | \
615 INFINIPATH_HWE_HTBPLL_FBSLIP | \
616 INFINIPATH_HWE_HTBPLL_RFSLIP | \
617 INFINIPATH_HWE_HTAPLL_FBSLIP | \
618 INFINIPATH_HWE_HTAPLL_RFSLIP)
619
620 if (hwerrs & _IPATH_PLL_FAIL) {
621 snprintf(bitsmsg, sizeof bitsmsg,
622 "[PLL failed (%llx), InfiniPath hardware unusable]",
623 (unsigned long long) (hwerrs & _IPATH_PLL_FAIL));
624 strlcat(msg, bitsmsg, msgl);
625 /* ignore from now on, so disable until driver reloaded */
626 dd->ipath_hwerrmask &= ~(hwerrs & _IPATH_PLL_FAIL);
627 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
628 dd->ipath_hwerrmask);
629 }
630
631 if (hwerrs & INFINIPATH_HWE_SERDESPLLFAILED) {
632 /*
633 * If it occurs, it is left masked since the external
634 * interface is unused.
635 */
636 dd->ipath_hwerrmask &= ~INFINIPATH_HWE_SERDESPLLFAILED;
637 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
638 dd->ipath_hwerrmask);
639 }
640
641 if (hwerrs) {
642 /*
643 * If any bits are set that we aren't ignoring, only
644 * make the complaint once, in case it's stuck or
645 * recurring, and we get here multiple times.
646 * Force the link down, so the switch knows, and so
647 * the LEDs are turned off; the device is marked as
648 * no longer usable below.
649 */
650 if (dd->ipath_flags & IPATH_INITTED) {
651 ipath_set_linkstate(dd, IPATH_IB_LINKDOWN);
652 ipath_setup_ht_setextled(dd,
653 INFINIPATH_IBCS_L_STATE_DOWN,
654 INFINIPATH_IBCS_LT_STATE_DISABLED);
655 ipath_dev_err(dd, "Fatal Hardware Error (freeze "
656 "mode), no longer usable, SN %.16s\n",
657 dd->ipath_serial);
658 isfatal = 1;
659 }
660 *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
661 /* mark as having had error */
662 *dd->ipath_statusp |= IPATH_STATUS_HWERROR;
663 /*
664 * mark as not usable, at a minimum until driver
665 * is reloaded, probably until reboot, since no
666 * other reset is possible.
667 */
668 dd->ipath_flags &= ~IPATH_INITTED;
669 } else {
670 *msg = 0; /* recovered from all of them */
671 }
672 if (*msg)
673 ipath_dev_err(dd, "%s hardware error\n", msg);
674 if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg)
675 /*
676 * for status file; if no trailing brace is copied,
677 * we'll know it was truncated.
678 */
679 snprintf(dd->ipath_freezemsg,
680 dd->ipath_freezelen, "{%s}", msg);
681
682bail:;
683}
684
685/**
686 * ipath_ht_boardname - fill in the board name
687 * @dd: the infinipath device
688 * @name: the output buffer
689 * @namelen: the size of the output buffer
690 *
691 * fill in the board name, based on the board revision register
692 */
693static int ipath_ht_boardname(struct ipath_devdata *dd, char *name,
694 size_t namelen)
695{
696 char *n = NULL;
697 u8 boardrev = dd->ipath_boardrev;
698 int ret = 0;
699
700 switch (boardrev) {
701 case 5:
702 /*
703 * original production board; two production levels, with
704 * different serial number ranges. See ipath_ht_early_init() for
705 * case where we enable IPATH_GPIO_INTR for later serial # range.
706 * Original 112* serial number is no longer supported.
707 */
708 n = "InfiniPath_QHT7040";
709 break;
710 case 7:
711 /* small form factor production board */
712 n = "InfiniPath_QHT7140";
713 break;
714 default: /* don't know, just print the number */
715 ipath_dev_err(dd, "Don't yet know about board "
716 "with ID %u\n", boardrev);
717 snprintf(name, namelen, "Unknown_InfiniPath_QHT7xxx_%u",
718 boardrev);
719 break;
720 }
721 if (n)
722 snprintf(name, namelen, "%s", n);
723
724 if (ret) {
725 ipath_dev_err(dd, "Unsupported InfiniPath board %s!\n", name);
726 goto bail;
727 }
728 if (dd->ipath_majrev != 3 || (dd->ipath_minrev < 2 ||
729 dd->ipath_minrev > 4)) {
730 /*
731 * This version of the driver only supports Rev 3.2 - 3.4
732 */
733 ipath_dev_err(dd,
734 "Unsupported InfiniPath hardware revision %u.%u!\n",
735 dd->ipath_majrev, dd->ipath_minrev);
736 ret = 1;
737 goto bail;
738 }
739 /*
740 * pkt/word counters are 32 bit, and therefore wrap fast enough
741 * that we snapshot them from a timer, and maintain 64 bit shadow
742 * copies
743 */
744 dd->ipath_flags |= IPATH_32BITCOUNTERS;
745 dd->ipath_flags |= IPATH_GPIO_INTR;
746 if (dd->ipath_lbus_speed != 800)
747 ipath_dev_err(dd,
748 "Incorrectly configured for HT @ %uMHz\n",
749 dd->ipath_lbus_speed);
750
751 /*
752 * set here, not in ipath_init_*_funcs because we have to do
753 * it after we can read chip registers.
754 */
755 dd->ipath_ureg_align =
756 ipath_read_kreg32(dd, dd->ipath_kregs->kr_pagealign);
757
758bail:
759 return ret;
760}
761
762static void ipath_check_htlink(struct ipath_devdata *dd)
763{
764 u8 linkerr, link_off, i;
765
766 for (i = 0; i < 2; i++) {
767 link_off = dd->ipath_ht_slave_off + i * 4 + 0xd;
768 if (pci_read_config_byte(dd->pcidev, link_off, &linkerr))
769 dev_info(&dd->pcidev->dev, "Couldn't read "
770 "linkerror%d of HT slave/primary block\n",
771 i);
772 else if (linkerr & 0xf0) {
773 ipath_cdbg(VERBOSE, "HT linkerr%d bits 0x%x set, "
774 "clearing\n", i, linkerr >> 4);
775 /*
776 * writing the linkerr bits that are set should
777 * clear them
778 */
779 if (pci_write_config_byte(dd->pcidev, link_off,
780 linkerr))
781 ipath_dbg("Failed write to clear HT "
782 "linkerror%d\n", i);
783 if (pci_read_config_byte(dd->pcidev, link_off,
784 &linkerr))
785 dev_info(&dd->pcidev->dev,
786 "Couldn't reread linkerror%d of "
787 "HT slave/primary block\n", i);
788 else if (linkerr & 0xf0)
789 dev_info(&dd->pcidev->dev,
790 "HT linkerror%d bits 0x%x "
791 "couldn't be cleared\n",
792 i, linkerr >> 4);
793 }
794 }
795}
796
797static int ipath_setup_ht_reset(struct ipath_devdata *dd)
798{
799 ipath_dbg("No reset possible for this InfiniPath hardware\n");
800 return 0;
801}
802
803#define HT_INTR_DISC_CONFIG 0x80 /* HT interrupt and discovery cap */
804#define HT_INTR_REG_INDEX 2 /* intconfig requires indirect accesses */
805
806/*
807 * Bits 13-15 of command==0 identify the slave/primary block. Clear any
808 * HT CRC errors. We only bother to do this at load time, because it's
809 * OK if it happened before we were loaded (first time after
810 * boot/reset), but any time after that it's fatal anyway. Also, we
811 * must not check for upper-byte errors if we are in 8-bit mode, so
812 * figure out our width. For now, at least, also complain if it's 8 bit.
813 */
814static void slave_or_pri_blk(struct ipath_devdata *dd, struct pci_dev *pdev,
815 int pos, u8 cap_type)
816{
817 u8 linkwidth = 0, linkerr, link_a_b_off, link_off;
818 u16 linkctrl = 0;
819 int i;
820
821 dd->ipath_ht_slave_off = pos;
822 /* command word, master_host bit */
823 /* master host || slave */
824 if ((cap_type >> 2) & 1)
825 link_a_b_off = 4;
826 else
827 link_a_b_off = 0;
828 ipath_cdbg(VERBOSE, "HT%u (Link %c) connected to processor\n",
829 link_a_b_off ? 1 : 0,
830 link_a_b_off ? 'B' : 'A');
831
832 link_a_b_off += pos;
833
834 /*
835 * check both link control registers; clear both HT CRC sets if
836 * necessary.
837 */
838 for (i = 0; i < 2; i++) {
839 link_off = pos + i * 4 + 0x4;
840 if (pci_read_config_word(pdev, link_off, &linkctrl))
841 ipath_dev_err(dd, "Couldn't read HT link control%d "
842 "register\n", i);
843 else if (linkctrl & (0xf << 8)) {
844 ipath_cdbg(VERBOSE, "Clear linkctrl%d CRC Error "
845 "bits %x\n", i, linkctrl & (0xf << 8));
846 /*
847 * now write them back to clear the error.
848 */
849 pci_write_config_word(pdev, link_off,
850 linkctrl & (0xf << 8));
851 }
852 }
853
854 /*
855 * As with HT CRC bits, same for protocol errors that might occur
856 * during boot.
857 */
858 for (i = 0; i < 2; i++) {
859 link_off = pos + i * 4 + 0xd;
860 if (pci_read_config_byte(pdev, link_off, &linkerr))
861 dev_info(&pdev->dev, "Couldn't read linkerror%d "
862 "of HT slave/primary block\n", i);
863 else if (linkerr & 0xf0) {
864 ipath_cdbg(VERBOSE, "HT linkerr%d bits 0x%x set, "
865 "clearing\n", i, linkerr >> 4);
866 /*
867 * writing the linkerr bits that are set will clear
868 * them
869 */
870 if (pci_write_config_byte
871 (pdev, link_off, linkerr))
872 ipath_dbg("Failed write to clear HT "
873 "linkerror%d\n", i);
874 if (pci_read_config_byte(pdev, link_off, &linkerr))
875 dev_info(&pdev->dev, "Couldn't reread "
876 "linkerror%d of HT slave/primary "
877 "block\n", i);
878 else if (linkerr & 0xf0)
879 dev_info(&pdev->dev, "HT linkerror%d bits "
880 "0x%x couldn't be cleared\n",
881 i, linkerr >> 4);
882 }
883 }
884
885 /*
886 * This is just for our link to the host, not for devices
887 * connected through the tunnel.
888 */
889
890 if (pci_read_config_byte(pdev, link_a_b_off + 7, &linkwidth))
891 ipath_dev_err(dd, "Couldn't read HT link width "
892 "config register\n");
893 else {
894 u32 width;
895 switch (linkwidth & 7) {
896 case 5:
897 width = 4;
898 break;
899 case 4:
900 width = 2;
901 break;
902 case 3:
903 width = 32;
904 break;
905 case 1:
906 width = 16;
907 break;
908 case 0:
909 default: /* if wrong, assume 8 bit */
910 width = 8;
911 break;
912 }
913
914 dd->ipath_lbus_width = width;
915
916 if (linkwidth != 0x11) {
917 ipath_dev_err(dd, "Not configured for 16 bit HT "
918 "(%x)\n", linkwidth);
919 if (!(linkwidth & 0xf)) {
920 ipath_dbg("Will ignore HT lane1 errors\n");
921 dd->ipath_flags |= IPATH_8BIT_IN_HT0;
922 }
923 }
924 }
925
926 /*
927 * This is just for our link to the host, not for devices
928 * connected through the tunnel.
929 */
930 if (pci_read_config_byte(pdev, link_a_b_off + 0xd, &linkwidth))
931 ipath_dev_err(dd, "Couldn't read HT link frequency "
932 "config register\n");
933 else {
934 u32 speed;
935 switch (linkwidth & 0xf) {
936 case 6:
937 speed = 1000;
938 break;
939 case 5:
940 speed = 800;
941 break;
942 case 4:
943 speed = 600;
944 break;
945 case 3:
946 speed = 500;
947 break;
948 case 2:
949 speed = 400;
950 break;
951 case 1:
952 speed = 300;
953 break;
954 default:
955 /*
956 * assume reserved and vendor-specific are 200...
957 */
958 case 0:
959 speed = 200;
960 break;
961 }
962 dd->ipath_lbus_speed = speed;
963 }
964
965 snprintf(dd->ipath_lbus_info, sizeof(dd->ipath_lbus_info),
966 "HyperTransport,%uMHz,x%u\n",
967 dd->ipath_lbus_speed,
968 dd->ipath_lbus_width);
969}
970
971static int ipath_ht_intconfig(struct ipath_devdata *dd)
972{
973 int ret;
974
975 if (dd->ipath_intconfig) {
976 ipath_write_kreg(dd, dd->ipath_kregs->kr_interruptconfig,
977 dd->ipath_intconfig); /* interrupt address */
978 ret = 0;
979 } else {
980 ipath_dev_err(dd, "No interrupts enabled, couldn't setup "
981 "interrupt address\n");
982 ret = -EINVAL;
983 }
984
985 return ret;
986}
987
988static void ipath_ht_irq_update(struct pci_dev *dev, int irq,
989 struct ht_irq_msg *msg)
990{
991 struct ipath_devdata *dd = pci_get_drvdata(dev);
992 u64 prev_intconfig = dd->ipath_intconfig;
993
994 dd->ipath_intconfig = msg->address_lo;
995 dd->ipath_intconfig |= ((u64) msg->address_hi) << 32;
996
997 /*
998 * If the previous value of dd->ipath_intconfig is zero, we're
999 * getting configured for the first time, and must not program the
1000 * intconfig register here (it will be programmed later, when the
1001 * hardware is ready). Otherwise, we should.
1002 */
1003 if (prev_intconfig)
1004 ipath_ht_intconfig(dd);
1005}
1006
1007/**
1008 * ipath_setup_ht_config - setup the interruptconfig register
1009 * @dd: the infinipath device
1010 * @pdev: the PCI device
1011 *
1012 * setup the interruptconfig register from the HT config info.
1013 * Also clear CRC errors in HT linkcontrol, if necessary.
1014 * This is done only for the real hardware. It is done before
1015 * chip address space is initialized, so it can't touch infinipath registers
1016 */
1017static int ipath_setup_ht_config(struct ipath_devdata *dd,
1018 struct pci_dev *pdev)
1019{
1020 int pos, ret;
1021
1022 ret = __ht_create_irq(pdev, 0, ipath_ht_irq_update);
1023 if (ret < 0) {
1024 ipath_dev_err(dd, "Couldn't create interrupt handler: "
1025 "err %d\n", ret);
1026 goto bail;
1027 }
1028 dd->ipath_irq = ret;
1029 ret = 0;
1030
1031 /*
1032 * Handle clearing CRC errors in linkctrl register if necessary. We
1033 * do this early, before we ever enable errors or hardware errors,
1034 * mostly to avoid causing the chip to enter freeze mode.
1035 */
1036 pos = pci_find_capability(pdev, PCI_CAP_ID_HT);
1037 if (!pos) {
1038 ipath_dev_err(dd, "Couldn't find HyperTransport "
1039 "capability; no interrupts\n");
1040 ret = -ENODEV;
1041 goto bail;
1042 }
1043 do {
1044 u8 cap_type;
1045
1046 /*
1047 * The HT capability type byte is 3 bytes after the
1048 * capability byte.
1049 */
1050 if (pci_read_config_byte(pdev, pos + 3, &cap_type)) {
1051 dev_info(&pdev->dev, "Couldn't read config "
1052 "command @ %d\n", pos);
1053 continue;
1054 }
1055 if (!(cap_type & 0xE0))
1056 slave_or_pri_blk(dd, pdev, pos, cap_type);
1057 } while ((pos = pci_find_next_capability(pdev, pos,
1058 PCI_CAP_ID_HT)));
1059
1060 dd->ipath_flags |= IPATH_SWAP_PIOBUFS;
1061
1062bail:
1063 return ret;
1064}
1065
1066/**
1067 * ipath_setup_ht_cleanup - clean up any per-chip chip-specific stuff
1068 * @dd: the infinipath device
1069 *
1070 * Called during driver unload.
1071 * This is currently a nop for the HT chip, not for all chips
1072 */
1073static void ipath_setup_ht_cleanup(struct ipath_devdata *dd)
1074{
1075}
1076
1077/**
1078 * ipath_setup_ht_setextled - set the state of the two external LEDs
1079 * @dd: the infinipath device
1080 * @lst: the L state
1081 * @ltst: the LT state
1082 *
1083 * Set the state of the two external LEDs, to indicate physical and
1084 * logical state of IB link. For this chip (at least with recommended
1085 * board pinouts), LED1 is Green (physical state), and LED2 is Yellow
1086 * (logical state)
1087 *
1088 * Note: We try to match the Mellanox HCA LED behavior as best
1089 * we can. Green indicates physical link state is OK (something is
1090 * plugged in, and we can train).
1091 * Amber indicates the link is logically up (ACTIVE).
1092 * Mellanox further blinks the amber LED to indicate data packet
1093 * activity, but we have no hardware support for that, so it would
1094 * require waking up every 10-20 msecs and checking the counters
1095 * on the chip, and then turning the LED off if appropriate. That's
1096 * visible overhead, so not something we will do.
1097 *
1098 */
1099static void ipath_setup_ht_setextled(struct ipath_devdata *dd,
1100 u64 lst, u64 ltst)
1101{
1102 u64 extctl;
1103 unsigned long flags = 0;
1104
1105 /* the diags use the LED to indicate diag info, so we leave
1106 * the external LED alone when the diags are running */
1107 if (ipath_diag_inuse)
1108 return;
1109
1110 /* Allow override of the LED display, e.g. for locating a system in a rack */
1111 if (dd->ipath_led_override) {
1112 ltst = (dd->ipath_led_override & IPATH_LED_PHYS)
1113 ? INFINIPATH_IBCS_LT_STATE_LINKUP
1114 : INFINIPATH_IBCS_LT_STATE_DISABLED;
1115 lst = (dd->ipath_led_override & IPATH_LED_LOG)
1116 ? INFINIPATH_IBCS_L_STATE_ACTIVE
1117 : INFINIPATH_IBCS_L_STATE_DOWN;
1118 }
1119
1120 spin_lock_irqsave(&dd->ipath_gpio_lock, flags);
1121 /*
1122 * start by setting both LED control bits to off, then turn
1123 * on the appropriate bit(s).
1124 */
1125 if (dd->ipath_boardrev == 8) { /* LS/X-1 uses different pins */
1126 /*
1127 * major difference is that INFINIPATH_EXTC_LEDGBLERR_OFF
1128 * is inverted, because it is normally used to indicate
1129 * a hardware fault at reset, if there were errors
1130 */
1131 extctl = (dd->ipath_extctrl & ~INFINIPATH_EXTC_LEDGBLOK_ON)
1132 | INFINIPATH_EXTC_LEDGBLERR_OFF;
1133 if (ltst == INFINIPATH_IBCS_LT_STATE_LINKUP)
1134 extctl &= ~INFINIPATH_EXTC_LEDGBLERR_OFF;
1135 if (lst == INFINIPATH_IBCS_L_STATE_ACTIVE)
1136 extctl |= INFINIPATH_EXTC_LEDGBLOK_ON;
1137 } else {
1138 extctl = dd->ipath_extctrl &
1139 ~(INFINIPATH_EXTC_LED1PRIPORT_ON |
1140 INFINIPATH_EXTC_LED2PRIPORT_ON);
1141 if (ltst == INFINIPATH_IBCS_LT_STATE_LINKUP)
1142 extctl |= INFINIPATH_EXTC_LED1PRIPORT_ON;
1143 if (lst == INFINIPATH_IBCS_L_STATE_ACTIVE)
1144 extctl |= INFINIPATH_EXTC_LED2PRIPORT_ON;
1145 }
1146 dd->ipath_extctrl = extctl;
1147 ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, extctl);
1148 spin_unlock_irqrestore(&dd->ipath_gpio_lock, flags);
1149}
1150
1151static void ipath_init_ht_variables(struct ipath_devdata *dd)
1152{
1153 /*
1154 * setup the register offsets, since they are different for each
1155 * chip
1156 */
1157 dd->ipath_kregs = &ipath_ht_kregs;
1158 dd->ipath_cregs = &ipath_ht_cregs;
1159
1160 dd->ipath_gpio_sda_num = _IPATH_GPIO_SDA_NUM;
1161 dd->ipath_gpio_scl_num = _IPATH_GPIO_SCL_NUM;
1162 dd->ipath_gpio_sda = IPATH_GPIO_SDA;
1163 dd->ipath_gpio_scl = IPATH_GPIO_SCL;
1164
1165 /*
1166 * Fill in data for field-values that change in newer chips.
1167 * We dynamically specify only the mask for LINKTRAININGSTATE
1168 * and only the shift for LINKSTATE, as they are the only ones
1169 * that change. Also precalculate the 3 link states of interest
1170 * and the combined mask.
1171 */
1172 dd->ibcs_ls_shift = IBA6110_IBCS_LINKSTATE_SHIFT;
1173 dd->ibcs_lts_mask = IBA6110_IBCS_LINKTRAININGSTATE_MASK;
1174 dd->ibcs_mask = (INFINIPATH_IBCS_LINKSTATE_MASK <<
1175 dd->ibcs_ls_shift) | dd->ibcs_lts_mask;
1176 dd->ib_init = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
1177 INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
1178 (INFINIPATH_IBCS_L_STATE_INIT << dd->ibcs_ls_shift);
1179 dd->ib_arm = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
1180 INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
1181 (INFINIPATH_IBCS_L_STATE_ARM << dd->ibcs_ls_shift);
1182 dd->ib_active = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
1183 INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
1184 (INFINIPATH_IBCS_L_STATE_ACTIVE << dd->ibcs_ls_shift);
1185
1186 /*
1187 * Fill in data for ibcc field-values that change in newer chips.
1188 * We dynamically specify only the mask for LINKINITCMD
1189 * and only the shift for LINKCMD and MAXPKTLEN, as they are
1190 * the only ones that change.
1191 */
1192 dd->ibcc_lic_mask = INFINIPATH_IBCC_LINKINITCMD_MASK;
1193 dd->ibcc_lc_shift = INFINIPATH_IBCC_LINKCMD_SHIFT;
1194 dd->ibcc_mpl_shift = INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
1195
1196 /* Fill in shifts for RcvCtrl. */
1197 dd->ipath_r_portenable_shift = INFINIPATH_R_PORTENABLE_SHIFT;
1198 dd->ipath_r_intravail_shift = INFINIPATH_R_INTRAVAIL_SHIFT;
1199 dd->ipath_r_tailupd_shift = INFINIPATH_R_TAILUPD_SHIFT;
1200 dd->ipath_r_portcfg_shift = 0; /* Not on IBA6110 */
1201
1202 dd->ipath_i_bitsextant =
1203 (INFINIPATH_I_RCVURG_MASK << INFINIPATH_I_RCVURG_SHIFT) |
1204 (INFINIPATH_I_RCVAVAIL_MASK <<
1205 INFINIPATH_I_RCVAVAIL_SHIFT) |
1206 INFINIPATH_I_ERROR | INFINIPATH_I_SPIOSENT |
1207 INFINIPATH_I_SPIOBUFAVAIL | INFINIPATH_I_GPIO;
1208
1209 dd->ipath_e_bitsextant =
1210 INFINIPATH_E_RFORMATERR | INFINIPATH_E_RVCRC |
1211 INFINIPATH_E_RICRC | INFINIPATH_E_RMINPKTLEN |
1212 INFINIPATH_E_RMAXPKTLEN | INFINIPATH_E_RLONGPKTLEN |
1213 INFINIPATH_E_RSHORTPKTLEN | INFINIPATH_E_RUNEXPCHAR |
1214 INFINIPATH_E_RUNSUPVL | INFINIPATH_E_REBP |
1215 INFINIPATH_E_RIBFLOW | INFINIPATH_E_RBADVERSION |
1216 INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
1217 INFINIPATH_E_RBADTID | INFINIPATH_E_RHDRLEN |
1218 INFINIPATH_E_RHDR | INFINIPATH_E_RIBLOSTLINK |
1219 INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SMAXPKTLEN |
1220 INFINIPATH_E_SUNDERRUN | INFINIPATH_E_SPKTLEN |
1221 INFINIPATH_E_SDROPPEDSMPPKT | INFINIPATH_E_SDROPPEDDATAPKT |
1222 INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SUNEXPERRPKTNUM |
1223 INFINIPATH_E_SUNSUPVL | INFINIPATH_E_IBSTATUSCHANGED |
1224 INFINIPATH_E_INVALIDADDR | INFINIPATH_E_RESET |
1225 INFINIPATH_E_HARDWARE;
1226
1227 dd->ipath_hwe_bitsextant =
1228 (INFINIPATH_HWE_HTCMEMPARITYERR_MASK <<
1229 INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT) |
1230 (INFINIPATH_HWE_TXEMEMPARITYERR_MASK <<
1231 INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) |
1232 (INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
1233 INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) |
1234 INFINIPATH_HWE_HTCLNKABYTE0CRCERR |
1235 INFINIPATH_HWE_HTCLNKABYTE1CRCERR |
1236 INFINIPATH_HWE_HTCLNKBBYTE0CRCERR |
1237 INFINIPATH_HWE_HTCLNKBBYTE1CRCERR |
1238 INFINIPATH_HWE_HTCMISCERR4 |
1239 INFINIPATH_HWE_HTCMISCERR5 | INFINIPATH_HWE_HTCMISCERR6 |
1240 INFINIPATH_HWE_HTCMISCERR7 |
1241 INFINIPATH_HWE_HTCBUSTREQPARITYERR |
1242 INFINIPATH_HWE_HTCBUSTRESPPARITYERR |
1243 INFINIPATH_HWE_HTCBUSIREQPARITYERR |
1244 INFINIPATH_HWE_RXDSYNCMEMPARITYERR |
1245 INFINIPATH_HWE_MEMBISTFAILED |
1246 INFINIPATH_HWE_COREPLL_FBSLIP |
1247 INFINIPATH_HWE_COREPLL_RFSLIP |
1248 INFINIPATH_HWE_HTBPLL_FBSLIP |
1249 INFINIPATH_HWE_HTBPLL_RFSLIP |
1250 INFINIPATH_HWE_HTAPLL_FBSLIP |
1251 INFINIPATH_HWE_HTAPLL_RFSLIP |
1252 INFINIPATH_HWE_SERDESPLLFAILED |
1253 INFINIPATH_HWE_IBCBUSTOSPCPARITYERR |
1254 INFINIPATH_HWE_IBCBUSFRSPCPARITYERR;
1255
1256 dd->ipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK;
1257 dd->ipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK;
1258 dd->ipath_i_rcvavail_shift = INFINIPATH_I_RCVAVAIL_SHIFT;
1259 dd->ipath_i_rcvurg_shift = INFINIPATH_I_RCVURG_SHIFT;
1260
1261 /*
1262 * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity.
1263 * 2 is Some Misc, 3 is reserved for future.
1264 */
1265 dd->ipath_eep_st_masks[0].hwerrs_to_log =
1266 INFINIPATH_HWE_TXEMEMPARITYERR_MASK <<
1267 INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT;
1268
1269 dd->ipath_eep_st_masks[1].hwerrs_to_log =
1270 INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
1271 INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT;
1272
1273 dd->ipath_eep_st_masks[2].errs_to_log = INFINIPATH_E_RESET;
1274
1275 dd->delay_mult = 2; /* SDR, 4X, can't change */
1276
1277 dd->ipath_link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
1278 dd->ipath_link_speed_supported = IPATH_IB_SDR;
1279 dd->ipath_link_width_enabled = IB_WIDTH_4X;
1280 dd->ipath_link_speed_enabled = dd->ipath_link_speed_supported;
1281 /* these can't change for this chip, so set once */
1282 dd->ipath_link_width_active = dd->ipath_link_width_enabled;
1283 dd->ipath_link_speed_active = dd->ipath_link_speed_enabled;
1284}
1285
1286/**
1287 * ipath_ht_init_hwerrors - enable hardware errors
1288 * @dd: the infinipath device
1289 *
1290 * now that we have finished initializing everything that might reasonably
1291 * cause a hardware error, and cleared those error bits as they occur,
1292 * we can enable hardware errors in the mask (potentially enabling
1293 * freeze mode), and enable hardware errors as errors (along with
1294 * everything else) in errormask
1295 */
1296static void ipath_ht_init_hwerrors(struct ipath_devdata *dd)
1297{
1298 ipath_err_t val;
1299 u64 extsval;
1300
1301 extsval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extstatus);
1302
1303 if (!(extsval & INFINIPATH_EXTS_MEMBIST_ENDTEST))
1304 ipath_dev_err(dd, "MemBIST did not complete!\n");
1305 if (extsval & INFINIPATH_EXTS_MEMBIST_CORRECT)
1306 ipath_dbg("MemBIST corrected\n");
1307
1308 ipath_check_htlink(dd);
1309
1310 /* barring bugs, all hwerrors become interrupts; start with all of them enabled */
1311 val = -1LL;
1312 /* don't look at CRC lane 1 of link A if HT0 is 8-bit */
1313 if (dd->ipath_flags & IPATH_8BIT_IN_HT0)
1314 val &= ~infinipath_hwe_htclnkabyte1crcerr;
1315 /* don't look at CRC lane 1 of link B if HT1 is 8-bit */
1316 if (dd->ipath_flags & IPATH_8BIT_IN_HT1)
1317 val &= ~infinipath_hwe_htclnkbbyte1crcerr;
1318
1319 /*
1320 * disable RXDSYNCMEMPARITY because external serdes is unused,
1321 * and therefore the logic will never be used or initialized,
1322 * and uninitialized state will normally result in this error
1323 * being asserted. Similarly for the external serdes PLL
1324 * lock signal.
1325 */
1326 val &= ~(INFINIPATH_HWE_SERDESPLLFAILED |
1327 INFINIPATH_HWE_RXDSYNCMEMPARITYERR);
1328
1329 /*
1330 * Disable MISCERR4 because of an inversion in the HT core
1331 * logic checking for errors that cause this bit to be set.
1332 * The errata can also cause the protocol error bit to be set
1333 * in the HT config space linkerror register(s).
1334 */
1335 val &= ~INFINIPATH_HWE_HTCMISCERR4;
1336
1337 /*
1338 * PLL ignored because unused MDIO interface has a logic problem
1339 */
1340 if (dd->ipath_boardrev == 4 || dd->ipath_boardrev == 9)
1341 val &= ~INFINIPATH_HWE_SERDESPLLFAILED;
1342 dd->ipath_hwerrmask = val;
1343}
1344
1345
1346
1347
1348/**
1349 * ipath_ht_bringup_serdes - bring up the serdes
1350 * @dd: the infinipath device
1351 */
1352static int ipath_ht_bringup_serdes(struct ipath_devdata *dd)
1353{
1354 u64 val, config1;
1355 int ret = 0, change = 0;
1356
1357 ipath_dbg("Trying to bringup serdes\n");
1358
1359 if (ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus) &
1360 INFINIPATH_HWE_SERDESPLLFAILED)
1361 {
1362 ipath_dbg("At start, serdes PLL failed bit set in "
1363 "hwerrstatus, clearing and continuing\n");
1364 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
1365 INFINIPATH_HWE_SERDESPLLFAILED);
1366 }
1367
1368 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
1369 config1 = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig1);
1370
1371 ipath_cdbg(VERBOSE, "Initial serdes status is config0=%llx "
1372 "config1=%llx, sstatus=%llx xgxs %llx\n",
1373 (unsigned long long) val, (unsigned long long) config1,
1374 (unsigned long long)
1375 ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesstatus),
1376 (unsigned long long)
1377 ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));
1378
1379 /* force reset on */
1380 val |= INFINIPATH_SERDC0_RESET_PLL
1381 /* | INFINIPATH_SERDC0_RESET_MASK */
1382 ;
1383 ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
1384 udelay(15); /* need pll reset set at least for a bit */
1385
1386 if (val & INFINIPATH_SERDC0_RESET_PLL) {
1387 u64 val2 = val &= ~INFINIPATH_SERDC0_RESET_PLL;
1388 /* set lane resets, and tx idle, during pll reset */
1389 val2 |= INFINIPATH_SERDC0_RESET_MASK |
1390 INFINIPATH_SERDC0_TXIDLE;
1391 ipath_cdbg(VERBOSE, "Clearing serdes PLL reset (writing "
1392 "%llx)\n", (unsigned long long) val2);
1393 ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0,
1394 val2);
1395 /*
1396 * be sure chip saw it
1397 */
1398 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
1399 /*
1400 * need pll reset clear at least 11 usec before lane
1401 * resets cleared; give it a few more
1402 */
1403 udelay(15);
1404 val = val2; /* for check below */
1405 }
1406
1407 if (val & (INFINIPATH_SERDC0_RESET_PLL |
1408 INFINIPATH_SERDC0_RESET_MASK |
1409 INFINIPATH_SERDC0_TXIDLE)) {
1410 val &= ~(INFINIPATH_SERDC0_RESET_PLL |
1411 INFINIPATH_SERDC0_RESET_MASK |
1412 INFINIPATH_SERDC0_TXIDLE);
1413 /* clear them */
1414 ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0,
1415 val);
1416 }
1417
1418 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
1419 if (val & INFINIPATH_XGXS_RESET) {
1420 /* normally true after boot */
1421 val &= ~INFINIPATH_XGXS_RESET;
1422 change = 1;
1423 }
1424 if (((val >> INFINIPATH_XGXS_RX_POL_SHIFT) &
1425 INFINIPATH_XGXS_RX_POL_MASK) != dd->ipath_rx_pol_inv ) {
1426 /* need to compensate for Tx inversion in partner */
1427 val &= ~(INFINIPATH_XGXS_RX_POL_MASK <<
1428 INFINIPATH_XGXS_RX_POL_SHIFT);
1429 val |= dd->ipath_rx_pol_inv <<
1430 INFINIPATH_XGXS_RX_POL_SHIFT;
1431 change = 1;
1432 }
1433 if (change)
1434 ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
1435
1436 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
1437
1438 /* clear current and de-emphasis bits */
1439 config1 &= ~0x0ffffffff00ULL;
1440 /* set current to 20ma */
1441 config1 |= 0x00000000000ULL;
1442 /* set de-emphasis to -5.68dB */
1443 config1 |= 0x0cccc000000ULL;
1444 ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig1, config1);
1445
1446 ipath_cdbg(VERBOSE, "After setup: serdes status is config0=%llx "
1447 "config1=%llx, sstatus=%llx xgxs %llx\n",
1448 (unsigned long long) val, (unsigned long long) config1,
1449 (unsigned long long)
1450 ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesstatus),
1451 (unsigned long long)
1452 ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));
1453
1454 return ret; /* for now, say we always succeeded */
1455}
1456
1457/**
1458 * ipath_ht_quiet_serdes - set serdes to txidle
1459 * @dd: the infinipath device
1460 * Called when the driver is being unloaded.
1461 */
1462static void ipath_ht_quiet_serdes(struct ipath_devdata *dd)
1463{
1464 u64 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
1465
1466 val |= INFINIPATH_SERDC0_TXIDLE;
1467 ipath_dbg("Setting TxIdleEn on serdes (config0 = %llx)\n",
1468 (unsigned long long) val);
1469 ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
1470}
1471
1472/**
1473 * ipath_ht_put_tid - write a TID in chip
1474 * @dd: the infinipath device
1475 * @tidptr: pointer to the expected TID (in chip) to update
1476 * @type: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0) for expected
1477 * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing
1478 *
1479 * This exists as a separate routine to allow for special locking etc.
1480 * It's used both for the full cleanup on exit and for the normal
1481 * setup and teardown.
1482 */
1483static void ipath_ht_put_tid(struct ipath_devdata *dd,
1484 u64 __iomem *tidptr, u32 type,
1485 unsigned long pa)
1486{
1487 if (!dd->ipath_kregbase)
1488 return;
1489
1490 if (pa != dd->ipath_tidinvalid) {
1491 if (unlikely((pa & ~INFINIPATH_RT_ADDR_MASK))) {
1492 dev_info(&dd->pcidev->dev,
1493 "physaddr %lx has more than "
1494 "40 bits, using only 40!!!\n", pa);
1495 pa &= INFINIPATH_RT_ADDR_MASK;
1496 }
1497 if (type == RCVHQ_RCV_TYPE_EAGER)
1498 pa |= dd->ipath_tidtemplate;
1499 else {
1500 /* in words (fixed, full page). */
1501 u64 lenvalid = PAGE_SIZE >> 2;
1502 lenvalid <<= INFINIPATH_RT_BUFSIZE_SHIFT;
1503 pa |= lenvalid | INFINIPATH_RT_VALID;
1504 }
1505 }
1506
1507 writeq(pa, tidptr);
1508}
1509
1510
1511/**
1512 * ipath_ht_clear_tids - clear all TID entries for a port, expected and eager
1513 * @dd: the infinipath device
1514 * @port: the port
1515 *
1516 * Used from ipath_close(), and at chip initialization.
1517 */
1518static void ipath_ht_clear_tids(struct ipath_devdata *dd, unsigned port)
1519{
1520 u64 __iomem *tidbase;
1521 int i;
1522
1523 if (!dd->ipath_kregbase)
1524 return;
1525
1526 ipath_cdbg(VERBOSE, "Invalidate TIDs for port %u\n", port);
1527
1528 /*
1529 * need to invalidate all of the expected TID entries for this
1530 * port, so we don't have valid entries that might somehow get
1531 * used (early in next use of this port, or through some bug)
1532 */
1533 tidbase = (u64 __iomem *) ((char __iomem *)(dd->ipath_kregbase) +
1534 dd->ipath_rcvtidbase +
1535 port * dd->ipath_rcvtidcnt *
1536 sizeof(*tidbase));
1537 for (i = 0; i < dd->ipath_rcvtidcnt; i++)
1538 ipath_ht_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
1539 dd->ipath_tidinvalid);
1540
1541 tidbase = (u64 __iomem *) ((char __iomem *)(dd->ipath_kregbase) +
1542 dd->ipath_rcvegrbase +
1543 port * dd->ipath_rcvegrcnt *
1544 sizeof(*tidbase));
1545
1546 for (i = 0; i < dd->ipath_rcvegrcnt; i++)
1547 ipath_ht_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
1548 dd->ipath_tidinvalid);
1549}
1550
1551/**
1552 * ipath_ht_tidtemplate - setup constants for TID updates
1553 * @dd: the infinipath device
1554 *
1555 * We set up values we use a lot, to avoid calculating them each time
1556 */
1557static void ipath_ht_tidtemplate(struct ipath_devdata *dd)
1558{
1559 dd->ipath_tidtemplate = dd->ipath_ibmaxlen >> 2;
1560 dd->ipath_tidtemplate <<= INFINIPATH_RT_BUFSIZE_SHIFT;
1561 dd->ipath_tidtemplate |= INFINIPATH_RT_VALID;
1562
1563 /*
1564 * work around chip errata bug 7358, by marking invalid tids
1565 * as having max length
1566 */
1567 dd->ipath_tidinvalid = (-1LL & INFINIPATH_RT_BUFSIZE_MASK) <<
1568 INFINIPATH_RT_BUFSIZE_SHIFT;
1569}
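The template set up above, combined with ipath_ht_put_tid(), determines the 64-bit TID word the chip finally sees: a 40-bit physical address, a buffer length in words, and a valid bit. The following is a standalone illustration of that packing only; the EX_* values are placeholders and do not reproduce the real INFINIPATH_RT_* definitions, which live in the driver headers and are not shown in this diff.

#include <stdint.h>

/* Placeholder field layout; the real INFINIPATH_RT_* values differ. */
#define EX_RT_ADDR_MASK     0xFFFFFFFFFFULL   /* low 40 bits: physical address */
#define EX_RT_BUFSIZE_SHIFT 48                /* assumed position of the length field */
#define EX_RT_VALID         (1ULL << 63)      /* assumed valid bit */

/* Pack one TID word the way ipath_ht_put_tid() does for an eager buffer. */
static uint64_t example_tid_word(uint64_t phys, uint32_t buflen_bytes)
{
	uint64_t tid = phys & EX_RT_ADDR_MASK;   /* truncate to 40 bits */

	tid |= (uint64_t)(buflen_bytes >> 2) << EX_RT_BUFSIZE_SHIFT; /* length in words */
	tid |= EX_RT_VALID;                      /* mark the entry in use */
	return tid;
}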
1570
1571static int ipath_ht_early_init(struct ipath_devdata *dd)
1572{
1573 u32 __iomem *piobuf;
1574 u32 pioincr, val32;
1575 int i;
1576
1577 /*
1578 * one cache line; long IB headers will spill over into received
1579 * buffer
1580 */
1581 dd->ipath_rcvhdrentsize = 16;
1582 dd->ipath_rcvhdrsize = IPATH_DFLT_RCVHDRSIZE;
1583
1584 /*
1585 * For HT, we allocate a somewhat overly large eager buffer,
1586 * such that we can guarantee that we can receive the largest
1587 * packet that we can send out. To truly support a 4KB MTU,
1588 * we need to bump this to a large value. To date, other than
1589 * testing, we have never encountered an HCA that can really
1590 * send 4KB MTU packets, so we do not handle that (we'll get
1591 * error interrupts if we ever see one).
1592 */
1593 dd->ipath_rcvegrbufsize = dd->ipath_piosize2k;
1594
1595 /*
1596 * the min() check here is currently a nop, but it may not
1597 * always be, depending on just how we do ipath_rcvegrbufsize
1598 */
1599 dd->ipath_ibmaxlen = min(dd->ipath_piosize2k,
1600 dd->ipath_rcvegrbufsize);
1601 dd->ipath_init_ibmaxlen = dd->ipath_ibmaxlen;
1602 ipath_ht_tidtemplate(dd);
1603
1604 /*
1605 * zero all the TID entries at startup. We do this for sanity,
1606 * in case of a previous driver crash of some kind, and also
1607 * because the chip powers up with these memories in an unknown
1608 * state. Use portcnt, not cfgports, since this is for the
1609 * full chip, not for current (possibly different) configuration
1610 * value.
1611 * Chip Errata bug 6447
1612 */
1613 for (val32 = 0; val32 < dd->ipath_portcnt; val32++)
1614 ipath_ht_clear_tids(dd, val32);
1615
1616 /*
1617 * write the pbc of each buffer, to be sure it's initialized, then
1618 * cancel all the buffers, and also abort any packets that might
1619 * have been in flight for some reason (the latter is for driver
1620 * unload/reload, but isn't a bad idea at first init). PIO send
1621 * isn't enabled at this point, so there is no danger of sending
1622 * these out on the wire.
1623 * Chip Errata bug 6610
1624 */
1625 piobuf = (u32 __iomem *) (((char __iomem *)(dd->ipath_kregbase)) +
1626 dd->ipath_piobufbase);
1627 pioincr = dd->ipath_palign / sizeof(*piobuf);
1628 for (i = 0; i < dd->ipath_piobcnt2k; i++) {
1629 /*
1630 * reasonable word count, just to init pbc
1631 */
1632 writel(16, piobuf);
1633 piobuf += pioincr;
1634 }
1635
1636 ipath_get_eeprom_info(dd);
1637 if (dd->ipath_boardrev == 5) {
1638 /*
1639 * Later production QHT7040 has same changes as QHT7140, so
1640 * can use GPIO interrupts. They have serial #'s starting
1641 * with 128, rather than 112.
1642 */
1643 if (dd->ipath_serial[0] == '1' &&
1644 dd->ipath_serial[1] == '2' &&
1645 dd->ipath_serial[2] == '8')
1646 dd->ipath_flags |= IPATH_GPIO_INTR;
1647 else {
1648 ipath_dev_err(dd, "Unsupported InfiniPath board "
1649 "(serial number %.16s)!\n",
1650 dd->ipath_serial);
1651 return 1;
1652 }
1653 }
1654
1655 if (dd->ipath_minrev >= 4) {
1656 /* Rev4+ reports extra errors via internal GPIO pins */
1657 dd->ipath_flags |= IPATH_GPIO_ERRINTRS;
1658 dd->ipath_gpio_mask |= IPATH_GPIO_ERRINTR_MASK;
1659 ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
1660 dd->ipath_gpio_mask);
1661 }
1662
1663 return 0;
1664}
1665
1666
1667/**
1668 * ipath_ht_get_base_info - set chip-specific flags for user code
1669 * @pd: the infinipath port data
1670 * @kbase: ipath_base_info pointer
1671 *
1672 * We set the HT flag so user code can tell HyperTransport from PCIe,
1673 * since the lower bandwidth of PCIe can affect some user packet algorithms.
1674 */
1675static int ipath_ht_get_base_info(struct ipath_portdata *pd, void *kbase)
1676{
1677 struct ipath_base_info *kinfo = kbase;
1678
1679 kinfo->spi_runtime_flags |= IPATH_RUNTIME_HT |
1680 IPATH_RUNTIME_PIO_REGSWAPPED;
1681
1682 if (pd->port_dd->ipath_minrev < 4)
1683 kinfo->spi_runtime_flags |= IPATH_RUNTIME_RCVHDR_COPY;
1684
1685 return 0;
1686}
1687
1688static void ipath_ht_free_irq(struct ipath_devdata *dd)
1689{
1690 free_irq(dd->ipath_irq, dd);
1691 ht_destroy_irq(dd->ipath_irq);
1692 dd->ipath_irq = 0;
1693 dd->ipath_intconfig = 0;
1694}
1695
1696static struct ipath_message_header *
1697ipath_ht_get_msgheader(struct ipath_devdata *dd, __le32 *rhf_addr)
1698{
1699 return (struct ipath_message_header *)
1700 &rhf_addr[sizeof(u64) / sizeof(u32)];
1701}
1702
1703static void ipath_ht_config_ports(struct ipath_devdata *dd, ushort cfgports)
1704{
1705 dd->ipath_portcnt =
1706 ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt);
1707 dd->ipath_p0_rcvegrcnt =
1708 ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrcnt);
1709}
1710
1711static void ipath_ht_read_counters(struct ipath_devdata *dd,
1712 struct infinipath_counters *cntrs)
1713{
1714 cntrs->LBIntCnt =
1715 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(LBIntCnt));
1716 cntrs->LBFlowStallCnt =
1717 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(LBFlowStallCnt));
1718 cntrs->TxSDmaDescCnt = 0;
1719 cntrs->TxUnsupVLErrCnt =
1720 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxUnsupVLErrCnt));
1721 cntrs->TxDataPktCnt =
1722 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDataPktCnt));
1723 cntrs->TxFlowPktCnt =
1724 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxFlowPktCnt));
1725 cntrs->TxDwordCnt =
1726 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDwordCnt));
1727 cntrs->TxLenErrCnt =
1728 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxLenErrCnt));
1729 cntrs->TxMaxMinLenErrCnt =
1730 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxMaxMinLenErrCnt));
1731 cntrs->TxUnderrunCnt =
1732 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxUnderrunCnt));
1733 cntrs->TxFlowStallCnt =
1734 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxFlowStallCnt));
1735 cntrs->TxDroppedPktCnt =
1736 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDroppedPktCnt));
1737 cntrs->RxDroppedPktCnt =
1738 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDroppedPktCnt));
1739 cntrs->RxDataPktCnt =
1740 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDataPktCnt));
1741 cntrs->RxFlowPktCnt =
1742 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxFlowPktCnt));
1743 cntrs->RxDwordCnt =
1744 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDwordCnt));
1745 cntrs->RxLenErrCnt =
1746 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLenErrCnt));
1747 cntrs->RxMaxMinLenErrCnt =
1748 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxMaxMinLenErrCnt));
1749 cntrs->RxICRCErrCnt =
1750 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxICRCErrCnt));
1751 cntrs->RxVCRCErrCnt =
1752 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxVCRCErrCnt));
1753 cntrs->RxFlowCtrlErrCnt =
1754 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxFlowCtrlErrCnt));
1755 cntrs->RxBadFormatCnt =
1756 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxBadFormatCnt));
1757 cntrs->RxLinkProblemCnt =
1758 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLinkProblemCnt));
1759 cntrs->RxEBPCnt =
1760 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxEBPCnt));
1761 cntrs->RxLPCRCErrCnt =
1762 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLPCRCErrCnt));
1763 cntrs->RxBufOvflCnt =
1764 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxBufOvflCnt));
1765 cntrs->RxTIDFullErrCnt =
1766 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxTIDFullErrCnt));
1767 cntrs->RxTIDValidErrCnt =
1768 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxTIDValidErrCnt));
1769 cntrs->RxPKeyMismatchCnt =
1770 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxPKeyMismatchCnt));
1771 cntrs->RxP0HdrEgrOvflCnt =
1772 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt));
1773 cntrs->RxP1HdrEgrOvflCnt =
1774 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP1HdrEgrOvflCnt));
1775 cntrs->RxP2HdrEgrOvflCnt =
1776 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP2HdrEgrOvflCnt));
1777 cntrs->RxP3HdrEgrOvflCnt =
1778 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP3HdrEgrOvflCnt));
1779 cntrs->RxP4HdrEgrOvflCnt =
1780 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP4HdrEgrOvflCnt));
1781 cntrs->RxP5HdrEgrOvflCnt =
1782 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP5HdrEgrOvflCnt));
1783 cntrs->RxP6HdrEgrOvflCnt =
1784 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP6HdrEgrOvflCnt));
1785 cntrs->RxP7HdrEgrOvflCnt =
1786 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP7HdrEgrOvflCnt));
1787 cntrs->RxP8HdrEgrOvflCnt =
1788 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP8HdrEgrOvflCnt));
1789 cntrs->RxP9HdrEgrOvflCnt = 0;
1790 cntrs->RxP10HdrEgrOvflCnt = 0;
1791 cntrs->RxP11HdrEgrOvflCnt = 0;
1792 cntrs->RxP12HdrEgrOvflCnt = 0;
1793 cntrs->RxP13HdrEgrOvflCnt = 0;
1794 cntrs->RxP14HdrEgrOvflCnt = 0;
1795 cntrs->RxP15HdrEgrOvflCnt = 0;
1796 cntrs->RxP16HdrEgrOvflCnt = 0;
1797 cntrs->IBStatusChangeCnt =
1798 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBStatusChangeCnt));
1799 cntrs->IBLinkErrRecoveryCnt =
1800 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt));
1801 cntrs->IBLinkDownedCnt =
1802 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBLinkDownedCnt));
1803 cntrs->IBSymbolErrCnt =
1804 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBSymbolErrCnt));
1805 cntrs->RxVL15DroppedPktCnt = 0;
1806 cntrs->RxOtherLocalPhyErrCnt = 0;
1807 cntrs->PcieRetryBufDiagQwordCnt = 0;
1808 cntrs->ExcessBufferOvflCnt = dd->ipath_overrun_thresh_errs;
1809 cntrs->LocalLinkIntegrityErrCnt =
1810 (dd->ipath_flags & IPATH_GPIO_ERRINTRS) ?
1811 dd->ipath_lli_errs : dd->ipath_lli_errors;
1812 cntrs->RxVlErrCnt = 0;
1813 cntrs->RxDlidFltrCnt = 0;
1814}
1815
1816
1817/* no interrupt fallback for these chips */
1818static int ipath_ht_nointr_fallback(struct ipath_devdata *dd)
1819{
1820 return 0;
1821}
1822
1823
1824/*
1825 * reset the XGXS (between serdes and IBC). Slightly less intrusive
1826 * than resetting the IBC or external link state, and useful in some
1827 * cases to cause some retraining. To do this right, we reset IBC
1828 * as well.
1829 */
1830static void ipath_ht_xgxs_reset(struct ipath_devdata *dd)
1831{
1832 u64 val, prev_val;
1833
1834 prev_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
1835 val = prev_val | INFINIPATH_XGXS_RESET;
1836 prev_val &= ~INFINIPATH_XGXS_RESET; /* be sure */
1837 ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
1838 dd->ipath_control & ~INFINIPATH_C_LINKENABLE);
1839 ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
1840 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
1841 ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, prev_val);
1842 ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
1843 dd->ipath_control);
1844}
1845
1846
1847static int ipath_ht_get_ib_cfg(struct ipath_devdata *dd, int which)
1848{
1849 int ret;
1850
1851 switch (which) {
1852 case IPATH_IB_CFG_LWID:
1853 ret = dd->ipath_link_width_active;
1854 break;
1855 case IPATH_IB_CFG_SPD:
1856 ret = dd->ipath_link_speed_active;
1857 break;
1858 case IPATH_IB_CFG_LWID_ENB:
1859 ret = dd->ipath_link_width_enabled;
1860 break;
1861 case IPATH_IB_CFG_SPD_ENB:
1862 ret = dd->ipath_link_speed_enabled;
1863 break;
1864 default:
1865 ret = -ENOTSUPP;
1866 break;
1867 }
1868 return ret;
1869}
1870
1871
1872/* we assume range checking is already done, if needed */
1873static int ipath_ht_set_ib_cfg(struct ipath_devdata *dd, int which, u32 val)
1874{
1875 int ret = 0;
1876
1877 if (which == IPATH_IB_CFG_LWID_ENB)
1878 dd->ipath_link_width_enabled = val;
1879 else if (which == IPATH_IB_CFG_SPD_ENB)
1880 dd->ipath_link_speed_enabled = val;
1881 else
1882 ret = -ENOTSUPP;
1883 return ret;
1884}
1885
1886
1887static void ipath_ht_config_jint(struct ipath_devdata *dd, u16 a, u16 b)
1888{
1889}
1890
1891
1892static int ipath_ht_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs)
1893{
1894 ipath_setup_ht_setextled(dd, ipath_ib_linkstate(dd, ibcs),
1895 ipath_ib_linktrstate(dd, ibcs));
1896 return 0;
1897}
1898
1899
1900/**
1901 * ipath_init_iba6110_funcs - set up the chip-specific function pointers
1902 * @dd: the infinipath device
1903 *
1904 * This is global, and is called directly at init to set up the
1905 * chip-specific function pointers for later use.
1906 */
1907void ipath_init_iba6110_funcs(struct ipath_devdata *dd)
1908{
1909 dd->ipath_f_intrsetup = ipath_ht_intconfig;
1910 dd->ipath_f_bus = ipath_setup_ht_config;
1911 dd->ipath_f_reset = ipath_setup_ht_reset;
1912 dd->ipath_f_get_boardname = ipath_ht_boardname;
1913 dd->ipath_f_init_hwerrors = ipath_ht_init_hwerrors;
1914 dd->ipath_f_early_init = ipath_ht_early_init;
1915 dd->ipath_f_handle_hwerrors = ipath_ht_handle_hwerrors;
1916 dd->ipath_f_quiet_serdes = ipath_ht_quiet_serdes;
1917 dd->ipath_f_bringup_serdes = ipath_ht_bringup_serdes;
1918 dd->ipath_f_clear_tids = ipath_ht_clear_tids;
1919 dd->ipath_f_put_tid = ipath_ht_put_tid;
1920 dd->ipath_f_cleanup = ipath_setup_ht_cleanup;
1921 dd->ipath_f_setextled = ipath_setup_ht_setextled;
1922 dd->ipath_f_get_base_info = ipath_ht_get_base_info;
1923 dd->ipath_f_free_irq = ipath_ht_free_irq;
1924 dd->ipath_f_tidtemplate = ipath_ht_tidtemplate;
1925 dd->ipath_f_intr_fallback = ipath_ht_nointr_fallback;
1926 dd->ipath_f_get_msgheader = ipath_ht_get_msgheader;
1927 dd->ipath_f_config_ports = ipath_ht_config_ports;
1928 dd->ipath_f_read_counters = ipath_ht_read_counters;
1929 dd->ipath_f_xgxs_reset = ipath_ht_xgxs_reset;
1930 dd->ipath_f_get_ib_cfg = ipath_ht_get_ib_cfg;
1931 dd->ipath_f_set_ib_cfg = ipath_ht_set_ib_cfg;
1932 dd->ipath_f_config_jint = ipath_ht_config_jint;
1933 dd->ipath_f_ib_updown = ipath_ht_ib_updown;
1934
1935 /*
1936 * initialize chip-specific variables
1937 */
1938 ipath_init_ht_variables(dd);
1939}
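All of the assignments above follow one pattern: the chip-agnostic core never calls the ipath_ht_* routines directly, it only dispatches through the ipath_f_* pointers (for example, bringup_link() in ipath_init_chip.c below does ret = dd->ipath_f_bringup_serdes(dd)). A minimal standalone sketch of that dispatch pattern follows; the example_* names are invented for illustration and are not the real ipath types.

#include <stdio.h>

/* Invented, simplified stand-ins for ipath_devdata and its ipath_f_* hooks. */
struct example_devdata {
	int (*f_bringup_serdes)(struct example_devdata *dd);
	void (*f_quiet_serdes)(struct example_devdata *dd);
};

static int example_ht_bringup_serdes(struct example_devdata *dd)
{
	printf("HT-specific serdes bringup\n");
	return 0;
}

static void example_ht_quiet_serdes(struct example_devdata *dd)
{
	printf("HT-specific serdes quiesce\n");
}

/* Chip-specific init fills in the hooks, as ipath_init_iba6110_funcs() does. */
static void example_init_ht_funcs(struct example_devdata *dd)
{
	dd->f_bringup_serdes = example_ht_bringup_serdes;
	dd->f_quiet_serdes = example_ht_quiet_serdes;
}

/* Generic code then dispatches without knowing which chip it drives. */
int main(void)
{
	struct example_devdata dd;

	example_init_ht_funcs(&dd);
	return dd.f_bringup_serdes(&dd);
}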
diff --git a/drivers/staging/rdma/ipath/ipath_init_chip.c b/drivers/staging/rdma/ipath/ipath_init_chip.c
deleted file mode 100644
index a5eea199f733..000000000000
--- a/drivers/staging/rdma/ipath/ipath_init_chip.c
+++ /dev/null
@@ -1,1062 +0,0 @@
1/*
2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/pci.h>
35#include <linux/netdevice.h>
36#include <linux/moduleparam.h>
37#include <linux/slab.h>
38#include <linux/stat.h>
39#include <linux/vmalloc.h>
40
41#include "ipath_kernel.h"
42#include "ipath_common.h"
43
44/*
45 * min buffers we want to have per port, after driver
46 */
47#define IPATH_MIN_USER_PORT_BUFCNT 7
48
49/*
50 * Number of ports we are configured to use (to allow for more pio
51 * buffers per port, etc.) Zero means use chip value.
52 */
53static ushort ipath_cfgports;
54
55module_param_named(cfgports, ipath_cfgports, ushort, S_IRUGO);
56MODULE_PARM_DESC(cfgports, "Set max number of ports to use");
57
58/*
59 * Number of buffers reserved for driver (verbs and layered drivers).
60 * Initialized based on number of PIO buffers if not set via module interface.
61 * The problem with this is that it's global, but we'll use different
62 * numbers for different chip types.
63 */
64static ushort ipath_kpiobufs;
65
66static int ipath_set_kpiobufs(const char *val, struct kernel_param *kp);
67
68module_param_call(kpiobufs, ipath_set_kpiobufs, param_get_ushort,
69 &ipath_kpiobufs, S_IWUSR | S_IRUGO);
70MODULE_PARM_DESC(kpiobufs, "Set number of PIO buffers for driver");
71
72/**
73 * create_port0_egr - allocate the eager TID buffers
74 * @dd: the infinipath device
75 *
76 * This code is now quite different for user and kernel, because
77 * the kernel uses skbs for accelerated network performance.
78 * This is the kernel (port0) version.
79 *
80 * Allocate the eager TID buffers and program them into infinipath.
81 * We use the network layer alloc_skb() allocator to allocate the
82 * memory, and either use the buffers as is for things like verbs
83 * packets, or pass the buffers up to the ipath layered driver and
84 * thence the network layer, replacing them as we do so (see
85 * ipath_rcv_layer()).
86 */
87static int create_port0_egr(struct ipath_devdata *dd)
88{
89 unsigned e, egrcnt;
90 struct ipath_skbinfo *skbinfo;
91 int ret;
92
93 egrcnt = dd->ipath_p0_rcvegrcnt;
94
95 skbinfo = vmalloc(sizeof(*dd->ipath_port0_skbinfo) * egrcnt);
96 if (skbinfo == NULL) {
97 ipath_dev_err(dd, "allocation error for eager TID "
98 "skb array\n");
99 ret = -ENOMEM;
100 goto bail;
101 }
102 for (e = 0; e < egrcnt; e++) {
103 /*
104 * This is a bit tricky in that we allocate extra
105 * space for 2 bytes of the 14 byte ethernet header.
106 * These two bytes are passed in the ipath header so
107 * the rest of the data is word aligned. We allocate
108 * 4 bytes so that the data buffer stays word aligned.
109 * See ipath_kreceive() for more details.
110 */
111 skbinfo[e].skb = ipath_alloc_skb(dd, GFP_KERNEL);
112 if (!skbinfo[e].skb) {
113 ipath_dev_err(dd, "SKB allocation error for "
114 "eager TID %u\n", e);
115 while (e != 0)
116 dev_kfree_skb(skbinfo[--e].skb);
117 vfree(skbinfo);
118 ret = -ENOMEM;
119 goto bail;
120 }
121 }
122 /*
123 * After loop above, so we can test non-NULL to see if ready
124 * to use at receive, etc.
125 */
126 dd->ipath_port0_skbinfo = skbinfo;
127
128 for (e = 0; e < egrcnt; e++) {
129 dd->ipath_port0_skbinfo[e].phys =
130 ipath_map_single(dd->pcidev,
131 dd->ipath_port0_skbinfo[e].skb->data,
132 dd->ipath_ibmaxlen, PCI_DMA_FROMDEVICE);
133 dd->ipath_f_put_tid(dd, e + (u64 __iomem *)
134 ((char __iomem *) dd->ipath_kregbase +
135 dd->ipath_rcvegrbase),
136 RCVHQ_RCV_TYPE_EAGER,
137 dd->ipath_port0_skbinfo[e].phys);
138 }
139
140 ret = 0;
141
142bail:
143 return ret;
144}
145
146static int bringup_link(struct ipath_devdata *dd)
147{
148 u64 val, ibc;
149 int ret = 0;
150
151 /* hold IBC in reset */
152 dd->ipath_control &= ~INFINIPATH_C_LINKENABLE;
153 ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
154 dd->ipath_control);
155
156 /*
157 * set initial max size pkt IBC will send, including ICRC; it's the
158 * PIO buffer size in dwords, less 1; also see ipath_set_mtu()
159 */
160 val = (dd->ipath_ibmaxlen >> 2) + 1;
161 ibc = val << dd->ibcc_mpl_shift;
162
163 /* flowcontrolwatermark is in units of KBytes */
164 ibc |= 0x5ULL << INFINIPATH_IBCC_FLOWCTRLWATERMARK_SHIFT;
165 /*
166 * How often flowctrl sent. More or less in usecs; balance against
167 * watermark value, so that in theory senders always get a flow
168 * control update in time to not let the IB link go idle.
169 */
170 ibc |= 0x3ULL << INFINIPATH_IBCC_FLOWCTRLPERIOD_SHIFT;
171 /* max error tolerance */
172 ibc |= 0xfULL << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT;
173 /* use "real" buffer space for */
174 ibc |= 4ULL << INFINIPATH_IBCC_CREDITSCALE_SHIFT;
175 /* IB credit flow control. */
176 ibc |= 0xfULL << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT;
177 /* initially come up waiting for TS1, without sending anything. */
178 dd->ipath_ibcctrl = ibc;
179 /*
180 * Want to start out with both LINKCMD and LINKINITCMD in NOP
181 * (0 and 0). Don't put linkinitcmd in ipath_ibcctrl, want that
182 * to stay a NOP. Flag that we are disabled, for the (unlikely)
183 * case that some recovery path is trying to bring the link up
184 * before we are ready.
185 */
186 ibc |= INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
187 INFINIPATH_IBCC_LINKINITCMD_SHIFT;
188 dd->ipath_flags |= IPATH_IB_LINK_DISABLED;
189 ipath_cdbg(VERBOSE, "Writing 0x%llx to ibcctrl\n",
190 (unsigned long long) ibc);
191 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, ibc);
192
193 /* be sure chip saw it */
194 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
195
196 ret = dd->ipath_f_bringup_serdes(dd);
197
198 if (ret)
199 dev_info(&dd->pcidev->dev, "Could not initialize SerDes, "
200 "not usable\n");
201 else {
202 /* enable IBC */
203 dd->ipath_control |= INFINIPATH_C_LINKENABLE;
204 ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
205 dd->ipath_control);
206 }
207
208 return ret;
209}
210
211static struct ipath_portdata *create_portdata0(struct ipath_devdata *dd)
212{
213 struct ipath_portdata *pd;
214
215 pd = kzalloc(sizeof(*pd), GFP_KERNEL);
216 if (pd) {
217 pd->port_dd = dd;
218 pd->port_cnt = 1;
219 /* The port 0 pkey table is used by the layer interface. */
220 pd->port_pkeys[0] = IPATH_DEFAULT_P_KEY;
221 pd->port_seq_cnt = 1;
222 }
223 return pd;
224}
225
226static int init_chip_first(struct ipath_devdata *dd)
227{
228 struct ipath_portdata *pd;
229 int ret = 0;
230 u64 val;
231
232 spin_lock_init(&dd->ipath_kernel_tid_lock);
233 spin_lock_init(&dd->ipath_user_tid_lock);
234 spin_lock_init(&dd->ipath_sendctrl_lock);
235 spin_lock_init(&dd->ipath_uctxt_lock);
236 spin_lock_init(&dd->ipath_sdma_lock);
237 spin_lock_init(&dd->ipath_gpio_lock);
238 spin_lock_init(&dd->ipath_eep_st_lock);
239 spin_lock_init(&dd->ipath_sdepb_lock);
240 mutex_init(&dd->ipath_eep_lock);
241
242 /*
243 * skip cfgports stuff because we are not allocating memory,
244 * and we don't want problems if the portcnt changed due to
245 * cfgports. We do still check and report a difference, if
246 * not same (should be impossible).
247 */
248 dd->ipath_f_config_ports(dd, ipath_cfgports);
249 if (!ipath_cfgports)
250 dd->ipath_cfgports = dd->ipath_portcnt;
251 else if (ipath_cfgports <= dd->ipath_portcnt) {
252 dd->ipath_cfgports = ipath_cfgports;
253 ipath_dbg("Configured to use %u ports out of %u in chip\n",
254 dd->ipath_cfgports, ipath_read_kreg32(dd,
255 dd->ipath_kregs->kr_portcnt));
256 } else {
257 dd->ipath_cfgports = dd->ipath_portcnt;
258 ipath_dbg("Tried to configured to use %u ports; chip "
259 "only supports %u\n", ipath_cfgports,
260 ipath_read_kreg32(dd,
261 dd->ipath_kregs->kr_portcnt));
262 }
263 /*
264 * Allocate full portcnt array, rather than just cfgports, because
265 * cleanup iterates across all possible ports.
266 */
267 dd->ipath_pd = kcalloc(dd->ipath_portcnt, sizeof(*dd->ipath_pd),
268 GFP_KERNEL);
269
270 if (!dd->ipath_pd) {
271 ipath_dev_err(dd, "Unable to allocate portdata array, "
272 "failing\n");
273 ret = -ENOMEM;
274 goto done;
275 }
276
277 pd = create_portdata0(dd);
278 if (!pd) {
279 ipath_dev_err(dd, "Unable to allocate portdata for port "
280 "0, failing\n");
281 ret = -ENOMEM;
282 goto done;
283 }
284 dd->ipath_pd[0] = pd;
285
286 dd->ipath_rcvtidcnt =
287 ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidcnt);
288 dd->ipath_rcvtidbase =
289 ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidbase);
290 dd->ipath_rcvegrcnt =
291 ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrcnt);
292 dd->ipath_rcvegrbase =
293 ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrbase);
294 dd->ipath_palign =
295 ipath_read_kreg32(dd, dd->ipath_kregs->kr_pagealign);
296 dd->ipath_piobufbase =
297 ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiobufbase);
298 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiosize);
299 dd->ipath_piosize2k = val & ~0U;
300 dd->ipath_piosize4k = val >> 32;
301 if (dd->ipath_piosize4k == 0 && ipath_mtu4096)
302 ipath_mtu4096 = 0; /* 4KB not supported by this chip */
303 dd->ipath_ibmtu = ipath_mtu4096 ? 4096 : 2048;
304 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiobufcnt);
305 dd->ipath_piobcnt2k = val & ~0U;
306 dd->ipath_piobcnt4k = val >> 32;
307 dd->ipath_pio2kbase =
308 (u32 __iomem *) (((char __iomem *) dd->ipath_kregbase) +
309 (dd->ipath_piobufbase & 0xffffffff));
310 if (dd->ipath_piobcnt4k) {
311 dd->ipath_pio4kbase = (u32 __iomem *)
312 (((char __iomem *) dd->ipath_kregbase) +
313 (dd->ipath_piobufbase >> 32));
314 /*
315 * 4K buffers take 2 pages; we use roundup just to be
316 * paranoid; we calculate it once here, rather than on
317 * every buf allocation
318 */
319 dd->ipath_4kalign = ALIGN(dd->ipath_piosize4k,
320 dd->ipath_palign);
321 ipath_dbg("%u 2k(%x) piobufs @ %p, %u 4k(%x) @ %p "
322 "(%x aligned)\n",
323 dd->ipath_piobcnt2k, dd->ipath_piosize2k,
324 dd->ipath_pio2kbase, dd->ipath_piobcnt4k,
325 dd->ipath_piosize4k, dd->ipath_pio4kbase,
326 dd->ipath_4kalign);
327 } else {
328 ipath_dbg("%u 2k piobufs @ %p\n",
329 dd->ipath_piobcnt2k, dd->ipath_pio2kbase);
330 }
331done:
332 return ret;
333}
334
335/**
336 * init_chip_reset - re-initialize after a reset, or enable
337 * @dd: the infinipath device
338 *
339 * sanity check at least some of the values after reset, and
340 * ensure no receive or transmit is enabled (explicitly, in case
341 * the reset failed)
342 */
343static int init_chip_reset(struct ipath_devdata *dd)
344{
345 u32 rtmp;
346 int i;
347 unsigned long flags;
348
349 /*
350 * ensure chip does no sends or receives, tail updates, or
351 * pioavail updates while we re-initialize
352 */
353 dd->ipath_rcvctrl &= ~(1ULL << dd->ipath_r_tailupd_shift);
354 for (i = 0; i < dd->ipath_portcnt; i++) {
355 clear_bit(dd->ipath_r_portenable_shift + i,
356 &dd->ipath_rcvctrl);
357 clear_bit(dd->ipath_r_intravail_shift + i,
358 &dd->ipath_rcvctrl);
359 }
360 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
361 dd->ipath_rcvctrl);
362
363 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
364 dd->ipath_sendctrl = 0U; /* no sdma, etc */
365 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
366 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
367 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
368
369 ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0ULL);
370
371 rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidcnt);
372 if (rtmp != dd->ipath_rcvtidcnt)
373 dev_info(&dd->pcidev->dev, "tidcnt was %u before "
374 "reset, now %u, using original\n",
375 dd->ipath_rcvtidcnt, rtmp);
376 rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidbase);
377 if (rtmp != dd->ipath_rcvtidbase)
378 dev_info(&dd->pcidev->dev, "tidbase was %u before "
379 "reset, now %u, using original\n",
380 dd->ipath_rcvtidbase, rtmp);
381 rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrcnt);
382 if (rtmp != dd->ipath_rcvegrcnt)
383 dev_info(&dd->pcidev->dev, "egrcnt was %u before "
384 "reset, now %u, using original\n",
385 dd->ipath_rcvegrcnt, rtmp);
386 rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrbase);
387 if (rtmp != dd->ipath_rcvegrbase)
388 dev_info(&dd->pcidev->dev, "egrbase was %u before "
389 "reset, now %u, using original\n",
390 dd->ipath_rcvegrbase, rtmp);
391
392 return 0;
393}
394
395static int init_pioavailregs(struct ipath_devdata *dd)
396{
397 int ret;
398
399 dd->ipath_pioavailregs_dma = dma_alloc_coherent(
400 &dd->pcidev->dev, PAGE_SIZE, &dd->ipath_pioavailregs_phys,
401 GFP_KERNEL);
402 if (!dd->ipath_pioavailregs_dma) {
403 ipath_dev_err(dd, "failed to allocate PIOavail reg area "
404 "in memory\n");
405 ret = -ENOMEM;
406 goto done;
407 }
408
409 /*
410 * we really want L2 cache aligned, but for current CPUs of
411 * interest, they are the same.
412 */
413 dd->ipath_statusp = (u64 *)
414 ((char *)dd->ipath_pioavailregs_dma +
415 ((2 * L1_CACHE_BYTES +
416 dd->ipath_pioavregs * sizeof(u64)) & ~L1_CACHE_BYTES));
417 /* copy the current value now that it's really allocated */
418 *dd->ipath_statusp = dd->_ipath_status;
419 /*
420 * setup buffer to hold freeze msg, accessible to apps,
421 * following statusp
422 */
423 dd->ipath_freezemsg = (char *)&dd->ipath_statusp[1];
424 /* and its length */
425 dd->ipath_freezelen = L1_CACHE_BYTES - sizeof(dd->ipath_statusp[0]);
426
427 ret = 0;
428
429done:
430 return ret;
431}
432
433/**
434 * init_shadow_tids - allocate the shadow TID array
435 * @dd: the infinipath device
436 *
437 * allocate the shadow TID array, so we can ipath_munlock previous
438 * entries. It may make more sense to move the pageshadow to the
439 * port data structure, so we only allocate memory for ports actually
440 * in use, since we are at 8k per port now.
441 */
442static void init_shadow_tids(struct ipath_devdata *dd)
443{
444 struct page **pages;
445 dma_addr_t *addrs;
446
447 pages = vzalloc(dd->ipath_cfgports * dd->ipath_rcvtidcnt *
448 sizeof(struct page *));
449 if (!pages) {
450 ipath_dev_err(dd, "failed to allocate shadow page * "
451 "array, no expected sends!\n");
452 dd->ipath_pageshadow = NULL;
453 return;
454 }
455
456 addrs = vmalloc(dd->ipath_cfgports * dd->ipath_rcvtidcnt *
457 sizeof(dma_addr_t));
458 if (!addrs) {
459 ipath_dev_err(dd, "failed to allocate shadow dma handle "
460 "array, no expected sends!\n");
461 vfree(pages);
462 dd->ipath_pageshadow = NULL;
463 return;
464 }
465
466 dd->ipath_pageshadow = pages;
467 dd->ipath_physshadow = addrs;
468}
469
470static void enable_chip(struct ipath_devdata *dd, int reinit)
471{
472 u32 val;
473 u64 rcvmask;
474 unsigned long flags;
475 int i;
476
477 if (!reinit)
478 init_waitqueue_head(&ipath_state_wait);
479
480 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
481 dd->ipath_rcvctrl);
482
483 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
484 /* Enable PIO send, and update of PIOavail regs to memory. */
485 dd->ipath_sendctrl = INFINIPATH_S_PIOENABLE |
486 INFINIPATH_S_PIOBUFAVAILUPD;
487
488 /*
489 * Set the PIO avail update threshold to host memory
490 * on chips that support it.
491 */
492 if (dd->ipath_pioupd_thresh)
493 dd->ipath_sendctrl |= dd->ipath_pioupd_thresh
494 << INFINIPATH_S_UPDTHRESH_SHIFT;
495 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
496 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
497 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
498
499 /*
500 * Enable kernel ports' receive and receive interrupt.
501 * Other ports done as user opens and inits them.
502 */
503 rcvmask = 1ULL;
504 dd->ipath_rcvctrl |= (rcvmask << dd->ipath_r_portenable_shift) |
505 (rcvmask << dd->ipath_r_intravail_shift);
506 if (!(dd->ipath_flags & IPATH_NODMA_RTAIL))
507 dd->ipath_rcvctrl |= (1ULL << dd->ipath_r_tailupd_shift);
508
509 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
510 dd->ipath_rcvctrl);
511
512 /*
513 * now ready for use. this should be cleared whenever we
514 * detect a reset, or initiate one.
515 */
516 dd->ipath_flags |= IPATH_INITTED;
517
518 /*
519 * Init our shadow copies of head from tail values,
520 * and write head values to match.
521 */
522 val = ipath_read_ureg32(dd, ur_rcvegrindextail, 0);
523 ipath_write_ureg(dd, ur_rcvegrindexhead, val, 0);
524
525 /* Initialize so we interrupt on next packet received */
526 ipath_write_ureg(dd, ur_rcvhdrhead,
527 dd->ipath_rhdrhead_intr_off |
528 dd->ipath_pd[0]->port_head, 0);
529
530 /*
531 * by now pioavail updates to memory should have occurred, so
532 * copy them into our working/shadow registers; this is in
533 * case something went wrong with abort, but mostly to get the
534 * initial values of the generation bit correct.
535 */
536 for (i = 0; i < dd->ipath_pioavregs; i++) {
537 __le64 pioavail;
538
539 /*
540 * Chip Errata bug 6641; even and odd qwords>3 are swapped.
541 */
542 if (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS))
543 pioavail = dd->ipath_pioavailregs_dma[i ^ 1];
544 else
545 pioavail = dd->ipath_pioavailregs_dma[i];
546 /*
547 * don't need to worry about ipath_pioavailkernel here
548 * because we will call ipath_chg_pioavailkernel() later
549 * in initialization, to busy out buffers as needed
550 */
551 dd->ipath_pioavailshadow[i] = le64_to_cpu(pioavail);
552 }
553 /* can get counters, stats, etc. */
554 dd->ipath_flags |= IPATH_PRESENT;
555}
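The errata-6641 handling in the copy loop above reads qwords beyond index 3 pairwise swapped: the "i ^ 1" index maps 4 to 5, 6 to 7, and so on, and only when IPATH_SWAP_PIOBUFS is set. A small standalone helper spelling out that index mapping (names invented for illustration):

#include <stdbool.h>

/* Mirror the index selection used when copying pioavail regs to the shadow. */
static unsigned int example_pioavail_index(unsigned int i, bool swap_piobufs)
{
	/* qwords 0..3 are never swapped; beyond that, even/odd pairs are */
	return (swap_piobufs && i > 3) ? (i ^ 1) : i;
}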
556
557static int init_housekeeping(struct ipath_devdata *dd, int reinit)
558{
559 char boardn[40];
560 int ret = 0;
561
562 /*
563 * have to clear shadow copies of registers at init that are
564 * not otherwise set here, or all kinds of bizarre things
565 * happen with driver on chip reset
566 */
567 dd->ipath_rcvhdrsize = 0;
568
569 /*
570 * Don't clear ipath_flags as 8bit mode was set before
571 * entering this func. However, we do set the linkstate to
572 * unknown, so we can watch for a transition.
573 * PRESENT is set because we want register reads to work,
574 * and the kernel infrastructure saw it in config space;
575 * We clear it if we have failures.
576 */
577 dd->ipath_flags |= IPATH_LINKUNK | IPATH_PRESENT;
578 dd->ipath_flags &= ~(IPATH_LINKACTIVE | IPATH_LINKARMED |
579 IPATH_LINKDOWN | IPATH_LINKINIT);
580
581 ipath_cdbg(VERBOSE, "Try to read spc chip revision\n");
582 dd->ipath_revision =
583 ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision);
584
585 /*
586 * set up fundamental info we need to use the chip; we assume
587 * if the revision reg and these regs are OK, we don't need to
588 * special case the rest
589 */
590 dd->ipath_sregbase =
591 ipath_read_kreg32(dd, dd->ipath_kregs->kr_sendregbase);
592 dd->ipath_cregbase =
593 ipath_read_kreg32(dd, dd->ipath_kregs->kr_counterregbase);
594 dd->ipath_uregbase =
595 ipath_read_kreg32(dd, dd->ipath_kregs->kr_userregbase);
596 ipath_cdbg(VERBOSE, "ipath_kregbase %p, sendbase %x usrbase %x, "
597 "cntrbase %x\n", dd->ipath_kregbase, dd->ipath_sregbase,
598 dd->ipath_uregbase, dd->ipath_cregbase);
599 if ((dd->ipath_revision & 0xffffffff) == 0xffffffff
600 || (dd->ipath_sregbase & 0xffffffff) == 0xffffffff
601 || (dd->ipath_cregbase & 0xffffffff) == 0xffffffff
602 || (dd->ipath_uregbase & 0xffffffff) == 0xffffffff) {
603 ipath_dev_err(dd, "Register read failures from chip, "
604 "giving up initialization\n");
605 dd->ipath_flags &= ~IPATH_PRESENT;
606 ret = -ENODEV;
607 goto done;
608 }
609
610
611 /* clear diagctrl register, in case diags were running and crashed */
612 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwdiagctrl, 0);
613
614 /* clear the initial reset flag, in case first driver load */
615 ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear,
616 INFINIPATH_E_RESET);
617
618 ipath_cdbg(VERBOSE, "Revision %llx (PCI %x)\n",
619 (unsigned long long) dd->ipath_revision,
620 dd->ipath_pcirev);
621
622 if (((dd->ipath_revision >> INFINIPATH_R_SOFTWARE_SHIFT) &
623 INFINIPATH_R_SOFTWARE_MASK) != IPATH_CHIP_SWVERSION) {
624 ipath_dev_err(dd, "Driver only handles version %d, "
625 "chip swversion is %d (%llx), failng\n",
626 IPATH_CHIP_SWVERSION,
627 (int)(dd->ipath_revision >>
628 INFINIPATH_R_SOFTWARE_SHIFT) &
629 INFINIPATH_R_SOFTWARE_MASK,
630 (unsigned long long) dd->ipath_revision);
631 ret = -ENOSYS;
632 goto done;
633 }
634 dd->ipath_majrev = (u8) ((dd->ipath_revision >>
635 INFINIPATH_R_CHIPREVMAJOR_SHIFT) &
636 INFINIPATH_R_CHIPREVMAJOR_MASK);
637 dd->ipath_minrev = (u8) ((dd->ipath_revision >>
638 INFINIPATH_R_CHIPREVMINOR_SHIFT) &
639 INFINIPATH_R_CHIPREVMINOR_MASK);
640 dd->ipath_boardrev = (u8) ((dd->ipath_revision >>
641 INFINIPATH_R_BOARDID_SHIFT) &
642 INFINIPATH_R_BOARDID_MASK);
643
644 ret = dd->ipath_f_get_boardname(dd, boardn, sizeof boardn);
645
646 snprintf(dd->ipath_boardversion, sizeof(dd->ipath_boardversion),
647 "ChipABI %u.%u, %s, InfiniPath%u %u.%u, PCI %u, "
648 "SW Compat %u\n",
649 IPATH_CHIP_VERS_MAJ, IPATH_CHIP_VERS_MIN, boardn,
650 (unsigned)(dd->ipath_revision >> INFINIPATH_R_ARCH_SHIFT) &
651 INFINIPATH_R_ARCH_MASK,
652 dd->ipath_majrev, dd->ipath_minrev, dd->ipath_pcirev,
653 (unsigned)(dd->ipath_revision >>
654 INFINIPATH_R_SOFTWARE_SHIFT) &
655 INFINIPATH_R_SOFTWARE_MASK);
656
657 ipath_dbg("%s", dd->ipath_boardversion);
658
659 if (ret)
660 goto done;
661
662 if (reinit)
663 ret = init_chip_reset(dd);
664 else
665 ret = init_chip_first(dd);
666
667done:
668 return ret;
669}
670
671static void verify_interrupt(unsigned long opaque)
672{
673 struct ipath_devdata *dd = (struct ipath_devdata *) opaque;
674
675 if (!dd)
676 return; /* being torn down */
677
678 /*
679 * If we don't have any interrupts, let the user know and
680 * don't bother checking again.
681 */
682 if (dd->ipath_int_counter == 0) {
683 if (!dd->ipath_f_intr_fallback(dd))
684 dev_err(&dd->pcidev->dev, "No interrupts detected, "
685 "not usable.\n");
686 else /* re-arm the timer to see if fallback works */
687 mod_timer(&dd->ipath_intrchk_timer, jiffies + HZ/2);
688 } else
689 ipath_cdbg(VERBOSE, "%u interrupts at timer check\n",
690 dd->ipath_int_counter);
691}
692
693/**
694 * ipath_init_chip - do the actual initialization sequence on the chip
695 * @dd: the infinipath device
696 * @reinit: reinitializing, so don't allocate new memory
697 *
698 * Do the actual initialization sequence on the chip. This is done
699 * both from the init routine called from the PCI infrastructure, and
700 * when we reset the chip, or detect that it was reset internally,
701 * or it's administratively re-enabled.
702 *
703 * Memory allocation here and in called routines is only done in
704 * the first case (reinit == 0). We have to be careful, because even
705 * without memory allocation, we need to re-write all the chip registers,
706 * TIDs, etc. after the reset or enable has completed.
707 */
708int ipath_init_chip(struct ipath_devdata *dd, int reinit)
709{
710 int ret = 0;
711 u32 kpiobufs, defkbufs;
712 u32 piobufs, uports;
713 u64 val;
714 struct ipath_portdata *pd;
715 gfp_t gfp_flags = GFP_USER | __GFP_COMP;
716
717 ret = init_housekeeping(dd, reinit);
718 if (ret)
719 goto done;
720
721 /*
722 * We could bump this to allow for full rcvegrcnt + rcvtidcnt,
723 * but then it no longer nicely fits power of two, and since
724 * we now use routines that backend onto __get_free_pages, the
725 * rest would be wasted.
726 */
727 dd->ipath_rcvhdrcnt = max(dd->ipath_p0_rcvegrcnt, dd->ipath_rcvegrcnt);
728 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrcnt,
729 dd->ipath_rcvhdrcnt);
730
731 /*
732 * Set up the shadow copies of the piobufavail registers,
733 * which we compare against the chip registers for now, and
734 * the in memory DMA'ed copies of the registers. This has to
735 * be done early, before we calculate lastport, etc.
736 */
737 piobufs = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
738 /*
739 * calc number of pioavail registers, and save it; we have 2
740 * bits per buffer.
741 */
742 dd->ipath_pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2)
743 / (sizeof(u64) * BITS_PER_BYTE / 2);
744 uports = dd->ipath_cfgports ? dd->ipath_cfgports - 1 : 0;
745 if (piobufs > 144)
746 defkbufs = 32 + dd->ipath_pioreserved;
747 else
748 defkbufs = 16 + dd->ipath_pioreserved;
749
750 if (ipath_kpiobufs && (ipath_kpiobufs +
751 (uports * IPATH_MIN_USER_PORT_BUFCNT)) > piobufs) {
752 int i = (int) piobufs -
753 (int) (uports * IPATH_MIN_USER_PORT_BUFCNT);
754 if (i < 1)
755 i = 1;
756 dev_info(&dd->pcidev->dev, "Allocating %d PIO bufs of "
757 "%d for kernel leaves too few for %d user ports "
758 "(%d each); using %u\n", ipath_kpiobufs,
759 piobufs, uports, IPATH_MIN_USER_PORT_BUFCNT, i);
760 /*
761 * shouldn't change ipath_kpiobufs, because it could be
762 * different for different devices...
763 */
764 kpiobufs = i;
765 } else if (ipath_kpiobufs)
766 kpiobufs = ipath_kpiobufs;
767 else
768 kpiobufs = defkbufs;
769 dd->ipath_lastport_piobuf = piobufs - kpiobufs;
770 dd->ipath_pbufsport =
771 uports ? dd->ipath_lastport_piobuf / uports : 0;
772 /* if not an even divisor, some user ports get extra buffers */
773 dd->ipath_ports_extrabuf = dd->ipath_lastport_piobuf -
774 (dd->ipath_pbufsport * uports);
775 if (dd->ipath_ports_extrabuf)
776 ipath_dbg("%u pbufs/port leaves some unused, add 1 buffer to "
777 "ports <= %u\n", dd->ipath_pbufsport,
778 dd->ipath_ports_extrabuf);
779 dd->ipath_lastpioindex = 0;
780 dd->ipath_lastpioindexl = dd->ipath_piobcnt2k;
781 /* ipath_pioavailshadow initialized earlier */
782 ipath_cdbg(VERBOSE, "%d PIO bufs for kernel out of %d total %u "
783 "each for %u user ports\n", kpiobufs,
784 piobufs, dd->ipath_pbufsport, uports);
785 ret = dd->ipath_f_early_init(dd);
786 if (ret) {
787 ipath_dev_err(dd, "Early initialization failure\n");
788 goto done;
789 }
790
791 /*
792 * Early_init sets rcvhdrentsize and rcvhdrsize, so this must be
793 * done after early_init.
794 */
795 dd->ipath_hdrqlast =
796 dd->ipath_rcvhdrentsize * (dd->ipath_rcvhdrcnt - 1);
797 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrentsize,
798 dd->ipath_rcvhdrentsize);
799 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrsize,
800 dd->ipath_rcvhdrsize);
801
802 if (!reinit) {
803 ret = init_pioavailregs(dd);
804 init_shadow_tids(dd);
805 if (ret)
806 goto done;
807 }
808
809 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendpioavailaddr,
810 dd->ipath_pioavailregs_phys);
811
812 /*
813 * this is to detect s/w errors, which the h/w works around by
814 * ignoring the low 6 bits of address, if it wasn't aligned.
815 */
816 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpioavailaddr);
817 if (val != dd->ipath_pioavailregs_phys) {
818 ipath_dev_err(dd, "Catastrophic software error, "
819 "SendPIOAvailAddr written as %lx, "
820 "read back as %llx\n",
821 (unsigned long) dd->ipath_pioavailregs_phys,
822 (unsigned long long) val);
823 ret = -EINVAL;
824 goto done;
825 }
826
827 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvbthqp, IPATH_KD_QP);
828
829 /*
830 * make sure we are not in freeze, and PIO send enabled, so
831 * writes to pbc happen
832 */
833 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask, 0ULL);
834 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
835 ~0ULL&~INFINIPATH_HWE_MEMBISTFAILED);
836 ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0ULL);
837
838 /*
839 * before error clears, since we expect serdes pll errors during
840 * this, the first time after reset
841 */
842 if (bringup_link(dd)) {
843 dev_info(&dd->pcidev->dev, "Failed to bringup IB link\n");
844 ret = -ENETDOWN;
845 goto done;
846 }
847
848 /*
849 * clear any "expected" hwerrs from reset and/or initialization
850 * clear any that aren't enabled (at least this once), and then
851 * set the enable mask
852 */
853 dd->ipath_f_init_hwerrors(dd);
854 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
855 ~0ULL&~INFINIPATH_HWE_MEMBISTFAILED);
856 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
857 dd->ipath_hwerrmask);
858
859 /* clear all */
860 ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, -1LL);
861 /* enable errors that are masked, at least this first time. */
862 ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
863 ~dd->ipath_maskederrs);
864 dd->ipath_maskederrs = 0; /* don't re-enable ignored in timer */
865 dd->ipath_errormask =
866 ipath_read_kreg64(dd, dd->ipath_kregs->kr_errormask);
867 /* clear any interrupts up to this point (ints still not enabled) */
868 ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, -1LL);
869
870 dd->ipath_f_tidtemplate(dd);
871
872 /*
873 * Set up the port 0 (kernel) rcvhdr q and egr TIDs. If doing
874 * re-init, the simplest way to handle this is to free
875 * existing, and re-allocate.
876 * Need to re-create rest of port 0 portdata as well.
877 */
878 pd = dd->ipath_pd[0];
879 if (reinit) {
880 struct ipath_portdata *npd;
881
882 /*
883 * Alloc and init new ipath_portdata for port0,
884 * Then free old pd. Could lead to fragmentation, but also
885 * makes later support for hot-swap easier.
886 */
887 npd = create_portdata0(dd);
888 if (npd) {
889 ipath_free_pddata(dd, pd);
890 dd->ipath_pd[0] = npd;
891 pd = npd;
892 } else {
893 ipath_dev_err(dd, "Unable to allocate portdata"
894 " for port 0, failing\n");
895 ret = -ENOMEM;
896 goto done;
897 }
898 }
899 ret = ipath_create_rcvhdrq(dd, pd);
900 if (!ret)
901 ret = create_port0_egr(dd);
902 if (ret) {
903 ipath_dev_err(dd, "failed to allocate kernel port's "
904 "rcvhdrq and/or egr bufs\n");
905 goto done;
906 } else {
907 enable_chip(dd, reinit);
908 }
909
910 /* after enable_chip, so pioavailshadow setup */
911 ipath_chg_pioavailkernel(dd, 0, piobufs, 1);
912
913 /*
914 * Cancel any possible active sends from early driver load.
915 * Follows early_init because some chips have to initialize
916 * PIO buffers in early_init to avoid false parity errors.
917 * After enable and ipath_chg_pioavailkernel so we can safely
918 * enable pioavail updates and PIOENABLE; packets are now
919 * ready to go out.
920 */
921 ipath_cancel_sends(dd, 1);
922
923 if (!reinit) {
924 /*
925 * Used when we close a port, for DMA already in flight
926 * at close.
927 */
928 dd->ipath_dummy_hdrq = dma_alloc_coherent(
929 &dd->pcidev->dev, dd->ipath_pd[0]->port_rcvhdrq_size,
930 &dd->ipath_dummy_hdrq_phys,
931 gfp_flags);
932 if (!dd->ipath_dummy_hdrq) {
933 dev_info(&dd->pcidev->dev,
934 "Couldn't allocate 0x%lx bytes for dummy hdrq\n",
935 dd->ipath_pd[0]->port_rcvhdrq_size);
936 /* fallback to just 0'ing */
937 dd->ipath_dummy_hdrq_phys = 0UL;
938 }
939 }
940
941 /*
942 * cause retrigger of pending interrupts ignored during init,
943 * even if we had errors
944 */
945 ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, 0ULL);
946
947 if (!dd->ipath_stats_timer_active) {
948 /*
949 * first init, or after an admin disable/enable
950 * set up stats retrieval timer, even if we had errors
951 * in last portion of setup
952 */
953 setup_timer(&dd->ipath_stats_timer, ipath_get_faststats,
954 (unsigned long)dd);
955 /* every 5 seconds; */
956 dd->ipath_stats_timer.expires = jiffies + 5 * HZ;
957 /* takes ~16 seconds to overflow at full IB 4x bandwidth */
958 add_timer(&dd->ipath_stats_timer);
959 dd->ipath_stats_timer_active = 1;
960 }
961
962 /* Set up SendDMA if chip supports it */
963 if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
964 ret = setup_sdma(dd);
965
966 /* Set up HoL state */
967 setup_timer(&dd->ipath_hol_timer, ipath_hol_event, (unsigned long)dd);
968
969 dd->ipath_hol_state = IPATH_HOL_UP;
970
971done:
972 if (!ret) {
973 *dd->ipath_statusp |= IPATH_STATUS_CHIP_PRESENT;
974 if (!dd->ipath_f_intrsetup(dd)) {
975 /* now we can enable all interrupts from the chip */
976 ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask,
977 -1LL);
978 /* force re-interrupt of any pending interrupts. */
979 ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear,
980 0ULL);
981 /* chip is usable; mark it as initialized */
982 *dd->ipath_statusp |= IPATH_STATUS_INITTED;
983
984 /*
985 * setup to verify we get an interrupt, and fallback
986 * to an alternate if necessary and possible
987 */
988 if (!reinit) {
989 setup_timer(&dd->ipath_intrchk_timer,
990 verify_interrupt,
991 (unsigned long)dd);
992 }
993 dd->ipath_intrchk_timer.expires = jiffies + HZ/2;
994 add_timer(&dd->ipath_intrchk_timer);
995 } else
996 ipath_dev_err(dd, "No interrupts enabled, couldn't "
997 "setup interrupt address\n");
998
999 if (dd->ipath_cfgports > ipath_stats.sps_nports)
1000 /*
1001 * sps_nports is a global, so, we set it to
1002 * the highest number of ports of any of the
1003 * chips we find; we never decrement it, at
1004 * least for now. Since this might have changed
1005 * over disable/enable or prior to reset, always
1006 * do the check and potentially adjust.
1007 */
1008 ipath_stats.sps_nports = dd->ipath_cfgports;
1009 } else
1010 ipath_dbg("Failed (%d) to initialize chip\n", ret);
1011
1012 /* if ret is non-zero, we probably should do some cleanup
1013 here... */
1014 return ret;
1015}
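The buffer accounting near the top of ipath_init_chip() splits the PIO buffers between the kernel and the user ports: kpiobufs are kept for the kernel, the remainder is divided evenly across user ports, and the leftover of that division gives the first few ports one extra buffer each. A standalone sketch of that arithmetic with illustrative numbers (160 buffers, 32 for the kernel, 7 user ports gives 18 per port plus 2 extras); the example_* names are invented, not the driver's own:

#include <stdio.h>

struct example_pio_split {
	unsigned int lastport_piobuf;	/* buffers left over for user ports */
	unsigned int pbufsport;		/* buffers per user port */
	unsigned int extrabuf;		/* ports that receive one extra buffer */
};

/* Same arithmetic as ipath_init_chip(), with invented names. */
static struct example_pio_split example_split(unsigned int piobufs,
					      unsigned int kpiobufs,
					      unsigned int uports)
{
	struct example_pio_split s;

	s.lastport_piobuf = piobufs - kpiobufs;
	s.pbufsport = uports ? s.lastport_piobuf / uports : 0;
	s.extrabuf = s.lastport_piobuf - s.pbufsport * uports;
	return s;
}

int main(void)
{
	struct example_pio_split s = example_split(160, 32, 7);

	printf("%u per user port, %u ports get one extra\n",
	       s.pbufsport, s.extrabuf);
	return 0;
}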
1016
1017static int ipath_set_kpiobufs(const char *str, struct kernel_param *kp)
1018{
1019 struct ipath_devdata *dd;
1020 unsigned long flags;
1021 unsigned short val;
1022 int ret;
1023
1024 ret = ipath_parse_ushort(str, &val);
1025
1026 spin_lock_irqsave(&ipath_devs_lock, flags);
1027
1028 if (ret < 0)
1029 goto bail;
1030
1031 if (val == 0) {
1032 ret = -EINVAL;
1033 goto bail;
1034 }
1035
1036 list_for_each_entry(dd, &ipath_dev_list, ipath_list) {
1037 if (dd->ipath_kregbase)
1038 continue;
1039 if (val > (dd->ipath_piobcnt2k + dd->ipath_piobcnt4k -
1040 (dd->ipath_cfgports *
1041 IPATH_MIN_USER_PORT_BUFCNT)))
1042 {
1043 ipath_dev_err(
1044 dd,
1045 "Allocating %d PIO bufs for kernel leaves "
1046 "too few for %d user ports (%d each)\n",
1047 val, dd->ipath_cfgports - 1,
1048 IPATH_MIN_USER_PORT_BUFCNT);
1049 ret = -EINVAL;
1050 goto bail;
1051 }
1052 dd->ipath_lastport_piobuf =
1053 dd->ipath_piobcnt2k + dd->ipath_piobcnt4k - val;
1054 }
1055
1056 ipath_kpiobufs = val;
1057 ret = 0;
1058bail:
1059 spin_unlock_irqrestore(&ipath_devs_lock, flags);
1060
1061 return ret;
1062}
diff --git a/drivers/staging/rdma/ipath/ipath_intr.c b/drivers/staging/rdma/ipath/ipath_intr.c
deleted file mode 100644
index 0403fa28ed8d..000000000000
--- a/drivers/staging/rdma/ipath/ipath_intr.c
+++ /dev/null
@@ -1,1271 +0,0 @@
1/*
2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/pci.h>
35#include <linux/delay.h>
36
37#include "ipath_kernel.h"
38#include "ipath_verbs.h"
39#include "ipath_common.h"
40
41
42/*
43 * Called when we might have an error that is specific to a particular
44 * PIO buffer, and may need to cancel that buffer, so it can be re-used.
45 */
46void ipath_disarm_senderrbufs(struct ipath_devdata *dd)
47{
48 u32 piobcnt;
49 unsigned long sbuf[4];
50 /*
51 * it's possible that sendbuffererror could have bits set; might
52 * have already done this as a result of hardware error handling
53 */
54 piobcnt = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
55 /* read these before writing errorclear */
56 sbuf[0] = ipath_read_kreg64(
57 dd, dd->ipath_kregs->kr_sendbuffererror);
58 sbuf[1] = ipath_read_kreg64(
59 dd, dd->ipath_kregs->kr_sendbuffererror + 1);
60 if (piobcnt > 128)
61 sbuf[2] = ipath_read_kreg64(
62 dd, dd->ipath_kregs->kr_sendbuffererror + 2);
63 if (piobcnt > 192)
64 sbuf[3] = ipath_read_kreg64(
65 dd, dd->ipath_kregs->kr_sendbuffererror + 3);
66 else
67 sbuf[3] = 0;
68
69 if (sbuf[0] || sbuf[1] || (piobcnt > 128 && (sbuf[2] || sbuf[3]))) {
70 int i;
71 if (ipath_debug & (__IPATH_PKTDBG|__IPATH_DBG) &&
72 time_after(dd->ipath_lastcancel, jiffies)) {
73 __IPATH_DBG_WHICH(__IPATH_PKTDBG|__IPATH_DBG,
74 "SendbufErrs %lx %lx", sbuf[0],
75 sbuf[1]);
76 if (ipath_debug & __IPATH_PKTDBG && piobcnt > 128)
77 printk(" %lx %lx ", sbuf[2], sbuf[3]);
78 printk("\n");
79 }
80
81 for (i = 0; i < piobcnt; i++)
82 if (test_bit(i, sbuf))
83 ipath_disarm_piobufs(dd, i, 1);
84 /* ignore armlaunch errs for a bit */
85 dd->ipath_lastcancel = jiffies+3;
86 }
87}
88
89
90/* These are all rcv-related errors which we want to count for stats */
91#define E_SUM_PKTERRS \
92 (INFINIPATH_E_RHDRLEN | INFINIPATH_E_RBADTID | \
93 INFINIPATH_E_RBADVERSION | INFINIPATH_E_RHDR | \
94 INFINIPATH_E_RLONGPKTLEN | INFINIPATH_E_RSHORTPKTLEN | \
95 INFINIPATH_E_RMAXPKTLEN | INFINIPATH_E_RMINPKTLEN | \
96 INFINIPATH_E_RFORMATERR | INFINIPATH_E_RUNSUPVL | \
97 INFINIPATH_E_RUNEXPCHAR | INFINIPATH_E_REBP)
98
99/* These are all send-related errors which we want to count for stats */
100#define E_SUM_ERRS \
101 (INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SUNEXPERRPKTNUM | \
102 INFINIPATH_E_SDROPPEDDATAPKT | INFINIPATH_E_SDROPPEDSMPPKT | \
103 INFINIPATH_E_SMAXPKTLEN | INFINIPATH_E_SUNSUPVL | \
104 INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SPKTLEN | \
105 INFINIPATH_E_INVALIDADDR)
106
107/*
108 * This is similar to E_SUM_ERRS, but we can't ignore armlaunch and we don't
109 * ignore errors unrelated to freeze and cancelling buffers. We can't ignore
110 * armlaunch because we could get more while still cleaning up, and need
111 * to cancel those as they happen.
112 */
113#define E_SPKT_ERRS_IGNORE \
114 (INFINIPATH_E_SDROPPEDDATAPKT | INFINIPATH_E_SDROPPEDSMPPKT | \
115 INFINIPATH_E_SMAXPKTLEN | INFINIPATH_E_SMINPKTLEN | \
116 INFINIPATH_E_SPKTLEN)
117
118/*
119 * these are errors that can occur when the link changes state while
120 * a packet is being sent or received. This doesn't cover things
121 * like EBP or VCRC that can result from the link changing state
122 * while a packet is being sent, so we receive a "known bad" packet.
123 */
124#define E_SUM_LINK_PKTERRS \
125 (INFINIPATH_E_SDROPPEDDATAPKT | INFINIPATH_E_SDROPPEDSMPPKT | \
126 INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SPKTLEN | \
127 INFINIPATH_E_RSHORTPKTLEN | INFINIPATH_E_RMINPKTLEN | \
128 INFINIPATH_E_RUNEXPCHAR)
129
130static u64 handle_e_sum_errs(struct ipath_devdata *dd, ipath_err_t errs)
131{
132 u64 ignore_this_time = 0;
133
134 ipath_disarm_senderrbufs(dd);
135 if ((errs & E_SUM_LINK_PKTERRS) &&
136 !(dd->ipath_flags & IPATH_LINKACTIVE)) {
137 /*
138 * This can happen when SMA is trying to bring the link
139 * up, but the IB link changes state at the "wrong" time.
140 * The IB logic then complains that the packet isn't
141 * valid. We don't want to confuse people, so we just
142 * don't print them, except at debug
143 */
144 ipath_dbg("Ignoring packet errors %llx, because link not "
145 "ACTIVE\n", (unsigned long long) errs);
146 ignore_this_time = errs & E_SUM_LINK_PKTERRS;
147 }
148
149 return ignore_this_time;
150}
151
152/* generic hw error messages... */
153#define INFINIPATH_HWE_TXEMEMPARITYERR_MSG(a) \
154 { \
155 .mask = ( INFINIPATH_HWE_TXEMEMPARITYERR_##a << \
156 INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT ), \
157 .msg = "TXE " #a " Memory Parity" \
158 }
159#define INFINIPATH_HWE_RXEMEMPARITYERR_MSG(a) \
160 { \
161 .mask = ( INFINIPATH_HWE_RXEMEMPARITYERR_##a << \
162 INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT ), \
163 .msg = "RXE " #a " Memory Parity" \
164 }
165
166static const struct ipath_hwerror_msgs ipath_generic_hwerror_msgs[] = {
167 INFINIPATH_HWE_MSG(IBCBUSFRSPCPARITYERR, "IPATH2IB Parity"),
168 INFINIPATH_HWE_MSG(IBCBUSTOSPCPARITYERR, "IB2IPATH Parity"),
169
170 INFINIPATH_HWE_TXEMEMPARITYERR_MSG(PIOBUF),
171 INFINIPATH_HWE_TXEMEMPARITYERR_MSG(PIOPBC),
172 INFINIPATH_HWE_TXEMEMPARITYERR_MSG(PIOLAUNCHFIFO),
173
174 INFINIPATH_HWE_RXEMEMPARITYERR_MSG(RCVBUF),
175 INFINIPATH_HWE_RXEMEMPARITYERR_MSG(LOOKUPQ),
176 INFINIPATH_HWE_RXEMEMPARITYERR_MSG(EAGERTID),
177 INFINIPATH_HWE_RXEMEMPARITYERR_MSG(EXPTID),
178 INFINIPATH_HWE_RXEMEMPARITYERR_MSG(FLAGBUF),
179 INFINIPATH_HWE_RXEMEMPARITYERR_MSG(DATAINFO),
180 INFINIPATH_HWE_RXEMEMPARITYERR_MSG(HDRINFO),
181};
182
183/**
184 * ipath_format_hwmsg - format a single hwerror message
185 * @msg: message buffer
186 * @msgl: length of message buffer
187 * @hwmsg: message to add to message buffer
188 */
189static void ipath_format_hwmsg(char *msg, size_t msgl, const char *hwmsg)
190{
191 strlcat(msg, "[", msgl);
192 strlcat(msg, hwmsg, msgl);
193 strlcat(msg, "]", msgl);
194}
195
196/**
197 * ipath_format_hwerrors - format hardware error messages for display
198 * @hwerrs: hardware errors bit vector
199 * @hwerrmsgs: hardware error descriptions
200 * @nhwerrmsgs: number of hwerrmsgs
201 * @msg: message buffer
202 * @msgl: message buffer length
203 */
204void ipath_format_hwerrors(u64 hwerrs,
205 const struct ipath_hwerror_msgs *hwerrmsgs,
206 size_t nhwerrmsgs,
207 char *msg, size_t msgl)
208{
209 int i;
210 const int glen =
211 ARRAY_SIZE(ipath_generic_hwerror_msgs);
212
213 for (i=0; i<glen; i++) {
214 if (hwerrs & ipath_generic_hwerror_msgs[i].mask) {
215 ipath_format_hwmsg(msg, msgl,
216 ipath_generic_hwerror_msgs[i].msg);
217 }
218 }
219
220 for (i=0; i<nhwerrmsgs; i++) {
221 if (hwerrs & hwerrmsgs[i].mask) {
222 ipath_format_hwmsg(msg, msgl, hwerrmsgs[i].msg);
223 }
224 }
225}
226
227/* return the strings for the most common link states */
228static char *ib_linkstate(struct ipath_devdata *dd, u64 ibcs)
229{
230 char *ret;
231 u32 state;
232
233 state = ipath_ib_state(dd, ibcs);
234 if (state == dd->ib_init)
235 ret = "Init";
236 else if (state == dd->ib_arm)
237 ret = "Arm";
238 else if (state == dd->ib_active)
239 ret = "Active";
240 else
241 ret = "Down";
242 return ret;
243}
244
245void signal_ib_event(struct ipath_devdata *dd, enum ib_event_type ev)
246{
247 struct ib_event event;
248
249 event.device = &dd->verbs_dev->ibdev;
250 event.element.port_num = 1;
251 event.event = ev;
252 ib_dispatch_event(&event);
253}
254
255static void handle_e_ibstatuschanged(struct ipath_devdata *dd,
256 ipath_err_t errs)
257{
258 u32 ltstate, lstate, ibstate, lastlstate;
259 u32 init = dd->ib_init;
260 u32 arm = dd->ib_arm;
261 u32 active = dd->ib_active;
262 const u64 ibcs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
263
264 lstate = ipath_ib_linkstate(dd, ibcs); /* linkstate */
265 ibstate = ipath_ib_state(dd, ibcs);
266 /* linkstate at last interrupt */
267 lastlstate = ipath_ib_linkstate(dd, dd->ipath_lastibcstat);
268 ltstate = ipath_ib_linktrstate(dd, ibcs); /* linktrainingstate */
269
270 /*
271 * Since going into a recovery state causes the link state to go
272 * down and since recovery is transitory, it is better if we "miss"
273 * ever seeing the link training state go into recovery (i.e.,
274 * ignore this transition for link state special handling purposes)
275 * without even updating ipath_lastibcstat.
276 */
277 if ((ltstate == INFINIPATH_IBCS_LT_STATE_RECOVERRETRAIN) ||
278 (ltstate == INFINIPATH_IBCS_LT_STATE_RECOVERWAITRMT) ||
279 (ltstate == INFINIPATH_IBCS_LT_STATE_RECOVERIDLE))
280 goto done;
281
282 /*
283 * if linkstate transitions into INIT from any of the various down
284 * states, or if it transitions from any of the up (INIT or better)
285 * states into any of the down states (except link recovery), then
286 * call the chip-specific code to take appropriate actions.
287 */
288 if (lstate >= INFINIPATH_IBCS_L_STATE_INIT &&
289 lastlstate == INFINIPATH_IBCS_L_STATE_DOWN) {
290 /* transitioned to UP */
291 if (dd->ipath_f_ib_updown(dd, 1, ibcs)) {
292 /* link came up, so we must no longer be disabled */
293 dd->ipath_flags &= ~IPATH_IB_LINK_DISABLED;
294 ipath_cdbg(LINKVERB, "LinkUp handled, skipped\n");
295 goto skip_ibchange; /* chip-code handled */
296 }
297 } else if ((lastlstate >= INFINIPATH_IBCS_L_STATE_INIT ||
298 (dd->ipath_flags & IPATH_IB_FORCE_NOTIFY)) &&
299 ltstate <= INFINIPATH_IBCS_LT_STATE_CFGWAITRMT &&
300 ltstate != INFINIPATH_IBCS_LT_STATE_LINKUP) {
301 int handled;
302 handled = dd->ipath_f_ib_updown(dd, 0, ibcs);
303 dd->ipath_flags &= ~IPATH_IB_FORCE_NOTIFY;
304 if (handled) {
305 ipath_cdbg(LINKVERB, "LinkDown handled, skipped\n");
306 goto skip_ibchange; /* chip-code handled */
307 }
308 }
309
310 /*
311 * Significant enough to always print and get into logs, if it was
312 * unexpected. If it was a requested state change, we'll have
313 * already cleared the flags, so we won't print this warning
314 */
315 if ((ibstate != arm && ibstate != active) &&
316 (dd->ipath_flags & (IPATH_LINKARMED | IPATH_LINKACTIVE))) {
317 dev_info(&dd->pcidev->dev, "Link state changed from %s "
318 "to %s\n", (dd->ipath_flags & IPATH_LINKARMED) ?
319 "ARM" : "ACTIVE", ib_linkstate(dd, ibcs));
320 }
321
322 if (ltstate == INFINIPATH_IBCS_LT_STATE_POLLACTIVE ||
323 ltstate == INFINIPATH_IBCS_LT_STATE_POLLQUIET) {
324 u32 lastlts;
325 lastlts = ipath_ib_linktrstate(dd, dd->ipath_lastibcstat);
326 /*
327 * Ignore cycling back and forth from Polling.Active to
328 * Polling.Quiet while waiting for the other end of the link
329 * to come up, except to try and decide if we are connected
330 * to a live IB device or not. We will cycle back and
331 * forth between them if no cable is plugged in, the other
332 * device is powered off or disabled, etc.
333 */
334 if (lastlts == INFINIPATH_IBCS_LT_STATE_POLLACTIVE ||
335 lastlts == INFINIPATH_IBCS_LT_STATE_POLLQUIET) {
336 if (!(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) &&
337 (++dd->ipath_ibpollcnt == 40)) {
338 dd->ipath_flags |= IPATH_NOCABLE;
339 *dd->ipath_statusp |=
340 IPATH_STATUS_IB_NOCABLE;
341 ipath_cdbg(LINKVERB, "Set NOCABLE\n");
342 }
343 ipath_cdbg(LINKVERB, "POLL change to %s (%x)\n",
344 ipath_ibcstatus_str[ltstate], ibstate);
345 goto skip_ibchange;
346 }
347 }
348
349 dd->ipath_ibpollcnt = 0; /* not poll*, now */
350 ipath_stats.sps_iblink++;
351
352 if (ibstate != init && dd->ipath_lastlinkrecov && ipath_linkrecovery) {
353 u64 linkrecov;
354 linkrecov = ipath_snap_cntr(dd,
355 dd->ipath_cregs->cr_iblinkerrrecovcnt);
356 if (linkrecov != dd->ipath_lastlinkrecov) {
357 ipath_dbg("IB linkrecov up %Lx (%s %s) recov %Lu\n",
358 (unsigned long long) ibcs,
359 ib_linkstate(dd, ibcs),
360 ipath_ibcstatus_str[ltstate],
361 (unsigned long long) linkrecov);
362 /* and no more until active again */
363 dd->ipath_lastlinkrecov = 0;
364 ipath_set_linkstate(dd, IPATH_IB_LINKDOWN);
365 goto skip_ibchange;
366 }
367 }
368
369 if (ibstate == init || ibstate == arm || ibstate == active) {
370 *dd->ipath_statusp &= ~IPATH_STATUS_IB_NOCABLE;
371 if (ibstate == init || ibstate == arm) {
372 *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
373 if (dd->ipath_flags & IPATH_LINKACTIVE)
374 signal_ib_event(dd, IB_EVENT_PORT_ERR);
375 }
376 if (ibstate == arm) {
377 dd->ipath_flags |= IPATH_LINKARMED;
378 dd->ipath_flags &= ~(IPATH_LINKUNK |
379 IPATH_LINKINIT | IPATH_LINKDOWN |
380 IPATH_LINKACTIVE | IPATH_NOCABLE);
381 ipath_hol_down(dd);
382 } else if (ibstate == init) {
383 /*
384 * set INIT and DOWN. Down is checked by
385 * most of the other code, but INIT is
386 * useful to know in a few places.
387 */
388 dd->ipath_flags |= IPATH_LINKINIT |
389 IPATH_LINKDOWN;
390 dd->ipath_flags &= ~(IPATH_LINKUNK |
391 IPATH_LINKARMED | IPATH_LINKACTIVE |
392 IPATH_NOCABLE);
393 ipath_hol_down(dd);
394 } else { /* active */
395 dd->ipath_lastlinkrecov = ipath_snap_cntr(dd,
396 dd->ipath_cregs->cr_iblinkerrrecovcnt);
397 *dd->ipath_statusp |=
398 IPATH_STATUS_IB_READY | IPATH_STATUS_IB_CONF;
399 dd->ipath_flags |= IPATH_LINKACTIVE;
400 dd->ipath_flags &= ~(IPATH_LINKUNK | IPATH_LINKINIT
401 | IPATH_LINKDOWN | IPATH_LINKARMED |
402 IPATH_NOCABLE);
403 if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
404 ipath_restart_sdma(dd);
405 signal_ib_event(dd, IB_EVENT_PORT_ACTIVE);
406 /* LED active not handled in chip _f_updown */
407 dd->ipath_f_setextled(dd, lstate, ltstate);
408 ipath_hol_up(dd);
409 }
410
411 /*
412 * print after we've already done the work, so as not to
413 * delay the state changes and notifications, for debugging
414 */
415 if (lstate == lastlstate)
416 ipath_cdbg(LINKVERB, "Unchanged from last: %s "
417 "(%x)\n", ib_linkstate(dd, ibcs), ibstate);
418 else
419 ipath_cdbg(VERBOSE, "Unit %u: link up to %s %s (%x)\n",
420 dd->ipath_unit, ib_linkstate(dd, ibcs),
421 ipath_ibcstatus_str[ltstate], ibstate);
422 } else { /* down */
423 if (dd->ipath_flags & IPATH_LINKACTIVE)
424 signal_ib_event(dd, IB_EVENT_PORT_ERR);
425 dd->ipath_flags |= IPATH_LINKDOWN;
426 dd->ipath_flags &= ~(IPATH_LINKUNK | IPATH_LINKINIT
427 | IPATH_LINKACTIVE |
428 IPATH_LINKARMED);
429 *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
430 dd->ipath_lli_counter = 0;
431
432 if (lastlstate != INFINIPATH_IBCS_L_STATE_DOWN)
433 ipath_cdbg(VERBOSE, "Unit %u link state down "
434 "(state 0x%x), from %s\n",
435 dd->ipath_unit, lstate,
436 ib_linkstate(dd, dd->ipath_lastibcstat));
437 else
438 ipath_cdbg(LINKVERB, "Unit %u link state changed "
439 "to %s (0x%x) from down (%x)\n",
440 dd->ipath_unit,
441 ipath_ibcstatus_str[ltstate],
442 ibstate, lastlstate);
443 }
444
445skip_ibchange:
446 dd->ipath_lastibcstat = ibcs;
447done:
448 return;
449}
450
451static void handle_supp_msgs(struct ipath_devdata *dd,
452 unsigned supp_msgs, char *msg, u32 msgsz)
453{
454 /*
455 * Print the message unless it's ibc status change only, which
456 * happens so often we never want to count it.
457 */
458 if (dd->ipath_lasterror & ~INFINIPATH_E_IBSTATUSCHANGED) {
459 int iserr;
460 ipath_err_t mask;
461 iserr = ipath_decode_err(dd, msg, msgsz,
462 dd->ipath_lasterror &
463 ~INFINIPATH_E_IBSTATUSCHANGED);
464
465 mask = INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
466 INFINIPATH_E_PKTERRS | INFINIPATH_E_SDMADISABLED;
467
468 /* if we're in debug, then don't mask SDMADISABLED msgs */
469 if (ipath_debug & __IPATH_DBG)
470 mask &= ~INFINIPATH_E_SDMADISABLED;
471
472 if (dd->ipath_lasterror & ~mask)
473 ipath_dev_err(dd, "Suppressed %u messages for "
474 "fast-repeating errors (%s) (%llx)\n",
475 supp_msgs, msg,
476 (unsigned long long)
477 dd->ipath_lasterror);
478 else {
479 /*
480 * rcvegrfull and rcvhdrqfull are "normal", for some
481 * types of processes (mostly benchmarks) that send
482 * huge numbers of messages, while not processing
483 * them. So only complain about these at debug
484 * level.
485 */
486 if (iserr)
487 ipath_dbg("Suppressed %u messages for %s\n",
488 supp_msgs, msg);
489 else
490 ipath_cdbg(ERRPKT,
491 "Suppressed %u messages for %s\n",
492 supp_msgs, msg);
493 }
494 }
495}
496
497static unsigned handle_frequent_errors(struct ipath_devdata *dd,
498 ipath_err_t errs, char *msg,
499 u32 msgsz, int *noprint)
500{
501 unsigned long nc;
502 static unsigned long nextmsg_time;
503 static unsigned nmsgs, supp_msgs;
504
505 /*
506 * Throttle back "fast" messages to no more than 10 per 5 seconds.
507 * This isn't perfect, but it's a reasonable heuristic. If we get
508 * more than 10, give a 6x longer delay.
509 */
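	/* normal spacing is HZ/2 (10 msgs per ~5 sec); once throttled,
	 * hold for HZ*3 before printing a summary */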
510 nc = jiffies;
511 if (nmsgs > 10) {
512 if (time_before(nc, nextmsg_time)) {
513 *noprint = 1;
514 if (!supp_msgs++)
515 nextmsg_time = nc + HZ * 3;
516 } else if (supp_msgs) {
517 handle_supp_msgs(dd, supp_msgs, msg, msgsz);
518 supp_msgs = 0;
519 nmsgs = 0;
520 }
521 } else if (!nmsgs++ || time_after(nc, nextmsg_time)) {
522 nextmsg_time = nc + HZ / 2;
523 }
524
525 return supp_msgs;
526}
527
528static void handle_sdma_errors(struct ipath_devdata *dd, ipath_err_t errs)
529{
530 unsigned long flags;
531 int expected;
532
533 if (ipath_debug & __IPATH_DBG) {
534 char msg[128];
535 ipath_decode_err(dd, msg, sizeof msg, errs &
536 INFINIPATH_E_SDMAERRS);
537 ipath_dbg("errors %lx (%s)\n", (unsigned long)errs, msg);
538 }
539 if (ipath_debug & __IPATH_VERBDBG) {
540 unsigned long tl, hd, status, lengen;
541 tl = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmatail);
542 hd = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmahead);
543 status = ipath_read_kreg64(dd
544 , dd->ipath_kregs->kr_senddmastatus);
545 lengen = ipath_read_kreg64(dd,
546 dd->ipath_kregs->kr_senddmalengen);
547 ipath_cdbg(VERBOSE, "sdma tl 0x%lx hd 0x%lx status 0x%lx "
548 "lengen 0x%lx\n", tl, hd, status, lengen);
549 }
550
551 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
552 __set_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
553 expected = test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
554 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
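	/* if the disable was not expected (no abort already in progress),
	 * cancel any outstanding sends */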
555 if (!expected)
556 ipath_cancel_sends(dd, 1);
557}
558
559static void handle_sdma_intr(struct ipath_devdata *dd, u64 istat)
560{
561 unsigned long flags;
562 int expected;
563
564 if ((istat & INFINIPATH_I_SDMAINT) &&
565 !test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
566 ipath_sdma_intr(dd);
567
568 if (istat & INFINIPATH_I_SDMADISABLED) {
569 expected = test_bit(IPATH_SDMA_ABORTING,
570 &dd->ipath_sdma_status);
571 ipath_dbg("%s SDmaDisabled intr\n",
572 expected ? "expected" : "unexpected");
573 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
574 __set_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
575 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
576 if (!expected)
577 ipath_cancel_sends(dd, 1);
578 if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
579 tasklet_hi_schedule(&dd->ipath_sdma_abort_task);
580 }
581}
582
583static int handle_hdrq_full(struct ipath_devdata *dd)
584{
585 int chkerrpkts = 0;
586 u32 hd, tl;
587 u32 i;
588
589 ipath_stats.sps_hdrqfull++;
590 for (i = 0; i < dd->ipath_cfgports; i++) {
591 struct ipath_portdata *pd = dd->ipath_pd[i];
592
593 if (i == 0) {
594 /*
595 * For kernel receive queues, we just want to know
596 * if there are packets in the queue that we can
597 * process.
598 */
599 if (pd->port_head != ipath_get_hdrqtail(pd))
600 chkerrpkts |= 1 << i;
601 continue;
602 }
603
604 /* Skip if user context is not open */
605 if (!pd || !pd->port_cnt)
606 continue;
607
608 /* Don't report the same point multiple times. */
609 if (dd->ipath_flags & IPATH_NODMA_RTAIL)
610 tl = ipath_read_ureg32(dd, ur_rcvhdrtail, i);
611 else
612 tl = ipath_get_rcvhdrtail(pd);
613 if (tl == pd->port_lastrcvhdrqtail)
614 continue;
615
616 hd = ipath_read_ureg32(dd, ur_rcvhdrhead, i);
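		/* queue is full: head is one entry past tail, or head has
		 * wrapped to 0 with tail at the last entry */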
617 if (hd == (tl + 1) || (!hd && tl == dd->ipath_hdrqlast)) {
618 pd->port_lastrcvhdrqtail = tl;
619 pd->port_hdrqfull++;
620 /* flush hdrqfull so that poll() sees it */
621 wmb();
622 wake_up_interruptible(&pd->port_wait);
623 }
624 }
625
626 return chkerrpkts;
627}
628
629static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
630{
631 char msg[128];
632 u64 ignore_this_time = 0;
633 u64 iserr = 0;
634 int chkerrpkts = 0, noprint = 0;
635 unsigned supp_msgs;
636 int log_idx;
637
638 /*
639 * don't report errors that are masked, either at init
640 * (not set in ipath_errormask), or temporarily (set in
641 * ipath_maskederrs)
642 */
643 errs &= dd->ipath_errormask & ~dd->ipath_maskederrs;
644
645 supp_msgs = handle_frequent_errors(dd, errs, msg, (u32)sizeof msg,
646 &noprint);
647
648 /* do these first, they are most important */
649 if (errs & INFINIPATH_E_HARDWARE) {
650 /* reuse same msg buf */
651 dd->ipath_f_handle_hwerrors(dd, msg, sizeof msg);
652 } else {
653 u64 mask;
654 for (log_idx = 0; log_idx < IPATH_EEP_LOG_CNT; ++log_idx) {
655 mask = dd->ipath_eep_st_masks[log_idx].errs_to_log;
656 if (errs & mask)
657 ipath_inc_eeprom_err(dd, log_idx, 1);
658 }
659 }
660
661 if (errs & INFINIPATH_E_SDMAERRS)
662 handle_sdma_errors(dd, errs);
663
664 if (!noprint && (errs & ~dd->ipath_e_bitsextant))
665 ipath_dev_err(dd, "error interrupt with unknown errors "
666 "%llx set\n", (unsigned long long)
667 (errs & ~dd->ipath_e_bitsextant));
668
669 if (errs & E_SUM_ERRS)
670 ignore_this_time = handle_e_sum_errs(dd, errs);
671 else if ((errs & E_SUM_LINK_PKTERRS) &&
672 !(dd->ipath_flags & IPATH_LINKACTIVE)) {
673 /*
674 * This can happen when SMA is trying to bring the link
675 * up, but the IB link changes state at the "wrong" time.
676 * The IB logic then complains that the packet isn't
677 * valid. We don't want to confuse people, so we just
678 * don't print them, except at debug
679 */
680 ipath_dbg("Ignoring packet errors %llx, because link not "
681 "ACTIVE\n", (unsigned long long) errs);
682 ignore_this_time = errs & E_SUM_LINK_PKTERRS;
683 }
684
685 if (supp_msgs == 250000) {
686 int s_iserr;
687 /*
688 * It's not entirely reasonable to assume that the errors set
689 * in the last clear period are all responsible for the
690 * problem, but the alternative is to assume it's only the
691 * ones on this particular interrupt, which also isn't great.
692 */
693 dd->ipath_maskederrs |= dd->ipath_lasterror | errs;
694
695 dd->ipath_errormask &= ~dd->ipath_maskederrs;
696 ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
697 dd->ipath_errormask);
698 s_iserr = ipath_decode_err(dd, msg, sizeof msg,
699 dd->ipath_maskederrs);
700
701 if (dd->ipath_maskederrs &
702 ~(INFINIPATH_E_RRCVEGRFULL |
703 INFINIPATH_E_RRCVHDRFULL | INFINIPATH_E_PKTERRS))
704 ipath_dev_err(dd, "Temporarily disabling "
705 "error(s) %llx reporting; too frequent (%s)\n",
706 (unsigned long long) dd->ipath_maskederrs,
707 msg);
708 else {
709 /*
710 * rcvegrfull and rcvhdrqfull are "normal",
711 * for some types of processes (mostly benchmarks)
712 * that send huge numbers of messages, while not
713 * processing them. So only complain about
714 * these at debug level.
715 */
716 if (s_iserr)
717 ipath_dbg("Temporarily disabling reporting "
718 "too frequent queue full errors (%s)\n",
719 msg);
720 else
721 ipath_cdbg(ERRPKT,
722 "Temporarily disabling reporting too"
723 " frequent packet errors (%s)\n",
724 msg);
725 }
726
727 /*
728 * Re-enable the masked errors after around 3 minutes, in
729 * ipath_get_faststats(). If we have a series of fast-
730 * repeating but different errors, the interval will keep
731 * stretching out, but that's OK, as that situation is
732 * pretty catastrophic anyway.
733 */
734 dd->ipath_unmasktime = jiffies + HZ * 180;
735 }
736
737 ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, errs);
738 if (ignore_this_time)
739 errs &= ~ignore_this_time;
740 if (errs & ~dd->ipath_lasterror) {
741 errs &= ~dd->ipath_lasterror;
742 /* never suppress duplicate hwerrors or ibstatuschange */
743 dd->ipath_lasterror |= errs &
744 ~(INFINIPATH_E_HARDWARE |
745 INFINIPATH_E_IBSTATUSCHANGED);
746 }
747
748 if (errs & INFINIPATH_E_SENDSPECIALTRIGGER) {
749 dd->ipath_spectriggerhit++;
750 ipath_dbg("%lu special trigger hits\n",
751 dd->ipath_spectriggerhit);
752 }
753
754 /* likely due to cancel; so suppress message unless verbose */
755 if ((errs & (INFINIPATH_E_SPKTLEN | INFINIPATH_E_SPIOARMLAUNCH)) &&
756 time_after(dd->ipath_lastcancel, jiffies)) {
757 /* armlaunch takes precedence; it often causes both. */
758 ipath_cdbg(VERBOSE,
759 "Suppressed %s error (%llx) after sendbuf cancel\n",
760 (errs & INFINIPATH_E_SPIOARMLAUNCH) ?
761 "armlaunch" : "sendpktlen", (unsigned long long)errs);
762 errs &= ~(INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SPKTLEN);
763 }
764
765 if (!errs)
766 return 0;
767
768 if (!noprint) {
769 ipath_err_t mask;
770 /*
771 * The ones we mask off are handled specially below
772 * or above. Also mask SDMADISABLED by default as it
773 * is too chatty.
774 */
775 mask = INFINIPATH_E_IBSTATUSCHANGED |
776 INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
777 INFINIPATH_E_HARDWARE | INFINIPATH_E_SDMADISABLED;
778
779 /* if we're in debug, then don't mask SDMADISABLED msgs */
780 if (ipath_debug & __IPATH_DBG)
781 mask &= ~INFINIPATH_E_SDMADISABLED;
782
783 ipath_decode_err(dd, msg, sizeof msg, errs & ~mask);
784 } else
785 /* so we don't need if (!noprint) at strlcat's below */
786 *msg = 0;
787
788 if (errs & E_SUM_PKTERRS) {
789 ipath_stats.sps_pkterrs++;
790 chkerrpkts = 1;
791 }
792 if (errs & E_SUM_ERRS)
793 ipath_stats.sps_errs++;
794
795 if (errs & (INFINIPATH_E_RICRC | INFINIPATH_E_RVCRC)) {
796 ipath_stats.sps_crcerrs++;
797 chkerrpkts = 1;
798 }
799 iserr = errs & ~(E_SUM_PKTERRS | INFINIPATH_E_PKTERRS);
800
801
802 /*
803 * We don't want to print these two as they happen, or we can make
804 * the situation even worse, because it takes so long to print
805 * messages to serial consoles. Kernel ports get printed from
806 * fast_stats, no more than every 5 seconds; user ports get printed
807 * on close.
808 */
809 if (errs & INFINIPATH_E_RRCVHDRFULL)
810 chkerrpkts |= handle_hdrq_full(dd);
811 if (errs & INFINIPATH_E_RRCVEGRFULL) {
812 struct ipath_portdata *pd = dd->ipath_pd[0];
813
814 /*
815 * since this is of less importance and not likely to
816 * happen without also getting hdrfull, only count
817 * occurrences; don't check each port (or even the kernel
818 * vs user)
819 */
820 ipath_stats.sps_etidfull++;
821 if (pd->port_head != ipath_get_hdrqtail(pd))
822 chkerrpkts |= 1;
823 }
824
825 /*
826 * do this before IBSTATUSCHANGED, in case both bits set in a single
827 * interrupt; we want the STATUSCHANGE to "win", so we do our
828 * internal copy of state machine correctly
829 */
830 if (errs & INFINIPATH_E_RIBLOSTLINK) {
831 /*
832 * force through block below
833 */
834 errs |= INFINIPATH_E_IBSTATUSCHANGED;
835 ipath_stats.sps_iblink++;
836 dd->ipath_flags |= IPATH_LINKDOWN;
837 dd->ipath_flags &= ~(IPATH_LINKUNK | IPATH_LINKINIT
838 | IPATH_LINKARMED | IPATH_LINKACTIVE);
839 *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
840
841 ipath_dbg("Lost link, link now down (%s)\n",
842 ipath_ibcstatus_str[ipath_read_kreg64(dd,
843 dd->ipath_kregs->kr_ibcstatus) & 0xf]);
844 }
845 if (errs & INFINIPATH_E_IBSTATUSCHANGED)
846 handle_e_ibstatuschanged(dd, errs);
847
848 if (errs & INFINIPATH_E_RESET) {
849 if (!noprint)
850 ipath_dev_err(dd, "Got reset, requires re-init "
851 "(unload and reload driver)\n");
852 dd->ipath_flags &= ~IPATH_INITTED; /* needs re-init */
853 /* mark as having had error */
854 *dd->ipath_statusp |= IPATH_STATUS_HWERROR;
855 *dd->ipath_statusp &= ~IPATH_STATUS_IB_CONF;
856 }
857
858 if (!noprint && *msg) {
859 if (iserr)
860 ipath_dev_err(dd, "%s error\n", msg);
861 }
862 if (dd->ipath_state_wanted & dd->ipath_flags) {
863 ipath_cdbg(VERBOSE, "driver wanted state %x, iflags now %x, "
864 "waking\n", dd->ipath_state_wanted,
865 dd->ipath_flags);
866 wake_up_interruptible(&ipath_state_wait);
867 }
868
869 return chkerrpkts;
870}
871
872/*
873 * try to cleanup as much as possible for anything that might have gone
874 * wrong while in freeze mode, such as pio buffers being written by user
875 * processes (causing armlaunch), send errors due to going into freeze mode,
876 * etc., and try to avoid causing extra interrupts while doing so.
877 * Forcibly update the in-memory pioavail register copies after cleanup
878 * because the chip won't do it while in freeze mode (the register values
879 * themselves are kept correct).
880 * Make sure that we don't lose any important interrupts by using the chip
881 * feature that says that writing 0 to a bit in *clear that is set in
882 * *status will cause an interrupt to be generated again (if allowed by
883 * the *mask value).
884 */
885void ipath_clear_freeze(struct ipath_devdata *dd)
886{
887 /* disable error interrupts, to avoid confusion */
888 ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask, 0ULL);
889
890 /* also disable interrupts; errormask is sometimes overwritten */
891 ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
892
893 ipath_cancel_sends(dd, 1);
894
895 /* clear the freeze, and be sure chip saw it */
896 ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
897 dd->ipath_control);
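	/* the scratch read below flushes the posted control write to the chip */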
898 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
899
900 /* force in-memory update now we are out of freeze */
901 ipath_force_pio_avail_update(dd);
902
903 /*
904 * force new interrupt if any hwerr, error or interrupt bits are
905 * still set, and clear "safe" send packet errors related to freeze
906 * and cancelling sends. Re-enable error interrupts before possible
907 * force of re-interrupt on pending interrupts.
908 */
909 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear, 0ULL);
910 ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear,
911 E_SPKT_ERRS_IGNORE);
912 ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
913 dd->ipath_errormask);
914 ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, -1LL);
915 ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, 0ULL);
916}
917
918
919/* this is separate to allow for better optimization of ipath_intr() */
920
921static noinline void ipath_bad_intr(struct ipath_devdata *dd, u32 *unexpectp)
922{
923 /*
924 * These sometimes happen during driver init and unload; we don't
925 * want to process any interrupts at that point.
926 */
927
928 /* this is just a bandaid, not a fix, if something goes badly
929 * wrong */
930 if (++*unexpectp > 100) {
931 if (++*unexpectp > 105) {
932 /*
933 * ok, we must be taking somebody else's interrupts,
934 * due to a messed up mptable and/or PIRQ table, so
935 * unregister the interrupt. We've seen this during
936 * linuxbios development work, and it may happen in
937 * the future again.
938 */
939 if (dd->pcidev && dd->ipath_irq) {
940 ipath_dev_err(dd, "Now %u unexpected "
941 "interrupts, unregistering "
942 "interrupt handler\n",
943 *unexpectp);
944 ipath_dbg("free_irq of irq %d\n",
945 dd->ipath_irq);
946 dd->ipath_f_free_irq(dd);
947 }
948 }
949 if (ipath_read_ireg(dd, dd->ipath_kregs->kr_intmask)) {
950 ipath_dev_err(dd, "%u unexpected interrupts, "
951 "disabling interrupts completely\n",
952 *unexpectp);
953 /*
954 * disable all interrupts, something is very wrong
955 */
956 ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask,
957 0ULL);
958 }
959 } else if (*unexpectp > 1)
960 ipath_dbg("Interrupt when not ready, should not happen, "
961 "ignoring\n");
962}
963
964static noinline void ipath_bad_regread(struct ipath_devdata *dd)
965{
966 static int allbits;
967
968 /* separate routine, for better optimization of ipath_intr() */
969
970 /*
971 * We print the message and disable interrupts, in hope of
972 * having a better chance of debugging the problem.
973 */
974 ipath_dev_err(dd,
975 "Read of interrupt status failed (all bits set)\n");
976 if (allbits++) {
977 /* disable all interrupts, something is very wrong */
978 ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
979 if (allbits == 2) {
980 ipath_dev_err(dd, "Still bad interrupt status, "
981 "unregistering interrupt\n");
982 dd->ipath_f_free_irq(dd);
983 } else if (allbits > 2) {
984 if ((allbits % 10000) == 0)
985 printk(".");
986 } else
987 ipath_dev_err(dd, "Disabling interrupts, "
988 "multiple errors\n");
989 }
990}
991
992static void handle_layer_pioavail(struct ipath_devdata *dd)
993{
994 unsigned long flags;
995 int ret;
996
997 ret = ipath_ib_piobufavail(dd->verbs_dev);
998 if (ret > 0)
999 goto set;
1000
1001 return;
1002set:
1003 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
1004 dd->ipath_sendctrl |= INFINIPATH_S_PIOINTBUFAVAIL;
1005 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1006 dd->ipath_sendctrl);
1007 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
1008 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
1009}
1010
1011/*
1012 * Handle receive interrupts for user ports; this means a user
1013 * process was waiting for a packet to arrive, and didn't want
1014 * to poll
1015 */
1016static void handle_urcv(struct ipath_devdata *dd, u64 istat)
1017{
1018 u64 portr;
1019 int i;
1020 int rcvdint = 0;
1021
1022 /*
1023 * test_and_clear_bit(IPATH_PORT_WAITING_RCV) and
1024 * test_and_clear_bit(IPATH_PORT_WAITING_URG) below
1025 * would both like timely updates of the bits so that
1026 * we don't pass them by unnecessarily. the rmb()
1027 * here ensures that we see them promptly -- the
1028 * corresponding wmb()'s are in ipath_poll_urgent()
1029 * and ipath_poll_next()...
1030 */
1031 rmb();
1032 portr = ((istat >> dd->ipath_i_rcvavail_shift) &
1033 dd->ipath_i_rcvavail_mask) |
1034 ((istat >> dd->ipath_i_rcvurg_shift) &
1035 dd->ipath_i_rcvurg_mask);
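	/* portr now has one bit per port, set if that port has a
	 * rcvavail or rcvurg interrupt pending */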
1036 for (i = 1; i < dd->ipath_cfgports; i++) {
1037 struct ipath_portdata *pd = dd->ipath_pd[i];
1038
1039 if (portr & (1 << i) && pd && pd->port_cnt) {
1040 if (test_and_clear_bit(IPATH_PORT_WAITING_RCV,
1041 &pd->port_flag)) {
1042 clear_bit(i + dd->ipath_r_intravail_shift,
1043 &dd->ipath_rcvctrl);
1044 wake_up_interruptible(&pd->port_wait);
1045 rcvdint = 1;
1046 } else if (test_and_clear_bit(IPATH_PORT_WAITING_URG,
1047 &pd->port_flag)) {
1048 pd->port_urgent++;
1049 wake_up_interruptible(&pd->port_wait);
1050 }
1051 }
1052 }
1053 if (rcvdint) {
1054 /* only want to take one interrupt, so turn off the rcv
1055 * interrupt for all the ports for which we set rcv_waiting
1056 * (but never for the kernel port)
1057 */
1058 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
1059 dd->ipath_rcvctrl);
1060 }
1061}
1062
1063irqreturn_t ipath_intr(int irq, void *data)
1064{
1065 struct ipath_devdata *dd = data;
1066 u64 istat, chk0rcv = 0;
1067 ipath_err_t estat = 0;
1068 irqreturn_t ret;
1069 static unsigned unexpected = 0;
1070 u64 kportrbits;
1071
1072 ipath_stats.sps_ints++;
1073
1074 if (dd->ipath_int_counter != (u32) -1)
1075 dd->ipath_int_counter++;
1076
1077 if (!(dd->ipath_flags & IPATH_PRESENT)) {
1078 /*
1079 * This return value is not great, but we do not want the
1080 * interrupt core code to remove our interrupt handler
1081 * because we don't appear to be handling an interrupt
1082 * during a chip reset.
1083 */
1084 return IRQ_HANDLED;
1085 }
1086
1087 /*
1088 * this needs to be flags&initted, not statusp, so we keep
1089 * taking interrupts even after link goes down, etc.
1090 * Also, we *must* clear the interrupt at some point, or we won't
1091 * take it again, which can be real bad for errors, etc...
1092 */
1093
1094 if (!(dd->ipath_flags & IPATH_INITTED)) {
1095 ipath_bad_intr(dd, &unexpected);
1096 ret = IRQ_NONE;
1097 goto bail;
1098 }
1099
1100 istat = ipath_read_ireg(dd, dd->ipath_kregs->kr_intstatus);
1101
1102 if (unlikely(!istat)) {
1103 ipath_stats.sps_nullintr++;
1104 ret = IRQ_NONE; /* not our interrupt, or already handled */
1105 goto bail;
1106 }
1107 if (unlikely(istat == -1)) {
1108 ipath_bad_regread(dd);
1109 /* don't know if it was our interrupt or not */
1110 ret = IRQ_NONE;
1111 goto bail;
1112 }
1113
1114 if (unexpected)
1115 unexpected = 0;
1116
1117 if (unlikely(istat & ~dd->ipath_i_bitsextant))
1118 ipath_dev_err(dd,
1119 "interrupt with unknown interrupts %Lx set\n",
1120 (unsigned long long)
1121 istat & ~dd->ipath_i_bitsextant);
1122 else if (istat & ~INFINIPATH_I_ERROR) /* errors do own printing */
1123 ipath_cdbg(VERBOSE, "intr stat=0x%Lx\n",
1124 (unsigned long long) istat);
1125
1126 if (istat & INFINIPATH_I_ERROR) {
1127 ipath_stats.sps_errints++;
1128 estat = ipath_read_kreg64(dd,
1129 dd->ipath_kregs->kr_errorstatus);
1130 if (!estat)
1131 dev_info(&dd->pcidev->dev, "error interrupt (%Lx), "
1132 "but no error bits set!\n",
1133 (unsigned long long) istat);
1134 else if (estat == -1LL)
1135 /*
1136 * should we try clearing all, or hope next read
1137 * works?
1138 */
1139 ipath_dev_err(dd, "Read of error status failed "
1140 "(all bits set); ignoring\n");
1141 else
1142 chk0rcv |= handle_errors(dd, estat);
1143 }
1144
1145 if (istat & INFINIPATH_I_GPIO) {
1146 /*
1147 * GPIO interrupts fall in two broad classes:
1148 * GPIO_2 indicates (on some HT4xx boards) that a packet
1149 * has arrived for Port 0. Checking for this
1150 * is controlled by flag IPATH_GPIO_INTR.
1151 * GPIO_3..5 on IBA6120 Rev2 and IBA6110 Rev4 chips indicate
1152 * errors that we need to count. Checking for this
1153 * is controlled by flag IPATH_GPIO_ERRINTRS.
1154 */
1155 u32 gpiostatus;
1156 u32 to_clear = 0;
1157
1158 gpiostatus = ipath_read_kreg32(
1159 dd, dd->ipath_kregs->kr_gpio_status);
1160 /* First the error-counter case. */
1161 if ((gpiostatus & IPATH_GPIO_ERRINTR_MASK) &&
1162 (dd->ipath_flags & IPATH_GPIO_ERRINTRS)) {
1163 /* want to clear the bits we see asserted. */
1164 to_clear |= (gpiostatus & IPATH_GPIO_ERRINTR_MASK);
1165
1166 /*
1167 * Count appropriately, clear bits out of our copy,
1168 * as they have been "handled".
1169 */
1170 if (gpiostatus & (1 << IPATH_GPIO_RXUVL_BIT)) {
1171 ipath_dbg("FlowCtl on UnsupVL\n");
1172 dd->ipath_rxfc_unsupvl_errs++;
1173 }
1174 if (gpiostatus & (1 << IPATH_GPIO_OVRUN_BIT)) {
1175 ipath_dbg("Overrun Threshold exceeded\n");
1176 dd->ipath_overrun_thresh_errs++;
1177 }
1178 if (gpiostatus & (1 << IPATH_GPIO_LLI_BIT)) {
1179 ipath_dbg("Local Link Integrity error\n");
1180 dd->ipath_lli_errs++;
1181 }
1182 gpiostatus &= ~IPATH_GPIO_ERRINTR_MASK;
1183 }
1184 /* Now the Port0 Receive case */
1185 if ((gpiostatus & (1 << IPATH_GPIO_PORT0_BIT)) &&
1186 (dd->ipath_flags & IPATH_GPIO_INTR)) {
1187 /*
1188 * GPIO status bit 2 is set, and we expected it.
1189 * Clear it and note it via chk0rcv.
1190 * This probably only happens if a Port0 pkt
1191 * arrives at _just_ the wrong time, and we
1192 * handle that by setting chk0rcv.
1193 */
1194 to_clear |= (1 << IPATH_GPIO_PORT0_BIT);
1195 gpiostatus &= ~(1 << IPATH_GPIO_PORT0_BIT);
1196 chk0rcv = 1;
1197 }
1198 if (gpiostatus) {
1199 /*
1200 * Some unexpected bits remain. If they could have
1201 * caused the interrupt, complain and clear.
1202 * To avoid repetition of this condition, also clear
1203 * the mask. It is almost certainly due to error.
1204 */
1205 const u32 mask = (u32) dd->ipath_gpio_mask;
1206
1207 if (mask & gpiostatus) {
1208 ipath_dbg("Unexpected GPIO IRQ bits %x\n",
1209 gpiostatus & mask);
1210 to_clear |= (gpiostatus & mask);
1211 dd->ipath_gpio_mask &= ~(gpiostatus & mask);
1212 ipath_write_kreg(dd,
1213 dd->ipath_kregs->kr_gpio_mask,
1214 dd->ipath_gpio_mask);
1215 }
1216 }
1217 if (to_clear) {
1218 ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_clear,
1219 (u64) to_clear);
1220 }
1221 }
1222
1223 /*
1224 * Clear the interrupt bits we found set, unless they are receive
1225 * related, in which case we already cleared them above, and don't
1226 * want to clear them again, because we might lose an interrupt.
1227 * Clear it early, so we "know" the chip will have seen this by
1228 * the time we process the queue, and will re-interrupt if necessary.
1229 * The processor itself won't take the interrupt again until we return.
1230 */
1231 ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, istat);
1232
1233 /*
1234 * Handle kernel receive queues before checking for pio buffers
1235 * available since receives can overflow; piobuf waiters can afford
1236 * a few extra cycles, since they were waiting anyway, and user's
1237 * waiting for receive are at the bottom.
1238 */
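	/* bit 0 of each rcv interrupt field corresponds to the kernel port (port 0) */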
1239 kportrbits = (1ULL << dd->ipath_i_rcvavail_shift) |
1240 (1ULL << dd->ipath_i_rcvurg_shift);
1241 if (chk0rcv || (istat & kportrbits)) {
1242 istat &= ~kportrbits;
1243 ipath_kreceive(dd->ipath_pd[0]);
1244 }
1245
1246 if (istat & ((dd->ipath_i_rcvavail_mask << dd->ipath_i_rcvavail_shift) |
1247 (dd->ipath_i_rcvurg_mask << dd->ipath_i_rcvurg_shift)))
1248 handle_urcv(dd, istat);
1249
1250 if (istat & (INFINIPATH_I_SDMAINT | INFINIPATH_I_SDMADISABLED))
1251 handle_sdma_intr(dd, istat);
1252
1253 if (istat & INFINIPATH_I_SPIOBUFAVAIL) {
1254 unsigned long flags;
1255
1256 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
1257 dd->ipath_sendctrl &= ~INFINIPATH_S_PIOINTBUFAVAIL;
1258 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1259 dd->ipath_sendctrl);
1260 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
1261 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
1262
1263 /* always process; sdma verbs uses PIO for acks and VL15 */
1264 handle_layer_pioavail(dd);
1265 }
1266
1267 ret = IRQ_HANDLED;
1268
1269bail:
1270 return ret;
1271}
diff --git a/drivers/staging/rdma/ipath/ipath_kernel.h b/drivers/staging/rdma/ipath/ipath_kernel.h
deleted file mode 100644
index 66c934a5f839..000000000000
--- a/drivers/staging/rdma/ipath/ipath_kernel.h
+++ /dev/null
@@ -1,1374 +0,0 @@
1#ifndef _IPATH_KERNEL_H
2#define _IPATH_KERNEL_H
3/*
4 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
5 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 */
35
36/*
37 * This header file is the base header file for infinipath kernel code;
38 * ipath_user.h serves a similar purpose for user code.
39 */
40
41#include <linux/interrupt.h>
42#include <linux/pci.h>
43#include <linux/dma-mapping.h>
44#include <linux/mutex.h>
45#include <linux/list.h>
46#include <linux/scatterlist.h>
47#include <linux/sched.h>
48#include <asm/io.h>
49#include <rdma/ib_verbs.h>
50
51#include "ipath_common.h"
52#include "ipath_debug.h"
53#include "ipath_registers.h"
54
55/* only s/w major version of InfiniPath we can handle */
56#define IPATH_CHIP_VERS_MAJ 2U
57
58/* don't care about this except printing */
59#define IPATH_CHIP_VERS_MIN 0U
60
61/* temporary, maybe always */
62extern struct infinipath_stats ipath_stats;
63
64#define IPATH_CHIP_SWVERSION IPATH_CHIP_VERS_MAJ
65/*
66 * First-cut criterion for "device is active" is
67 * two thousand dwords combined Tx, Rx traffic per
68 * 5-second interval. SMA packets are 64 dwords,
69 * and occur "a few per second", presumably each way.
70 */
71#define IPATH_TRAFFIC_ACTIVE_THRESHOLD (2000)
72/*
73 * Struct used to indicate which errors are logged in each of the
74 * error-counters that are logged to EEPROM. A counter is incremented
75 * _once_ (saturating at 255) for each event with any bits set in
76 * the error or hwerror register masks below.
77 */
78#define IPATH_EEP_LOG_CNT (4)
79struct ipath_eep_log_mask {
80 u64 errs_to_log;
81 u64 hwerrs_to_log;
82};
83
84struct ipath_portdata {
85 void **port_rcvegrbuf;
86 dma_addr_t *port_rcvegrbuf_phys;
87 /* rcvhdrq base, needs mmap before useful */
88 void *port_rcvhdrq;
89 /* kernel virtual address where hdrqtail is updated */
90 void *port_rcvhdrtail_kvaddr;
91 /*
92 * temp buffer for expected send setup, allocated at open, instead
93 * of each setup call
94 */
95 void *port_tid_pg_list;
96 /* when waiting for rcv or pioavail */
97 wait_queue_head_t port_wait;
98 /*
99 * rcvegr bufs base, physical, must fit
100 * in 44 bits (so 32 bit programs' mmap64 works)
101 */
102 dma_addr_t port_rcvegr_phys;
103 /* mmap of hdrq, must fit in 44 bits */
104 dma_addr_t port_rcvhdrq_phys;
105 dma_addr_t port_rcvhdrqtailaddr_phys;
106 /*
107 * number of opens (including slave subports) on this instance
108 * (ignoring forks, dup, etc. for now)
109 */
110 int port_cnt;
111 /*
112 * how much space to leave at start of eager TID entries for
113 * protocol use, on each TID
114 */
115 /* instead of calculating it */
116 unsigned port_port;
117 /* non-zero if port is being shared. */
118 u16 port_subport_cnt;
119 /* non-zero if port is being shared. */
120 u16 port_subport_id;
121 /* number of pio bufs for this port (all procs, if shared) */
122 u32 port_piocnt;
123 /* first pio buffer for this port */
124 u32 port_pio_base;
125 /* chip offset of PIO buffers for this port */
126 u32 port_piobufs;
127 /* how many alloc_pages() chunks in port_rcvegrbuf_pages */
128 u32 port_rcvegrbuf_chunks;
129 /* how many egrbufs per chunk */
130 u32 port_rcvegrbufs_perchunk;
131 /* order for port_rcvegrbuf_pages */
132 size_t port_rcvegrbuf_size;
133 /* rcvhdrq size (for freeing) */
134 size_t port_rcvhdrq_size;
135 /* next expected TID to check when looking for free */
136 u32 port_tidcursor;
137 /* per-port status flags (IPATH_PORT_* bits) */
138 unsigned long port_flag;
139 /* what happened */
140 unsigned long int_flag;
141 /* WAIT_RCV that timed out, no interrupt */
142 u32 port_rcvwait_to;
143 /* WAIT_PIO that timed out, no interrupt */
144 u32 port_piowait_to;
145 /* WAIT_RCV already happened, no wait */
146 u32 port_rcvnowait;
147 /* WAIT_PIO already happened, no wait */
148 u32 port_pionowait;
149 /* total number of rcvhdrqfull errors */
150 u32 port_hdrqfull;
151 /*
152 * Used to suppress multiple instances of same
153 * port staying stuck at same point.
154 */
155 u32 port_lastrcvhdrqtail;
156 /* saved total number of rcvhdrqfull errors for poll edge trigger */
157 u32 port_hdrqfull_poll;
158 /* total number of polled urgent packets */
159 u32 port_urgent;
160 /* saved total number of polled urgent packets for poll edge trigger */
161 u32 port_urgent_poll;
162 /* pid of process using this port */
163 struct pid *port_pid;
164 struct pid *port_subpid[INFINIPATH_MAX_SUBPORT];
165 /* same size as task_struct .comm[] */
166 char port_comm[TASK_COMM_LEN];
167 /* pkeys set by this use of this port */
168 u16 port_pkeys[4];
169 /* so file ops can get at unit */
170 struct ipath_devdata *port_dd;
171 /* A page of memory for rcvhdrhead, rcvegrhead, rcvegrtail * N */
172 void *subport_uregbase;
173 /* An array of pages for the eager receive buffers * N */
174 void *subport_rcvegrbuf;
175 /* An array of pages for the eager header queue entries * N */
176 void *subport_rcvhdr_base;
177 /* The version of the library which opened this port */
178 u32 userversion;
179 /* Bitmask of active slaves */
180 u32 active_slaves;
181 /* Type of packets or conditions we want to poll for */
182 u16 poll_type;
183 /* port rcvhdrq head offset */
184 u32 port_head;
185 /* receive packet sequence counter */
186 u32 port_seq_cnt;
187};
188
189struct sk_buff;
190struct ipath_sge_state;
191struct ipath_verbs_txreq;
192
193/*
194 * control information for layered drivers
195 */
196struct _ipath_layer {
197 void *l_arg;
198};
199
200struct ipath_skbinfo {
201 struct sk_buff *skb;
202 dma_addr_t phys;
203};
204
205struct ipath_sdma_txreq {
206 int flags;
207 int sg_count;
208 union {
209 struct scatterlist *sg;
210 void *map_addr;
211 };
212 void (*callback)(void *, int);
213 void *callback_cookie;
214 int callback_status;
215 u16 start_idx; /* sdma private */
216 u16 next_descq_idx; /* sdma private */
217 struct list_head list; /* sdma private */
218};
219
220struct ipath_sdma_desc {
221 __le64 qw[2];
222};
223
224#define IPATH_SDMA_TXREQ_F_USELARGEBUF 0x1
225#define IPATH_SDMA_TXREQ_F_HEADTOHOST 0x2
226#define IPATH_SDMA_TXREQ_F_INTREQ 0x4
227#define IPATH_SDMA_TXREQ_F_FREEBUF 0x8
228#define IPATH_SDMA_TXREQ_F_FREEDESC 0x10
229#define IPATH_SDMA_TXREQ_F_VL15 0x20
230
231#define IPATH_SDMA_TXREQ_S_OK 0
232#define IPATH_SDMA_TXREQ_S_SENDERROR 1
233#define IPATH_SDMA_TXREQ_S_ABORTED 2
234#define IPATH_SDMA_TXREQ_S_SHUTDOWN 3
235
236#define IPATH_SDMA_STATUS_SCORE_BOARD_DRAIN_IN_PROG (1ull << 63)
237#define IPATH_SDMA_STATUS_ABORT_IN_PROG (1ull << 62)
238#define IPATH_SDMA_STATUS_INTERNAL_SDMA_ENABLE (1ull << 61)
239#define IPATH_SDMA_STATUS_SCB_EMPTY (1ull << 30)
240
241/* max dwords in small buffer packet */
242#define IPATH_SMALLBUF_DWORDS (dd->ipath_piosize2k >> 2)
243
244/*
245 * Possible IB config parameters for ipath_f_get/set_ib_cfg()
246 */
247#define IPATH_IB_CFG_LIDLMC 0 /* Get/set LID (LS16b) and Mask (MS16b) */
248#define IPATH_IB_CFG_HRTBT 1 /* Get/set Heartbeat off/enable/auto */
249#define IPATH_IB_HRTBT_ON 3 /* Heartbeat enabled, sent every 100msec */
250#define IPATH_IB_HRTBT_OFF 0 /* Heartbeat off */
251#define IPATH_IB_CFG_LWID_ENB 2 /* Get/set allowed Link-width */
252#define IPATH_IB_CFG_LWID 3 /* Get currently active Link-width */
253#define IPATH_IB_CFG_SPD_ENB 4 /* Get/set allowed Link speeds */
254#define IPATH_IB_CFG_SPD 5 /* Get current Link spd */
255#define IPATH_IB_CFG_RXPOL_ENB 6 /* Get/set Auto-RX-polarity enable */
256#define IPATH_IB_CFG_LREV_ENB 7 /* Get/set Auto-Lane-reversal enable */
257#define IPATH_IB_CFG_LINKLATENCY 8 /* Get link latency */
258
259
260struct ipath_devdata {
261 struct list_head ipath_list;
262
263 struct ipath_kregs const *ipath_kregs;
264 struct ipath_cregs const *ipath_cregs;
265
266 /* mem-mapped pointer to base of chip regs */
267 u64 __iomem *ipath_kregbase;
268 /* end of mem-mapped chip space; range checking */
269 u64 __iomem *ipath_kregend;
270 /* physical address of chip for io_remap, etc. */
271 unsigned long ipath_physaddr;
272 /* base of memory alloced for ipath_kregbase, for free */
273 u64 *ipath_kregalloc;
274 /* ipath_cfgports pointers */
275 struct ipath_portdata **ipath_pd;
276 /* sk_buffs used by port 0 eager receive queue */
277 struct ipath_skbinfo *ipath_port0_skbinfo;
278 /* kvirt address of 1st 2k pio buffer */
279 void __iomem *ipath_pio2kbase;
280 /* kvirt address of 1st 4k pio buffer */
281 void __iomem *ipath_pio4kbase;
282 /*
283 * points to area where PIOavail registers will be DMA'ed.
284 * Has to be on a page of its own, because the page will be
285 * mapped into user program space. This copy is *ONLY* ever
286 * written by DMA, not by the driver! Need a copy per device
287 * when we get to multiple devices
288 */
289 volatile __le64 *ipath_pioavailregs_dma;
290 /* physical address where updates occur */
291 dma_addr_t ipath_pioavailregs_phys;
292 struct _ipath_layer ipath_layer;
293 /* setup intr */
294 int (*ipath_f_intrsetup)(struct ipath_devdata *);
295 /* fallback to alternate interrupt type if possible */
296 int (*ipath_f_intr_fallback)(struct ipath_devdata *);
297 /* setup on-chip bus config */
298 int (*ipath_f_bus)(struct ipath_devdata *, struct pci_dev *);
299 /* hard reset chip */
300 int (*ipath_f_reset)(struct ipath_devdata *);
301 int (*ipath_f_get_boardname)(struct ipath_devdata *, char *,
302 size_t);
303 void (*ipath_f_init_hwerrors)(struct ipath_devdata *);
304 void (*ipath_f_handle_hwerrors)(struct ipath_devdata *, char *,
305 size_t);
306 void (*ipath_f_quiet_serdes)(struct ipath_devdata *);
307 int (*ipath_f_bringup_serdes)(struct ipath_devdata *);
308 int (*ipath_f_early_init)(struct ipath_devdata *);
309 void (*ipath_f_clear_tids)(struct ipath_devdata *, unsigned);
310 void (*ipath_f_put_tid)(struct ipath_devdata *, u64 __iomem*,
311 u32, unsigned long);
312 void (*ipath_f_tidtemplate)(struct ipath_devdata *);
313 void (*ipath_f_cleanup)(struct ipath_devdata *);
314 void (*ipath_f_setextled)(struct ipath_devdata *, u64, u64);
315 /* fill out chip-specific fields */
316 int (*ipath_f_get_base_info)(struct ipath_portdata *, void *);
317 /* free irq */
318 void (*ipath_f_free_irq)(struct ipath_devdata *);
319 struct ipath_message_header *(*ipath_f_get_msgheader)
320 (struct ipath_devdata *, __le32 *);
321 void (*ipath_f_config_ports)(struct ipath_devdata *, ushort);
322 int (*ipath_f_get_ib_cfg)(struct ipath_devdata *, int);
323 int (*ipath_f_set_ib_cfg)(struct ipath_devdata *, int, u32);
324 void (*ipath_f_config_jint)(struct ipath_devdata *, u16 , u16);
325 void (*ipath_f_read_counters)(struct ipath_devdata *,
326 struct infinipath_counters *);
327 void (*ipath_f_xgxs_reset)(struct ipath_devdata *);
328 /* per chip actions needed for IB Link up/down changes */
329 int (*ipath_f_ib_updown)(struct ipath_devdata *, int, u64);
330
331 unsigned ipath_lastegr_idx;
332 struct ipath_ibdev *verbs_dev;
333 struct timer_list verbs_timer;
334 /* total dwords sent (summed from counter) */
335 u64 ipath_sword;
336 /* total dwords rcvd (summed from counter) */
337 u64 ipath_rword;
338 /* total packets sent (summed from counter) */
339 u64 ipath_spkts;
340 /* total packets rcvd (summed from counter) */
341 u64 ipath_rpkts;
342 /* ipath_statusp initially points to this. */
343 u64 _ipath_status;
344 /* GUID for this interface, in network order */
345 __be64 ipath_guid;
346 /*
347 * aggregate of error bits reported since last cleared, for
348 * limiting of error reporting
349 */
350 ipath_err_t ipath_lasterror;
351 /*
352 * aggregate of error bits reported since last cleared, for
353 * limiting of hwerror reporting
354 */
355 ipath_err_t ipath_lasthwerror;
356 /* errors masked because they occur too fast */
357 ipath_err_t ipath_maskederrs;
358 u64 ipath_lastlinkrecov; /* link recoveries at last ACTIVE */
359 /* these 5 fields are used to establish deltas for IB Symbol
360 * errors and linkrecovery errors. They can be reported on
361 * some chips during link negotiation prior to INIT, and with
362 * DDR when faking DDR negotiations with non-IBTA switches.
363 * The chip counters are adjusted at driver unload if there is
364 * a non-zero delta.
365 */
366 u64 ibdeltainprog;
367 u64 ibsymdelta;
368 u64 ibsymsnap;
369 u64 iblnkerrdelta;
370 u64 iblnkerrsnap;
371
372 /* time in jiffies at which to re-enable maskederrs */
373 unsigned long ipath_unmasktime;
374 /* count of egrfull errors, combined for all ports */
375 u64 ipath_last_tidfull;
376 /* for ipath_qcheck() */
377 u64 ipath_lastport0rcv_cnt;
378 /* template for writing TIDs */
379 u64 ipath_tidtemplate;
380 /* value to write to free TIDs */
381 u64 ipath_tidinvalid;
382 /* IBA6120 rcv interrupt setup */
383 u64 ipath_rhdrhead_intr_off;
384
385 /* size of memory at ipath_kregbase */
386 u32 ipath_kregsize;
387 /* number of registers used for pioavail */
388 u32 ipath_pioavregs;
389 /* IPATH_POLL, etc. */
390 u32 ipath_flags;
391 /* ipath_flags driver is waiting for */
392 u32 ipath_state_wanted;
393 /* last buffer for user use, first buf for kernel use is this
394 * index. */
395 u32 ipath_lastport_piobuf;
396 /* is a stats timer active */
397 u32 ipath_stats_timer_active;
398 /* number of interrupts for this device -- saturates... */
399 u32 ipath_int_counter;
400 /* dwords sent read from counter */
401 u32 ipath_lastsword;
402 /* dwords received read from counter */
403 u32 ipath_lastrword;
404 /* sent packets read from counter */
405 u32 ipath_lastspkts;
406 /* received packets read from counter */
407 u32 ipath_lastrpkts;
408 /* pio bufs allocated per port */
409 u32 ipath_pbufsport;
410 /* if remainder on bufs/port, ports < extrabuf get 1 extra */
411 u32 ipath_ports_extrabuf;
412 u32 ipath_pioupd_thresh; /* update threshold, some chips */
413 /*
414 * number of ports configured as max; zero is set to number chip
415 * supports, less gives more pio bufs/port, etc.
416 */
417 u32 ipath_cfgports;
418 /* count of port 0 hdrqfull errors */
419 u32 ipath_p0_hdrqfull;
420 /* port 0 number of receive eager buffers */
421 u32 ipath_p0_rcvegrcnt;
422
423 /*
424 * index of last piobuffer we used. Speeds up searching, by
425 * starting at this point. Doesn't matter if multiple CPUs use and
426 * update it; the last updater's write is all that matters. Whenever it
427 * wraps, we update shadow copies. Need a copy per device when we
428 * get to multiple devices
429 */
430 u32 ipath_lastpioindex;
431 u32 ipath_lastpioindexl;
432 /* max length of freezemsg */
433 u32 ipath_freezelen;
434 /*
435 * consecutive times we wanted a PIO buffer but were unable to
436 * get one
437 */
438 u32 ipath_consec_nopiobuf;
439 /*
440 * hint that we should update ipath_pioavailshadow before
441 * looking for a PIO buffer
442 */
443 u32 ipath_upd_pio_shadow;
444 /* so we can rewrite it after a chip reset */
445 u32 ipath_pcibar0;
446 /* so we can rewrite it after a chip reset */
447 u32 ipath_pcibar1;
448 u32 ipath_x1_fix_tries;
449 u32 ipath_autoneg_tries;
450 u32 serdes_first_init_done;
451
452 struct ipath_relock {
453 atomic_t ipath_relock_timer_active;
454 struct timer_list ipath_relock_timer;
455 unsigned int ipath_relock_interval; /* in jiffies */
456 } ipath_relock_singleton;
457
458 /* interrupt number */
459 int ipath_irq;
460 /* HT/PCI Vendor ID (here for NodeInfo) */
461 u16 ipath_vendorid;
462 /* HT/PCI Device ID (here for NodeInfo) */
463 u16 ipath_deviceid;
464 /* offset in HT config space of slave/primary interface block */
465 u8 ipath_ht_slave_off;
466 /* for write combining settings */
467 int wc_cookie;
468 /* ref count for each pkey */
469 atomic_t ipath_pkeyrefs[4];
470 /* shadow copy of struct page *'s for exp tid pages */
471 struct page **ipath_pageshadow;
472 /* shadow copy of dma handles for exp tid pages */
473 dma_addr_t *ipath_physshadow;
474 u64 __iomem *ipath_egrtidbase;
475 /* lock to workaround chip bug 9437 and others */
476 spinlock_t ipath_kernel_tid_lock;
477 spinlock_t ipath_user_tid_lock;
478 spinlock_t ipath_sendctrl_lock;
479 /* around ipath_pd and (user ports) port_cnt use (intr vs free) */
480 spinlock_t ipath_uctxt_lock;
481
482 /*
483 * IPATH_STATUS_*,
484 * this address is mapped readonly into user processes so they can
485 * get status cheaply, whenever they want.
486 */
487 u64 *ipath_statusp;
488 /* freeze msg if hw error put chip in freeze */
489 char *ipath_freezemsg;
490 /* pci access data structure */
491 struct pci_dev *pcidev;
492 struct cdev *user_cdev;
493 struct cdev *diag_cdev;
494 struct device *user_dev;
495 struct device *diag_dev;
496 /* timer used to prevent stats overflow, error throttling, etc. */
497 struct timer_list ipath_stats_timer;
498 /* timer to verify interrupts work, and fallback if possible */
499 struct timer_list ipath_intrchk_timer;
500 void *ipath_dummy_hdrq; /* used after port close */
501 dma_addr_t ipath_dummy_hdrq_phys;
502
503 /* SendDMA related entries */
504 spinlock_t ipath_sdma_lock;
505 unsigned long ipath_sdma_status;
506 unsigned long ipath_sdma_abort_jiffies;
507 unsigned long ipath_sdma_abort_intr_timeout;
508 unsigned long ipath_sdma_buf_jiffies;
509 struct ipath_sdma_desc *ipath_sdma_descq;
510 u64 ipath_sdma_descq_added;
511 u64 ipath_sdma_descq_removed;
512 int ipath_sdma_desc_nreserved;
513 u16 ipath_sdma_descq_cnt;
514 u16 ipath_sdma_descq_tail;
515 u16 ipath_sdma_descq_head;
516 u16 ipath_sdma_next_intr;
517 u16 ipath_sdma_reset_wait;
518 u8 ipath_sdma_generation;
519 struct tasklet_struct ipath_sdma_abort_task;
520 struct tasklet_struct ipath_sdma_notify_task;
521 struct list_head ipath_sdma_activelist;
522 struct list_head ipath_sdma_notifylist;
523 atomic_t ipath_sdma_vl15_count;
524 struct timer_list ipath_sdma_vl15_timer;
525
526 dma_addr_t ipath_sdma_descq_phys;
527 volatile __le64 *ipath_sdma_head_dma;
528 dma_addr_t ipath_sdma_head_phys;
529
530 unsigned long ipath_ureg_align; /* user register alignment */
531
532 struct delayed_work ipath_autoneg_work;
533 wait_queue_head_t ipath_autoneg_wait;
534
535 /* HoL blocking / user app forward-progress state */
536 unsigned ipath_hol_state;
537 unsigned ipath_hol_next;
538 struct timer_list ipath_hol_timer;
539
540 /*
541 * Shadow copies of registers; size indicates read access size.
542	 * Most of them are readonly, but some are write-only registers,
543 * where we manipulate the bits in the shadow copy, and then write
544 * the shadow copy to infinipath.
545 *
546 * We deliberately make most of these 32 bits, since they have
547	 * restricted range.  For any that we read, we want to generate 32
548 * bit accesses, since Opteron will generate 2 separate 32 bit HT
549 * transactions for a 64 bit read, and we want to avoid unnecessary
550 * HT transactions.
551 */
552
553 /* This is the 64 bit group */
554
555 /*
556 * shadow of pioavail, check to be sure it's large enough at
557 * init time.
558 */
559 unsigned long ipath_pioavailshadow[8];
560 /* bitmap of send buffers available for the kernel to use with PIO. */
561 unsigned long ipath_pioavailkernel[8];
562 /* shadow of kr_gpio_out, for rmw ops */
563 u64 ipath_gpio_out;
564 /* shadow the gpio mask register */
565 u64 ipath_gpio_mask;
566 /* shadow the gpio output enable, etc... */
567 u64 ipath_extctrl;
568 /* kr_revision shadow */
569 u64 ipath_revision;
570 /*
571 * shadow of ibcctrl, for interrupt handling of link changes,
572 * etc.
573 */
574 u64 ipath_ibcctrl;
575 /*
576 * last ibcstatus, to suppress "duplicate" status change messages,
577 * mostly from 2 to 3
578 */
579 u64 ipath_lastibcstat;
580 /* hwerrmask shadow */
581 ipath_err_t ipath_hwerrmask;
582 ipath_err_t ipath_errormask; /* errormask shadow */
583 /* interrupt config reg shadow */
584 u64 ipath_intconfig;
585 /* kr_sendpiobufbase value */
586 u64 ipath_piobufbase;
587 /* kr_ibcddrctrl shadow */
588 u64 ipath_ibcddrctrl;
589
590 /* these are the "32 bit" regs */
591
592 /*
593 * number of GUIDs in the flash for this interface; may need some
594 * rethinking for setting on other ifaces
595 */
596 u32 ipath_nguid;
597 /*
598 * the following two are 32-bit bitmasks, but {test,clear,set}_bit
599 * all expect bit fields to be "unsigned long"
600 */
601 /* shadow kr_rcvctrl */
602 unsigned long ipath_rcvctrl;
603 /* shadow kr_sendctrl */
604 unsigned long ipath_sendctrl;
605 /* to not count armlaunch after cancel */
606 unsigned long ipath_lastcancel;
607 /* count cases where special trigger was needed (double write) */
608 unsigned long ipath_spectriggerhit;
609
610 /* value we put in kr_rcvhdrcnt */
611 u32 ipath_rcvhdrcnt;
612 /* value we put in kr_rcvhdrsize */
613 u32 ipath_rcvhdrsize;
614 /* value we put in kr_rcvhdrentsize */
615 u32 ipath_rcvhdrentsize;
616 /* offset of last entry in rcvhdrq */
617 u32 ipath_hdrqlast;
618 /* kr_portcnt value */
619 u32 ipath_portcnt;
620 /* kr_pagealign value */
621 u32 ipath_palign;
622 /* number of "2KB" PIO buffers */
623 u32 ipath_piobcnt2k;
624 /* size in bytes of "2KB" PIO buffers */
625 u32 ipath_piosize2k;
626 /* number of "4KB" PIO buffers */
627 u32 ipath_piobcnt4k;
628 /* size in bytes of "4KB" PIO buffers */
629 u32 ipath_piosize4k;
630 u32 ipath_pioreserved; /* reserved special-inkernel; */
631 /* kr_rcvegrbase value */
632 u32 ipath_rcvegrbase;
633 /* kr_rcvegrcnt value */
634 u32 ipath_rcvegrcnt;
635 /* kr_rcvtidbase value */
636 u32 ipath_rcvtidbase;
637 /* kr_rcvtidcnt value */
638 u32 ipath_rcvtidcnt;
639 /* kr_sendregbase */
640 u32 ipath_sregbase;
641 /* kr_userregbase */
642 u32 ipath_uregbase;
643 /* kr_counterregbase */
644 u32 ipath_cregbase;
645 /* shadow the control register contents */
646 u32 ipath_control;
647 /* PCI revision register (HTC rev on FPGA) */
648 u32 ipath_pcirev;
649
650 /* chip address space used by 4k pio buffers */
651 u32 ipath_4kalign;
652 /* The MTU programmed for this unit */
653 u32 ipath_ibmtu;
654 /*
655	 * The max size IB packet, including IB headers, that we can send.
656 * Starts same as ipath_piosize, but is affected when ibmtu is
657 * changed, or by size of eager buffers
658 */
659 u32 ipath_ibmaxlen;
660 /*
661 * ibmaxlen at init time, limited by chip and by receive buffer
662 * size. Not changed after init.
663 */
664 u32 ipath_init_ibmaxlen;
665 /* size of each rcvegrbuffer */
666 u32 ipath_rcvegrbufsize;
667	/* localbus width (1, 2, 4, 8, 16, 32) from config space */
668 u32 ipath_lbus_width;
669 /* localbus speed (HT: 200,400,800,1000; PCIe 2500) */
670 u32 ipath_lbus_speed;
671 /*
672	 * number of sequential ibcstatus changes for polling active/quiet
673 * (i.e., link not coming up).
674 */
675 u32 ipath_ibpollcnt;
676 /* low and high portions of MSI capability/vector */
677 u32 ipath_msi_lo;
678 /* saved after PCIe init for restore after reset */
679 u32 ipath_msi_hi;
680 /* MSI data (vector) saved for restore */
681 u16 ipath_msi_data;
682 /* MLID programmed for this instance */
683 u16 ipath_mlid;
684 /* LID programmed for this instance */
685 u16 ipath_lid;
686 /* list of pkeys programmed; 0 if not set */
687 u16 ipath_pkeys[4];
688 /*
689	 * ASCII serial number, from flash, large enough for the original
690	 * all-digit strings and the longer QLogic serial number format
691 */
692 u8 ipath_serial[16];
693 /* human readable board version */
694 u8 ipath_boardversion[96];
695 u8 ipath_lbus_info[32]; /* human readable localbus info */
696 /* chip major rev, from ipath_revision */
697 u8 ipath_majrev;
698 /* chip minor rev, from ipath_revision */
699 u8 ipath_minrev;
700 /* board rev, from ipath_revision */
701 u8 ipath_boardrev;
702 /* saved for restore after reset */
703 u8 ipath_pci_cacheline;
704 /* LID mask control */
705 u8 ipath_lmc;
706 /* link width supported */
707 u8 ipath_link_width_supported;
708 /* link speed supported */
709 u8 ipath_link_speed_supported;
710 u8 ipath_link_width_enabled;
711 u8 ipath_link_speed_enabled;
712 u8 ipath_link_width_active;
713 u8 ipath_link_speed_active;
714 /* Rx Polarity inversion (compensate for ~tx on partner) */
715 u8 ipath_rx_pol_inv;
716
717 u8 ipath_r_portenable_shift;
718 u8 ipath_r_intravail_shift;
719 u8 ipath_r_tailupd_shift;
720 u8 ipath_r_portcfg_shift;
721
722 /* unit # of this chip, if present */
723 int ipath_unit;
724
725 /* local link integrity counter */
726 u32 ipath_lli_counter;
727 /* local link integrity errors */
728 u32 ipath_lli_errors;
729 /*
730 * Above counts only cases where _successive_ LocalLinkIntegrity
731 * errors were seen in the receive headers of kern-packets.
732 * Below are the three (monotonically increasing) counters
733 * maintained via GPIO interrupts on iba6120-rev2.
734 */
735 u32 ipath_rxfc_unsupvl_errs;
736 u32 ipath_overrun_thresh_errs;
737 u32 ipath_lli_errs;
738
739 /*
740 * Not all devices managed by a driver instance are the same
741 * type, so these fields must be per-device.
742 */
743 u64 ipath_i_bitsextant;
744 ipath_err_t ipath_e_bitsextant;
745 ipath_err_t ipath_hwe_bitsextant;
746
747 /*
748 * Below should be computable from number of ports,
749 * since they are never modified.
750 */
751 u64 ipath_i_rcvavail_mask;
752 u64 ipath_i_rcvurg_mask;
753 u16 ipath_i_rcvurg_shift;
754 u16 ipath_i_rcvavail_shift;
755
756 /*
757 * Register bits for selecting i2c direction and values, used for
758 * I2C serial flash.
759 */
760 u8 ipath_gpio_sda_num;
761 u8 ipath_gpio_scl_num;
762 u8 ipath_i2c_chain_type;
763 u64 ipath_gpio_sda;
764 u64 ipath_gpio_scl;
765
766 /* lock for doing RMW of shadows/regs for ExtCtrl and GPIO */
767 spinlock_t ipath_gpio_lock;
768
769 /*
770 * IB link and linktraining states and masks that vary per chip in
771	 * some way.  Set at init to avoid recomputing them on each IB status change interrupt.
772 */
773 u8 ibcs_ls_shift;
774 u8 ibcs_lts_mask;
775 u32 ibcs_mask;
776 u32 ib_init;
777 u32 ib_arm;
778 u32 ib_active;
779
780 u16 ipath_rhf_offset; /* offset of RHF within receive header entry */
781
782 /*
783	 * shift/mask for linkcmd, linkinitcmd, maxpktlen in ibccontrol
784 * reg. Changes for IBA7220
785 */
786 u8 ibcc_lic_mask; /* LinkInitCmd */
787 u8 ibcc_lc_shift; /* LinkCmd */
788 u8 ibcc_mpl_shift; /* Maxpktlen */
789
790 u8 delay_mult;
791
792 /* used to override LED behavior */
793 u8 ipath_led_override; /* Substituted for normal value, if non-zero */
794 u16 ipath_led_override_timeoff; /* delta to next timer event */
795 u8 ipath_led_override_vals[2]; /* Alternates per blink-frame */
796 u8 ipath_led_override_phase; /* Just counts, LSB picks from vals[] */
797 atomic_t ipath_led_override_timer_active;
798 /* Used to flash LEDs in override mode */
799 struct timer_list ipath_led_override_timer;
800
801 /* Support (including locks) for EEPROM logging of errors and time */
802 /* control access to actual counters, timer */
803 spinlock_t ipath_eep_st_lock;
804 /* control high-level access to EEPROM */
805 struct mutex ipath_eep_lock;
806 /* Below inc'd by ipath_snap_cntrs(), locked by ipath_eep_st_lock */
807 uint64_t ipath_traffic_wds;
808 /* active time is kept in seconds, but logged in hours */
809 atomic_t ipath_active_time;
810 /* Below are nominal shadow of EEPROM, new since last EEPROM update */
811 uint8_t ipath_eep_st_errs[IPATH_EEP_LOG_CNT];
812 uint8_t ipath_eep_st_new_errs[IPATH_EEP_LOG_CNT];
813 uint16_t ipath_eep_hrs;
814 /*
815	 * masks for which bits of errs and hwerrs cause
816 * each of the counters to increment.
817 */
818 struct ipath_eep_log_mask ipath_eep_st_masks[IPATH_EEP_LOG_CNT];
819
820 /* interrupt mitigation reload register info */
821 u16 ipath_jint_idle_ticks; /* idle clock ticks */
822 u16 ipath_jint_max_packets; /* max packets across all ports */
823
824 /*
825 * lock for access to SerDes, and flags to sequence preset
826 * versus steady-state. 7220-only at the moment.
827 */
828 spinlock_t ipath_sdepb_lock;
829 u8 ipath_presets_needed; /* Set if presets to be restored next DOWN */
830};
831
832/* ipath_hol_state values (stopping/starting user proc, send flushing) */
833#define IPATH_HOL_UP 0
834#define IPATH_HOL_DOWN 1
835/* ipath_hol_next toggle values, used when hol_state IPATH_HOL_DOWN */
836#define IPATH_HOL_DOWNSTOP 0
837#define IPATH_HOL_DOWNCONT 1
838
839/* bit positions for sdma_status */
840#define IPATH_SDMA_ABORTING 0
841#define IPATH_SDMA_DISARMED 1
842#define IPATH_SDMA_DISABLED 2
843#define IPATH_SDMA_LAYERBUF 3
844#define IPATH_SDMA_RUNNING 30
845#define IPATH_SDMA_SHUTDOWN 31
846
847/* bit combinations that correspond to abort states */
848#define IPATH_SDMA_ABORT_NONE 0
849#define IPATH_SDMA_ABORT_ABORTING (1UL << IPATH_SDMA_ABORTING)
850#define IPATH_SDMA_ABORT_DISARMED ((1UL << IPATH_SDMA_ABORTING) | \
851 (1UL << IPATH_SDMA_DISARMED))
852#define IPATH_SDMA_ABORT_DISABLED ((1UL << IPATH_SDMA_ABORTING) | \
853 (1UL << IPATH_SDMA_DISABLED))
854#define IPATH_SDMA_ABORT_ABORTED ((1UL << IPATH_SDMA_ABORTING) | \
855 (1UL << IPATH_SDMA_DISARMED) | (1UL << IPATH_SDMA_DISABLED))
856#define IPATH_SDMA_ABORT_MASK ((1UL<<IPATH_SDMA_ABORTING) | \
857 (1UL << IPATH_SDMA_DISARMED) | (1UL << IPATH_SDMA_DISABLED))
858
859#define IPATH_SDMA_BUF_NONE 0
860#define IPATH_SDMA_BUF_MASK (1UL<<IPATH_SDMA_LAYERBUF)
861
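/*
 * Hypothetical sketch (not from the driver) of how the bit combinations
 * above are meant to be tested; "status" stands in for the sdma_status
 * word (dd->ipath_sdma_status).
 */
static inline int ipath_example_sdma_fully_aborted(unsigned long status)
{
	return (status & IPATH_SDMA_ABORT_MASK) == IPATH_SDMA_ABORT_ABORTED;
}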
862/* Private data for file operations */
863struct ipath_filedata {
864 struct ipath_portdata *pd;
865 unsigned subport;
866 unsigned tidcursor;
867 struct ipath_user_sdma_queue *pq;
868};
869extern struct list_head ipath_dev_list;
870extern spinlock_t ipath_devs_lock;
871extern struct ipath_devdata *ipath_lookup(int unit);
872
873int ipath_init_chip(struct ipath_devdata *, int);
874int ipath_enable_wc(struct ipath_devdata *dd);
875void ipath_disable_wc(struct ipath_devdata *dd);
876int ipath_count_units(int *npresentp, int *nupp, int *maxportsp);
877void ipath_shutdown_device(struct ipath_devdata *);
878void ipath_clear_freeze(struct ipath_devdata *);
879
880struct file_operations;
881int ipath_cdev_init(int minor, char *name, const struct file_operations *fops,
882 struct cdev **cdevp, struct device **devp);
883void ipath_cdev_cleanup(struct cdev **cdevp,
884 struct device **devp);
885
886int ipath_diag_add(struct ipath_devdata *);
887void ipath_diag_remove(struct ipath_devdata *);
888
889extern wait_queue_head_t ipath_state_wait;
890
891int ipath_user_add(struct ipath_devdata *dd);
892void ipath_user_remove(struct ipath_devdata *dd);
893
894struct sk_buff *ipath_alloc_skb(struct ipath_devdata *dd, gfp_t);
895
896extern int ipath_diag_inuse;
897
898irqreturn_t ipath_intr(int irq, void *devid);
899int ipath_decode_err(struct ipath_devdata *dd, char *buf, size_t blen,
900 ipath_err_t err);
901#if __IPATH_INFO || __IPATH_DBG
902extern const char *ipath_ibcstatus_str[];
903#endif
904
905/* clean up any per-chip chip-specific stuff */
906void ipath_chip_cleanup(struct ipath_devdata *);
907/* clean up any chip type-specific stuff */
908void ipath_chip_done(void);
909
910void ipath_disarm_piobufs(struct ipath_devdata *, unsigned first,
911 unsigned cnt);
912void ipath_cancel_sends(struct ipath_devdata *, int);
913
914int ipath_create_rcvhdrq(struct ipath_devdata *, struct ipath_portdata *);
915void ipath_free_pddata(struct ipath_devdata *, struct ipath_portdata *);
916
917int ipath_parse_ushort(const char *str, unsigned short *valp);
918
919void ipath_kreceive(struct ipath_portdata *);
920int ipath_setrcvhdrsize(struct ipath_devdata *, unsigned);
921int ipath_reset_device(int);
922void ipath_get_faststats(unsigned long);
923int ipath_wait_linkstate(struct ipath_devdata *, u32, int);
924int ipath_set_linkstate(struct ipath_devdata *, u8);
925int ipath_set_mtu(struct ipath_devdata *, u16);
926int ipath_set_lid(struct ipath_devdata *, u32, u8);
927int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv);
928void ipath_enable_armlaunch(struct ipath_devdata *);
929void ipath_disable_armlaunch(struct ipath_devdata *);
930void ipath_hol_down(struct ipath_devdata *);
931void ipath_hol_up(struct ipath_devdata *);
932void ipath_hol_event(unsigned long);
933void ipath_toggle_rclkrls(struct ipath_devdata *);
934void ipath_sd7220_clr_ibpar(struct ipath_devdata *);
935void ipath_set_relock_poll(struct ipath_devdata *, int);
936void ipath_shutdown_relock_poll(struct ipath_devdata *);
937
938/* for use in system calls, where we want to know device type, etc. */
939#define port_fp(fp) ((struct ipath_filedata *)(fp)->private_data)->pd
940#define subport_fp(fp) \
941 ((struct ipath_filedata *)(fp)->private_data)->subport
942#define tidcursor_fp(fp) \
943 ((struct ipath_filedata *)(fp)->private_data)->tidcursor
944#define user_sdma_queue_fp(fp) \
945 ((struct ipath_filedata *)(fp)->private_data)->pq
946
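/*
 * Hypothetical usage sketch of the accessors above; the assumption that
 * subport 0 is the "master" open is illustrative, not stated in this file.
 */
static inline int ipath_example_fp_is_master(struct file *fp)
{
	return port_fp(fp) != NULL && subport_fp(fp) == 0;
}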
947/*
948 * values for ipath_flags
949 */
950 /* chip can report link latency (IB 1.2) */
951#define IPATH_HAS_LINK_LATENCY 0x1
952 /* The chip is up and initted */
953#define IPATH_INITTED 0x2
954 /* set if any user code has set kr_rcvhdrsize */
955#define IPATH_RCVHDRSZ_SET 0x4
956 /* The chip is present and valid for accesses */
957#define IPATH_PRESENT 0x8
958 /* HT link0 is only 8 bits wide, ignore upper byte crc
959 * errors, etc. */
960#define IPATH_8BIT_IN_HT0 0x10
961 /* HT link1 is only 8 bits wide, ignore upper byte crc
962 * errors, etc. */
963#define IPATH_8BIT_IN_HT1 0x20
964 /* The link is down */
965#define IPATH_LINKDOWN 0x40
966 /* The link level is up (0x11) */
967#define IPATH_LINKINIT 0x80
968 /* The link is in the armed (0x21) state */
969#define IPATH_LINKARMED 0x100
970 /* The link is in the active (0x31) state */
971#define IPATH_LINKACTIVE 0x200
972 /* link current state is unknown */
973#define IPATH_LINKUNK 0x400
974 /* Write combining flush needed for PIO */
975#define IPATH_PIO_FLUSH_WC 0x1000
976		/* chip does not DMA the receive header queue tail pointer to memory */
977#define IPATH_NODMA_RTAIL 0x2000
978 /* no IB cable, or no device on IB cable */
979#define IPATH_NOCABLE 0x4000
980 /* Supports port zero per packet receive interrupts via
981 * GPIO */
982#define IPATH_GPIO_INTR 0x8000
983 /* uses the coded 4byte TID, not 8 byte */
984#define IPATH_4BYTE_TID 0x10000
985		/* packet/word counters are 32 bit; otherwise those 4 counters
986		 * are 64 bit */
987#define IPATH_32BITCOUNTERS 0x20000
988 /* Interrupt register is 64 bits */
989#define IPATH_INTREG_64 0x40000
990 /* can miss port0 rx interrupts */
991#define IPATH_DISABLED 0x80000 /* administratively disabled */
992 /* Use GPIO interrupts for new counters */
993#define IPATH_GPIO_ERRINTRS 0x100000
994#define IPATH_SWAP_PIOBUFS 0x200000
995 /* Supports Send DMA */
996#define IPATH_HAS_SEND_DMA 0x400000
997 /* Supports Send Count (not just word count) in PBC */
998#define IPATH_HAS_PBC_CNT 0x800000
999 /* Suppress heartbeat, even if turning off loopback */
1000#define IPATH_NO_HRTBT 0x1000000
1001#define IPATH_HAS_THRESH_UPDATE 0x4000000
1002#define IPATH_HAS_MULT_IB_SPEED 0x8000000
1003#define IPATH_IB_AUTONEG_INPROG 0x10000000
1004#define IPATH_IB_AUTONEG_FAILED 0x20000000
1005 /* Linkdown-disable intentionally, Do not attempt to bring up */
1006#define IPATH_IB_LINK_DISABLED 0x40000000
1007#define IPATH_IB_FORCE_NOTIFY 0x80000000 /* force notify on next ib change */
1008
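/*
 * Hypothetical sketch (not from the driver): the flags above are plain bits
 * in dd->ipath_flags, tested with bitwise AND, e.g. to decide whether the
 * link can currently carry traffic.
 */
static inline int ipath_example_link_is_usable(const struct ipath_devdata *dd)
{
	return (dd->ipath_flags & (IPATH_PRESENT | IPATH_LINKACTIVE)) ==
		(IPATH_PRESENT | IPATH_LINKACTIVE);
}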
1009/* Bits in GPIO for the added interrupts */
1010#define IPATH_GPIO_PORT0_BIT 2
1011#define IPATH_GPIO_RXUVL_BIT 3
1012#define IPATH_GPIO_OVRUN_BIT 4
1013#define IPATH_GPIO_LLI_BIT 5
1014#define IPATH_GPIO_ERRINTR_MASK 0x38
1015
1016/* portdata flag bit offsets */
1017 /* waiting for a packet to arrive */
1018#define IPATH_PORT_WAITING_RCV 2
1019 /* master has not finished initializing */
1020#define IPATH_PORT_MASTER_UNINIT 4
1021 /* waiting for an urgent packet to arrive */
1022#define IPATH_PORT_WAITING_URG 5
1023
1024/* free up any allocated data at closes */
1025void ipath_free_data(struct ipath_portdata *dd);
1026u32 __iomem *ipath_getpiobuf(struct ipath_devdata *, u32, u32 *);
1027void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start,
1028 unsigned len, int avail);
1029void ipath_init_iba6110_funcs(struct ipath_devdata *);
1030void ipath_get_eeprom_info(struct ipath_devdata *);
1031int ipath_update_eeprom_log(struct ipath_devdata *dd);
1032void ipath_inc_eeprom_err(struct ipath_devdata *dd, u32 eidx, u32 incr);
1033u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg);
1034void ipath_disarm_senderrbufs(struct ipath_devdata *);
1035void ipath_force_pio_avail_update(struct ipath_devdata *);
1036void signal_ib_event(struct ipath_devdata *dd, enum ib_event_type ev);
1037
1038/*
1039 * Set LED override, only the two LSBs have "public" meaning, but
1040 * any non-zero value substitutes them for the Link and LinkTrain
1041 * LED states.
1042 */
1043#define IPATH_LED_PHYS 1 /* Physical (linktraining) GREEN LED */
1044#define IPATH_LED_LOG 2 /* Logical (link) YELLOW LED */
1045void ipath_set_led_override(struct ipath_devdata *dd, unsigned int val);
1046
1047/* send dma routines */
1048int setup_sdma(struct ipath_devdata *);
1049void teardown_sdma(struct ipath_devdata *);
1050void ipath_restart_sdma(struct ipath_devdata *);
1051void ipath_sdma_intr(struct ipath_devdata *);
1052int ipath_sdma_verbs_send(struct ipath_devdata *, struct ipath_sge_state *,
1053 u32, struct ipath_verbs_txreq *);
1054/* ipath_sdma_lock should be locked before calling this. */
1055int ipath_sdma_make_progress(struct ipath_devdata *dd);
1056
1057/* must be called under ipath_sdma_lock */
1058static inline u16 ipath_sdma_descq_freecnt(const struct ipath_devdata *dd)
1059{
1060 return dd->ipath_sdma_descq_cnt -
1061 (dd->ipath_sdma_descq_added - dd->ipath_sdma_descq_removed) -
1062 1 - dd->ipath_sdma_desc_nreserved;
1063}
1064
1065static inline void ipath_sdma_desc_reserve(struct ipath_devdata *dd, u16 cnt)
1066{
1067 dd->ipath_sdma_desc_nreserved += cnt;
1068}
1069
1070static inline void ipath_sdma_desc_unreserve(struct ipath_devdata *dd, u16 cnt)
1071{
1072 dd->ipath_sdma_desc_nreserved -= cnt;
1073}
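/*
 * Hypothetical sketch of the intended calling pattern, with ipath_sdma_lock
 * held: reserve descriptors only when enough are free, otherwise let the
 * caller back off and retry later.
 */
static inline int ipath_example_sdma_try_reserve(struct ipath_devdata *dd,
						 u16 ndesc)
{
	if (ipath_sdma_descq_freecnt(dd) < ndesc)
		return 0;
	ipath_sdma_desc_reserve(dd, ndesc);
	return 1;
}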
1074
1075/*
1076 * number of words used for the protocol header if not set by ipath_userinit()
1077 */
1078#define IPATH_DFLT_RCVHDRSIZE 9
1079
1080int ipath_get_user_pages(unsigned long, size_t, struct page **);
1081void ipath_release_user_pages(struct page **, size_t);
1082void ipath_release_user_pages_on_close(struct page **, size_t);
1083int ipath_eeprom_read(struct ipath_devdata *, u8, void *, int);
1084int ipath_eeprom_write(struct ipath_devdata *, u8, const void *, int);
1085int ipath_tempsense_read(struct ipath_devdata *, u8 regnum);
1086int ipath_tempsense_write(struct ipath_devdata *, u8 regnum, u8 data);
1087
1088/* these are used for the registers that vary with port */
1089void ipath_write_kreg_port(const struct ipath_devdata *, ipath_kreg,
1090 unsigned, u64);
1091
1092/*
1093 * We could have a single register get/put routine, that takes a group type,
1094 * but this is somewhat clearer and cleaner. It also gives us some error
1095 * checking. 64 bit register reads should always work, but are inefficient
1096 * on opteron (the northbridge always generates 2 separate HT 32 bit reads),
1097 * so we use kreg32 wherever possible. User register and counter register
1098 * reads are always 32 bit reads, so only one form of those routines.
1099 */
1100
1101/*
1102 * At the moment, none of the s-registers are writable, so no
1103 * ipath_write_sreg().
1104 */
1105
1106/**
1107 * ipath_read_ureg32 - read 32-bit virtualized per-port register
1108 * @dd: device
1109 * @regno: register number
1110 * @port: port number
1111 *
1112 * Return the contents of a register that is virtualized to be per port.
1113 * Returns 0 if the chip is not present or its registers are not mapped
1114 * (not distinguishable from valid contents at runtime).
1115 */
1116static inline u32 ipath_read_ureg32(const struct ipath_devdata *dd,
1117 ipath_ureg regno, int port)
1118{
1119 if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
1120 return 0;
1121
1122 return readl(regno + (u64 __iomem *)
1123 (dd->ipath_uregbase +
1124 (char __iomem *)dd->ipath_kregbase +
1125 dd->ipath_ureg_align * port));
1126}
1127
1128/**
1129 * ipath_write_ureg - write a 64-bit virtualized per-port register
1130 * @dd: device
1131 * @regno: register number
1132 * @value: value
1133 * @port: port
1134 *
1135 * Write the contents of a register that is virtualized to be per port.
1136 */
1137static inline void ipath_write_ureg(const struct ipath_devdata *dd,
1138 ipath_ureg regno, u64 value, int port)
1139{
1140 u64 __iomem *ubase = (u64 __iomem *)
1141 (dd->ipath_uregbase + (char __iomem *) dd->ipath_kregbase +
1142 dd->ipath_ureg_align * port);
1143 if (dd->ipath_kregbase)
1144 writeq(value, &ubase[regno]);
1145}
1146
1147static inline u32 ipath_read_kreg32(const struct ipath_devdata *dd,
1148 ipath_kreg regno)
1149{
1150 if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
1151 return -1;
1152 return readl((u32 __iomem *) & dd->ipath_kregbase[regno]);
1153}
1154
1155static inline u64 ipath_read_kreg64(const struct ipath_devdata *dd,
1156 ipath_kreg regno)
1157{
1158 if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
1159 return -1;
1160
1161 return readq(&dd->ipath_kregbase[regno]);
1162}
1163
1164static inline void ipath_write_kreg(const struct ipath_devdata *dd,
1165 ipath_kreg regno, u64 value)
1166{
1167 if (dd->ipath_kregbase)
1168 writeq(value, &dd->ipath_kregbase[regno]);
1169}
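/*
 * Hypothetical sketch (not from the driver) of the shadow read-modify-write
 * pattern described in the struct comments above: change the bits in the
 * in-memory shadow (here ipath_sendctrl) under its lock, then write the
 * whole shadow out to the chip.  "kr_sendctrl" stands in for the real
 * register index held in dd->ipath_kregs.
 */
static inline void ipath_example_set_sendctrl_bits(struct ipath_devdata *dd,
						   ipath_kreg kr_sendctrl,
						   u64 bits)
{
	unsigned long flags;

	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
	dd->ipath_sendctrl |= bits;
	ipath_write_kreg(dd, kr_sendctrl, dd->ipath_sendctrl);
	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
}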
1170
1171static inline u64 ipath_read_creg(const struct ipath_devdata *dd,
1172 ipath_sreg regno)
1173{
1174 if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
1175 return 0;
1176
1177 return readq(regno + (u64 __iomem *)
1178 (dd->ipath_cregbase +
1179 (char __iomem *)dd->ipath_kregbase));
1180}
1181
1182static inline u32 ipath_read_creg32(const struct ipath_devdata *dd,
1183 ipath_sreg regno)
1184{
1185 if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
1186 return 0;
1187 return readl(regno + (u64 __iomem *)
1188 (dd->ipath_cregbase +
1189 (char __iomem *)dd->ipath_kregbase));
1190}
1191
1192static inline void ipath_write_creg(const struct ipath_devdata *dd,
1193 ipath_creg regno, u64 value)
1194{
1195 if (dd->ipath_kregbase)
1196 writeq(value, regno + (u64 __iomem *)
1197 (dd->ipath_cregbase +
1198 (char __iomem *)dd->ipath_kregbase));
1199}
1200
1201static inline void ipath_clear_rcvhdrtail(const struct ipath_portdata *pd)
1202{
1203 *((u64 *) pd->port_rcvhdrtail_kvaddr) = 0ULL;
1204}
1205
1206static inline u32 ipath_get_rcvhdrtail(const struct ipath_portdata *pd)
1207{
1208 return (u32) le64_to_cpu(*((volatile __le64 *)
1209 pd->port_rcvhdrtail_kvaddr));
1210}
1211
1212static inline u32 ipath_get_hdrqtail(const struct ipath_portdata *pd)
1213{
1214 const struct ipath_devdata *dd = pd->port_dd;
1215 u32 hdrqtail;
1216
1217 if (dd->ipath_flags & IPATH_NODMA_RTAIL) {
1218 __le32 *rhf_addr;
1219 u32 seq;
1220
1221 rhf_addr = (__le32 *) pd->port_rcvhdrq +
1222 pd->port_head + dd->ipath_rhf_offset;
1223 seq = ipath_hdrget_seq(rhf_addr);
1224 hdrqtail = pd->port_head;
1225 if (seq == pd->port_seq_cnt)
1226 hdrqtail++;
1227 } else
1228 hdrqtail = ipath_get_rcvhdrtail(pd);
1229
1230 return hdrqtail;
1231}
1232
1233static inline u64 ipath_read_ireg(const struct ipath_devdata *dd, ipath_kreg r)
1234{
1235 return (dd->ipath_flags & IPATH_INTREG_64) ?
1236 ipath_read_kreg64(dd, r) : ipath_read_kreg32(dd, r);
1237}
1238
1239/*
1240 * From the contents of IBCStatus (or a saved copy), return the link state.
1241 * Report ACTIVE_DEFER as ACTIVE, because we treat them the same
1242 * everywhere (and should, for almost all purposes).
1243 */
1244static inline u32 ipath_ib_linkstate(struct ipath_devdata *dd, u64 ibcs)
1245{
1246 u32 state = (u32)(ibcs >> dd->ibcs_ls_shift) &
1247 INFINIPATH_IBCS_LINKSTATE_MASK;
1248 if (state == INFINIPATH_IBCS_L_STATE_ACT_DEFER)
1249 state = INFINIPATH_IBCS_L_STATE_ACTIVE;
1250 return state;
1251}
1252
1253/* from contents of IBCStatus (or a saved copy), return linktrainingstate */
1254static inline u32 ipath_ib_linktrstate(struct ipath_devdata *dd, u64 ibcs)
1255{
1256 return (u32)(ibcs >> INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
1257 dd->ibcs_lts_mask;
1258}
1259
1260/*
1261 * From the contents of IBCStatus (or a saved copy), return the logical link
1262 * state: a combination of link state and linktraining state (down, active,
1263 * init, arm, etc.).
1264 */
1265static inline u32 ipath_ib_state(struct ipath_devdata *dd, u64 ibcs)
1266{
1267 u32 ibs;
1268 ibs = (u32)(ibcs >> INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
1269 dd->ibcs_lts_mask;
1270 ibs |= (u32)(ibcs &
1271 (INFINIPATH_IBCS_LINKSTATE_MASK << dd->ibcs_ls_shift));
1272 return ibs;
1273}
1274
1275/*
1276 * sysfs interface.
1277 */
1278
1279struct device_driver;
1280
1281extern const char ib_ipath_version[];
1282
1283extern const struct attribute_group *ipath_driver_attr_groups[];
1284
1285int ipath_device_create_group(struct device *, struct ipath_devdata *);
1286void ipath_device_remove_group(struct device *, struct ipath_devdata *);
1287int ipath_expose_reset(struct device *);
1288
1289int ipath_init_ipathfs(void);
1290void ipath_exit_ipathfs(void);
1291int ipathfs_add_device(struct ipath_devdata *);
1292int ipathfs_remove_device(struct ipath_devdata *);
1293
1294/*
1295 * dma_addr wrappers - an address of all 0's is invalid for the hw
1296 */
1297dma_addr_t ipath_map_page(struct pci_dev *, struct page *, unsigned long,
1298 size_t, int);
1299dma_addr_t ipath_map_single(struct pci_dev *, void *, size_t, int);
1300const char *ipath_get_unit_name(int unit);
1301
1302/*
1303 * Flush write combining store buffers (if present) and perform a write
1304 * barrier.
1305 */
1306#if defined(CONFIG_X86_64)
1307#define ipath_flush_wc() asm volatile("sfence" ::: "memory")
1308#else
1309#define ipath_flush_wc() wmb()
1310#endif
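/*
 * Hypothetical sketch of why the flush matters: after storing a packet into
 * a write-combining mapped PIO buffer, drain the WC store buffers so the
 * chip sees the data before any subsequent trigger write.
 */
static inline void ipath_example_pio_copy(u32 __iomem *piobuf,
					  const u32 *hdr, unsigned nwords)
{
	unsigned i;

	for (i = 0; i < nwords; i++)
		writel(hdr[i], piobuf + i);
	ipath_flush_wc();
}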
1311
1312extern unsigned ipath_debug; /* debugging bit mask */
1313extern unsigned ipath_linkrecovery;
1314extern unsigned ipath_mtu4096;
1315extern struct mutex ipath_mutex;
1316
1317#define IPATH_DRV_NAME "ib_ipath"
1318#define IPATH_MAJOR 233
1319#define IPATH_USER_MINOR_BASE 0
1320#define IPATH_DIAGPKT_MINOR 127
1321#define IPATH_DIAG_MINOR_BASE 129
1322#define IPATH_NMINORS 255
1323
1324#define ipath_dev_err(dd,fmt,...) \
1325 do { \
1326 const struct ipath_devdata *__dd = (dd); \
1327 if (__dd->pcidev) \
1328 dev_err(&__dd->pcidev->dev, "%s: " fmt, \
1329 ipath_get_unit_name(__dd->ipath_unit), \
1330 ##__VA_ARGS__); \
1331 else \
1332 printk(KERN_ERR IPATH_DRV_NAME ": %s: " fmt, \
1333 ipath_get_unit_name(__dd->ipath_unit), \
1334 ##__VA_ARGS__); \
1335 } while (0)
1336
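/*
 * Hypothetical usage sketch: ipath_dev_err() prints through the PCI device
 * when one is attached and falls back to printk() otherwise.
 */
static inline void ipath_example_report_freeze(struct ipath_devdata *dd)
{
	ipath_dev_err(dd, "unit %d went into freeze mode\n", dd->ipath_unit);
}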
1337#if _IPATH_DEBUGGING
1338
1339# define __IPATH_DBG_WHICH(which,fmt,...) \
1340 do { \
1341 if (unlikely(ipath_debug & (which))) \
1342 printk(KERN_DEBUG IPATH_DRV_NAME ": %s: " fmt, \
1343 __func__,##__VA_ARGS__); \
1344 } while(0)
1345
1346# define ipath_dbg(fmt,...) \
1347 __IPATH_DBG_WHICH(__IPATH_DBG,fmt,##__VA_ARGS__)
1348# define ipath_cdbg(which,fmt,...) \
1349 __IPATH_DBG_WHICH(__IPATH_##which##DBG,fmt,##__VA_ARGS__)
1350
1351#else /* ! _IPATH_DEBUGGING */
1352
1353# define ipath_dbg(fmt,...)
1354# define ipath_cdbg(which,fmt,...)
1355
1356#endif /* _IPATH_DEBUGGING */
1357
1358/*
1359 * this is used for formatting hw error messages...
1360 */
1361struct ipath_hwerror_msgs {
1362 u64 mask;
1363 const char *msg;
1364};
1365
1366#define INFINIPATH_HWE_MSG(a, b) { .mask = INFINIPATH_HWE_##a, .msg = b }
1367
1368/* in ipath_intr.c... */
1369void ipath_format_hwerrors(u64 hwerrs,
1370 const struct ipath_hwerror_msgs *hwerrmsgs,
1371 size_t nhwerrmsgs,
1372 char *msg, size_t lmsg);
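/*
 * Hypothetical sketch of how a chip-specific file uses the above: a table of
 * mask/message pairs (the masks here are placeholders, not real hardware
 * error bits) that ipath_format_hwerrors() turns into one message string.
 */
static const struct ipath_hwerror_msgs ipath_example_hwerror_msgs[] = {
	{ .mask = 1ULL << 0, .msg = "example: PLL failed to lock" },
	{ .mask = 1ULL << 1, .msg = "example: memory BIST failed" },
};

static inline void ipath_example_report_hwerrors(u64 hwerrs, char *buf,
						 size_t len)
{
	ipath_format_hwerrors(hwerrs, ipath_example_hwerror_msgs,
			      ARRAY_SIZE(ipath_example_hwerror_msgs),
			      buf, len);
}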
1373
1374#endif /* _IPATH_KERNEL_H */
diff --git a/drivers/staging/rdma/ipath/ipath_keys.c b/drivers/staging/rdma/ipath/ipath_keys.c
deleted file mode 100644
index c0e933fec218..000000000000
--- a/drivers/staging/rdma/ipath/ipath_keys.c
+++ /dev/null
@@ -1,270 +0,0 @@
1/*
2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <asm/io.h>
35
36#include "ipath_verbs.h"
37#include "ipath_kernel.h"
38
39/**
40 * ipath_alloc_lkey - allocate an lkey
41 * @rkt: lkey table in which to allocate the lkey
42 * @mr: memory region that this lkey protects
43 *
44 * Returns 1 if successful, otherwise returns 0.
45 */
46
47int ipath_alloc_lkey(struct ipath_lkey_table *rkt, struct ipath_mregion *mr)
48{
49 unsigned long flags;
50 u32 r;
51 u32 n;
52 int ret;
53
54 spin_lock_irqsave(&rkt->lock, flags);
55
56 /* Find the next available LKEY */
57 r = n = rkt->next;
58 for (;;) {
59 if (rkt->table[r] == NULL)
60 break;
61 r = (r + 1) & (rkt->max - 1);
62 if (r == n) {
63 spin_unlock_irqrestore(&rkt->lock, flags);
64 ipath_dbg("LKEY table full\n");
65 ret = 0;
66 goto bail;
67 }
68 }
69 rkt->next = (r + 1) & (rkt->max - 1);
70 /*
71 * Make sure lkey is never zero which is reserved to indicate an
72 * unrestricted LKEY.
73 */
74 rkt->gen++;
75 mr->lkey = (r << (32 - ib_ipath_lkey_table_size)) |
76 ((((1 << (24 - ib_ipath_lkey_table_size)) - 1) & rkt->gen)
77 << 8);
78 if (mr->lkey == 0) {
79 mr->lkey |= 1 << 8;
80 rkt->gen++;
81 }
82 rkt->table[r] = mr;
83 spin_unlock_irqrestore(&rkt->lock, flags);
84
85 ret = 1;
86
87bail:
88 return ret;
89}
90
91/**
92 * ipath_free_lkey - free an lkey
93 * @rkt: table from which to free the lkey
94 * @lkey: lkey id to free
95 */
96void ipath_free_lkey(struct ipath_lkey_table *rkt, u32 lkey)
97{
98 unsigned long flags;
99 u32 r;
100
101 if (lkey == 0)
102 return;
103 r = lkey >> (32 - ib_ipath_lkey_table_size);
104 spin_lock_irqsave(&rkt->lock, flags);
105 rkt->table[r] = NULL;
106 spin_unlock_irqrestore(&rkt->lock, flags);
107}
108
109/**
110 * ipath_lkey_ok - check IB SGE for validity and initialize
111 * @qp: queue pair whose lkey table the SGE is checked against
112 * @isge: outgoing internal SGE
113 * @sge: SGE to check
114 * @acc: access flags
115 *
116 * Return 1 if valid and successful, otherwise returns 0.
117 *
118 * Check the IB SGE for validity and initialize our internal version
119 * of it.
120 */
121int ipath_lkey_ok(struct ipath_qp *qp, struct ipath_sge *isge,
122 struct ib_sge *sge, int acc)
123{
124 struct ipath_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
125 struct ipath_mregion *mr;
126 unsigned n, m;
127 size_t off;
128 int ret;
129
130 /*
131 * We use LKEY == zero for kernel virtual addresses
132 * (see ipath_get_dma_mr and ipath_dma.c).
133 */
134 if (sge->lkey == 0) {
135 /* always a kernel port, no locking needed */
136 struct ipath_pd *pd = to_ipd(qp->ibqp.pd);
137
138 if (pd->user) {
139 ret = 0;
140 goto bail;
141 }
142 isge->mr = NULL;
143 isge->vaddr = (void *) sge->addr;
144 isge->length = sge->length;
145 isge->sge_length = sge->length;
146 ret = 1;
147 goto bail;
148 }
149 mr = rkt->table[(sge->lkey >> (32 - ib_ipath_lkey_table_size))];
150 if (unlikely(mr == NULL || mr->lkey != sge->lkey ||
151 qp->ibqp.pd != mr->pd)) {
152 ret = 0;
153 goto bail;
154 }
155
156 off = sge->addr - mr->user_base;
157 if (unlikely(sge->addr < mr->user_base ||
158 off + sge->length > mr->length ||
159 (mr->access_flags & acc) != acc)) {
160 ret = 0;
161 goto bail;
162 }
163
164 off += mr->offset;
165 m = 0;
166 n = 0;
167 while (off >= mr->map[m]->segs[n].length) {
168 off -= mr->map[m]->segs[n].length;
169 n++;
170 if (n >= IPATH_SEGSZ) {
171 m++;
172 n = 0;
173 }
174 }
175 isge->mr = mr;
176 isge->vaddr = mr->map[m]->segs[n].vaddr + off;
177 isge->length = mr->map[m]->segs[n].length - off;
178 isge->sge_length = sge->length;
179 isge->m = m;
180 isge->n = n;
181
182 ret = 1;
183
184bail:
185 return ret;
186}
187
188/**
189 * ipath_rkey_ok - check the IB virtual address, length, and RKEY
190 * @qp: queue pair whose rkey table the virtual address is checked against
191 * @ss: SGE state
192 * @len: length of data
193 * @vaddr: virtual address to place data
194 * @rkey: rkey to check
195 * @acc: access flags
196 *
197 * Return 1 if successful, otherwise 0.
198 */
199int ipath_rkey_ok(struct ipath_qp *qp, struct ipath_sge_state *ss,
200 u32 len, u64 vaddr, u32 rkey, int acc)
201{
202 struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
203 struct ipath_lkey_table *rkt = &dev->lk_table;
204 struct ipath_sge *sge = &ss->sge;
205 struct ipath_mregion *mr;
206 unsigned n, m;
207 size_t off;
208 int ret;
209
210 /*
211 * We use RKEY == zero for kernel virtual addresses
212 * (see ipath_get_dma_mr and ipath_dma.c).
213 */
214 if (rkey == 0) {
215 /* always a kernel port, no locking needed */
216 struct ipath_pd *pd = to_ipd(qp->ibqp.pd);
217
218 if (pd->user) {
219 ret = 0;
220 goto bail;
221 }
222 sge->mr = NULL;
223 sge->vaddr = (void *) vaddr;
224 sge->length = len;
225 sge->sge_length = len;
226 ss->sg_list = NULL;
227 ss->num_sge = 1;
228 ret = 1;
229 goto bail;
230 }
231
232 mr = rkt->table[(rkey >> (32 - ib_ipath_lkey_table_size))];
233 if (unlikely(mr == NULL || mr->lkey != rkey ||
234 qp->ibqp.pd != mr->pd)) {
235 ret = 0;
236 goto bail;
237 }
238
239 off = vaddr - mr->iova;
240 if (unlikely(vaddr < mr->iova || off + len > mr->length ||
241 (mr->access_flags & acc) == 0)) {
242 ret = 0;
243 goto bail;
244 }
245
246 off += mr->offset;
247 m = 0;
248 n = 0;
249 while (off >= mr->map[m]->segs[n].length) {
250 off -= mr->map[m]->segs[n].length;
251 n++;
252 if (n >= IPATH_SEGSZ) {
253 m++;
254 n = 0;
255 }
256 }
257 sge->mr = mr;
258 sge->vaddr = mr->map[m]->segs[n].vaddr + off;
259 sge->length = mr->map[m]->segs[n].length - off;
260 sge->sge_length = len;
261 sge->m = m;
262 sge->n = n;
263 ss->sg_list = NULL;
264 ss->num_sge = 1;
265
266 ret = 1;
267
268bail:
269 return ret;
270}
diff --git a/drivers/staging/rdma/ipath/ipath_mad.c b/drivers/staging/rdma/ipath/ipath_mad.c
deleted file mode 100644
index ad3a926ab3c5..000000000000
--- a/drivers/staging/rdma/ipath/ipath_mad.c
+++ /dev/null
@@ -1,1521 +0,0 @@
1/*
2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <rdma/ib_smi.h>
35#include <rdma/ib_pma.h>
36
37#include "ipath_kernel.h"
38#include "ipath_verbs.h"
39#include "ipath_common.h"
40
41#define IB_SMP_UNSUP_VERSION cpu_to_be16(0x0004)
42#define IB_SMP_UNSUP_METHOD cpu_to_be16(0x0008)
43#define IB_SMP_UNSUP_METH_ATTR cpu_to_be16(0x000C)
44#define IB_SMP_INVALID_FIELD cpu_to_be16(0x001C)
45
46static int reply(struct ib_smp *smp)
47{
48 /*
49 * The verbs framework will handle the directed/LID route
50 * packet changes.
51 */
52 smp->method = IB_MGMT_METHOD_GET_RESP;
53 if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
54 smp->status |= IB_SMP_DIRECTION;
55 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
56}
57
58static int recv_subn_get_nodedescription(struct ib_smp *smp,
59 struct ib_device *ibdev)
60{
61 if (smp->attr_mod)
62 smp->status |= IB_SMP_INVALID_FIELD;
63
64 memcpy(smp->data, ibdev->node_desc, sizeof(smp->data));
65
66 return reply(smp);
67}
68
69struct nodeinfo {
70 u8 base_version;
71 u8 class_version;
72 u8 node_type;
73 u8 num_ports;
74 __be64 sys_guid;
75 __be64 node_guid;
76 __be64 port_guid;
77 __be16 partition_cap;
78 __be16 device_id;
79 __be32 revision;
80 u8 local_port_num;
81 u8 vendor_id[3];
82} __attribute__ ((packed));
83
84static int recv_subn_get_nodeinfo(struct ib_smp *smp,
85 struct ib_device *ibdev, u8 port)
86{
87 struct nodeinfo *nip = (struct nodeinfo *)&smp->data;
88 struct ipath_devdata *dd = to_idev(ibdev)->dd;
89 u32 vendor, majrev, minrev;
90
91 /* GUID 0 is illegal */
92 if (smp->attr_mod || (dd->ipath_guid == 0))
93 smp->status |= IB_SMP_INVALID_FIELD;
94
95 nip->base_version = 1;
96 nip->class_version = 1;
97 nip->node_type = 1; /* channel adapter */
98 /*
99 * XXX The num_ports value will need a layer function to get
100 * the value if we ever have more than one IB port on a chip.
101 * We will also need to get the GUID for the port.
102 */
103 nip->num_ports = ibdev->phys_port_cnt;
104 /* This is already in network order */
105 nip->sys_guid = to_idev(ibdev)->sys_image_guid;
106 nip->node_guid = dd->ipath_guid;
107 nip->port_guid = dd->ipath_guid;
108 nip->partition_cap = cpu_to_be16(ipath_get_npkeys(dd));
109 nip->device_id = cpu_to_be16(dd->ipath_deviceid);
110 majrev = dd->ipath_majrev;
111 minrev = dd->ipath_minrev;
112 nip->revision = cpu_to_be32((majrev << 16) | minrev);
113 nip->local_port_num = port;
114 vendor = dd->ipath_vendorid;
115 nip->vendor_id[0] = IPATH_SRC_OUI_1;
116 nip->vendor_id[1] = IPATH_SRC_OUI_2;
117 nip->vendor_id[2] = IPATH_SRC_OUI_3;
118
119 return reply(smp);
120}
121
122static int recv_subn_get_guidinfo(struct ib_smp *smp,
123 struct ib_device *ibdev)
124{
125 u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
126 __be64 *p = (__be64 *) smp->data;
127
128 /* 32 blocks of 8 64-bit GUIDs per block */
129
130 memset(smp->data, 0, sizeof(smp->data));
131
132 /*
133 * We only support one GUID for now. If this changes, the
134 * portinfo.guid_cap field needs to be updated too.
135 */
136 if (startgx == 0) {
137 __be64 g = to_idev(ibdev)->dd->ipath_guid;
138 if (g == 0)
139 /* GUID 0 is illegal */
140 smp->status |= IB_SMP_INVALID_FIELD;
141 else
142 /* The first is a copy of the read-only HW GUID. */
143 *p = g;
144 } else
145 smp->status |= IB_SMP_INVALID_FIELD;
146
147 return reply(smp);
148}
149
150static void set_link_width_enabled(struct ipath_devdata *dd, u32 w)
151{
152 (void) dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_LWID_ENB, w);
153}
154
155static void set_link_speed_enabled(struct ipath_devdata *dd, u32 s)
156{
157 (void) dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_SPD_ENB, s);
158}
159
160static int get_overrunthreshold(struct ipath_devdata *dd)
161{
162 return (dd->ipath_ibcctrl >>
163 INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
164 INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
165}
166
167/**
168 * set_overrunthreshold - set the overrun threshold
169 * @dd: the infinipath device
170 * @n: the new threshold
171 *
172 * Note that this will only take effect when the link state changes.
173 */
174static int set_overrunthreshold(struct ipath_devdata *dd, unsigned n)
175{
176 unsigned v;
177
178 v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
179 INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
180 if (v != n) {
181 dd->ipath_ibcctrl &=
182 ~(INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK <<
183 INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT);
184 dd->ipath_ibcctrl |=
185 (u64) n << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT;
186 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
187 dd->ipath_ibcctrl);
188 }
189 return 0;
190}
191
192static int get_phyerrthreshold(struct ipath_devdata *dd)
193{
194 return (dd->ipath_ibcctrl >>
195 INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
196 INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
197}
198
199/**
200 * set_phyerrthreshold - set the physical error threshold
201 * @dd: the infinipath device
202 * @n: the new threshold
203 *
204 * Note that this will only take effect when the link state changes.
205 */
206static int set_phyerrthreshold(struct ipath_devdata *dd, unsigned n)
207{
208 unsigned v;
209
210 v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
211 INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
212 if (v != n) {
213 dd->ipath_ibcctrl &=
214 ~(INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK <<
215 INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT);
216 dd->ipath_ibcctrl |=
217 (u64) n << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT;
218 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
219 dd->ipath_ibcctrl);
220 }
221 return 0;
222}
223
224/**
225 * get_linkdowndefaultstate - get the default linkdown state
226 * @dd: the infinipath device
227 *
228 * Returns zero if the default is POLL, 1 if the default is SLEEP.
229 */
230static int get_linkdowndefaultstate(struct ipath_devdata *dd)
231{
232 return !!(dd->ipath_ibcctrl & INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE);
233}
234
235static int recv_subn_get_portinfo(struct ib_smp *smp,
236 struct ib_device *ibdev, u8 port)
237{
238 struct ipath_ibdev *dev;
239 struct ipath_devdata *dd;
240 struct ib_port_info *pip = (struct ib_port_info *)smp->data;
241 u16 lid;
242 u8 ibcstat;
243 u8 mtu;
244 int ret;
245
246 if (be32_to_cpu(smp->attr_mod) > ibdev->phys_port_cnt) {
247 smp->status |= IB_SMP_INVALID_FIELD;
248 ret = reply(smp);
249 goto bail;
250 }
251
252 dev = to_idev(ibdev);
253 dd = dev->dd;
254
255 /* Clear all fields. Only set the non-zero fields. */
256 memset(smp->data, 0, sizeof(smp->data));
257
258 /* Only return the mkey if the protection field allows it. */
259 if (smp->method == IB_MGMT_METHOD_SET || dev->mkey == smp->mkey ||
260 dev->mkeyprot == 0)
261 pip->mkey = dev->mkey;
262 pip->gid_prefix = dev->gid_prefix;
263 lid = dd->ipath_lid;
264 pip->lid = lid ? cpu_to_be16(lid) : IB_LID_PERMISSIVE;
265 pip->sm_lid = cpu_to_be16(dev->sm_lid);
266 pip->cap_mask = cpu_to_be32(dev->port_cap_flags);
267 /* pip->diag_code; */
268 pip->mkey_lease_period = cpu_to_be16(dev->mkey_lease_period);
269 pip->local_port_num = port;
270 pip->link_width_enabled = dd->ipath_link_width_enabled;
271 pip->link_width_supported = dd->ipath_link_width_supported;
272 pip->link_width_active = dd->ipath_link_width_active;
273 pip->linkspeed_portstate = dd->ipath_link_speed_supported << 4;
274 ibcstat = dd->ipath_lastibcstat;
275 /* map LinkState to IB portinfo values. */
276 pip->linkspeed_portstate |= ipath_ib_linkstate(dd, ibcstat) + 1;
277
278 pip->portphysstate_linkdown =
279 (ipath_cvt_physportstate[ibcstat & dd->ibcs_lts_mask] << 4) |
280 (get_linkdowndefaultstate(dd) ? 1 : 2);
281 pip->mkeyprot_resv_lmc = (dev->mkeyprot << 6) | dd->ipath_lmc;
282 pip->linkspeedactive_enabled = (dd->ipath_link_speed_active << 4) |
283 dd->ipath_link_speed_enabled;
284 switch (dd->ipath_ibmtu) {
285 case 4096:
286 mtu = IB_MTU_4096;
287 break;
288 case 2048:
289 mtu = IB_MTU_2048;
290 break;
291 case 1024:
292 mtu = IB_MTU_1024;
293 break;
294 case 512:
295 mtu = IB_MTU_512;
296 break;
297 case 256:
298 mtu = IB_MTU_256;
299 break;
300 default: /* oops, something is wrong */
301 mtu = IB_MTU_2048;
302 break;
303 }
304 pip->neighbormtu_mastersmsl = (mtu << 4) | dev->sm_sl;
305 pip->vlcap_inittype = 0x10; /* VLCap = VL0, InitType = 0 */
306 pip->vl_high_limit = dev->vl_high_limit;
307 /* pip->vl_arb_high_cap; // only one VL */
308 /* pip->vl_arb_low_cap; // only one VL */
309 /* InitTypeReply = 0 */
310 /* our mtu cap depends on whether 4K MTU enabled or not */
311 pip->inittypereply_mtucap = ipath_mtu4096 ? IB_MTU_4096 : IB_MTU_2048;
312 /* HCAs ignore VLStallCount and HOQLife */
313 /* pip->vlstallcnt_hoqlife; */
314 pip->operationalvl_pei_peo_fpi_fpo = 0x10; /* OVLs = 1 */
315 pip->mkey_violations = cpu_to_be16(dev->mkey_violations);
316 /* P_KeyViolations are counted by hardware. */
317 pip->pkey_violations =
318 cpu_to_be16((ipath_get_cr_errpkey(dd) -
319 dev->z_pkey_violations) & 0xFFFF);
320 pip->qkey_violations = cpu_to_be16(dev->qkey_violations);
321 /* Only the hardware GUID is supported for now */
322 pip->guid_cap = 1;
323 pip->clientrereg_resv_subnetto = dev->subnet_timeout;
324 /* 32.768 usec. response time (guessing) */
325 pip->resv_resptimevalue = 3;
326 pip->localphyerrors_overrunerrors =
327 (get_phyerrthreshold(dd) << 4) |
328 get_overrunthreshold(dd);
329 /* pip->max_credit_hint; */
330 if (dev->port_cap_flags & IB_PORT_LINK_LATENCY_SUP) {
331 u32 v;
332
333 v = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_LINKLATENCY);
334 pip->link_roundtrip_latency[0] = v >> 16;
335 pip->link_roundtrip_latency[1] = v >> 8;
336 pip->link_roundtrip_latency[2] = v;
337 }
338
339 ret = reply(smp);
340
341bail:
342 return ret;
343}
344
345/**
346 * get_pkeys - return the PKEY table for port 0
347 * @dd: the infinipath device
348 * @pkeys: the pkey table is placed here
349 */
350static int get_pkeys(struct ipath_devdata *dd, u16 * pkeys)
351{
352 /* always a kernel port, no locking needed */
353 struct ipath_portdata *pd = dd->ipath_pd[0];
354
355 memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys));
356
357 return 0;
358}
359
360static int recv_subn_get_pkeytable(struct ib_smp *smp,
361 struct ib_device *ibdev)
362{
363 u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
364 u16 *p = (u16 *) smp->data;
365 __be16 *q = (__be16 *) smp->data;
366
367 /* 64 blocks of 32 16-bit P_Key entries */
368
369 memset(smp->data, 0, sizeof(smp->data));
370 if (startpx == 0) {
371 struct ipath_ibdev *dev = to_idev(ibdev);
372 unsigned i, n = ipath_get_npkeys(dev->dd);
373
374 get_pkeys(dev->dd, p);
375
376 for (i = 0; i < n; i++)
377 q[i] = cpu_to_be16(p[i]);
378 } else
379 smp->status |= IB_SMP_INVALID_FIELD;
380
381 return reply(smp);
382}
383
384static int recv_subn_set_guidinfo(struct ib_smp *smp,
385 struct ib_device *ibdev)
386{
387 /* The only GUID we support is the first read-only entry. */
388 return recv_subn_get_guidinfo(smp, ibdev);
389}
390
391/**
392 * set_linkdowndefaultstate - set the default linkdown state
393 * @dd: the infinipath device
394 * @sleep: the new state
395 *
396 * Note that this will only take effect when the link state changes.
397 */
398static int set_linkdowndefaultstate(struct ipath_devdata *dd, int sleep)
399{
400 if (sleep)
401 dd->ipath_ibcctrl |= INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
402 else
403 dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
404 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
405 dd->ipath_ibcctrl);
406 return 0;
407}
408
409/**
410 * recv_subn_set_portinfo - set port information
411 * @smp: the incoming SM packet
412 * @ibdev: the infiniband device
413 * @port: the port on the device
414 *
415 * Set Portinfo (see ch. 14.2.5.6).
416 */
417static int recv_subn_set_portinfo(struct ib_smp *smp,
418 struct ib_device *ibdev, u8 port)
419{
420 struct ib_port_info *pip = (struct ib_port_info *)smp->data;
421 struct ib_event event;
422 struct ipath_ibdev *dev;
423 struct ipath_devdata *dd;
424 char clientrereg = 0;
425 u16 lid, smlid;
426 u8 lwe;
427 u8 lse;
428 u8 state;
429 u16 lstate;
430 u32 mtu;
431 int ret, ore;
432
433 if (be32_to_cpu(smp->attr_mod) > ibdev->phys_port_cnt)
434 goto err;
435
436 dev = to_idev(ibdev);
437 dd = dev->dd;
438 event.device = ibdev;
439 event.element.port_num = port;
440
441 dev->mkey = pip->mkey;
442 dev->gid_prefix = pip->gid_prefix;
443 dev->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period);
444
445 lid = be16_to_cpu(pip->lid);
446 if (dd->ipath_lid != lid ||
447 dd->ipath_lmc != (pip->mkeyprot_resv_lmc & 7)) {
448 /* Must be a valid unicast LID address. */
449 if (lid == 0 || lid >= IPATH_MULTICAST_LID_BASE)
450 goto err;
451 ipath_set_lid(dd, lid, pip->mkeyprot_resv_lmc & 7);
452 event.event = IB_EVENT_LID_CHANGE;
453 ib_dispatch_event(&event);
454 }
455
456 smlid = be16_to_cpu(pip->sm_lid);
457 if (smlid != dev->sm_lid) {
458 /* Must be a valid unicast LID address. */
459 if (smlid == 0 || smlid >= IPATH_MULTICAST_LID_BASE)
460 goto err;
461 dev->sm_lid = smlid;
462 event.event = IB_EVENT_SM_CHANGE;
463 ib_dispatch_event(&event);
464 }
465
466 /* Allow 1x or 4x to be set (see 14.2.6.6). */
467 lwe = pip->link_width_enabled;
468 if (lwe) {
469 if (lwe == 0xFF)
470 lwe = dd->ipath_link_width_supported;
471 else if (lwe >= 16 || (lwe & ~dd->ipath_link_width_supported))
472 goto err;
473 set_link_width_enabled(dd, lwe);
474 }
475
476 	/* Allow 2.5 or 5.0 Gb/s. */
477 lse = pip->linkspeedactive_enabled & 0xF;
478 if (lse) {
479 if (lse == 15)
480 lse = dd->ipath_link_speed_supported;
481 else if (lse >= 8 || (lse & ~dd->ipath_link_speed_supported))
482 goto err;
483 set_link_speed_enabled(dd, lse);
484 }
485
486 /* Set link down default state. */
487 switch (pip->portphysstate_linkdown & 0xF) {
488 case 0: /* NOP */
489 break;
490 case 1: /* SLEEP */
491 if (set_linkdowndefaultstate(dd, 1))
492 goto err;
493 break;
494 case 2: /* POLL */
495 if (set_linkdowndefaultstate(dd, 0))
496 goto err;
497 break;
498 default:
499 goto err;
500 }
501
502 dev->mkeyprot = pip->mkeyprot_resv_lmc >> 6;
503 dev->vl_high_limit = pip->vl_high_limit;
504
505 switch ((pip->neighbormtu_mastersmsl >> 4) & 0xF) {
506 case IB_MTU_256:
507 mtu = 256;
508 break;
509 case IB_MTU_512:
510 mtu = 512;
511 break;
512 case IB_MTU_1024:
513 mtu = 1024;
514 break;
515 case IB_MTU_2048:
516 mtu = 2048;
517 break;
518 case IB_MTU_4096:
519 if (!ipath_mtu4096)
520 goto err;
521 mtu = 4096;
522 break;
523 default:
524 /* XXX We have already partially updated our state! */
525 goto err;
526 }
527 ipath_set_mtu(dd, mtu);
528
529 dev->sm_sl = pip->neighbormtu_mastersmsl & 0xF;
530
531 /* We only support VL0 */
532 if (((pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF) > 1)
533 goto err;
534
535 if (pip->mkey_violations == 0)
536 dev->mkey_violations = 0;
537
538 /*
539 * Hardware counter can't be reset so snapshot and subtract
540 * later.
541 */
542 if (pip->pkey_violations == 0)
543 dev->z_pkey_violations = ipath_get_cr_errpkey(dd);
544
545 if (pip->qkey_violations == 0)
546 dev->qkey_violations = 0;
547
548 ore = pip->localphyerrors_overrunerrors;
549 if (set_phyerrthreshold(dd, (ore >> 4) & 0xF))
550 goto err;
551
552 if (set_overrunthreshold(dd, (ore & 0xF)))
553 goto err;
554
555 dev->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;
556
557 if (pip->clientrereg_resv_subnetto & 0x80) {
558 clientrereg = 1;
559 event.event = IB_EVENT_CLIENT_REREGISTER;
560 ib_dispatch_event(&event);
561 }
562
563 /*
564 * Do the port state change now that the other link parameters
565 * have been set.
566 * Changing the port physical state only makes sense if the link
567 * is down or is being set to down.
568 */
569 state = pip->linkspeed_portstate & 0xF;
570 lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
571 if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
572 goto err;
573
574 /*
575 * Only state changes of DOWN, ARM, and ACTIVE are valid
576 * and must be in the correct state to take effect (see 7.2.6).
577 */
578 switch (state) {
579 case IB_PORT_NOP:
580 if (lstate == 0)
581 break;
582 /* FALLTHROUGH */
583 case IB_PORT_DOWN:
584 if (lstate == 0)
585 lstate = IPATH_IB_LINKDOWN_ONLY;
586 else if (lstate == 1)
587 lstate = IPATH_IB_LINKDOWN_SLEEP;
588 else if (lstate == 2)
589 lstate = IPATH_IB_LINKDOWN;
590 else if (lstate == 3)
591 lstate = IPATH_IB_LINKDOWN_DISABLE;
592 else
593 goto err;
594 ipath_set_linkstate(dd, lstate);
595 if (lstate == IPATH_IB_LINKDOWN_DISABLE) {
596 ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
597 goto done;
598 }
599 ipath_wait_linkstate(dd, IPATH_LINKINIT | IPATH_LINKARMED |
600 IPATH_LINKACTIVE, 1000);
601 break;
602 case IB_PORT_ARMED:
603 ipath_set_linkstate(dd, IPATH_IB_LINKARM);
604 break;
605 case IB_PORT_ACTIVE:
606 ipath_set_linkstate(dd, IPATH_IB_LINKACTIVE);
607 break;
608 default:
609 /* XXX We have already partially updated our state! */
610 goto err;
611 }
612
613 ret = recv_subn_get_portinfo(smp, ibdev, port);
614
615 if (clientrereg)
616 pip->clientrereg_resv_subnetto |= 0x80;
617
618 goto done;
619
620err:
621 smp->status |= IB_SMP_INVALID_FIELD;
622 ret = recv_subn_get_portinfo(smp, ibdev, port);
623
624done:
625 return ret;
626}
627
628/**
629 * rm_pkey - decrement the reference count for the given PKEY
630 * @dd: the infinipath device
631 * @key: the PKEY index
632 *
633 * Return true if this was the last reference and the hardware table entry
634 * needs to be changed.
635 */
636static int rm_pkey(struct ipath_devdata *dd, u16 key)
637{
638 int i;
639 int ret;
640
641 for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
642 if (dd->ipath_pkeys[i] != key)
643 continue;
644 if (atomic_dec_and_test(&dd->ipath_pkeyrefs[i])) {
645 dd->ipath_pkeys[i] = 0;
646 ret = 1;
647 goto bail;
648 }
649 break;
650 }
651
652 ret = 0;
653
654bail:
655 return ret;
656}
657
658/**
659 * add_pkey - add the given PKEY to the hardware table
660 * @dd: the infinipath device
661 * @key: the PKEY
662 *
663 * Return an error code if unable to add the entry, zero if no change,
664 * or 1 if the hardware PKEY register needs to be updated.
665 */
666static int add_pkey(struct ipath_devdata *dd, u16 key)
667{
668 int i;
669 u16 lkey = key & 0x7FFF;
670 int any = 0;
671 int ret;
672
673 if (lkey == 0x7FFF) {
674 ret = 0;
675 goto bail;
676 }
677
678 /* Look for an empty slot or a matching PKEY. */
679 for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
680 if (!dd->ipath_pkeys[i]) {
681 any++;
682 continue;
683 }
684 /* If it matches exactly, try to increment the ref count */
685 if (dd->ipath_pkeys[i] == key) {
686 if (atomic_inc_return(&dd->ipath_pkeyrefs[i]) > 1) {
687 ret = 0;
688 goto bail;
689 }
690 /* Lost the race. Look for an empty slot below. */
691 atomic_dec(&dd->ipath_pkeyrefs[i]);
692 any++;
693 }
694 /*
695 * It makes no sense to have both the limited and unlimited
696		 * PKEYs set at the same time, since the unlimited one will
697 * disable the limited one.
698 */
699 if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
700 ret = -EEXIST;
701 goto bail;
702 }
703 }
704 if (!any) {
705 ret = -EBUSY;
706 goto bail;
707 }
708 for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
709 if (!dd->ipath_pkeys[i] &&
710 atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
711 /* for ipathstats, etc. */
712 ipath_stats.sps_pkeys[i] = lkey;
713 dd->ipath_pkeys[i] = key;
714 ret = 1;
715 goto bail;
716 }
717 }
718 ret = -EBUSY;
719
720bail:
721 return ret;
722}
723
724/**
725 * set_pkeys - set the PKEY table for port 0
726 * @dd: the infinipath device
727 * @pkeys: the PKEY table
728 */
729static int set_pkeys(struct ipath_devdata *dd, u16 *pkeys, u8 port)
730{
731 struct ipath_portdata *pd;
732 int i;
733 int changed = 0;
734
735 /* always a kernel port, no locking needed */
736 pd = dd->ipath_pd[0];
737
738 for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
739 u16 key = pkeys[i];
740 u16 okey = pd->port_pkeys[i];
741
742 if (key == okey)
743 continue;
744 /*
745 * The value of this PKEY table entry is changing.
746 * Remove the old entry in the hardware's array of PKEYs.
747 */
748 if (okey & 0x7FFF)
749 changed |= rm_pkey(dd, okey);
750 if (key & 0x7FFF) {
751 int ret = add_pkey(dd, key);
752
753 if (ret < 0)
754 key = 0;
755 else
756 changed |= ret;
757 }
758 pd->port_pkeys[i] = key;
759 }
760 if (changed) {
761 u64 pkey;
762 struct ib_event event;
763
764 pkey = (u64) dd->ipath_pkeys[0] |
765 ((u64) dd->ipath_pkeys[1] << 16) |
766 ((u64) dd->ipath_pkeys[2] << 32) |
767 ((u64) dd->ipath_pkeys[3] << 48);
768 ipath_cdbg(VERBOSE, "p0 new pkey reg %llx\n",
769 (unsigned long long) pkey);
770 ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
771 pkey);
772
773 event.event = IB_EVENT_PKEY_CHANGE;
774 event.device = &dd->verbs_dev->ibdev;
775 event.element.port_num = port;
776 ib_dispatch_event(&event);
777 }
778 return 0;
779}
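
The update above packs the four 16-bit PKEY slots into the single 64-bit partition-key register. As a side-by-side illustration (a minimal sketch, not a driver API), the packing and its inverse look like this:

/* Pack four 16-bit PKEYs into one 64-bit register image (sketch). */
static u64 pack_pkeys(const u16 pkeys[4])
{
	return (u64) pkeys[0] |
	       ((u64) pkeys[1] << 16) |
	       ((u64) pkeys[2] << 32) |
	       ((u64) pkeys[3] << 48);
}

/* Recover PKEY slot i (0..3) from the packed register image. */
static u16 unpack_pkey(u64 reg, unsigned int i)
{
	return (u16) (reg >> (16 * i));
}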
780
781static int recv_subn_set_pkeytable(struct ib_smp *smp,
782 struct ib_device *ibdev, u8 port)
783{
784 u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
785 __be16 *p = (__be16 *) smp->data;
786 u16 *q = (u16 *) smp->data;
787 struct ipath_ibdev *dev = to_idev(ibdev);
788 unsigned i, n = ipath_get_npkeys(dev->dd);
789
790 for (i = 0; i < n; i++)
791 q[i] = be16_to_cpu(p[i]);
792
793 if (startpx != 0 || set_pkeys(dev->dd, q, port) != 0)
794 smp->status |= IB_SMP_INVALID_FIELD;
795
796 return recv_subn_get_pkeytable(smp, ibdev);
797}
798
799static int recv_pma_get_classportinfo(struct ib_pma_mad *pmp)
800{
801 struct ib_class_port_info *p =
802 (struct ib_class_port_info *)pmp->data;
803
804 memset(pmp->data, 0, sizeof(pmp->data));
805
806 if (pmp->mad_hdr.attr_mod != 0)
807 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
808
809 /* Indicate AllPortSelect is valid (only one port anyway) */
810 p->capability_mask = cpu_to_be16(1 << 8);
811 p->base_version = 1;
812 p->class_version = 1;
813 /*
814 * Expected response time is 4.096 usec. * 2^18 == 1.073741824
815 * sec.
816 */
817 p->resp_time_value = 18;
818
819 return reply((struct ib_smp *) pmp);
820}
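
ClassPortInfo encodes the expected response time as an exponent: 4.096 usec * 2^resp_time_value. A worked check of the value 18 used above (the helper name is illustrative, not part of the driver):

/* Expected response time in nanoseconds for a given resp_time_value. */
static u64 resp_time_ns(u8 resp_time_value)
{
	return 4096ULL << resp_time_value;	/* 4.096 usec. * 2^value */
}

/* resp_time_ns(18) == 4096 << 18 == 1073741824 ns, i.e. ~1.074 sec. */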
821
822/*
823 * The PortSamplesControl.CounterMasks field is an array of 3-bit fields
824 * which specify the N'th counter's capabilities. See ch. 16.1.3.2.
825 * We support 5 counters, which only count the mandatory quantities.
826 */
827#define COUNTER_MASK(q, n) (q << ((9 - n) * 3))
828#define COUNTER_MASK0_9 cpu_to_be32(COUNTER_MASK(1, 0) | \
829 COUNTER_MASK(1, 1) | \
830 COUNTER_MASK(1, 2) | \
831 COUNTER_MASK(1, 3) | \
832 COUNTER_MASK(1, 4))
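
Expanding the macros shows where each 3-bit capability code lands; this is only a worked expansion of the definitions above, in host byte order before the cpu_to_be32() conversion:

/*
 * COUNTER_MASK(q, n) places the 3-bit code q for counter n, with counter 0
 * in the most significant slot of the 30-bit field:
 *
 *   COUNTER_MASK(1, 0) == 1 << 27
 *   COUNTER_MASK(1, 4) == 1 << 15
 *
 * so COUNTER_MASK0_9 is cpu_to_be32(0x09248000): code 1 for counters 0..4
 * (the mandatory quantities mentioned above) and 0 for the rest.
 */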
833
834static int recv_pma_get_portsamplescontrol(struct ib_pma_mad *pmp,
835 struct ib_device *ibdev, u8 port)
836{
837 struct ib_pma_portsamplescontrol *p =
838 (struct ib_pma_portsamplescontrol *)pmp->data;
839 struct ipath_ibdev *dev = to_idev(ibdev);
840 struct ipath_cregs const *crp = dev->dd->ipath_cregs;
841 unsigned long flags;
842 u8 port_select = p->port_select;
843
844 memset(pmp->data, 0, sizeof(pmp->data));
845
846 p->port_select = port_select;
847 if (pmp->mad_hdr.attr_mod != 0 ||
848 (port_select != port && port_select != 0xFF))
849 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
850 /*
851	 * Ticks are 10x the link transfer period, which for 2.5 Gb/s is 4
852	 * nsec. 0 == 4 nsec., 1 == 8 nsec., ..., 255 == 1020 nsec. Sample
853	 * intervals are counted in ticks. Since we use Linux timers, which
854	 * count in jiffies, we can't sample for less than 1000 ticks if HZ
855	 * == 1000 (4000 ticks if HZ is 250). link_speed_active returns 2 for
856	 * DDR and 1 for SDR, so set the tick to 1 for DDR and 0 for SDR on
857	 * chips that have hardware support for delaying packets.
858 */
859 if (crp->cr_psstat)
860 p->tick = dev->dd->ipath_link_speed_active - 1;
861 else
862 p->tick = 250; /* 1 usec. */
863 p->counter_width = 4; /* 32 bit counters */
864 p->counter_mask0_9 = COUNTER_MASK0_9;
865 spin_lock_irqsave(&dev->pending_lock, flags);
866 if (crp->cr_psstat)
867 p->sample_status = ipath_read_creg32(dev->dd, crp->cr_psstat);
868 else
869 p->sample_status = dev->pma_sample_status;
870 p->sample_start = cpu_to_be32(dev->pma_sample_start);
871 p->sample_interval = cpu_to_be32(dev->pma_sample_interval);
872 p->tag = cpu_to_be16(dev->pma_tag);
873 p->counter_select[0] = dev->pma_counter_select[0];
874 p->counter_select[1] = dev->pma_counter_select[1];
875 p->counter_select[2] = dev->pma_counter_select[2];
876 p->counter_select[3] = dev->pma_counter_select[3];
877 p->counter_select[4] = dev->pma_counter_select[4];
878 spin_unlock_irqrestore(&dev->pending_lock, flags);
879
880 return reply((struct ib_smp *) pmp);
881}
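
The arithmetic behind the comment in the function above, as a small sketch (treating the ~1 usec tick as exactly 1000 ns; min_sample_ticks() is illustrative, not a driver helper):

/* Smallest usable sample interval, in ticks, given HZ and the tick size. */
static u32 min_sample_ticks(unsigned int hz, u32 tick_ns)
{
	u32 jiffy_ns = 1000000000U / hz;	/* one Linux timer period */

	return jiffy_ns / tick_ns;
}

/* min_sample_ticks(1000, 1000) == 1000 ticks; min_sample_ticks(250, 1000) == 4000. */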
882
883static int recv_pma_set_portsamplescontrol(struct ib_pma_mad *pmp,
884 struct ib_device *ibdev, u8 port)
885{
886 struct ib_pma_portsamplescontrol *p =
887 (struct ib_pma_portsamplescontrol *)pmp->data;
888 struct ipath_ibdev *dev = to_idev(ibdev);
889 struct ipath_cregs const *crp = dev->dd->ipath_cregs;
890 unsigned long flags;
891 u8 status;
892 int ret;
893
894 if (pmp->mad_hdr.attr_mod != 0 ||
895 (p->port_select != port && p->port_select != 0xFF)) {
896 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
897 ret = reply((struct ib_smp *) pmp);
898 goto bail;
899 }
900
901 spin_lock_irqsave(&dev->pending_lock, flags);
902 if (crp->cr_psstat)
903 status = ipath_read_creg32(dev->dd, crp->cr_psstat);
904 else
905 status = dev->pma_sample_status;
906 if (status == IB_PMA_SAMPLE_STATUS_DONE) {
907 dev->pma_sample_start = be32_to_cpu(p->sample_start);
908 dev->pma_sample_interval = be32_to_cpu(p->sample_interval);
909 dev->pma_tag = be16_to_cpu(p->tag);
910 dev->pma_counter_select[0] = p->counter_select[0];
911 dev->pma_counter_select[1] = p->counter_select[1];
912 dev->pma_counter_select[2] = p->counter_select[2];
913 dev->pma_counter_select[3] = p->counter_select[3];
914 dev->pma_counter_select[4] = p->counter_select[4];
915 if (crp->cr_psstat) {
916 ipath_write_creg(dev->dd, crp->cr_psinterval,
917 dev->pma_sample_interval);
918 ipath_write_creg(dev->dd, crp->cr_psstart,
919 dev->pma_sample_start);
920 } else
921 dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_STARTED;
922 }
923 spin_unlock_irqrestore(&dev->pending_lock, flags);
924
925 ret = recv_pma_get_portsamplescontrol(pmp, ibdev, port);
926
927bail:
928 return ret;
929}
930
931static u64 get_counter(struct ipath_ibdev *dev,
932 struct ipath_cregs const *crp,
933 __be16 sel)
934{
935 u64 ret;
936
937 switch (sel) {
938 case IB_PMA_PORT_XMIT_DATA:
939 ret = (crp->cr_psxmitdatacount) ?
940 ipath_read_creg32(dev->dd, crp->cr_psxmitdatacount) :
941 dev->ipath_sword;
942 break;
943 case IB_PMA_PORT_RCV_DATA:
944 ret = (crp->cr_psrcvdatacount) ?
945 ipath_read_creg32(dev->dd, crp->cr_psrcvdatacount) :
946 dev->ipath_rword;
947 break;
948 case IB_PMA_PORT_XMIT_PKTS:
949 ret = (crp->cr_psxmitpktscount) ?
950 ipath_read_creg32(dev->dd, crp->cr_psxmitpktscount) :
951 dev->ipath_spkts;
952 break;
953 case IB_PMA_PORT_RCV_PKTS:
954 ret = (crp->cr_psrcvpktscount) ?
955 ipath_read_creg32(dev->dd, crp->cr_psrcvpktscount) :
956 dev->ipath_rpkts;
957 break;
958 case IB_PMA_PORT_XMIT_WAIT:
959 ret = (crp->cr_psxmitwaitcount) ?
960 ipath_read_creg32(dev->dd, crp->cr_psxmitwaitcount) :
961 dev->ipath_xmit_wait;
962 break;
963 default:
964 ret = 0;
965 }
966
967 return ret;
968}
969
970static int recv_pma_get_portsamplesresult(struct ib_pma_mad *pmp,
971 struct ib_device *ibdev)
972{
973 struct ib_pma_portsamplesresult *p =
974 (struct ib_pma_portsamplesresult *)pmp->data;
975 struct ipath_ibdev *dev = to_idev(ibdev);
976 struct ipath_cregs const *crp = dev->dd->ipath_cregs;
977 u8 status;
978 int i;
979
980 memset(pmp->data, 0, sizeof(pmp->data));
981 p->tag = cpu_to_be16(dev->pma_tag);
982 if (crp->cr_psstat)
983 status = ipath_read_creg32(dev->dd, crp->cr_psstat);
984 else
985 status = dev->pma_sample_status;
986 p->sample_status = cpu_to_be16(status);
987 for (i = 0; i < ARRAY_SIZE(dev->pma_counter_select); i++)
988 p->counter[i] = (status != IB_PMA_SAMPLE_STATUS_DONE) ? 0 :
989 cpu_to_be32(
990 get_counter(dev, crp, dev->pma_counter_select[i]));
991
992 return reply((struct ib_smp *) pmp);
993}
994
995static int recv_pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp,
996 struct ib_device *ibdev)
997{
998 struct ib_pma_portsamplesresult_ext *p =
999 (struct ib_pma_portsamplesresult_ext *)pmp->data;
1000 struct ipath_ibdev *dev = to_idev(ibdev);
1001 struct ipath_cregs const *crp = dev->dd->ipath_cregs;
1002 u8 status;
1003 int i;
1004
1005 memset(pmp->data, 0, sizeof(pmp->data));
1006 p->tag = cpu_to_be16(dev->pma_tag);
1007 if (crp->cr_psstat)
1008 status = ipath_read_creg32(dev->dd, crp->cr_psstat);
1009 else
1010 status = dev->pma_sample_status;
1011 p->sample_status = cpu_to_be16(status);
1012 /* 64 bits */
1013 p->extended_width = cpu_to_be32(0x80000000);
1014 for (i = 0; i < ARRAY_SIZE(dev->pma_counter_select); i++)
1015 p->counter[i] = (status != IB_PMA_SAMPLE_STATUS_DONE) ? 0 :
1016 cpu_to_be64(
1017 get_counter(dev, crp, dev->pma_counter_select[i]));
1018
1019 return reply((struct ib_smp *) pmp);
1020}
1021
1022static int recv_pma_get_portcounters(struct ib_pma_mad *pmp,
1023 struct ib_device *ibdev, u8 port)
1024{
1025 struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
1026 pmp->data;
1027 struct ipath_ibdev *dev = to_idev(ibdev);
1028 struct ipath_verbs_counters cntrs;
1029 u8 port_select = p->port_select;
1030
1031 ipath_get_counters(dev->dd, &cntrs);
1032
1033 /* Adjust counters for any resets done. */
1034 cntrs.symbol_error_counter -= dev->z_symbol_error_counter;
1035 cntrs.link_error_recovery_counter -=
1036 dev->z_link_error_recovery_counter;
1037 cntrs.link_downed_counter -= dev->z_link_downed_counter;
1038 cntrs.port_rcv_errors += dev->rcv_errors;
1039 cntrs.port_rcv_errors -= dev->z_port_rcv_errors;
1040 cntrs.port_rcv_remphys_errors -= dev->z_port_rcv_remphys_errors;
1041 cntrs.port_xmit_discards -= dev->z_port_xmit_discards;
1042 cntrs.port_xmit_data -= dev->z_port_xmit_data;
1043 cntrs.port_rcv_data -= dev->z_port_rcv_data;
1044 cntrs.port_xmit_packets -= dev->z_port_xmit_packets;
1045 cntrs.port_rcv_packets -= dev->z_port_rcv_packets;
1046 cntrs.local_link_integrity_errors -=
1047 dev->z_local_link_integrity_errors;
1048 cntrs.excessive_buffer_overrun_errors -=
1049 dev->z_excessive_buffer_overrun_errors;
1050 cntrs.vl15_dropped -= dev->z_vl15_dropped;
1051 cntrs.vl15_dropped += dev->n_vl15_dropped;
1052
1053 memset(pmp->data, 0, sizeof(pmp->data));
1054
1055 p->port_select = port_select;
1056 if (pmp->mad_hdr.attr_mod != 0 ||
1057 (port_select != port && port_select != 0xFF))
1058 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
1059
1060 if (cntrs.symbol_error_counter > 0xFFFFUL)
1061 p->symbol_error_counter = cpu_to_be16(0xFFFF);
1062 else
1063 p->symbol_error_counter =
1064 cpu_to_be16((u16)cntrs.symbol_error_counter);
1065 if (cntrs.link_error_recovery_counter > 0xFFUL)
1066 p->link_error_recovery_counter = 0xFF;
1067 else
1068 p->link_error_recovery_counter =
1069 (u8)cntrs.link_error_recovery_counter;
1070 if (cntrs.link_downed_counter > 0xFFUL)
1071 p->link_downed_counter = 0xFF;
1072 else
1073 p->link_downed_counter = (u8)cntrs.link_downed_counter;
1074 if (cntrs.port_rcv_errors > 0xFFFFUL)
1075 p->port_rcv_errors = cpu_to_be16(0xFFFF);
1076 else
1077 p->port_rcv_errors =
1078 cpu_to_be16((u16) cntrs.port_rcv_errors);
1079 if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
1080 p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
1081 else
1082 p->port_rcv_remphys_errors =
1083 cpu_to_be16((u16)cntrs.port_rcv_remphys_errors);
1084 if (cntrs.port_xmit_discards > 0xFFFFUL)
1085 p->port_xmit_discards = cpu_to_be16(0xFFFF);
1086 else
1087 p->port_xmit_discards =
1088 cpu_to_be16((u16)cntrs.port_xmit_discards);
1089 if (cntrs.local_link_integrity_errors > 0xFUL)
1090 cntrs.local_link_integrity_errors = 0xFUL;
1091 if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
1092 cntrs.excessive_buffer_overrun_errors = 0xFUL;
1093 p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
1094 cntrs.excessive_buffer_overrun_errors;
1095 if (cntrs.vl15_dropped > 0xFFFFUL)
1096 p->vl15_dropped = cpu_to_be16(0xFFFF);
1097 else
1098 p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
1099 if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
1100 p->port_xmit_data = cpu_to_be32(0xFFFFFFFF);
1101 else
1102 p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data);
1103 if (cntrs.port_rcv_data > 0xFFFFFFFFUL)
1104 p->port_rcv_data = cpu_to_be32(0xFFFFFFFF);
1105 else
1106 p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data);
1107 if (cntrs.port_xmit_packets > 0xFFFFFFFFUL)
1108 p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF);
1109 else
1110 p->port_xmit_packets =
1111 cpu_to_be32((u32)cntrs.port_xmit_packets);
1112 if (cntrs.port_rcv_packets > 0xFFFFFFFFUL)
1113 p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF);
1114 else
1115 p->port_rcv_packets =
1116 cpu_to_be32((u32) cntrs.port_rcv_packets);
1117
1118 return reply((struct ib_smp *) pmp);
1119}
1120
1121static int recv_pma_get_portcounters_ext(struct ib_pma_mad *pmp,
1122 struct ib_device *ibdev, u8 port)
1123{
1124 struct ib_pma_portcounters_ext *p =
1125 (struct ib_pma_portcounters_ext *)pmp->data;
1126 struct ipath_ibdev *dev = to_idev(ibdev);
1127 u64 swords, rwords, spkts, rpkts, xwait;
1128 u8 port_select = p->port_select;
1129
1130 ipath_snapshot_counters(dev->dd, &swords, &rwords, &spkts,
1131 &rpkts, &xwait);
1132
1133 /* Adjust counters for any resets done. */
1134 swords -= dev->z_port_xmit_data;
1135 rwords -= dev->z_port_rcv_data;
1136 spkts -= dev->z_port_xmit_packets;
1137 rpkts -= dev->z_port_rcv_packets;
1138
1139 memset(pmp->data, 0, sizeof(pmp->data));
1140
1141 p->port_select = port_select;
1142 if (pmp->mad_hdr.attr_mod != 0 ||
1143 (port_select != port && port_select != 0xFF))
1144 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
1145
1146 p->port_xmit_data = cpu_to_be64(swords);
1147 p->port_rcv_data = cpu_to_be64(rwords);
1148 p->port_xmit_packets = cpu_to_be64(spkts);
1149 p->port_rcv_packets = cpu_to_be64(rpkts);
1150 p->port_unicast_xmit_packets = cpu_to_be64(dev->n_unicast_xmit);
1151 p->port_unicast_rcv_packets = cpu_to_be64(dev->n_unicast_rcv);
1152 p->port_multicast_xmit_packets = cpu_to_be64(dev->n_multicast_xmit);
1153 p->port_multicast_rcv_packets = cpu_to_be64(dev->n_multicast_rcv);
1154
1155 return reply((struct ib_smp *) pmp);
1156}
1157
1158static int recv_pma_set_portcounters(struct ib_pma_mad *pmp,
1159 struct ib_device *ibdev, u8 port)
1160{
1161 struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
1162 pmp->data;
1163 struct ipath_ibdev *dev = to_idev(ibdev);
1164 struct ipath_verbs_counters cntrs;
1165
1166 /*
1167 * Since the HW doesn't support clearing counters, we save the
1168 * current count and subtract it from future responses.
1169 */
1170 ipath_get_counters(dev->dd, &cntrs);
1171
1172 if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR)
1173 dev->z_symbol_error_counter = cntrs.symbol_error_counter;
1174
1175 if (p->counter_select & IB_PMA_SEL_LINK_ERROR_RECOVERY)
1176 dev->z_link_error_recovery_counter =
1177 cntrs.link_error_recovery_counter;
1178
1179 if (p->counter_select & IB_PMA_SEL_LINK_DOWNED)
1180 dev->z_link_downed_counter = cntrs.link_downed_counter;
1181
1182 if (p->counter_select & IB_PMA_SEL_PORT_RCV_ERRORS)
1183 dev->z_port_rcv_errors =
1184 cntrs.port_rcv_errors + dev->rcv_errors;
1185
1186 if (p->counter_select & IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS)
1187 dev->z_port_rcv_remphys_errors =
1188 cntrs.port_rcv_remphys_errors;
1189
1190 if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DISCARDS)
1191 dev->z_port_xmit_discards = cntrs.port_xmit_discards;
1192
1193 if (p->counter_select & IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS)
1194 dev->z_local_link_integrity_errors =
1195 cntrs.local_link_integrity_errors;
1196
1197 if (p->counter_select & IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS)
1198 dev->z_excessive_buffer_overrun_errors =
1199 cntrs.excessive_buffer_overrun_errors;
1200
1201 if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) {
1202 dev->n_vl15_dropped = 0;
1203 dev->z_vl15_dropped = cntrs.vl15_dropped;
1204 }
1205
1206 if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA)
1207 dev->z_port_xmit_data = cntrs.port_xmit_data;
1208
1209 if (p->counter_select & IB_PMA_SEL_PORT_RCV_DATA)
1210 dev->z_port_rcv_data = cntrs.port_rcv_data;
1211
1212 if (p->counter_select & IB_PMA_SEL_PORT_XMIT_PACKETS)
1213 dev->z_port_xmit_packets = cntrs.port_xmit_packets;
1214
1215 if (p->counter_select & IB_PMA_SEL_PORT_RCV_PACKETS)
1216 dev->z_port_rcv_packets = cntrs.port_rcv_packets;
1217
1218 return recv_pma_get_portcounters(pmp, ibdev, port);
1219}
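
Both the Get and Set handlers rely on the same software-clear idiom: since the hardware counters cannot be reset, a "clear" just latches the current total as a zero baseline and every read reports the difference. The pattern in isolation (field and function names here are illustrative):

struct sw_counter {
	u64 hw_total;	/* monotonically increasing hardware count */
	u64 baseline;	/* total latched when the counter was "cleared" */
};

/* "Clear" the counter: remember the current hardware total. */
static void sw_counter_clear(struct sw_counter *c)
{
	c->baseline = c->hw_total;
}

/* Read the counter: report only what accumulated since the last clear. */
static u64 sw_counter_read(const struct sw_counter *c)
{
	return c->hw_total - c->baseline;
}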
1220
1221static int recv_pma_set_portcounters_ext(struct ib_pma_mad *pmp,
1222 struct ib_device *ibdev, u8 port)
1223{
1224 struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
1225 pmp->data;
1226 struct ipath_ibdev *dev = to_idev(ibdev);
1227 u64 swords, rwords, spkts, rpkts, xwait;
1228
1229 ipath_snapshot_counters(dev->dd, &swords, &rwords, &spkts,
1230 &rpkts, &xwait);
1231
1232 if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA)
1233 dev->z_port_xmit_data = swords;
1234
1235 if (p->counter_select & IB_PMA_SELX_PORT_RCV_DATA)
1236 dev->z_port_rcv_data = rwords;
1237
1238 if (p->counter_select & IB_PMA_SELX_PORT_XMIT_PACKETS)
1239 dev->z_port_xmit_packets = spkts;
1240
1241 if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS)
1242 dev->z_port_rcv_packets = rpkts;
1243
1244 if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS)
1245 dev->n_unicast_xmit = 0;
1246
1247 if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS)
1248 dev->n_unicast_rcv = 0;
1249
1250 if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS)
1251 dev->n_multicast_xmit = 0;
1252
1253 if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS)
1254 dev->n_multicast_rcv = 0;
1255
1256 return recv_pma_get_portcounters_ext(pmp, ibdev, port);
1257}
1258
1259static int process_subn(struct ib_device *ibdev, int mad_flags,
1260 u8 port_num, const struct ib_mad *in_mad,
1261 struct ib_mad *out_mad)
1262{
1263 struct ib_smp *smp = (struct ib_smp *)out_mad;
1264 struct ipath_ibdev *dev = to_idev(ibdev);
1265 int ret;
1266
1267 *out_mad = *in_mad;
1268 if (smp->class_version != 1) {
1269 smp->status |= IB_SMP_UNSUP_VERSION;
1270 ret = reply(smp);
1271 goto bail;
1272 }
1273
1274 /* Is the mkey in the process of expiring? */
1275 if (dev->mkey_lease_timeout &&
1276 time_after_eq(jiffies, dev->mkey_lease_timeout)) {
1277 /* Clear timeout and mkey protection field. */
1278 dev->mkey_lease_timeout = 0;
1279 dev->mkeyprot = 0;
1280 }
1281
1282 /*
1283 * M_Key checking depends on
1284 * Portinfo:M_Key_protect_bits
1285 */
1286 if ((mad_flags & IB_MAD_IGNORE_MKEY) == 0 && dev->mkey != 0 &&
1287 dev->mkey != smp->mkey &&
1288 (smp->method == IB_MGMT_METHOD_SET ||
1289 (smp->method == IB_MGMT_METHOD_GET &&
1290 dev->mkeyprot >= 2))) {
1291 if (dev->mkey_violations != 0xFFFF)
1292 ++dev->mkey_violations;
1293 if (dev->mkey_lease_timeout ||
1294 dev->mkey_lease_period == 0) {
1295 ret = IB_MAD_RESULT_SUCCESS |
1296 IB_MAD_RESULT_CONSUMED;
1297 goto bail;
1298 }
1299 dev->mkey_lease_timeout = jiffies +
1300 dev->mkey_lease_period * HZ;
1301 /* Future: Generate a trap notice. */
1302 ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
1303 goto bail;
1304 } else if (dev->mkey_lease_timeout)
1305 dev->mkey_lease_timeout = 0;
1306
1307 switch (smp->method) {
1308 case IB_MGMT_METHOD_GET:
1309 switch (smp->attr_id) {
1310 case IB_SMP_ATTR_NODE_DESC:
1311 ret = recv_subn_get_nodedescription(smp, ibdev);
1312 goto bail;
1313 case IB_SMP_ATTR_NODE_INFO:
1314 ret = recv_subn_get_nodeinfo(smp, ibdev, port_num);
1315 goto bail;
1316 case IB_SMP_ATTR_GUID_INFO:
1317 ret = recv_subn_get_guidinfo(smp, ibdev);
1318 goto bail;
1319 case IB_SMP_ATTR_PORT_INFO:
1320 ret = recv_subn_get_portinfo(smp, ibdev, port_num);
1321 goto bail;
1322 case IB_SMP_ATTR_PKEY_TABLE:
1323 ret = recv_subn_get_pkeytable(smp, ibdev);
1324 goto bail;
1325 case IB_SMP_ATTR_SM_INFO:
1326 if (dev->port_cap_flags & IB_PORT_SM_DISABLED) {
1327 ret = IB_MAD_RESULT_SUCCESS |
1328 IB_MAD_RESULT_CONSUMED;
1329 goto bail;
1330 }
1331 if (dev->port_cap_flags & IB_PORT_SM) {
1332 ret = IB_MAD_RESULT_SUCCESS;
1333 goto bail;
1334 }
1335 /* FALLTHROUGH */
1336 default:
1337 smp->status |= IB_SMP_UNSUP_METH_ATTR;
1338 ret = reply(smp);
1339 goto bail;
1340 }
1341
1342 case IB_MGMT_METHOD_SET:
1343 switch (smp->attr_id) {
1344 case IB_SMP_ATTR_GUID_INFO:
1345 ret = recv_subn_set_guidinfo(smp, ibdev);
1346 goto bail;
1347 case IB_SMP_ATTR_PORT_INFO:
1348 ret = recv_subn_set_portinfo(smp, ibdev, port_num);
1349 goto bail;
1350 case IB_SMP_ATTR_PKEY_TABLE:
1351 ret = recv_subn_set_pkeytable(smp, ibdev, port_num);
1352 goto bail;
1353 case IB_SMP_ATTR_SM_INFO:
1354 if (dev->port_cap_flags & IB_PORT_SM_DISABLED) {
1355 ret = IB_MAD_RESULT_SUCCESS |
1356 IB_MAD_RESULT_CONSUMED;
1357 goto bail;
1358 }
1359 if (dev->port_cap_flags & IB_PORT_SM) {
1360 ret = IB_MAD_RESULT_SUCCESS;
1361 goto bail;
1362 }
1363 /* FALLTHROUGH */
1364 default:
1365 smp->status |= IB_SMP_UNSUP_METH_ATTR;
1366 ret = reply(smp);
1367 goto bail;
1368 }
1369
1370 case IB_MGMT_METHOD_TRAP:
1371 case IB_MGMT_METHOD_REPORT:
1372 case IB_MGMT_METHOD_REPORT_RESP:
1373 case IB_MGMT_METHOD_TRAP_REPRESS:
1374 case IB_MGMT_METHOD_GET_RESP:
1375 /*
1376 * The ib_mad module will call us to process responses
1377 * before checking for other consumers.
1378 * Just tell the caller to process it normally.
1379 */
1380 ret = IB_MAD_RESULT_SUCCESS;
1381 goto bail;
1382 default:
1383 smp->status |= IB_SMP_UNSUP_METHOD;
1384 ret = reply(smp);
1385 }
1386
1387bail:
1388 return ret;
1389}
1390
1391static int process_perf(struct ib_device *ibdev, u8 port_num,
1392 const struct ib_mad *in_mad,
1393 struct ib_mad *out_mad)
1394{
1395 struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad;
1396 int ret;
1397
1398 *out_mad = *in_mad;
1399 if (pmp->mad_hdr.class_version != 1) {
1400 pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
1401 ret = reply((struct ib_smp *) pmp);
1402 goto bail;
1403 }
1404
1405 switch (pmp->mad_hdr.method) {
1406 case IB_MGMT_METHOD_GET:
1407 switch (pmp->mad_hdr.attr_id) {
1408 case IB_PMA_CLASS_PORT_INFO:
1409 ret = recv_pma_get_classportinfo(pmp);
1410 goto bail;
1411 case IB_PMA_PORT_SAMPLES_CONTROL:
1412 ret = recv_pma_get_portsamplescontrol(pmp, ibdev,
1413 port_num);
1414 goto bail;
1415 case IB_PMA_PORT_SAMPLES_RESULT:
1416 ret = recv_pma_get_portsamplesresult(pmp, ibdev);
1417 goto bail;
1418 case IB_PMA_PORT_SAMPLES_RESULT_EXT:
1419 ret = recv_pma_get_portsamplesresult_ext(pmp,
1420 ibdev);
1421 goto bail;
1422 case IB_PMA_PORT_COUNTERS:
1423 ret = recv_pma_get_portcounters(pmp, ibdev,
1424 port_num);
1425 goto bail;
1426 case IB_PMA_PORT_COUNTERS_EXT:
1427 ret = recv_pma_get_portcounters_ext(pmp, ibdev,
1428 port_num);
1429 goto bail;
1430 default:
1431 pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
1432 ret = reply((struct ib_smp *) pmp);
1433 goto bail;
1434 }
1435
1436 case IB_MGMT_METHOD_SET:
1437 switch (pmp->mad_hdr.attr_id) {
1438 case IB_PMA_PORT_SAMPLES_CONTROL:
1439 ret = recv_pma_set_portsamplescontrol(pmp, ibdev,
1440 port_num);
1441 goto bail;
1442 case IB_PMA_PORT_COUNTERS:
1443 ret = recv_pma_set_portcounters(pmp, ibdev,
1444 port_num);
1445 goto bail;
1446 case IB_PMA_PORT_COUNTERS_EXT:
1447 ret = recv_pma_set_portcounters_ext(pmp, ibdev,
1448 port_num);
1449 goto bail;
1450 default:
1451 pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
1452 ret = reply((struct ib_smp *) pmp);
1453 goto bail;
1454 }
1455
1456 case IB_MGMT_METHOD_GET_RESP:
1457 /*
1458 * The ib_mad module will call us to process responses
1459 * before checking for other consumers.
1460 * Just tell the caller to process it normally.
1461 */
1462 ret = IB_MAD_RESULT_SUCCESS;
1463 goto bail;
1464 default:
1465 pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
1466 ret = reply((struct ib_smp *) pmp);
1467 }
1468
1469bail:
1470 return ret;
1471}
1472
1473/**
1474 * ipath_process_mad - process an incoming MAD packet
1475 * @ibdev: the infiniband device this packet came in on
1476 * @mad_flags: MAD flags
1477 * @port_num: the port number this packet came in on
1478 * @in_wc: the work completion entry for this packet
1479 * @in_grh: the global route header for this packet
1480 * @in_mad: the incoming MAD
1481 * @out_mad: any outgoing MAD reply
1482 *
1483 * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
1484 * interested in processing.
1485 *
1486 * Note that the verbs framework has already done the MAD sanity checks,
1487 * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
1488 * MADs.
1489 *
1490 * This is called by the ib_mad module.
1491 */
1492int ipath_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
1493 const struct ib_wc *in_wc, const struct ib_grh *in_grh,
1494 const struct ib_mad_hdr *in, size_t in_mad_size,
1495 struct ib_mad_hdr *out, size_t *out_mad_size,
1496 u16 *out_mad_pkey_index)
1497{
1498 int ret;
1499 const struct ib_mad *in_mad = (const struct ib_mad *)in;
1500 struct ib_mad *out_mad = (struct ib_mad *)out;
1501
1502 if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
1503 *out_mad_size != sizeof(*out_mad)))
1504 return IB_MAD_RESULT_FAILURE;
1505
1506 switch (in_mad->mad_hdr.mgmt_class) {
1507 case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
1508 case IB_MGMT_CLASS_SUBN_LID_ROUTED:
1509 ret = process_subn(ibdev, mad_flags, port_num,
1510 in_mad, out_mad);
1511 goto bail;
1512 case IB_MGMT_CLASS_PERF_MGMT:
1513 ret = process_perf(ibdev, port_num, in_mad, out_mad);
1514 goto bail;
1515 default:
1516 ret = IB_MAD_RESULT_SUCCESS;
1517 }
1518
1519bail:
1520 return ret;
1521}
diff --git a/drivers/staging/rdma/ipath/ipath_mmap.c b/drivers/staging/rdma/ipath/ipath_mmap.c
deleted file mode 100644
index e73274229404..000000000000
--- a/drivers/staging/rdma/ipath/ipath_mmap.c
+++ /dev/null
@@ -1,174 +0,0 @@
1/*
2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/module.h>
34#include <linux/vmalloc.h>
35#include <linux/slab.h>
36#include <linux/mm.h>
37#include <linux/errno.h>
38#include <asm/pgtable.h>
39
40#include "ipath_verbs.h"
41
42/**
43 * ipath_release_mmap_info - free mmap info structure
44 * @ref: a pointer to the kref within struct ipath_mmap_info
45 */
46void ipath_release_mmap_info(struct kref *ref)
47{
48 struct ipath_mmap_info *ip =
49 container_of(ref, struct ipath_mmap_info, ref);
50 struct ipath_ibdev *dev = to_idev(ip->context->device);
51
52 spin_lock_irq(&dev->pending_lock);
53 list_del(&ip->pending_mmaps);
54 spin_unlock_irq(&dev->pending_lock);
55
56 vfree(ip->obj);
57 kfree(ip);
58}
59
60/*
61 * open and close keep track of how many times the object is mapped,
62 * to avoid releasing it prematurely.
63 */
64static void ipath_vma_open(struct vm_area_struct *vma)
65{
66 struct ipath_mmap_info *ip = vma->vm_private_data;
67
68 kref_get(&ip->ref);
69}
70
71static void ipath_vma_close(struct vm_area_struct *vma)
72{
73 struct ipath_mmap_info *ip = vma->vm_private_data;
74
75 kref_put(&ip->ref, ipath_release_mmap_info);
76}
77
78static const struct vm_operations_struct ipath_vm_ops = {
79 .open = ipath_vma_open,
80 .close = ipath_vma_close,
81};
82
83/**
84 * ipath_mmap - create a new mmap region
85 * @context: the IB user context of the process making the mmap() call
86 * @vma: the VMA to be initialized
87 * Return zero if the mmap is OK. Otherwise, return an errno.
88 */
89int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
90{
91 struct ipath_ibdev *dev = to_idev(context->device);
92 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
93 unsigned long size = vma->vm_end - vma->vm_start;
94 struct ipath_mmap_info *ip, *pp;
95 int ret = -EINVAL;
96
97 /*
98 * Search the device's list of objects waiting for a mmap call.
99 * Normally, this list is very short since a call to create a
100 * CQ, QP, or SRQ is soon followed by a call to mmap().
101 */
102 spin_lock_irq(&dev->pending_lock);
103 list_for_each_entry_safe(ip, pp, &dev->pending_mmaps,
104 pending_mmaps) {
105 /* Only the creator is allowed to mmap the object */
106 if (context != ip->context || (__u64) offset != ip->offset)
107 continue;
108 /* Don't allow a mmap larger than the object. */
109 if (size > ip->size)
110 break;
111
112 list_del_init(&ip->pending_mmaps);
113 spin_unlock_irq(&dev->pending_lock);
114
115 ret = remap_vmalloc_range(vma, ip->obj, 0);
116 if (ret)
117 goto done;
118 vma->vm_ops = &ipath_vm_ops;
119 vma->vm_private_data = ip;
120 ipath_vma_open(vma);
121 goto done;
122 }
123 spin_unlock_irq(&dev->pending_lock);
124done:
125 return ret;
126}
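
The offset compared against ip->offset above is the cookie handed back to userspace when the CQ, QP, or SRQ was created; user code then maps the object with an ordinary mmap() on the uverbs file descriptor. A hedged userspace sketch, where fd, obj_offset and obj_size stand for values returned by the create call:

#include <stddef.h>
#include <sys/mman.h>
#include <sys/types.h>

/* Map a queue object exported by the kernel driver (sketch). */
static void *map_queue_object(int fd, off_t obj_offset, size_t obj_size)
{
	void *p = mmap(NULL, obj_size, PROT_READ | PROT_WRITE,
		       MAP_SHARED, fd, obj_offset);

	return p == MAP_FAILED ? NULL : p;
}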
127
128/*
129 * Allocate information for ipath_mmap
130 */
131struct ipath_mmap_info *ipath_create_mmap_info(struct ipath_ibdev *dev,
132 u32 size,
133 struct ib_ucontext *context,
134 void *obj) {
135 struct ipath_mmap_info *ip;
136
137 ip = kmalloc(sizeof *ip, GFP_KERNEL);
138 if (!ip)
139 goto bail;
140
141 size = PAGE_ALIGN(size);
142
143 spin_lock_irq(&dev->mmap_offset_lock);
144 if (dev->mmap_offset == 0)
145 dev->mmap_offset = PAGE_SIZE;
146 ip->offset = dev->mmap_offset;
147 dev->mmap_offset += size;
148 spin_unlock_irq(&dev->mmap_offset_lock);
149
150 INIT_LIST_HEAD(&ip->pending_mmaps);
151 ip->size = size;
152 ip->context = context;
153 ip->obj = obj;
154 kref_init(&ip->ref);
155
156bail:
157 return ip;
158}
159
160void ipath_update_mmap_info(struct ipath_ibdev *dev,
161 struct ipath_mmap_info *ip,
162 u32 size, void *obj) {
163 size = PAGE_ALIGN(size);
164
165 spin_lock_irq(&dev->mmap_offset_lock);
166 if (dev->mmap_offset == 0)
167 dev->mmap_offset = PAGE_SIZE;
168 ip->offset = dev->mmap_offset;
169 dev->mmap_offset += size;
170 spin_unlock_irq(&dev->mmap_offset_lock);
171
172 ip->size = size;
173 ip->obj = obj;
174}
diff --git a/drivers/staging/rdma/ipath/ipath_mr.c b/drivers/staging/rdma/ipath/ipath_mr.c
deleted file mode 100644
index b76b0ce66709..000000000000
--- a/drivers/staging/rdma/ipath/ipath_mr.c
+++ /dev/null
@@ -1,370 +0,0 @@
1/*
2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/slab.h>
35
36#include <rdma/ib_umem.h>
37#include <rdma/ib_pack.h>
38#include <rdma/ib_smi.h>
39
40#include "ipath_verbs.h"
41
42/* Fast memory region */
43struct ipath_fmr {
44 struct ib_fmr ibfmr;
45 u8 page_shift;
46 struct ipath_mregion mr; /* must be last */
47};
48
49static inline struct ipath_fmr *to_ifmr(struct ib_fmr *ibfmr)
50{
51 return container_of(ibfmr, struct ipath_fmr, ibfmr);
52}
53
54/**
55 * ipath_get_dma_mr - get a DMA memory region
56 * @pd: protection domain for this memory region
57 * @acc: access flags
58 *
59 * Returns the memory region on success, otherwise returns an errno.
60 * Note that all DMA addresses should be created via the
61 * struct ib_dma_mapping_ops functions (see ipath_dma.c).
62 */
63struct ib_mr *ipath_get_dma_mr(struct ib_pd *pd, int acc)
64{
65 struct ipath_mr *mr;
66 struct ib_mr *ret;
67
68 mr = kzalloc(sizeof *mr, GFP_KERNEL);
69 if (!mr) {
70 ret = ERR_PTR(-ENOMEM);
71 goto bail;
72 }
73
74 mr->mr.access_flags = acc;
75 ret = &mr->ibmr;
76
77bail:
78 return ret;
79}
80
81static struct ipath_mr *alloc_mr(int count,
82 struct ipath_lkey_table *lk_table)
83{
84 struct ipath_mr *mr;
85 int m, i = 0;
86
87 /* Allocate struct plus pointers to first level page tables. */
88 m = (count + IPATH_SEGSZ - 1) / IPATH_SEGSZ;
89 mr = kmalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL);
90 if (!mr)
91 goto done;
92
93 /* Allocate first level page tables. */
94 for (; i < m; i++) {
95 mr->mr.map[i] = kmalloc(sizeof *mr->mr.map[0], GFP_KERNEL);
96 if (!mr->mr.map[i])
97 goto bail;
98 }
99 mr->mr.mapsz = m;
100
101 if (!ipath_alloc_lkey(lk_table, &mr->mr))
102 goto bail;
103 mr->ibmr.rkey = mr->ibmr.lkey = mr->mr.lkey;
104
105 goto done;
106
107bail:
108 while (i) {
109 i--;
110 kfree(mr->mr.map[i]);
111 }
112 kfree(mr);
113 mr = NULL;
114
115done:
116 return mr;
117}
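
The region keeps its segments in a two-level table: mr.map[] points at chunks of IPATH_SEGSZ segments, and the loops in ipath_reg_user_mr() and ipath_map_phys_fmr() below walk the (m, n) pair incrementally. The equivalent direct index computation, as a sketch:

/* Map a linear segment index k onto the two-level (chunk, slot) table. */
static void mr_seg_index(unsigned int k, unsigned int *m, unsigned int *n)
{
	*m = k / IPATH_SEGSZ;	/* which first-level page table chunk */
	*n = k % IPATH_SEGSZ;	/* which slot within that chunk */
}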
118
119/**
120 * ipath_reg_user_mr - register a userspace memory region
121 * @pd: protection domain for this memory region
122 * @start: starting userspace address
123 * @length: length of region to register
124 * @virt_addr: virtual address to use (from HCA's point of view)
125 * @mr_access_flags: access flags for this memory region
126 * @udata: unused by the InfiniPath driver
127 *
128 * Returns the memory region on success, otherwise returns an errno.
129 */
130struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
131 u64 virt_addr, int mr_access_flags,
132 struct ib_udata *udata)
133{
134 struct ipath_mr *mr;
135 struct ib_umem *umem;
136 int n, m, entry;
137 struct scatterlist *sg;
138 struct ib_mr *ret;
139
140 if (length == 0) {
141 ret = ERR_PTR(-EINVAL);
142 goto bail;
143 }
144
145 umem = ib_umem_get(pd->uobject->context, start, length,
146 mr_access_flags, 0);
147 if (IS_ERR(umem))
148 return (void *) umem;
149
150 n = umem->nmap;
151 mr = alloc_mr(n, &to_idev(pd->device)->lk_table);
152 if (!mr) {
153 ret = ERR_PTR(-ENOMEM);
154 ib_umem_release(umem);
155 goto bail;
156 }
157
158 mr->mr.pd = pd;
159 mr->mr.user_base = start;
160 mr->mr.iova = virt_addr;
161 mr->mr.length = length;
162 mr->mr.offset = ib_umem_offset(umem);
163 mr->mr.access_flags = mr_access_flags;
164 mr->mr.max_segs = n;
165 mr->umem = umem;
166
167 m = 0;
168 n = 0;
169 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
170 void *vaddr;
171
172 vaddr = page_address(sg_page(sg));
173 if (!vaddr) {
174 ret = ERR_PTR(-EINVAL);
175 goto bail;
176 }
177 mr->mr.map[m]->segs[n].vaddr = vaddr;
178 mr->mr.map[m]->segs[n].length = umem->page_size;
179 n++;
180 if (n == IPATH_SEGSZ) {
181 m++;
182 n = 0;
183 }
184 }
185 ret = &mr->ibmr;
186
187bail:
188 return ret;
189}
190
191/**
192 * ipath_dereg_mr - unregister and free a memory region
193 * @ibmr: the memory region to free
194 *
195 * Returns 0 on success.
196 *
197 * Note that this is called to free MRs created by ipath_get_dma_mr()
198 * or ipath_reg_user_mr().
199 */
200int ipath_dereg_mr(struct ib_mr *ibmr)
201{
202 struct ipath_mr *mr = to_imr(ibmr);
203 int i;
204
205 ipath_free_lkey(&to_idev(ibmr->device)->lk_table, ibmr->lkey);
206 i = mr->mr.mapsz;
207 while (i) {
208 i--;
209 kfree(mr->mr.map[i]);
210 }
211
212 if (mr->umem)
213 ib_umem_release(mr->umem);
214
215 kfree(mr);
216 return 0;
217}
218
219/**
220 * ipath_alloc_fmr - allocate a fast memory region
221 * @pd: the protection domain for this memory region
222 * @mr_access_flags: access flags for this memory region
223 * @fmr_attr: fast memory region attributes
224 *
225 * Returns the memory region on success, otherwise returns an errno.
226 */
227struct ib_fmr *ipath_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
228 struct ib_fmr_attr *fmr_attr)
229{
230 struct ipath_fmr *fmr;
231 int m, i = 0;
232 struct ib_fmr *ret;
233
234 /* Allocate struct plus pointers to first level page tables. */
235 m = (fmr_attr->max_pages + IPATH_SEGSZ - 1) / IPATH_SEGSZ;
236 fmr = kmalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL);
237 if (!fmr)
238 goto bail;
239
240 /* Allocate first level page tables. */
241 for (; i < m; i++) {
242 fmr->mr.map[i] = kmalloc(sizeof *fmr->mr.map[0],
243 GFP_KERNEL);
244 if (!fmr->mr.map[i])
245 goto bail;
246 }
247 fmr->mr.mapsz = m;
248
249 /*
250 * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
251 * rkey.
252 */
253 if (!ipath_alloc_lkey(&to_idev(pd->device)->lk_table, &fmr->mr))
254 goto bail;
255 fmr->ibfmr.rkey = fmr->ibfmr.lkey = fmr->mr.lkey;
256 /*
257 * Resources are allocated but no valid mapping (RKEY can't be
258 * used).
259 */
260 fmr->mr.pd = pd;
261 fmr->mr.user_base = 0;
262 fmr->mr.iova = 0;
263 fmr->mr.length = 0;
264 fmr->mr.offset = 0;
265 fmr->mr.access_flags = mr_access_flags;
266 fmr->mr.max_segs = fmr_attr->max_pages;
267 fmr->page_shift = fmr_attr->page_shift;
268
269 ret = &fmr->ibfmr;
270 goto done;
271
272bail:
273 while (i)
274 kfree(fmr->mr.map[--i]);
275 kfree(fmr);
276 ret = ERR_PTR(-ENOMEM);
277
278done:
279 return ret;
280}
281
282/**
283 * ipath_map_phys_fmr - set up a fast memory region
284 * @ibmfr: the fast memory region to set up
285 * @page_list: the list of pages to associate with the fast memory region
286 * @list_len: the number of pages to associate with the fast memory region
287 * @iova: the virtual address of the start of the fast memory region
288 *
289 * This may be called from interrupt context.
290 */
291
292int ipath_map_phys_fmr(struct ib_fmr *ibfmr, u64 * page_list,
293 int list_len, u64 iova)
294{
295 struct ipath_fmr *fmr = to_ifmr(ibfmr);
296 struct ipath_lkey_table *rkt;
297 unsigned long flags;
298 int m, n, i;
299 u32 ps;
300 int ret;
301
302 if (list_len > fmr->mr.max_segs) {
303 ret = -EINVAL;
304 goto bail;
305 }
306 rkt = &to_idev(ibfmr->device)->lk_table;
307 spin_lock_irqsave(&rkt->lock, flags);
308 fmr->mr.user_base = iova;
309 fmr->mr.iova = iova;
310 ps = 1 << fmr->page_shift;
311 fmr->mr.length = list_len * ps;
312 m = 0;
313 n = 0;
314 ps = 1 << fmr->page_shift;
315 for (i = 0; i < list_len; i++) {
316 fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i];
317 fmr->mr.map[m]->segs[n].length = ps;
318 if (++n == IPATH_SEGSZ) {
319 m++;
320 n = 0;
321 }
322 }
323 spin_unlock_irqrestore(&rkt->lock, flags);
324 ret = 0;
325
326bail:
327 return ret;
328}
329
330/**
331 * ipath_unmap_fmr - unmap fast memory regions
332 * @fmr_list: the list of fast memory regions to unmap
333 *
334 * Returns 0 on success.
335 */
336int ipath_unmap_fmr(struct list_head *fmr_list)
337{
338 struct ipath_fmr *fmr;
339 struct ipath_lkey_table *rkt;
340 unsigned long flags;
341
342 list_for_each_entry(fmr, fmr_list, ibfmr.list) {
343 rkt = &to_idev(fmr->ibfmr.device)->lk_table;
344 spin_lock_irqsave(&rkt->lock, flags);
345 fmr->mr.user_base = 0;
346 fmr->mr.iova = 0;
347 fmr->mr.length = 0;
348 spin_unlock_irqrestore(&rkt->lock, flags);
349 }
350 return 0;
351}
352
353/**
354 * ipath_dealloc_fmr - deallocate a fast memory region
355 * @ibfmr: the fast memory region to deallocate
356 *
357 * Returns 0 on success.
358 */
359int ipath_dealloc_fmr(struct ib_fmr *ibfmr)
360{
361 struct ipath_fmr *fmr = to_ifmr(ibfmr);
362 int i;
363
364 ipath_free_lkey(&to_idev(ibfmr->device)->lk_table, ibfmr->lkey);
365 i = fmr->mr.mapsz;
366 while (i)
367 kfree(fmr->mr.map[--i]);
368 kfree(fmr);
369 return 0;
370}
diff --git a/drivers/staging/rdma/ipath/ipath_qp.c b/drivers/staging/rdma/ipath/ipath_qp.c
deleted file mode 100644
index 280cd2d638e4..000000000000
--- a/drivers/staging/rdma/ipath/ipath_qp.c
+++ /dev/null
@@ -1,1079 +0,0 @@
1/*
2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/err.h>
35#include <linux/slab.h>
36#include <linux/vmalloc.h>
37
38#include "ipath_verbs.h"
39#include "ipath_kernel.h"
40
41#define BITS_PER_PAGE (PAGE_SIZE*BITS_PER_BYTE)
42#define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1)
43#define mk_qpn(qpt, map, off) (((map) - (qpt)->map) * BITS_PER_PAGE + \
44 (off))
45#define find_next_offset(map, off) find_next_zero_bit((map)->page, \
46 BITS_PER_PAGE, off)
47
48/*
49 * Convert the AETH credit code into the number of credits.
50 */
51static u32 credit_table[31] = {
52 0, /* 0 */
53 1, /* 1 */
54 2, /* 2 */
55 3, /* 3 */
56 4, /* 4 */
57 6, /* 5 */
58 8, /* 6 */
59 12, /* 7 */
60 16, /* 8 */
61 24, /* 9 */
62 32, /* A */
63 48, /* B */
64 64, /* C */
65 96, /* D */
66 128, /* E */
67 192, /* F */
68 256, /* 10 */
69 384, /* 11 */
70 512, /* 12 */
71 768, /* 13 */
72 1024, /* 14 */
73 1536, /* 15 */
74 2048, /* 16 */
75 3072, /* 17 */
76 4096, /* 18 */
77 6144, /* 19 */
78 8192, /* 1A */
79 12288, /* 1B */
80 16384, /* 1C */
81 24576, /* 1D */
82 32768 /* 1E */
83};
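
This table converts the 5-bit credit code carried in an AETH into a credit count. A hedged decode sketch, assuming the usual layout in which the code occupies bits 28:24 of the AETH (the driver's own shift/mask constants live in its headers):

/* Decode the 5-bit AETH credit code into a credit count (sketch). */
static u32 aeth_to_credits(u32 aeth)
{
	u32 code = (aeth >> 24) & 0x1f;

	/* Code 0x1f lies outside the table and is treated as invalid. */
	return code < ARRAY_SIZE(credit_table) ? credit_table[code] : 0;
}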
84
85
86static void get_map_page(struct ipath_qp_table *qpt, struct qpn_map *map)
87{
88 unsigned long page = get_zeroed_page(GFP_KERNEL);
89 unsigned long flags;
90
91 /*
92 * Free the page if someone raced with us installing it.
93 */
94
95 spin_lock_irqsave(&qpt->lock, flags);
96 if (map->page)
97 free_page(page);
98 else
99 map->page = (void *)page;
100 spin_unlock_irqrestore(&qpt->lock, flags);
101}
102
103
104static int alloc_qpn(struct ipath_qp_table *qpt, enum ib_qp_type type)
105{
106 u32 i, offset, max_scan, qpn;
107 struct qpn_map *map;
108	int ret = -1;
109
110 if (type == IB_QPT_SMI)
111 ret = 0;
112 else if (type == IB_QPT_GSI)
113 ret = 1;
114
115 if (ret != -1) {
116 map = &qpt->map[0];
117 if (unlikely(!map->page)) {
118 get_map_page(qpt, map);
119 if (unlikely(!map->page)) {
120 ret = -ENOMEM;
121 goto bail;
122 }
123 }
124 if (!test_and_set_bit(ret, map->page))
125 atomic_dec(&map->n_free);
126 else
127 ret = -EBUSY;
128 goto bail;
129 }
130
131 qpn = qpt->last + 1;
132 if (qpn >= QPN_MAX)
133 qpn = 2;
134 offset = qpn & BITS_PER_PAGE_MASK;
135 map = &qpt->map[qpn / BITS_PER_PAGE];
136 max_scan = qpt->nmaps - !offset;
137 for (i = 0;;) {
138 if (unlikely(!map->page)) {
139 get_map_page(qpt, map);
140 if (unlikely(!map->page))
141 break;
142 }
143 if (likely(atomic_read(&map->n_free))) {
144 do {
145 if (!test_and_set_bit(offset, map->page)) {
146 atomic_dec(&map->n_free);
147 qpt->last = qpn;
148 ret = qpn;
149 goto bail;
150 }
151 offset = find_next_offset(map, offset);
152 qpn = mk_qpn(qpt, map, offset);
153 /*
154 * This test differs from alloc_pidmap().
155 * If find_next_offset() does find a zero
156 * bit, we don't need to check for QPN
157 * wrapping around past our starting QPN.
158 * We just need to be sure we don't loop
159 * forever.
160 */
161 } while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
162 }
163 /*
164 * In order to keep the number of pages allocated to a
165	 * minimum, we scan all the existing pages before increasing
166 * the size of the bitmap table.
167 */
168 if (++i > max_scan) {
169 if (qpt->nmaps == QPNMAP_ENTRIES)
170 break;
171 map = &qpt->map[qpt->nmaps++];
172 offset = 0;
173 } else if (map < &qpt->map[qpt->nmaps]) {
174 ++map;
175 offset = 0;
176 } else {
177 map = &qpt->map[0];
178 offset = 2;
179 }
180 qpn = mk_qpn(qpt, map, offset);
181 }
182
183 ret = -ENOMEM;
184
185bail:
186 return ret;
187}
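
The allocator treats the QPN space as a series of page-sized bitmaps: with 4 KiB pages BITS_PER_PAGE is 32768, so bitmap page p at bit offset o corresponds to QPN p * 32768 + o (the mk_qpn() macro above), with QPNs 0 and 1 reserved for the SMI and GSI QPs. The mapping in both directions, as a sketch:

/* QPN <-> (bitmap page, bit offset), mirroring mk_qpn() above (sketch). */
static u32 qpn_from_bit(u32 page, u32 offset)
{
	return page * BITS_PER_PAGE + offset;
}

static void bit_from_qpn(u32 qpn, u32 *page, u32 *offset)
{
	*page = qpn / BITS_PER_PAGE;
	*offset = qpn & BITS_PER_PAGE_MASK;
}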
188
189static void free_qpn(struct ipath_qp_table *qpt, u32 qpn)
190{
191 struct qpn_map *map;
192
193 map = qpt->map + qpn / BITS_PER_PAGE;
194 if (map->page)
195 clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
196 atomic_inc(&map->n_free);
197}
198
199/**
200 * ipath_alloc_qpn - allocate a QP number
201 * @qpt: the QP table
202 * @qp: the QP
203 * @type: the QP type (IB_QPT_SMI and IB_QPT_GSI are special)
204 *
205 * Allocate the next available QPN and put the QP into the hash table.
206 * The hash table holds a reference to the QP.
207 */
208static int ipath_alloc_qpn(struct ipath_qp_table *qpt, struct ipath_qp *qp,
209 enum ib_qp_type type)
210{
211 unsigned long flags;
212 int ret;
213
214 ret = alloc_qpn(qpt, type);
215 if (ret < 0)
216 goto bail;
217 qp->ibqp.qp_num = ret;
218
219 /* Add the QP to the hash table. */
220 spin_lock_irqsave(&qpt->lock, flags);
221
222 ret %= qpt->max;
223 qp->next = qpt->table[ret];
224 qpt->table[ret] = qp;
225 atomic_inc(&qp->refcount);
226
227 spin_unlock_irqrestore(&qpt->lock, flags);
228 ret = 0;
229
230bail:
231 return ret;
232}
233
234/**
235 * ipath_free_qp - remove a QP from the QP table
236 * @qpt: the QP table
237 * @qp: the QP to remove
238 *
239 * Remove the QP from the table so it can't be found asynchronously by
240 * the receive interrupt routine.
241 */
242static void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp)
243{
244 struct ipath_qp *q, **qpp;
245 unsigned long flags;
246
247 spin_lock_irqsave(&qpt->lock, flags);
248
249 /* Remove QP from the hash table. */
250 qpp = &qpt->table[qp->ibqp.qp_num % qpt->max];
251 for (; (q = *qpp) != NULL; qpp = &q->next) {
252 if (q == qp) {
253 *qpp = qp->next;
254 qp->next = NULL;
255 atomic_dec(&qp->refcount);
256 break;
257 }
258 }
259
260 spin_unlock_irqrestore(&qpt->lock, flags);
261}
262
263/**
264 * ipath_free_all_qps - check for QPs still in use
265 * @qpt: the QP table to empty
266 *
267 * There should not be any QPs still in use.
268 * Free memory for table.
269 */
270unsigned ipath_free_all_qps(struct ipath_qp_table *qpt)
271{
272 unsigned long flags;
273 struct ipath_qp *qp;
274 u32 n, qp_inuse = 0;
275
276 spin_lock_irqsave(&qpt->lock, flags);
277 for (n = 0; n < qpt->max; n++) {
278 qp = qpt->table[n];
279 qpt->table[n] = NULL;
280
281 for (; qp; qp = qp->next)
282 qp_inuse++;
283 }
284 spin_unlock_irqrestore(&qpt->lock, flags);
285
286 for (n = 0; n < ARRAY_SIZE(qpt->map); n++)
287 if (qpt->map[n].page)
288 free_page((unsigned long) qpt->map[n].page);
289 return qp_inuse;
290}
291
292/**
293 * ipath_lookup_qpn - return the QP with the given QPN
294 * @qpt: the QP table
295 * @qpn: the QP number to look up
296 *
297 * The caller is responsible for decrementing the QP reference count
298 * when done.
299 */
300struct ipath_qp *ipath_lookup_qpn(struct ipath_qp_table *qpt, u32 qpn)
301{
302 unsigned long flags;
303 struct ipath_qp *qp;
304
305 spin_lock_irqsave(&qpt->lock, flags);
306
307 for (qp = qpt->table[qpn % qpt->max]; qp; qp = qp->next) {
308 if (qp->ibqp.qp_num == qpn) {
309 atomic_inc(&qp->refcount);
310 break;
311 }
312 }
313
314 spin_unlock_irqrestore(&qpt->lock, flags);
315 return qp;
316}
317
318/**
319 * ipath_reset_qp - initialize the QP state to the reset state
320 * @qp: the QP to reset
321 * @type: the QP type
322 */
323static void ipath_reset_qp(struct ipath_qp *qp, enum ib_qp_type type)
324{
325 qp->remote_qpn = 0;
326 qp->qkey = 0;
327 qp->qp_access_flags = 0;
328 atomic_set(&qp->s_dma_busy, 0);
329 qp->s_flags &= IPATH_S_SIGNAL_REQ_WR;
330 qp->s_hdrwords = 0;
331 qp->s_wqe = NULL;
332 qp->s_pkt_delay = 0;
333 qp->s_draining = 0;
334 qp->s_psn = 0;
335 qp->r_psn = 0;
336 qp->r_msn = 0;
337 if (type == IB_QPT_RC) {
338 qp->s_state = IB_OPCODE_RC_SEND_LAST;
339 qp->r_state = IB_OPCODE_RC_SEND_LAST;
340 } else {
341 qp->s_state = IB_OPCODE_UC_SEND_LAST;
342 qp->r_state = IB_OPCODE_UC_SEND_LAST;
343 }
344 qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
345 qp->r_nak_state = 0;
346 qp->r_aflags = 0;
347 qp->r_flags = 0;
348 qp->s_rnr_timeout = 0;
349 qp->s_head = 0;
350 qp->s_tail = 0;
351 qp->s_cur = 0;
352 qp->s_last = 0;
353 qp->s_ssn = 1;
354 qp->s_lsn = 0;
355 memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
356 qp->r_head_ack_queue = 0;
357 qp->s_tail_ack_queue = 0;
358 qp->s_num_rd_atomic = 0;
359 if (qp->r_rq.wq) {
360 qp->r_rq.wq->head = 0;
361 qp->r_rq.wq->tail = 0;
362 }
363}
364
365/**
366 * ipath_error_qp - put a QP into the error state
367 * @qp: the QP to put into the error state
368 * @err: the receive completion error to signal if a RWQE is active
369 *
370 * Flushes both send and receive work queues.
371 * Returns true if last WQE event should be generated.
372 * The QP s_lock should be held and interrupts disabled.
373 * If we are already in error state, just return.
374 */
375
376int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
377{
378 struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
379 struct ib_wc wc;
380 int ret = 0;
381
382 if (qp->state == IB_QPS_ERR)
383 goto bail;
384
385 qp->state = IB_QPS_ERR;
386
387 spin_lock(&dev->pending_lock);
388 if (!list_empty(&qp->timerwait))
389 list_del_init(&qp->timerwait);
390 if (!list_empty(&qp->piowait))
391 list_del_init(&qp->piowait);
392 spin_unlock(&dev->pending_lock);
393
394 /* Schedule the sending tasklet to drain the send work queue. */
395 if (qp->s_last != qp->s_head)
396 ipath_schedule_send(qp);
397
398 memset(&wc, 0, sizeof(wc));
399 wc.qp = &qp->ibqp;
400 wc.opcode = IB_WC_RECV;
401
402 if (test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags)) {
403 wc.wr_id = qp->r_wr_id;
404 wc.status = err;
405 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
406 }
407 wc.status = IB_WC_WR_FLUSH_ERR;
408
409 if (qp->r_rq.wq) {
410 struct ipath_rwq *wq;
411 u32 head;
412 u32 tail;
413
414 spin_lock(&qp->r_rq.lock);
415
416 /* sanity check pointers before trusting them */
417 wq = qp->r_rq.wq;
418 head = wq->head;
419 if (head >= qp->r_rq.size)
420 head = 0;
421 tail = wq->tail;
422 if (tail >= qp->r_rq.size)
423 tail = 0;
424 while (tail != head) {
425 wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
426 if (++tail >= qp->r_rq.size)
427 tail = 0;
428 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
429 }
430 wq->tail = tail;
431
432 spin_unlock(&qp->r_rq.lock);
433 } else if (qp->ibqp.event_handler)
434 ret = 1;
435
436bail:
437 return ret;
438}
439
440/**
441 * ipath_modify_qp - modify the attributes of a queue pair
442 * @ibqp: the queue pair whose attributes we're modifying
443 * @attr: the new attributes
444 * @attr_mask: the mask of attributes to modify
445 * @udata: user data for ipathverbs.so
446 *
447 * Returns 0 on success, otherwise returns an errno.
448 */
449int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
450 int attr_mask, struct ib_udata *udata)
451{
452 struct ipath_ibdev *dev = to_idev(ibqp->device);
453 struct ipath_qp *qp = to_iqp(ibqp);
454 enum ib_qp_state cur_state, new_state;
455 int lastwqe = 0;
456 int ret;
457
458 spin_lock_irq(&qp->s_lock);
459
460 cur_state = attr_mask & IB_QP_CUR_STATE ?
461 attr->cur_qp_state : qp->state;
462 new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
463
464 if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
465 attr_mask, IB_LINK_LAYER_UNSPECIFIED))
466 goto inval;
467
468 if (attr_mask & IB_QP_AV) {
469 if (attr->ah_attr.dlid == 0 ||
470 attr->ah_attr.dlid >= IPATH_MULTICAST_LID_BASE)
471 goto inval;
472
473 if ((attr->ah_attr.ah_flags & IB_AH_GRH) &&
474 (attr->ah_attr.grh.sgid_index > 1))
475 goto inval;
476 }
477
478 if (attr_mask & IB_QP_PKEY_INDEX)
479 if (attr->pkey_index >= ipath_get_npkeys(dev->dd))
480 goto inval;
481
482 if (attr_mask & IB_QP_MIN_RNR_TIMER)
483 if (attr->min_rnr_timer > 31)
484 goto inval;
485
486 if (attr_mask & IB_QP_PORT)
487 if (attr->port_num == 0 ||
488 attr->port_num > ibqp->device->phys_port_cnt)
489 goto inval;
490
491 /*
492	 * Don't allow invalid Path MTU values, or values greater than
493	 * 2048 unless we are configured for a 4KB MTU.
494 */
495 if ((attr_mask & IB_QP_PATH_MTU) &&
496 (ib_mtu_enum_to_int(attr->path_mtu) == -1 ||
497 (attr->path_mtu > IB_MTU_2048 && !ipath_mtu4096)))
498 goto inval;
499
500 if (attr_mask & IB_QP_PATH_MIG_STATE)
501 if (attr->path_mig_state != IB_MIG_MIGRATED &&
502 attr->path_mig_state != IB_MIG_REARM)
503 goto inval;
504
505 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
506 if (attr->max_dest_rd_atomic > IPATH_MAX_RDMA_ATOMIC)
507 goto inval;
508
509 switch (new_state) {
510 case IB_QPS_RESET:
511 if (qp->state != IB_QPS_RESET) {
512 qp->state = IB_QPS_RESET;
513 spin_lock(&dev->pending_lock);
514 if (!list_empty(&qp->timerwait))
515 list_del_init(&qp->timerwait);
516 if (!list_empty(&qp->piowait))
517 list_del_init(&qp->piowait);
518 spin_unlock(&dev->pending_lock);
519 qp->s_flags &= ~IPATH_S_ANY_WAIT;
520 spin_unlock_irq(&qp->s_lock);
521 /* Stop the sending tasklet */
522 tasklet_kill(&qp->s_task);
523 wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
524 spin_lock_irq(&qp->s_lock);
525 }
526 ipath_reset_qp(qp, ibqp->qp_type);
527 break;
528
529 case IB_QPS_SQD:
530 qp->s_draining = qp->s_last != qp->s_cur;
531 qp->state = new_state;
532 break;
533
534 case IB_QPS_SQE:
535 if (qp->ibqp.qp_type == IB_QPT_RC)
536 goto inval;
537 qp->state = new_state;
538 break;
539
540 case IB_QPS_ERR:
541 lastwqe = ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
542 break;
543
544 default:
545 qp->state = new_state;
546 break;
547 }
548
549 if (attr_mask & IB_QP_PKEY_INDEX)
550 qp->s_pkey_index = attr->pkey_index;
551
552 if (attr_mask & IB_QP_DEST_QPN)
553 qp->remote_qpn = attr->dest_qp_num;
554
555 if (attr_mask & IB_QP_SQ_PSN) {
556 qp->s_psn = qp->s_next_psn = attr->sq_psn;
557 qp->s_last_psn = qp->s_next_psn - 1;
558 }
559
560 if (attr_mask & IB_QP_RQ_PSN)
561 qp->r_psn = attr->rq_psn;
562
563 if (attr_mask & IB_QP_ACCESS_FLAGS)
564 qp->qp_access_flags = attr->qp_access_flags;
565
566 if (attr_mask & IB_QP_AV) {
567 qp->remote_ah_attr = attr->ah_attr;
568 qp->s_dmult = ipath_ib_rate_to_mult(attr->ah_attr.static_rate);
569 }
570
571 if (attr_mask & IB_QP_PATH_MTU)
572 qp->path_mtu = attr->path_mtu;
573
574 if (attr_mask & IB_QP_RETRY_CNT)
575 qp->s_retry = qp->s_retry_cnt = attr->retry_cnt;
576
577 if (attr_mask & IB_QP_RNR_RETRY) {
578 qp->s_rnr_retry = attr->rnr_retry;
579 if (qp->s_rnr_retry > 7)
580 qp->s_rnr_retry = 7;
581 qp->s_rnr_retry_cnt = qp->s_rnr_retry;
582 }
583
584 if (attr_mask & IB_QP_MIN_RNR_TIMER)
585 qp->r_min_rnr_timer = attr->min_rnr_timer;
586
587 if (attr_mask & IB_QP_TIMEOUT)
588 qp->timeout = attr->timeout;
589
590 if (attr_mask & IB_QP_QKEY)
591 qp->qkey = attr->qkey;
592
593 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
594 qp->r_max_rd_atomic = attr->max_dest_rd_atomic;
595
596 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
597 qp->s_max_rd_atomic = attr->max_rd_atomic;
598
599 spin_unlock_irq(&qp->s_lock);
600
601 if (lastwqe) {
602 struct ib_event ev;
603
604 ev.device = qp->ibqp.device;
605 ev.element.qp = &qp->ibqp;
606 ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
607 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
608 }
609 ret = 0;
610 goto bail;
611
612inval:
613 spin_unlock_irq(&qp->s_lock);
614 ret = -EINVAL;
615
616bail:
617 return ret;
618}
619
620int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
621 int attr_mask, struct ib_qp_init_attr *init_attr)
622{
623 struct ipath_qp *qp = to_iqp(ibqp);
624
625 attr->qp_state = qp->state;
626 attr->cur_qp_state = attr->qp_state;
627 attr->path_mtu = qp->path_mtu;
628 attr->path_mig_state = 0;
629 attr->qkey = qp->qkey;
630 attr->rq_psn = qp->r_psn;
631 attr->sq_psn = qp->s_next_psn;
632 attr->dest_qp_num = qp->remote_qpn;
633 attr->qp_access_flags = qp->qp_access_flags;
634 attr->cap.max_send_wr = qp->s_size - 1;
635 attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
636 attr->cap.max_send_sge = qp->s_max_sge;
637 attr->cap.max_recv_sge = qp->r_rq.max_sge;
638 attr->cap.max_inline_data = 0;
639 attr->ah_attr = qp->remote_ah_attr;
640 memset(&attr->alt_ah_attr, 0, sizeof(attr->alt_ah_attr));
641 attr->pkey_index = qp->s_pkey_index;
642 attr->alt_pkey_index = 0;
643 attr->en_sqd_async_notify = 0;
644 attr->sq_draining = qp->s_draining;
645 attr->max_rd_atomic = qp->s_max_rd_atomic;
646 attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
647 attr->min_rnr_timer = qp->r_min_rnr_timer;
648 attr->port_num = 1;
649 attr->timeout = qp->timeout;
650 attr->retry_cnt = qp->s_retry_cnt;
651 attr->rnr_retry = qp->s_rnr_retry_cnt;
652 attr->alt_port_num = 0;
653 attr->alt_timeout = 0;
654
655 init_attr->event_handler = qp->ibqp.event_handler;
656 init_attr->qp_context = qp->ibqp.qp_context;
657 init_attr->send_cq = qp->ibqp.send_cq;
658 init_attr->recv_cq = qp->ibqp.recv_cq;
659 init_attr->srq = qp->ibqp.srq;
660 init_attr->cap = attr->cap;
661 if (qp->s_flags & IPATH_S_SIGNAL_REQ_WR)
662 init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
663 else
664 init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
665 init_attr->qp_type = qp->ibqp.qp_type;
666 init_attr->port_num = 1;
667 return 0;
668}
669
670/**
671 * ipath_compute_aeth - compute the AETH (syndrome + MSN)
672 * @qp: the queue pair to compute the AETH for
673 *
674 * Returns the AETH.
675 */
676__be32 ipath_compute_aeth(struct ipath_qp *qp)
677{
678 u32 aeth = qp->r_msn & IPATH_MSN_MASK;
679
680 if (qp->ibqp.srq) {
681 /*
682 * Shared receive queues don't generate credits.
683 * Set the credit field to the invalid value.
684 */
685 aeth |= IPATH_AETH_CREDIT_INVAL << IPATH_AETH_CREDIT_SHIFT;
686 } else {
687 u32 min, max, x;
688 u32 credits;
689 struct ipath_rwq *wq = qp->r_rq.wq;
690 u32 head;
691 u32 tail;
692
693 /* sanity check pointers before trusting them */
694 head = wq->head;
695 if (head >= qp->r_rq.size)
696 head = 0;
697 tail = wq->tail;
698 if (tail >= qp->r_rq.size)
699 tail = 0;
700 /*
701 * Compute the number of credits available (RWQEs).
702 * XXX Not holding the r_rq.lock here so there is a small
703 * chance that the pair of reads are not atomic.
704 */
705 credits = head - tail;
706 if ((int)credits < 0)
707 credits += qp->r_rq.size;
708 /*
709 * Binary search the credit table to find the code to
710 * use.
711 */
712 min = 0;
713 max = 31;
714 for (;;) {
715 x = (min + max) / 2;
716 if (credit_table[x] == credits)
717 break;
718 if (credit_table[x] > credits)
719 max = x;
720 else if (min == x)
721 break;
722 else
723 min = x;
724 }
725 aeth |= x << IPATH_AETH_CREDIT_SHIFT;
726 }
727 return cpu_to_be32(aeth);
728}
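
A standalone sketch of the credit computation in ipath_compute_aeth(): count the RWQEs available in the ring, then binary-search a monotonically increasing 32-entry table for the largest encodable value that does not exceed that count. The table contents below are made-up illustrative values, not the driver's actual credit_table.

#include <stdio.h>

/* Illustrative 32-entry monotone credit table (values are made up). */
static const unsigned int table[32] = {
	0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
	256, 384, 512, 768, 1024, 1536, 2048, 3072, 4096, 6144, 8192,
	12288, 16384, 24576, 32768, 49152
};

/* Pick the largest table index whose value does not exceed 'credits'. */
static unsigned int credit_code(unsigned int credits)
{
	unsigned int min = 0, max = 31, x;

	for (;;) {
		x = (min + max) / 2;
		if (table[x] == credits)
			break;
		if (table[x] > credits)
			max = x;
		else if (min == x)
			break;
		else
			min = x;
	}
	return x;
}

int main(void)
{
	unsigned int head = 3, tail = 14, size = 16;
	unsigned int credits = (head + size - tail) % size;	/* RWQEs available */

	printf("credits=%u -> code %u\n", credits, credit_code(credits));
	return 0;
}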
729
730/**
731 * ipath_create_qp - create a queue pair for a device
732 * @ibpd: the protection domain whose device we create the queue pair for
733 * @init_attr: the attributes of the queue pair
734 * @udata: unused by InfiniPath
735 *
736 * Returns the queue pair on success, otherwise returns an errno.
737 *
738 * Called by the ib_create_qp() core verbs function.
739 */
740struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
741 struct ib_qp_init_attr *init_attr,
742 struct ib_udata *udata)
743{
744 struct ipath_qp *qp;
745 int err;
746 struct ipath_swqe *swq = NULL;
747 struct ipath_ibdev *dev;
748 size_t sz;
749 size_t sg_list_sz;
750 struct ib_qp *ret;
751
752 if (init_attr->create_flags) {
753 ret = ERR_PTR(-EINVAL);
754 goto bail;
755 }
756
757 if (init_attr->cap.max_send_sge > ib_ipath_max_sges ||
758 init_attr->cap.max_send_wr > ib_ipath_max_qp_wrs) {
759 ret = ERR_PTR(-EINVAL);
760 goto bail;
761 }
762
763 /* Check receive queue parameters if no SRQ is specified. */
764 if (!init_attr->srq) {
765 if (init_attr->cap.max_recv_sge > ib_ipath_max_sges ||
766 init_attr->cap.max_recv_wr > ib_ipath_max_qp_wrs) {
767 ret = ERR_PTR(-EINVAL);
768 goto bail;
769 }
770 if (init_attr->cap.max_send_sge +
771 init_attr->cap.max_send_wr +
772 init_attr->cap.max_recv_sge +
773 init_attr->cap.max_recv_wr == 0) {
774 ret = ERR_PTR(-EINVAL);
775 goto bail;
776 }
777 }
778
779 switch (init_attr->qp_type) {
780 case IB_QPT_UC:
781 case IB_QPT_RC:
782 case IB_QPT_UD:
783 case IB_QPT_SMI:
784 case IB_QPT_GSI:
785 sz = sizeof(struct ipath_sge) *
786 init_attr->cap.max_send_sge +
787 sizeof(struct ipath_swqe);
788 swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
789 if (swq == NULL) {
790 ret = ERR_PTR(-ENOMEM);
791 goto bail;
792 }
793 sz = sizeof(*qp);
794 sg_list_sz = 0;
795 if (init_attr->srq) {
796 struct ipath_srq *srq = to_isrq(init_attr->srq);
797
798 if (srq->rq.max_sge > 1)
799 sg_list_sz = sizeof(*qp->r_sg_list) *
800 (srq->rq.max_sge - 1);
801 } else if (init_attr->cap.max_recv_sge > 1)
802 sg_list_sz = sizeof(*qp->r_sg_list) *
803 (init_attr->cap.max_recv_sge - 1);
804 qp = kmalloc(sz + sg_list_sz, GFP_KERNEL);
805 if (!qp) {
806 ret = ERR_PTR(-ENOMEM);
807 goto bail_swq;
808 }
809 if (sg_list_sz && (init_attr->qp_type == IB_QPT_UD ||
810 init_attr->qp_type == IB_QPT_SMI ||
811 init_attr->qp_type == IB_QPT_GSI)) {
812 qp->r_ud_sg_list = kmalloc(sg_list_sz, GFP_KERNEL);
813 if (!qp->r_ud_sg_list) {
814 ret = ERR_PTR(-ENOMEM);
815 goto bail_qp;
816 }
817 } else
818 qp->r_ud_sg_list = NULL;
819 if (init_attr->srq) {
820 sz = 0;
821 qp->r_rq.size = 0;
822 qp->r_rq.max_sge = 0;
823 qp->r_rq.wq = NULL;
824 init_attr->cap.max_recv_wr = 0;
825 init_attr->cap.max_recv_sge = 0;
826 } else {
827 qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
828 qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
829 sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
830 sizeof(struct ipath_rwqe);
831 qp->r_rq.wq = vmalloc_user(sizeof(struct ipath_rwq) +
832 qp->r_rq.size * sz);
833 if (!qp->r_rq.wq) {
834 ret = ERR_PTR(-ENOMEM);
835 goto bail_sg_list;
836 }
837 }
838
839 /*
840 * ib_create_qp() will initialize qp->ibqp
841 * except for qp->ibqp.qp_num.
842 */
843 spin_lock_init(&qp->s_lock);
844 spin_lock_init(&qp->r_rq.lock);
845 atomic_set(&qp->refcount, 0);
846 init_waitqueue_head(&qp->wait);
847 init_waitqueue_head(&qp->wait_dma);
848 tasklet_init(&qp->s_task, ipath_do_send, (unsigned long)qp);
849 INIT_LIST_HEAD(&qp->piowait);
850 INIT_LIST_HEAD(&qp->timerwait);
851 qp->state = IB_QPS_RESET;
852 qp->s_wq = swq;
853 qp->s_size = init_attr->cap.max_send_wr + 1;
854 qp->s_max_sge = init_attr->cap.max_send_sge;
855 if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
856 qp->s_flags = IPATH_S_SIGNAL_REQ_WR;
857 else
858 qp->s_flags = 0;
859 dev = to_idev(ibpd->device);
860 err = ipath_alloc_qpn(&dev->qp_table, qp,
861 init_attr->qp_type);
862 if (err) {
863 ret = ERR_PTR(err);
864 vfree(qp->r_rq.wq);
865 goto bail_sg_list;
866 }
867 qp->ip = NULL;
868 qp->s_tx = NULL;
869 ipath_reset_qp(qp, init_attr->qp_type);
870 break;
871
872 default:
873 /* Don't support raw QPs */
874 ret = ERR_PTR(-ENOSYS);
875 goto bail;
876 }
877
878 init_attr->cap.max_inline_data = 0;
879
880 /*
881 * Return the address of the RWQ as the offset to mmap.
882 * See ipath_mmap() for details.
883 */
884 if (udata && udata->outlen >= sizeof(__u64)) {
885 if (!qp->r_rq.wq) {
886 __u64 offset = 0;
887
888 err = ib_copy_to_udata(udata, &offset,
889 sizeof(offset));
890 if (err) {
891 ret = ERR_PTR(err);
892 goto bail_ip;
893 }
894 } else {
895 u32 s = sizeof(struct ipath_rwq) +
896 qp->r_rq.size * sz;
897
898 qp->ip =
899 ipath_create_mmap_info(dev, s,
900 ibpd->uobject->context,
901 qp->r_rq.wq);
902 if (!qp->ip) {
903 ret = ERR_PTR(-ENOMEM);
904 goto bail_ip;
905 }
906
907 err = ib_copy_to_udata(udata, &(qp->ip->offset),
908 sizeof(qp->ip->offset));
909 if (err) {
910 ret = ERR_PTR(err);
911 goto bail_ip;
912 }
913 }
914 }
915
916 spin_lock(&dev->n_qps_lock);
917 if (dev->n_qps_allocated == ib_ipath_max_qps) {
918 spin_unlock(&dev->n_qps_lock);
919 ret = ERR_PTR(-ENOMEM);
920 goto bail_ip;
921 }
922
923 dev->n_qps_allocated++;
924 spin_unlock(&dev->n_qps_lock);
925
926 if (qp->ip) {
927 spin_lock_irq(&dev->pending_lock);
928 list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
929 spin_unlock_irq(&dev->pending_lock);
930 }
931
932 ret = &qp->ibqp;
933 goto bail;
934
935bail_ip:
936 if (qp->ip)
937 kref_put(&qp->ip->ref, ipath_release_mmap_info);
938 else
939 vfree(qp->r_rq.wq);
940 ipath_free_qp(&dev->qp_table, qp);
941 free_qpn(&dev->qp_table, qp->ibqp.qp_num);
942bail_sg_list:
943 kfree(qp->r_ud_sg_list);
944bail_qp:
945 kfree(qp);
946bail_swq:
947 vfree(swq);
948bail:
949 return ret;
950}
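
The allocation math in ipath_create_qp() sizes each work queue as (entries + 1) slots, where a slot is the fixed WQE header plus room for the maximum number of SGEs. A standalone sketch of that layout arithmetic follows; the struct definitions are illustrative stand-ins for the driver's types.

#include <stdio.h>
#include <stddef.h>

/* Stand-ins for the driver's per-entry structures (sizes are illustrative). */
struct fake_sge  { void *vaddr; unsigned int length; unsigned int key; };
struct fake_swqe { unsigned long wr_id; unsigned int num_sge; };
struct fake_rwqe { unsigned long wr_id; unsigned int num_sge; };

int main(void)
{
	unsigned int max_send_wr = 64, max_send_sge = 4;
	unsigned int max_recv_wr = 32, max_recv_sge = 2;

	/* One send slot = fixed WQE header + room for max_send_sge SGEs;
	 * the driver allocates max_send_wr + 1 slots so head/tail indices
	 * can distinguish a full ring from an empty one. */
	size_t send_slot = sizeof(struct fake_swqe) +
			   max_send_sge * sizeof(struct fake_sge);
	size_t send_bytes = (size_t)(max_send_wr + 1) * send_slot;

	size_t recv_slot = sizeof(struct fake_rwqe) +
			   max_recv_sge * sizeof(struct fake_sge);
	size_t recv_bytes = (size_t)(max_recv_wr + 1) * recv_slot;

	printf("send queue: %zu bytes, recv queue: %zu bytes\n",
	       send_bytes, recv_bytes);
	return 0;
}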
951
952/**
953 * ipath_destroy_qp - destroy a queue pair
954 * @ibqp: the queue pair to destroy
955 *
956 * Returns 0 on success.
957 *
958 * Note that this can be called while the QP is actively sending or
959 * receiving!
960 */
961int ipath_destroy_qp(struct ib_qp *ibqp)
962{
963 struct ipath_qp *qp = to_iqp(ibqp);
964 struct ipath_ibdev *dev = to_idev(ibqp->device);
965
966 /* Make sure HW and driver activity is stopped. */
967 spin_lock_irq(&qp->s_lock);
968 if (qp->state != IB_QPS_RESET) {
969 qp->state = IB_QPS_RESET;
970 spin_lock(&dev->pending_lock);
971 if (!list_empty(&qp->timerwait))
972 list_del_init(&qp->timerwait);
973 if (!list_empty(&qp->piowait))
974 list_del_init(&qp->piowait);
975 spin_unlock(&dev->pending_lock);
976 qp->s_flags &= ~IPATH_S_ANY_WAIT;
977 spin_unlock_irq(&qp->s_lock);
978 /* Stop the sending tasklet */
979 tasklet_kill(&qp->s_task);
980 wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
981 } else
982 spin_unlock_irq(&qp->s_lock);
983
984 ipath_free_qp(&dev->qp_table, qp);
985
986 if (qp->s_tx) {
987 atomic_dec(&qp->refcount);
988 if (qp->s_tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF)
989 kfree(qp->s_tx->txreq.map_addr);
990 spin_lock_irq(&dev->pending_lock);
991 list_add(&qp->s_tx->txreq.list, &dev->txreq_free);
992 spin_unlock_irq(&dev->pending_lock);
993 qp->s_tx = NULL;
994 }
995
996 wait_event(qp->wait, !atomic_read(&qp->refcount));
997
998 /* all users cleaned up, mark the QPN available */
999 free_qpn(&dev->qp_table, qp->ibqp.qp_num);
1000 spin_lock(&dev->n_qps_lock);
1001 dev->n_qps_allocated--;
1002 spin_unlock(&dev->n_qps_lock);
1003
1004 if (qp->ip)
1005 kref_put(&qp->ip->ref, ipath_release_mmap_info);
1006 else
1007 vfree(qp->r_rq.wq);
1008 kfree(qp->r_ud_sg_list);
1009 vfree(qp->s_wq);
1010 kfree(qp);
1011 return 0;
1012}
1013
1014/**
1015 * ipath_init_qp_table - initialize the QP table for a device
1016 * @idev: the device whose QP table we're initializing
1017 * @size: the size of the QP table
1018 *
1019 * Returns 0 on success, otherwise returns an errno.
1020 */
1021int ipath_init_qp_table(struct ipath_ibdev *idev, int size)
1022{
1023 int i;
1024 int ret;
1025
1026 idev->qp_table.last = 1; /* QPN 0 and 1 are special. */
1027 idev->qp_table.max = size;
1028 idev->qp_table.nmaps = 1;
1029 idev->qp_table.table = kcalloc(size, sizeof(*idev->qp_table.table),
1030 GFP_KERNEL);
1031 if (idev->qp_table.table == NULL) {
1032 ret = -ENOMEM;
1033 goto bail;
1034 }
1035
1036 for (i = 0; i < ARRAY_SIZE(idev->qp_table.map); i++) {
1037 atomic_set(&idev->qp_table.map[i].n_free, BITS_PER_PAGE);
1038 idev->qp_table.map[i].page = NULL;
1039 }
1040
1041 ret = 0;
1042
1043bail:
1044 return ret;
1045}
1046
1047/**
1048 * ipath_get_credit - process the flow control credit in an AETH
1049 * @qp: the QP whose send work queue may be restarted
1050 * @aeth: the Acknowledge Extended Transport Header
1051 *
1052 * The QP s_lock should be held.
1053 */
1054void ipath_get_credit(struct ipath_qp *qp, u32 aeth)
1055{
1056 u32 credit = (aeth >> IPATH_AETH_CREDIT_SHIFT) & IPATH_AETH_CREDIT_MASK;
1057
1058 /*
1059 * If the credit is invalid, we can send
1060 * as many packets as we like. Otherwise, we have to
1061 * honor the credit field.
1062 */
1063 if (credit == IPATH_AETH_CREDIT_INVAL)
1064 qp->s_lsn = (u32) -1;
1065 else if (qp->s_lsn != (u32) -1) {
1066 /* Compute new LSN (i.e., MSN + credit) */
1067 credit = (aeth + credit_table[credit]) & IPATH_MSN_MASK;
1068 if (ipath_cmp24(credit, qp->s_lsn) > 0)
1069 qp->s_lsn = credit;
1070 }
1071
1072 /* Restart sending if it was blocked due to lack of credits. */
1073 if ((qp->s_flags & IPATH_S_WAIT_SSN_CREDIT) &&
1074 qp->s_cur != qp->s_head &&
1075 (qp->s_lsn == (u32) -1 ||
1076 ipath_cmp24(get_swqe_ptr(qp, qp->s_cur)->ssn,
1077 qp->s_lsn + 1) <= 0))
1078 ipath_schedule_send(qp);
1079}
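
ipath_get_credit() and the RC code that follows rely on ipath_cmp24() for wrap-aware ordering of 24-bit PSN/SSN values. A portable standalone equivalent is sketched below, using an explicit sign-extension step; that the driver's helper behaves this way is inferred from its usage, not quoted from its implementation.

#include <stdio.h>

/* Compare two 24-bit sequence numbers with wrap-around: returns <0, 0, >0
 * like a normal comparison, but a value just past the 24-bit wrap still
 * counts as "greater". */
static int cmp24(unsigned int a, unsigned int b)
{
	int d = (int)((a - b) & 0xffffff);	/* 24-bit difference */

	if (d & 0x800000)			/* sign-extend bit 23 */
		d -= 0x1000000;
	return d;
}

int main(void)
{
	printf("%d\n", cmp24(5, 3));			/*  2            */
	printf("%d\n", cmp24(0x000002, 0xfffffe));	/*  4 (wrapped)  */
	printf("%d\n", cmp24(0xfffffe, 0x000002));	/* -4            */
	return 0;
}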
diff --git a/drivers/staging/rdma/ipath/ipath_rc.c b/drivers/staging/rdma/ipath/ipath_rc.c
deleted file mode 100644
index d4aa53574e57..000000000000
--- a/drivers/staging/rdma/ipath/ipath_rc.c
+++ /dev/null
@@ -1,1969 +0,0 @@
1/*
2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/io.h>
35
36#include "ipath_verbs.h"
37#include "ipath_kernel.h"
38
39/* cut down ridiculously long IB macro names */
40#define OP(x) IB_OPCODE_RC_##x
41
42static u32 restart_sge(struct ipath_sge_state *ss, struct ipath_swqe *wqe,
43 u32 psn, u32 pmtu)
44{
45 u32 len;
46
47 len = ((psn - wqe->psn) & IPATH_PSN_MASK) * pmtu;
48 ss->sge = wqe->sg_list[0];
49 ss->sg_list = wqe->sg_list + 1;
50 ss->num_sge = wqe->wr.num_sge;
51 ipath_skip_sge(ss, len);
52 return wqe->length - len;
53}
54
55/**
56 * ipath_init_restart - initialize the qp->s_sge after a restart
57 * @qp: the QP whose SGE we're restarting
58 * @wqe: the work queue to initialize the QP's SGE from
59 *
60 * The QP s_lock should be held and interrupts disabled.
61 */
62static void ipath_init_restart(struct ipath_qp *qp, struct ipath_swqe *wqe)
63{
64 struct ipath_ibdev *dev;
65
66 qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn,
67 ib_mtu_enum_to_int(qp->path_mtu));
68 dev = to_idev(qp->ibqp.device);
69 spin_lock(&dev->pending_lock);
70 if (list_empty(&qp->timerwait))
71 list_add_tail(&qp->timerwait,
72 &dev->pending[dev->pending_index]);
73 spin_unlock(&dev->pending_lock);
74}
75
76/**
77 * ipath_make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
78 * @qp: a pointer to the QP
79 * @ohdr: a pointer to the IB header being constructed
80 * @pmtu: the path MTU
81 *
82 * Return 1 if constructed; otherwise, return 0.
83 * Note that we are in the responder's side of the QP context.
84 * Note the QP s_lock must be held.
85 */
86static int ipath_make_rc_ack(struct ipath_ibdev *dev, struct ipath_qp *qp,
87 struct ipath_other_headers *ohdr, u32 pmtu)
88{
89 struct ipath_ack_entry *e;
90 u32 hwords;
91 u32 len;
92 u32 bth0;
93 u32 bth2;
94
95 /* Don't send an ACK if we aren't supposed to. */
96 if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK))
97 goto bail;
98
99 /* header size in 32-bit words LRH+BTH = (8+12)/4. */
100 hwords = 5;
101
102 switch (qp->s_ack_state) {
103 case OP(RDMA_READ_RESPONSE_LAST):
104 case OP(RDMA_READ_RESPONSE_ONLY):
105 case OP(ATOMIC_ACKNOWLEDGE):
106 /*
107 * We can increment the tail pointer now that the last
108 * response has been sent instead of only being
109 * constructed.
110 */
111 if (++qp->s_tail_ack_queue > IPATH_MAX_RDMA_ATOMIC)
112 qp->s_tail_ack_queue = 0;
113 /* FALLTHROUGH */
114 case OP(SEND_ONLY):
115 case OP(ACKNOWLEDGE):
116 /* Check for no next entry in the queue. */
117 if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
118 if (qp->s_flags & IPATH_S_ACK_PENDING)
119 goto normal;
120 qp->s_ack_state = OP(ACKNOWLEDGE);
121 goto bail;
122 }
123
124 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
125 if (e->opcode == OP(RDMA_READ_REQUEST)) {
126 /* Copy SGE state in case we need to resend */
127 qp->s_ack_rdma_sge = e->rdma_sge;
128 qp->s_cur_sge = &qp->s_ack_rdma_sge;
129 len = e->rdma_sge.sge.sge_length;
130 if (len > pmtu) {
131 len = pmtu;
132 qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
133 } else {
134 qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
135 e->sent = 1;
136 }
137 ohdr->u.aeth = ipath_compute_aeth(qp);
138 hwords++;
139 qp->s_ack_rdma_psn = e->psn;
140 bth2 = qp->s_ack_rdma_psn++ & IPATH_PSN_MASK;
141 } else {
142 /* COMPARE_SWAP or FETCH_ADD */
143 qp->s_cur_sge = NULL;
144 len = 0;
145 qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
146 ohdr->u.at.aeth = ipath_compute_aeth(qp);
147 ohdr->u.at.atomic_ack_eth[0] =
148 cpu_to_be32(e->atomic_data >> 32);
149 ohdr->u.at.atomic_ack_eth[1] =
150 cpu_to_be32(e->atomic_data);
151 hwords += sizeof(ohdr->u.at) / sizeof(u32);
152 bth2 = e->psn;
153 e->sent = 1;
154 }
155 bth0 = qp->s_ack_state << 24;
156 break;
157
158 case OP(RDMA_READ_RESPONSE_FIRST):
159 qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
160 /* FALLTHROUGH */
161 case OP(RDMA_READ_RESPONSE_MIDDLE):
162 len = qp->s_ack_rdma_sge.sge.sge_length;
163 if (len > pmtu)
164 len = pmtu;
165 else {
166 ohdr->u.aeth = ipath_compute_aeth(qp);
167 hwords++;
168 qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
169 qp->s_ack_queue[qp->s_tail_ack_queue].sent = 1;
170 }
171 bth0 = qp->s_ack_state << 24;
172 bth2 = qp->s_ack_rdma_psn++ & IPATH_PSN_MASK;
173 break;
174
175 default:
176 normal:
177 /*
178 * Send a regular ACK.
179 * Set s_ack_state so that we only return to the
180 * ACKNOWLEDGE state after this ACK has actually been
181 * sent (see above).
182 */
183 qp->s_ack_state = OP(SEND_ONLY);
184 qp->s_flags &= ~IPATH_S_ACK_PENDING;
185 qp->s_cur_sge = NULL;
186 if (qp->s_nak_state)
187 ohdr->u.aeth =
188 cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) |
189 (qp->s_nak_state <<
190 IPATH_AETH_CREDIT_SHIFT));
191 else
192 ohdr->u.aeth = ipath_compute_aeth(qp);
193 hwords++;
194 len = 0;
195 bth0 = OP(ACKNOWLEDGE) << 24;
196 bth2 = qp->s_ack_psn & IPATH_PSN_MASK;
197 }
198 qp->s_hdrwords = hwords;
199 qp->s_cur_size = len;
200 ipath_make_ruc_header(dev, qp, ohdr, bth0, bth2);
201 return 1;
202
203bail:
204 return 0;
205}
206
207/**
208 * ipath_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
209 * @qp: a pointer to the QP
210 *
211 * Return 1 if constructed; otherwise, return 0.
212 */
213int ipath_make_rc_req(struct ipath_qp *qp)
214{
215 struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
216 struct ipath_other_headers *ohdr;
217 struct ipath_sge_state *ss;
218 struct ipath_swqe *wqe;
219 u32 hwords;
220 u32 len;
221 u32 bth0;
222 u32 bth2;
223 u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
224 char newreq;
225 unsigned long flags;
226 int ret = 0;
227
228 ohdr = &qp->s_hdr.u.oth;
229 if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
230 ohdr = &qp->s_hdr.u.l.oth;
231
232 /*
233 * The lock is needed to synchronize between the sending tasklet,
234 * the receive interrupt handler, and timeout resends.
235 */
236 spin_lock_irqsave(&qp->s_lock, flags);
237
238 /* Sending responses takes priority over sending requests. */
239 if ((qp->r_head_ack_queue != qp->s_tail_ack_queue ||
240 (qp->s_flags & IPATH_S_ACK_PENDING) ||
241 qp->s_ack_state != OP(ACKNOWLEDGE)) &&
242 ipath_make_rc_ack(dev, qp, ohdr, pmtu))
243 goto done;
244
245 if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)) {
246 if (!(ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND))
247 goto bail;
248 /* We are in the error state, flush the work request. */
249 if (qp->s_last == qp->s_head)
250 goto bail;
251 /* If DMAs are in progress, we can't flush immediately. */
252 if (atomic_read(&qp->s_dma_busy)) {
253 qp->s_flags |= IPATH_S_WAIT_DMA;
254 goto bail;
255 }
256 wqe = get_swqe_ptr(qp, qp->s_last);
257 ipath_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
258 goto done;
259 }
260
261 /* Leave BUSY set until RNR timeout. */
262 if (qp->s_rnr_timeout) {
263 qp->s_flags |= IPATH_S_WAITING;
264 goto bail;
265 }
266
267 /* header size in 32-bit words LRH+BTH = (8+12)/4. */
268 hwords = 5;
269 bth0 = 1 << 22; /* Set M bit */
270
271 /* Send a request. */
272 wqe = get_swqe_ptr(qp, qp->s_cur);
273 switch (qp->s_state) {
274 default:
275 if (!(ib_ipath_state_ops[qp->state] &
276 IPATH_PROCESS_NEXT_SEND_OK))
277 goto bail;
278 /*
279 * Resend an old request or start a new one.
280 *
281 * We keep track of the current SWQE so that
282 * we don't reset the "furthest progress" state
283 * if we need to back up.
284 */
285 newreq = 0;
286 if (qp->s_cur == qp->s_tail) {
287 /* Check if send work queue is empty. */
288 if (qp->s_tail == qp->s_head)
289 goto bail;
290 /*
291 * If a fence is requested, wait for previous
292 * RDMA read and atomic operations to finish.
293 */
294 if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
295 qp->s_num_rd_atomic) {
296 qp->s_flags |= IPATH_S_FENCE_PENDING;
297 goto bail;
298 }
299 wqe->psn = qp->s_next_psn;
300 newreq = 1;
301 }
302 /*
303 * Note that we have to be careful not to modify the
304 * original work request since we may need to resend
305 * it.
306 */
307 len = wqe->length;
308 ss = &qp->s_sge;
309 bth2 = 0;
310 switch (wqe->wr.opcode) {
311 case IB_WR_SEND:
312 case IB_WR_SEND_WITH_IMM:
313 /* If no credit, return. */
314 if (qp->s_lsn != (u32) -1 &&
315 ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
316 qp->s_flags |= IPATH_S_WAIT_SSN_CREDIT;
317 goto bail;
318 }
319 wqe->lpsn = wqe->psn;
320 if (len > pmtu) {
321 wqe->lpsn += (len - 1) / pmtu;
322 qp->s_state = OP(SEND_FIRST);
323 len = pmtu;
324 break;
325 }
326 if (wqe->wr.opcode == IB_WR_SEND)
327 qp->s_state = OP(SEND_ONLY);
328 else {
329 qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
330 /* Immediate data comes after the BTH */
331 ohdr->u.imm_data = wqe->wr.ex.imm_data;
332 hwords += 1;
333 }
334 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
335 bth0 |= 1 << 23;
336 bth2 = 1 << 31; /* Request ACK. */
337 if (++qp->s_cur == qp->s_size)
338 qp->s_cur = 0;
339 break;
340
341 case IB_WR_RDMA_WRITE:
342 if (newreq && qp->s_lsn != (u32) -1)
343 qp->s_lsn++;
344 /* FALLTHROUGH */
345 case IB_WR_RDMA_WRITE_WITH_IMM:
346 /* If no credit, return. */
347 if (qp->s_lsn != (u32) -1 &&
348 ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
349 qp->s_flags |= IPATH_S_WAIT_SSN_CREDIT;
350 goto bail;
351 }
352 ohdr->u.rc.reth.vaddr =
353 cpu_to_be64(wqe->rdma_wr.remote_addr);
354 ohdr->u.rc.reth.rkey =
355 cpu_to_be32(wqe->rdma_wr.rkey);
356 ohdr->u.rc.reth.length = cpu_to_be32(len);
357 hwords += sizeof(struct ib_reth) / sizeof(u32);
358 wqe->lpsn = wqe->psn;
359 if (len > pmtu) {
360 wqe->lpsn += (len - 1) / pmtu;
361 qp->s_state = OP(RDMA_WRITE_FIRST);
362 len = pmtu;
363 break;
364 }
365 if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
366 qp->s_state = OP(RDMA_WRITE_ONLY);
367 else {
368 qp->s_state =
369 OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
370 /* Immediate data comes after RETH */
371 ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
372 hwords += 1;
373 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
374 bth0 |= 1 << 23;
375 }
376 bth2 = 1 << 31; /* Request ACK. */
377 if (++qp->s_cur == qp->s_size)
378 qp->s_cur = 0;
379 break;
380
381 case IB_WR_RDMA_READ:
382 /*
383 * Don't allow more operations to be started
384 * than the QP limits allow.
385 */
386 if (newreq) {
387 if (qp->s_num_rd_atomic >=
388 qp->s_max_rd_atomic) {
389 qp->s_flags |= IPATH_S_RDMAR_PENDING;
390 goto bail;
391 }
392 qp->s_num_rd_atomic++;
393 if (qp->s_lsn != (u32) -1)
394 qp->s_lsn++;
395 /*
396 * Adjust s_next_psn to count the
397 * expected number of responses.
398 */
399 if (len > pmtu)
400 qp->s_next_psn += (len - 1) / pmtu;
401 wqe->lpsn = qp->s_next_psn++;
402 }
403 ohdr->u.rc.reth.vaddr =
404 cpu_to_be64(wqe->rdma_wr.remote_addr);
405 ohdr->u.rc.reth.rkey =
406 cpu_to_be32(wqe->rdma_wr.rkey);
407 ohdr->u.rc.reth.length = cpu_to_be32(len);
408 qp->s_state = OP(RDMA_READ_REQUEST);
409 hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
410 ss = NULL;
411 len = 0;
412 if (++qp->s_cur == qp->s_size)
413 qp->s_cur = 0;
414 break;
415
416 case IB_WR_ATOMIC_CMP_AND_SWP:
417 case IB_WR_ATOMIC_FETCH_AND_ADD:
418 /*
419 * Don't allow more operations to be started
420 * than the QP limits allow.
421 */
422 if (newreq) {
423 if (qp->s_num_rd_atomic >=
424 qp->s_max_rd_atomic) {
425 qp->s_flags |= IPATH_S_RDMAR_PENDING;
426 goto bail;
427 }
428 qp->s_num_rd_atomic++;
429 if (qp->s_lsn != (u32) -1)
430 qp->s_lsn++;
431 wqe->lpsn = wqe->psn;
432 }
433 if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
434 qp->s_state = OP(COMPARE_SWAP);
435 ohdr->u.atomic_eth.swap_data = cpu_to_be64(
436 wqe->atomic_wr.swap);
437 ohdr->u.atomic_eth.compare_data = cpu_to_be64(
438 wqe->atomic_wr.compare_add);
439 } else {
440 qp->s_state = OP(FETCH_ADD);
441 ohdr->u.atomic_eth.swap_data = cpu_to_be64(
442 wqe->atomic_wr.compare_add);
443 ohdr->u.atomic_eth.compare_data = 0;
444 }
445 ohdr->u.atomic_eth.vaddr[0] = cpu_to_be32(
446 wqe->atomic_wr.remote_addr >> 32);
447 ohdr->u.atomic_eth.vaddr[1] = cpu_to_be32(
448 wqe->atomic_wr.remote_addr);
449 ohdr->u.atomic_eth.rkey = cpu_to_be32(
450 wqe->atomic_wr.rkey);
451 hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
452 ss = NULL;
453 len = 0;
454 if (++qp->s_cur == qp->s_size)
455 qp->s_cur = 0;
456 break;
457
458 default:
459 goto bail;
460 }
461 qp->s_sge.sge = wqe->sg_list[0];
462 qp->s_sge.sg_list = wqe->sg_list + 1;
463 qp->s_sge.num_sge = wqe->wr.num_sge;
464 qp->s_len = wqe->length;
465 if (newreq) {
466 qp->s_tail++;
467 if (qp->s_tail >= qp->s_size)
468 qp->s_tail = 0;
469 }
470 bth2 |= qp->s_psn & IPATH_PSN_MASK;
471 if (wqe->wr.opcode == IB_WR_RDMA_READ)
472 qp->s_psn = wqe->lpsn + 1;
473 else {
474 qp->s_psn++;
475 if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0)
476 qp->s_next_psn = qp->s_psn;
477 }
478 /*
479 * Put the QP on the pending list so lost ACKs will cause
480 * a retry. More than one request can be pending so the
481 * QP may already be on the dev->pending list.
482 */
483 spin_lock(&dev->pending_lock);
484 if (list_empty(&qp->timerwait))
485 list_add_tail(&qp->timerwait,
486 &dev->pending[dev->pending_index]);
487 spin_unlock(&dev->pending_lock);
488 break;
489
490 case OP(RDMA_READ_RESPONSE_FIRST):
491 /*
492 * This case can only happen if a send is restarted.
493 * See ipath_restart_rc().
494 */
495 ipath_init_restart(qp, wqe);
496 /* FALLTHROUGH */
497 case OP(SEND_FIRST):
498 qp->s_state = OP(SEND_MIDDLE);
499 /* FALLTHROUGH */
500 case OP(SEND_MIDDLE):
501 bth2 = qp->s_psn++ & IPATH_PSN_MASK;
502 if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0)
503 qp->s_next_psn = qp->s_psn;
504 ss = &qp->s_sge;
505 len = qp->s_len;
506 if (len > pmtu) {
507 len = pmtu;
508 break;
509 }
510 if (wqe->wr.opcode == IB_WR_SEND)
511 qp->s_state = OP(SEND_LAST);
512 else {
513 qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
514 /* Immediate data comes after the BTH */
515 ohdr->u.imm_data = wqe->wr.ex.imm_data;
516 hwords += 1;
517 }
518 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
519 bth0 |= 1 << 23;
520 bth2 |= 1 << 31; /* Request ACK. */
521 qp->s_cur++;
522 if (qp->s_cur >= qp->s_size)
523 qp->s_cur = 0;
524 break;
525
526 case OP(RDMA_READ_RESPONSE_LAST):
527 /*
528 * This case can only happen if an RDMA write is restarted.
529 * See ipath_restart_rc().
530 */
531 ipath_init_restart(qp, wqe);
532 /* FALLTHROUGH */
533 case OP(RDMA_WRITE_FIRST):
534 qp->s_state = OP(RDMA_WRITE_MIDDLE);
535 /* FALLTHROUGH */
536 case OP(RDMA_WRITE_MIDDLE):
537 bth2 = qp->s_psn++ & IPATH_PSN_MASK;
538 if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0)
539 qp->s_next_psn = qp->s_psn;
540 ss = &qp->s_sge;
541 len = qp->s_len;
542 if (len > pmtu) {
543 len = pmtu;
544 break;
545 }
546 if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
547 qp->s_state = OP(RDMA_WRITE_LAST);
548 else {
549 qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
550 /* Immediate data comes after the BTH */
551 ohdr->u.imm_data = wqe->wr.ex.imm_data;
552 hwords += 1;
553 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
554 bth0 |= 1 << 23;
555 }
556 bth2 |= 1 << 31; /* Request ACK. */
557 qp->s_cur++;
558 if (qp->s_cur >= qp->s_size)
559 qp->s_cur = 0;
560 break;
561
562 case OP(RDMA_READ_RESPONSE_MIDDLE):
563 /*
564 * This case can only happen if an RDMA read is restarted.
565 * See ipath_restart_rc().
566 */
567 ipath_init_restart(qp, wqe);
568 len = ((qp->s_psn - wqe->psn) & IPATH_PSN_MASK) * pmtu;
569 ohdr->u.rc.reth.vaddr =
570 cpu_to_be64(wqe->rdma_wr.remote_addr + len);
571 ohdr->u.rc.reth.rkey =
572 cpu_to_be32(wqe->rdma_wr.rkey);
573 ohdr->u.rc.reth.length = cpu_to_be32(qp->s_len);
574 qp->s_state = OP(RDMA_READ_REQUEST);
575 hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
576 bth2 = qp->s_psn & IPATH_PSN_MASK;
577 qp->s_psn = wqe->lpsn + 1;
578 ss = NULL;
579 len = 0;
580 qp->s_cur++;
581 if (qp->s_cur == qp->s_size)
582 qp->s_cur = 0;
583 break;
584 }
585 if (ipath_cmp24(qp->s_psn, qp->s_last_psn + IPATH_PSN_CREDIT - 1) >= 0)
586 bth2 |= 1 << 31; /* Request ACK. */
587 qp->s_len -= len;
588 qp->s_hdrwords = hwords;
589 qp->s_cur_sge = ss;
590 qp->s_cur_size = len;
591 ipath_make_ruc_header(dev, qp, ohdr, bth0 | (qp->s_state << 24), bth2);
592done:
593 ret = 1;
594 goto unlock;
595
596bail:
597 qp->s_flags &= ~IPATH_S_BUSY;
598unlock:
599 spin_unlock_irqrestore(&qp->s_lock, flags);
600 return ret;
601}
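
When a request is longer than the path MTU, ipath_make_rc_req() above reserves one PSN per segment via wqe->lpsn = wqe->psn + (len - 1) / pmtu. A small standalone sketch of that arithmetic follows; the 24-bit mask mirrors the IPATH_PSN_MASK usage above.

#include <stdio.h>

#define PSN_MASK 0xffffff	/* PSNs are 24-bit values */

/* Last PSN used by a request that starts at first_psn and carries len
 * bytes split into pmtu-sized packets; zero-length requests and requests
 * that fit in one packet consume a single PSN. */
static unsigned int last_psn(unsigned int first_psn, unsigned int len,
			     unsigned int pmtu)
{
	if (len <= pmtu)
		return first_psn;
	return (first_psn + (len - 1) / pmtu) & PSN_MASK;
}

int main(void)
{
	/* 10000 bytes over a 2048-byte MTU -> 5 packets, PSNs 100..104. */
	printf("%u\n", last_psn(100, 10000, 2048));
	/* 4096 bytes exactly -> 2 packets, PSNs 100..101. */
	printf("%u\n", last_psn(100, 4096, 2048));
	return 0;
}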
602
603/**
604 * send_rc_ack - Construct an ACK packet and send it
605 * @qp: a pointer to the QP
606 *
607 * This is called from ipath_rc_rcv() and only uses the receive
608 * side QP state.
609 * Note that RDMA reads and atomics are handled in the
610 * send side QP state and tasklet.
611 */
612static void send_rc_ack(struct ipath_qp *qp)
613{
614 struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
615 struct ipath_devdata *dd;
616 u16 lrh0;
617 u32 bth0;
618 u32 hwords;
619 u32 __iomem *piobuf;
620 struct ipath_ib_header hdr;
621 struct ipath_other_headers *ohdr;
622 unsigned long flags;
623
624 spin_lock_irqsave(&qp->s_lock, flags);
625
626 /* Don't send an ACK or NAK if an RDMA read or atomic is pending. */
627 if (qp->r_head_ack_queue != qp->s_tail_ack_queue ||
628 (qp->s_flags & IPATH_S_ACK_PENDING) ||
629 qp->s_ack_state != OP(ACKNOWLEDGE))
630 goto queue_ack;
631
632 spin_unlock_irqrestore(&qp->s_lock, flags);
633
634 /* Don't try to send ACKs if the link isn't ACTIVE */
635 dd = dev->dd;
636 if (!(dd->ipath_flags & IPATH_LINKACTIVE))
637 goto done;
638
639 piobuf = ipath_getpiobuf(dd, 0, NULL);
640 if (!piobuf) {
641 /*
642 * We are out of PIO buffers at the moment.
643 * Pass responsibility for sending the ACK to the
644 * send tasklet so that when a PIO buffer becomes
645 * available, the ACK is sent ahead of other outgoing
646 * packets.
647 */
648 spin_lock_irqsave(&qp->s_lock, flags);
649 goto queue_ack;
650 }
651
652 /* Construct the header. */
653 ohdr = &hdr.u.oth;
654 lrh0 = IPATH_LRH_BTH;
655 /* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4. */
656 hwords = 6;
657 if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
658 hwords += ipath_make_grh(dev, &hdr.u.l.grh,
659 &qp->remote_ah_attr.grh,
660 hwords, 0);
661 ohdr = &hdr.u.l.oth;
662 lrh0 = IPATH_LRH_GRH;
663 }
664 /* read pkey_index w/o lock (it's atomic) */
665 bth0 = ipath_get_pkey(dd, qp->s_pkey_index) |
666 (OP(ACKNOWLEDGE) << 24) | (1 << 22);
667 if (qp->r_nak_state)
668 ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) |
669 (qp->r_nak_state <<
670 IPATH_AETH_CREDIT_SHIFT));
671 else
672 ohdr->u.aeth = ipath_compute_aeth(qp);
673 lrh0 |= qp->remote_ah_attr.sl << 4;
674 hdr.lrh[0] = cpu_to_be16(lrh0);
675 hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
676 hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
677 hdr.lrh[3] = cpu_to_be16(dd->ipath_lid |
678 qp->remote_ah_attr.src_path_bits);
679 ohdr->bth[0] = cpu_to_be32(bth0);
680 ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
681 ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & IPATH_PSN_MASK);
682
683 writeq(hwords + 1, piobuf);
684
685 if (dd->ipath_flags & IPATH_PIO_FLUSH_WC) {
686 u32 *hdrp = (u32 *) &hdr;
687
688 ipath_flush_wc();
689 __iowrite32_copy(piobuf + 2, hdrp, hwords - 1);
690 ipath_flush_wc();
691 __raw_writel(hdrp[hwords - 1], piobuf + hwords + 1);
692 } else
693 __iowrite32_copy(piobuf + 2, (u32 *) &hdr, hwords);
694
695 ipath_flush_wc();
696
697 dev->n_unicast_xmit++;
698 goto done;
699
700queue_ack:
701 if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK) {
702 dev->n_rc_qacks++;
703 qp->s_flags |= IPATH_S_ACK_PENDING;
704 qp->s_nak_state = qp->r_nak_state;
705 qp->s_ack_psn = qp->r_ack_psn;
706
707 /* Schedule the send tasklet. */
708 ipath_schedule_send(qp);
709 }
710 spin_unlock_irqrestore(&qp->s_lock, flags);
711done:
712 return;
713}
714
715/**
716 * reset_psn - reset the QP state to send starting from PSN
717 * @qp: the QP
718 * @psn: the packet sequence number to restart at
719 *
720 * This is called from ipath_rc_rcv() to process an incoming RC ACK
721 * for the given QP.
722 * Called at interrupt level with the QP s_lock held.
723 */
724static void reset_psn(struct ipath_qp *qp, u32 psn)
725{
726 u32 n = qp->s_last;
727 struct ipath_swqe *wqe = get_swqe_ptr(qp, n);
728 u32 opcode;
729
730 qp->s_cur = n;
731
732 /*
733 * If we are starting the request from the beginning,
734 * let the normal send code handle initialization.
735 */
736 if (ipath_cmp24(psn, wqe->psn) <= 0) {
737 qp->s_state = OP(SEND_LAST);
738 goto done;
739 }
740
741 /* Find the work request opcode corresponding to the given PSN. */
742 opcode = wqe->wr.opcode;
743 for (;;) {
744 int diff;
745
746 if (++n == qp->s_size)
747 n = 0;
748 if (n == qp->s_tail)
749 break;
750 wqe = get_swqe_ptr(qp, n);
751 diff = ipath_cmp24(psn, wqe->psn);
752 if (diff < 0)
753 break;
754 qp->s_cur = n;
755 /*
756 * If we are starting the request from the beginning,
757 * let the normal send code handle initialization.
758 */
759 if (diff == 0) {
760 qp->s_state = OP(SEND_LAST);
761 goto done;
762 }
763 opcode = wqe->wr.opcode;
764 }
765
766 /*
767 * Set the state to restart in the middle of a request.
768 * Don't change the s_sge, s_cur_sge, or s_cur_size.
769 * See ipath_make_rc_req().
770 */
771 switch (opcode) {
772 case IB_WR_SEND:
773 case IB_WR_SEND_WITH_IMM:
774 qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
775 break;
776
777 case IB_WR_RDMA_WRITE:
778 case IB_WR_RDMA_WRITE_WITH_IMM:
779 qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
780 break;
781
782 case IB_WR_RDMA_READ:
783 qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
784 break;
785
786 default:
787 /*
788 * This case shouldn't happen since there is only
789 * one PSN per request.
790 */
791 qp->s_state = OP(SEND_LAST);
792 }
793done:
794 qp->s_psn = psn;
795}
796
797/**
798 * ipath_restart_rc - back up requester to resend the last un-ACKed request
799 * @qp: the QP to restart
800 * @psn: packet sequence number for the request
802 *
803 * The QP s_lock should be held and interrupts disabled.
804 */
805void ipath_restart_rc(struct ipath_qp *qp, u32 psn)
806{
807 struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
808 struct ipath_ibdev *dev;
809
810 if (qp->s_retry == 0) {
811 ipath_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
812 ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
813 goto bail;
814 }
815 qp->s_retry--;
816
817 /*
818 * Remove the QP from the timeout queue.
819 * Note: it may already have been removed by ipath_ib_timer().
820 */
821 dev = to_idev(qp->ibqp.device);
822 spin_lock(&dev->pending_lock);
823 if (!list_empty(&qp->timerwait))
824 list_del_init(&qp->timerwait);
825 if (!list_empty(&qp->piowait))
826 list_del_init(&qp->piowait);
827 spin_unlock(&dev->pending_lock);
828
829 if (wqe->wr.opcode == IB_WR_RDMA_READ)
830 dev->n_rc_resends++;
831 else
832 dev->n_rc_resends += (qp->s_psn - psn) & IPATH_PSN_MASK;
833
834 reset_psn(qp, psn);
835 ipath_schedule_send(qp);
836
837bail:
838 return;
839}
840
841static inline void update_last_psn(struct ipath_qp *qp, u32 psn)
842{
843 qp->s_last_psn = psn;
844}
845
846/**
847 * do_rc_ack - process an incoming RC ACK
848 * @qp: the QP the ACK came in on
849 * @psn: the packet sequence number of the ACK
850 * @opcode: the opcode of the request that resulted in the ACK
851 *
852 * This is called from ipath_rc_rcv_resp() to process an incoming RC ACK
853 * for the given QP.
854 * Called at interrupt level with the QP s_lock held and interrupts disabled.
855 * Returns 1 if OK, 0 if current operation should be aborted (NAK).
856 */
857static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode,
858 u64 val)
859{
860 struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
861 struct ib_wc wc;
862 enum ib_wc_status status;
863 struct ipath_swqe *wqe;
864 int ret = 0;
865 u32 ack_psn;
866 int diff;
867
868 /*
869 * Remove the QP from the timeout queue (or RNR timeout queue).
870 * If ipath_ib_timer() has already removed it,
871 * it's OK since we hold the QP s_lock and ipath_restart_rc()
872 * just won't find anything to restart if we ACK everything.
873 */
874 spin_lock(&dev->pending_lock);
875 if (!list_empty(&qp->timerwait))
876 list_del_init(&qp->timerwait);
877 spin_unlock(&dev->pending_lock);
878
879 /*
880 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
881 * requests and implicitly NAK RDMA read and atomic requests issued
882 * before the NAK'ed request. The MSN won't include the NAK'ed
883 * request but will include any ACK'ed request(s).
884 */
885 ack_psn = psn;
886 if (aeth >> 29)
887 ack_psn--;
888 wqe = get_swqe_ptr(qp, qp->s_last);
889
890 /*
891 * The MSN might be for a later WQE than the PSN indicates so
892 * only complete WQEs that the PSN finishes.
893 */
894 while ((diff = ipath_cmp24(ack_psn, wqe->lpsn)) >= 0) {
895 /*
896 * RDMA_READ_RESPONSE_ONLY is a special case since
897 * we want to generate completion events for everything
898 * before the RDMA read, copy the data, then generate
899 * the completion for the read.
900 */
901 if (wqe->wr.opcode == IB_WR_RDMA_READ &&
902 opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
903 diff == 0) {
904 ret = 1;
905 goto bail;
906 }
907 /*
908 * If this request is an RDMA read or atomic, and the ACK is
909 * for a later operation, this ACK NAKs the RDMA read or
910 * atomic. In other words, only an RDMA_READ_LAST or ONLY
911 * can ACK an RDMA read, and likewise for atomic ops. Note
912 * that the NAK case can only happen if relaxed ordering is
913 * used and requests are sent after an RDMA read or atomic
914 * is sent but before the response is received.
915 */
916 if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
917 (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
918 ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
919 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
920 (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
921 /*
922 * The last valid PSN seen is the previous
923 * request's.
924 */
925 update_last_psn(qp, wqe->psn - 1);
926 /* Retry this request. */
927 ipath_restart_rc(qp, wqe->psn);
928 /*
929 * No need to process the ACK/NAK since we are
930 * restarting an earlier request.
931 */
932 goto bail;
933 }
934 if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
935 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
936 *(u64 *) wqe->sg_list[0].vaddr = val;
937 if (qp->s_num_rd_atomic &&
938 (wqe->wr.opcode == IB_WR_RDMA_READ ||
939 wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
940 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
941 qp->s_num_rd_atomic--;
942 /* Restart sending task if fence is complete */
943 if (((qp->s_flags & IPATH_S_FENCE_PENDING) &&
944 !qp->s_num_rd_atomic) ||
945 qp->s_flags & IPATH_S_RDMAR_PENDING)
946 ipath_schedule_send(qp);
947 }
948 /* Post a send completion queue entry if requested. */
949 if (!(qp->s_flags & IPATH_S_SIGNAL_REQ_WR) ||
950 (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
951 memset(&wc, 0, sizeof wc);
952 wc.wr_id = wqe->wr.wr_id;
953 wc.status = IB_WC_SUCCESS;
954 wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
955 wc.byte_len = wqe->length;
956 wc.qp = &qp->ibqp;
957 wc.src_qp = qp->remote_qpn;
958 wc.slid = qp->remote_ah_attr.dlid;
959 wc.sl = qp->remote_ah_attr.sl;
960 ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
961 }
962 qp->s_retry = qp->s_retry_cnt;
963 /*
964 * If we are completing a request which is in the process of
965 * being resent, we can stop resending it since we know the
966 * responder has already seen it.
967 */
968 if (qp->s_last == qp->s_cur) {
969 if (++qp->s_cur >= qp->s_size)
970 qp->s_cur = 0;
971 qp->s_last = qp->s_cur;
972 if (qp->s_last == qp->s_tail)
973 break;
974 wqe = get_swqe_ptr(qp, qp->s_cur);
975 qp->s_state = OP(SEND_LAST);
976 qp->s_psn = wqe->psn;
977 } else {
978 if (++qp->s_last >= qp->s_size)
979 qp->s_last = 0;
980 if (qp->state == IB_QPS_SQD && qp->s_last == qp->s_cur)
981 qp->s_draining = 0;
982 if (qp->s_last == qp->s_tail)
983 break;
984 wqe = get_swqe_ptr(qp, qp->s_last);
985 }
986 }
987
988 switch (aeth >> 29) {
989 case 0: /* ACK */
990 dev->n_rc_acks++;
991 /* If this is a partial ACK, reset the retransmit timer. */
992 if (qp->s_last != qp->s_tail) {
993 spin_lock(&dev->pending_lock);
994 if (list_empty(&qp->timerwait))
995 list_add_tail(&qp->timerwait,
996 &dev->pending[dev->pending_index]);
997 spin_unlock(&dev->pending_lock);
998 /*
999 * If we get a partial ACK for a resent operation,
1000 * we can stop resending the earlier packets and
1001 * continue with the next packet the receiver wants.
1002 */
1003 if (ipath_cmp24(qp->s_psn, psn) <= 0) {
1004 reset_psn(qp, psn + 1);
1005 ipath_schedule_send(qp);
1006 }
1007 } else if (ipath_cmp24(qp->s_psn, psn) <= 0) {
1008 qp->s_state = OP(SEND_LAST);
1009 qp->s_psn = psn + 1;
1010 }
1011 ipath_get_credit(qp, aeth);
1012 qp->s_rnr_retry = qp->s_rnr_retry_cnt;
1013 qp->s_retry = qp->s_retry_cnt;
1014 update_last_psn(qp, psn);
1015 ret = 1;
1016 goto bail;
1017
1018 case 1: /* RNR NAK */
1019 dev->n_rnr_naks++;
1020 if (qp->s_last == qp->s_tail)
1021 goto bail;
1022 if (qp->s_rnr_retry == 0) {
1023 status = IB_WC_RNR_RETRY_EXC_ERR;
1024 goto class_b;
1025 }
1026 if (qp->s_rnr_retry_cnt < 7)
1027 qp->s_rnr_retry--;
1028
1029 /* The last valid PSN is the previous PSN. */
1030 update_last_psn(qp, psn - 1);
1031
1032 if (wqe->wr.opcode == IB_WR_RDMA_READ)
1033 dev->n_rc_resends++;
1034 else
1035 dev->n_rc_resends +=
1036 (qp->s_psn - psn) & IPATH_PSN_MASK;
1037
1038 reset_psn(qp, psn);
1039
1040 qp->s_rnr_timeout =
1041 ib_ipath_rnr_table[(aeth >> IPATH_AETH_CREDIT_SHIFT) &
1042 IPATH_AETH_CREDIT_MASK];
1043 ipath_insert_rnr_queue(qp);
1044 ipath_schedule_send(qp);
1045 goto bail;
1046
1047 case 3: /* NAK */
1048 if (qp->s_last == qp->s_tail)
1049 goto bail;
1050 /* The last valid PSN is the previous PSN. */
1051 update_last_psn(qp, psn - 1);
1052 switch ((aeth >> IPATH_AETH_CREDIT_SHIFT) &
1053 IPATH_AETH_CREDIT_MASK) {
1054 case 0: /* PSN sequence error */
1055 dev->n_seq_naks++;
1056 /*
1057 * Back up to the responder's expected PSN.
1058 * Note that we might get a NAK in the middle of an
1059 * RDMA READ response which terminates the RDMA
1060 * READ.
1061 */
1062 ipath_restart_rc(qp, psn);
1063 break;
1064
1065 case 1: /* Invalid Request */
1066 status = IB_WC_REM_INV_REQ_ERR;
1067 dev->n_other_naks++;
1068 goto class_b;
1069
1070 case 2: /* Remote Access Error */
1071 status = IB_WC_REM_ACCESS_ERR;
1072 dev->n_other_naks++;
1073 goto class_b;
1074
1075 case 3: /* Remote Operation Error */
1076 status = IB_WC_REM_OP_ERR;
1077 dev->n_other_naks++;
1078 class_b:
1079 ipath_send_complete(qp, wqe, status);
1080 ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
1081 break;
1082
1083 default:
1084 /* Ignore other reserved NAK error codes */
1085 goto reserved;
1086 }
1087 qp->s_rnr_retry = qp->s_rnr_retry_cnt;
1088 goto bail;
1089
1090 default: /* 2: reserved */
1091 reserved:
1092 /* Ignore reserved NAK codes. */
1093 goto bail;
1094 }
1095
1096bail:
1097 return ret;
1098}
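
do_rc_ack() splits the AETH with aeth >> 29 for the ACK type and (aeth >> IPATH_AETH_CREDIT_SHIFT) & IPATH_AETH_CREDIT_MASK for the credit or NAK code. The standalone decoder below uses the field layout implied by those shifts; the constant values are assumptions chosen to match the usual 24-bit MSN / 5-bit credit split, not copied from the driver headers.

#include <stdio.h>

/* Assumed field layout, consistent with the shifts and masks above:
 * bits 31..29  ACK type (0 = ACK, 1 = RNR NAK, 3 = NAK, 2 = reserved)
 * bits 28..24  credit code or NAK reason
 * bits 23..0   MSN
 */
#define AETH_CREDIT_SHIFT 24
#define AETH_CREDIT_MASK  0x1f
#define MSN_MASK          0xffffff

static void decode_aeth(unsigned int aeth)
{
	unsigned int type   = aeth >> 29;
	unsigned int credit = (aeth >> AETH_CREDIT_SHIFT) & AETH_CREDIT_MASK;
	unsigned int msn    = aeth & MSN_MASK;

	printf("type=%u credit/code=%u msn=%u\n", type, credit, msn);
}

int main(void)
{
	decode_aeth(0x00000010u);		/* plain ACK, msn 16        */
	decode_aeth((1u << 29) | (5u << 24));	/* RNR NAK, timer code 5    */
	decode_aeth(3u << 29);			/* NAK, PSN sequence error  */
	return 0;
}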
1099
1100/**
1101 * ipath_rc_rcv_resp - process an incoming RC response packet
1102 * @dev: the device this packet came in on
1103 * @ohdr: the other headers for this packet
1104 * @data: the packet data
1105 * @tlen: the packet length
1106 * @qp: the QP for this packet
1107 * @opcode: the opcode for this packet
1108 * @psn: the packet sequence number for this packet
1109 * @hdrsize: the header length
1110 * @pmtu: the path MTU
1111 * @header_in_data: true if part of the header data is in the data buffer
1112 *
1113 * This is called from ipath_rc_rcv() to process an incoming RC response
1114 * packet for the given QP.
1115 * Called at interrupt level.
1116 */
1117static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
1118 struct ipath_other_headers *ohdr,
1119 void *data, u32 tlen,
1120 struct ipath_qp *qp,
1121 u32 opcode,
1122 u32 psn, u32 hdrsize, u32 pmtu,
1123 int header_in_data)
1124{
1125 struct ipath_swqe *wqe;
1126 enum ib_wc_status status;
1127 unsigned long flags;
1128 int diff;
1129 u32 pad;
1130 u32 aeth;
1131 u64 val;
1132
1133 spin_lock_irqsave(&qp->s_lock, flags);
1134
1135 /* Double check we can process this now that we hold the s_lock. */
1136 if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK))
1137 goto ack_done;
1138
1139 /* Ignore invalid responses. */
1140 if (ipath_cmp24(psn, qp->s_next_psn) >= 0)
1141 goto ack_done;
1142
1143 /* Ignore duplicate responses. */
1144 diff = ipath_cmp24(psn, qp->s_last_psn);
1145 if (unlikely(diff <= 0)) {
1146 /* Update credits for "ghost" ACKs */
1147 if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
1148 if (!header_in_data)
1149 aeth = be32_to_cpu(ohdr->u.aeth);
1150 else {
1151 aeth = be32_to_cpu(((__be32 *) data)[0]);
1152 data += sizeof(__be32);
1153 }
1154 if ((aeth >> 29) == 0)
1155 ipath_get_credit(qp, aeth);
1156 }
1157 goto ack_done;
1158 }
1159
1160 if (unlikely(qp->s_last == qp->s_tail))
1161 goto ack_done;
1162 wqe = get_swqe_ptr(qp, qp->s_last);
1163 status = IB_WC_SUCCESS;
1164
1165 switch (opcode) {
1166 case OP(ACKNOWLEDGE):
1167 case OP(ATOMIC_ACKNOWLEDGE):
1168 case OP(RDMA_READ_RESPONSE_FIRST):
1169 if (!header_in_data)
1170 aeth = be32_to_cpu(ohdr->u.aeth);
1171 else {
1172 aeth = be32_to_cpu(((__be32 *) data)[0]);
1173 data += sizeof(__be32);
1174 }
1175 if (opcode == OP(ATOMIC_ACKNOWLEDGE)) {
1176 if (!header_in_data) {
1177 __be32 *p = ohdr->u.at.atomic_ack_eth;
1178
1179 val = ((u64) be32_to_cpu(p[0]) << 32) |
1180 be32_to_cpu(p[1]);
1181 } else
1182 val = be64_to_cpu(((__be64 *) data)[0]);
1183 } else
1184 val = 0;
1185 if (!do_rc_ack(qp, aeth, psn, opcode, val) ||
1186 opcode != OP(RDMA_READ_RESPONSE_FIRST))
1187 goto ack_done;
1188 hdrsize += 4;
1189 wqe = get_swqe_ptr(qp, qp->s_last);
1190 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
1191 goto ack_op_err;
1192 qp->r_flags &= ~IPATH_R_RDMAR_SEQ;
1193 /*
1194 * If this is a response to a resent RDMA read, we
1195 * have to be careful to copy the data to the right
1196 * location.
1197 */
1198 qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
1199 wqe, psn, pmtu);
1200 goto read_middle;
1201
1202 case OP(RDMA_READ_RESPONSE_MIDDLE):
1203 /* no AETH, no ACK */
1204 if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
1205 dev->n_rdma_seq++;
1206 if (qp->r_flags & IPATH_R_RDMAR_SEQ)
1207 goto ack_done;
1208 qp->r_flags |= IPATH_R_RDMAR_SEQ;
1209 ipath_restart_rc(qp, qp->s_last_psn + 1);
1210 goto ack_done;
1211 }
1212 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
1213 goto ack_op_err;
1214 read_middle:
1215 if (unlikely(tlen != (hdrsize + pmtu + 4)))
1216 goto ack_len_err;
1217 if (unlikely(pmtu >= qp->s_rdma_read_len))
1218 goto ack_len_err;
1219
1220 /* We got a response so update the timeout. */
1221 spin_lock(&dev->pending_lock);
1222 if (qp->s_rnr_timeout == 0 && !list_empty(&qp->timerwait))
1223 list_move_tail(&qp->timerwait,
1224 &dev->pending[dev->pending_index]);
1225 spin_unlock(&dev->pending_lock);
1226
1227 if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
1228 qp->s_retry = qp->s_retry_cnt;
1229
1230 /*
1231 * Update the RDMA receive state but do the copy w/o
1232 * holding the locks and blocking interrupts.
1233 */
1234 qp->s_rdma_read_len -= pmtu;
1235 update_last_psn(qp, psn);
1236 spin_unlock_irqrestore(&qp->s_lock, flags);
1237 ipath_copy_sge(&qp->s_rdma_read_sge, data, pmtu);
1238 goto bail;
1239
1240 case OP(RDMA_READ_RESPONSE_ONLY):
1241 if (!header_in_data)
1242 aeth = be32_to_cpu(ohdr->u.aeth);
1243 else
1244 aeth = be32_to_cpu(((__be32 *) data)[0]);
1245 if (!do_rc_ack(qp, aeth, psn, opcode, 0))
1246 goto ack_done;
1247 /* Get the number of bytes the message was padded by. */
1248 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
1249 /*
1250 * Check that the data size is >= 0 && <= pmtu.
1251 * Remember to account for the AETH header (4) and
1252 * ICRC (4).
1253 */
1254 if (unlikely(tlen < (hdrsize + pad + 8)))
1255 goto ack_len_err;
1256 /*
1257 * If this is a response to a resent RDMA read, we
1258 * have to be careful to copy the data to the right
1259 * location.
1260 */
1261 wqe = get_swqe_ptr(qp, qp->s_last);
1262 qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
1263 wqe, psn, pmtu);
1264 goto read_last;
1265
1266 case OP(RDMA_READ_RESPONSE_LAST):
1267 /* ACKs READ req. */
1268 if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
1269 dev->n_rdma_seq++;
1270 if (qp->r_flags & IPATH_R_RDMAR_SEQ)
1271 goto ack_done;
1272 qp->r_flags |= IPATH_R_RDMAR_SEQ;
1273 ipath_restart_rc(qp, qp->s_last_psn + 1);
1274 goto ack_done;
1275 }
1276 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
1277 goto ack_op_err;
1278 /* Get the number of bytes the message was padded by. */
1279 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
1280 /*
1281 * Check that the data size is >= 1 && <= pmtu.
1282 * Remember to account for the AETH header (4) and
1283 * ICRC (4).
1284 */
1285 if (unlikely(tlen <= (hdrsize + pad + 8)))
1286 goto ack_len_err;
1287 read_last:
1288 tlen -= hdrsize + pad + 8;
1289 if (unlikely(tlen != qp->s_rdma_read_len))
1290 goto ack_len_err;
1291 if (!header_in_data)
1292 aeth = be32_to_cpu(ohdr->u.aeth);
1293 else {
1294 aeth = be32_to_cpu(((__be32 *) data)[0]);
1295 data += sizeof(__be32);
1296 }
1297 ipath_copy_sge(&qp->s_rdma_read_sge, data, tlen);
1298 (void) do_rc_ack(qp, aeth, psn,
1299 OP(RDMA_READ_RESPONSE_LAST), 0);
1300 goto ack_done;
1301 }
1302
1303ack_op_err:
1304 status = IB_WC_LOC_QP_OP_ERR;
1305 goto ack_err;
1306
1307ack_len_err:
1308 status = IB_WC_LOC_LEN_ERR;
1309ack_err:
1310 ipath_send_complete(qp, wqe, status);
1311 ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
1312ack_done:
1313 spin_unlock_irqrestore(&qp->s_lock, flags);
1314bail:
1315 return;
1316}
1317
1318/**
1319 * ipath_rc_rcv_error - process an incoming duplicate or error RC packet
1320 * @dev: the device this packet came in on
1321 * @ohdr: the other headers for this packet
1322 * @data: the packet data
1323 * @qp: the QP for this packet
1324 * @opcode: the opcode for this packet
1325 * @psn: the packet sequence number for this packet
1326 * @diff: the difference between the PSN and the expected PSN
1327 * @header_in_data: true if part of the header data is in the data buffer
1328 *
1329 * This is called from ipath_rc_rcv() to process an unexpected
1330 * incoming RC packet for the given QP.
1331 * Called at interrupt level.
1332 * Return 1 if no more processing is needed; otherwise return 0 to
1333 * schedule a response to be sent.
1334 */
1335static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
1336 struct ipath_other_headers *ohdr,
1337 void *data,
1338 struct ipath_qp *qp,
1339 u32 opcode,
1340 u32 psn,
1341 int diff,
1342 int header_in_data)
1343{
1344 struct ipath_ack_entry *e;
1345 u8 i, prev;
1346 int old_req;
1347 unsigned long flags;
1348
1349 if (diff > 0) {
1350 /*
1351 * Packet sequence error.
1352 * A NAK will ACK earlier sends and RDMA writes.
1353 * Don't queue the NAK if we already sent one.
1354 */
1355 if (!qp->r_nak_state) {
1356 qp->r_nak_state = IB_NAK_PSN_ERROR;
1357 /* Use the expected PSN. */
1358 qp->r_ack_psn = qp->r_psn;
1359 goto send_ack;
1360 }
1361 goto done;
1362 }
1363
1364 /*
1365 * Handle a duplicate request. Don't re-execute SEND, RDMA
1366 * write or atomic op. Don't NAK errors, just silently drop
1367 * the duplicate request. Note that r_sge, r_len, and
1368 * r_rcv_len may be in use so don't modify them.
1369 *
1370 * We are supposed to ACK the earliest duplicate PSN but we
1371 * can coalesce an outstanding duplicate ACK. We have to
1372 * send the earliest so that RDMA reads can be restarted at
1373 * the requester's expected PSN.
1374 *
1375 * First, find where this duplicate PSN falls within the
1376 * ACKs previously sent.
1377 */
1378 psn &= IPATH_PSN_MASK;
1379 e = NULL;
1380 old_req = 1;
1381
1382 spin_lock_irqsave(&qp->s_lock, flags);
1383 /* Double check we can process this now that we hold the s_lock. */
1384 if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK))
1385 goto unlock_done;
1386
1387 for (i = qp->r_head_ack_queue; ; i = prev) {
1388 if (i == qp->s_tail_ack_queue)
1389 old_req = 0;
1390 if (i)
1391 prev = i - 1;
1392 else
1393 prev = IPATH_MAX_RDMA_ATOMIC;
1394 if (prev == qp->r_head_ack_queue) {
1395 e = NULL;
1396 break;
1397 }
1398 e = &qp->s_ack_queue[prev];
1399 if (!e->opcode) {
1400 e = NULL;
1401 break;
1402 }
1403 if (ipath_cmp24(psn, e->psn) >= 0) {
1404 if (prev == qp->s_tail_ack_queue)
1405 old_req = 0;
1406 break;
1407 }
1408 }
1409 switch (opcode) {
1410 case OP(RDMA_READ_REQUEST): {
1411 struct ib_reth *reth;
1412 u32 offset;
1413 u32 len;
1414
1415 /*
1416 * If we didn't find the RDMA read request in the ack queue,
1417 * or the send tasklet is already backed up to send an
1418 * earlier entry, we can ignore this request.
1419 */
1420 if (!e || e->opcode != OP(RDMA_READ_REQUEST) || old_req)
1421 goto unlock_done;
1422 /* RETH comes after BTH */
1423 if (!header_in_data)
1424 reth = &ohdr->u.rc.reth;
1425 else {
1426 reth = (struct ib_reth *)data;
1427 data += sizeof(*reth);
1428 }
1429 /*
1430 * Address range must be a subset of the original
1431 * request and start on pmtu boundaries.
1432 * We reuse the old ack_queue slot since the requester
1433 * should not back up and request an earlier PSN for the
1434 * same request.
1435 */
1436 offset = ((psn - e->psn) & IPATH_PSN_MASK) *
1437 ib_mtu_enum_to_int(qp->path_mtu);
1438 len = be32_to_cpu(reth->length);
1439 if (unlikely(offset + len > e->rdma_sge.sge.sge_length))
1440 goto unlock_done;
1441 if (len != 0) {
1442 u32 rkey = be32_to_cpu(reth->rkey);
1443 u64 vaddr = be64_to_cpu(reth->vaddr);
1444 int ok;
1445
1446 ok = ipath_rkey_ok(qp, &e->rdma_sge,
1447 len, vaddr, rkey,
1448 IB_ACCESS_REMOTE_READ);
1449 if (unlikely(!ok))
1450 goto unlock_done;
1451 } else {
1452 e->rdma_sge.sg_list = NULL;
1453 e->rdma_sge.num_sge = 0;
1454 e->rdma_sge.sge.mr = NULL;
1455 e->rdma_sge.sge.vaddr = NULL;
1456 e->rdma_sge.sge.length = 0;
1457 e->rdma_sge.sge.sge_length = 0;
1458 }
1459 e->psn = psn;
1460 qp->s_ack_state = OP(ACKNOWLEDGE);
1461 qp->s_tail_ack_queue = prev;
1462 break;
1463 }
1464
1465 case OP(COMPARE_SWAP):
1466 case OP(FETCH_ADD): {
1467 /*
1468 * If we didn't find the atomic request in the ack queue
1469 * or the send tasklet is already backed up to send an
1470 * earlier entry, we can ignore this request.
1471 */
1472 if (!e || e->opcode != (u8) opcode || old_req)
1473 goto unlock_done;
1474 qp->s_ack_state = OP(ACKNOWLEDGE);
1475 qp->s_tail_ack_queue = prev;
1476 break;
1477 }
1478
1479 default:
1480 if (old_req)
1481 goto unlock_done;
1482 /*
1483 * Resend the most recent ACK if this request is
1484 * after all the previous RDMA reads and atomics.
1485 */
1486 if (i == qp->r_head_ack_queue) {
1487 spin_unlock_irqrestore(&qp->s_lock, flags);
1488 qp->r_nak_state = 0;
1489 qp->r_ack_psn = qp->r_psn - 1;
1490 goto send_ack;
1491 }
1492 /*
1493 * Try to send a simple ACK to work around a Mellanox bug
 1494		 * which doesn't accept an RDMA read response or atomic
1495 * response as an ACK for earlier SENDs or RDMA writes.
1496 */
1497 if (qp->r_head_ack_queue == qp->s_tail_ack_queue &&
1498 !(qp->s_flags & IPATH_S_ACK_PENDING) &&
1499 qp->s_ack_state == OP(ACKNOWLEDGE)) {
1500 spin_unlock_irqrestore(&qp->s_lock, flags);
1501 qp->r_nak_state = 0;
1502 qp->r_ack_psn = qp->s_ack_queue[i].psn - 1;
1503 goto send_ack;
1504 }
1505 /*
1506 * Resend the RDMA read or atomic op which
1507 * ACKs this duplicate request.
1508 */
1509 qp->s_ack_state = OP(ACKNOWLEDGE);
1510 qp->s_tail_ack_queue = i;
1511 break;
1512 }
1513 qp->r_nak_state = 0;
1514 ipath_schedule_send(qp);
1515
1516unlock_done:
1517 spin_unlock_irqrestore(&qp->s_lock, flags);
1518done:
1519 return 1;
1520
1521send_ack:
1522 return 0;
1523}
1524
1525void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err)
1526{
1527 unsigned long flags;
1528 int lastwqe;
1529
1530 spin_lock_irqsave(&qp->s_lock, flags);
1531 lastwqe = ipath_error_qp(qp, err);
1532 spin_unlock_irqrestore(&qp->s_lock, flags);
1533
1534 if (lastwqe) {
1535 struct ib_event ev;
1536
1537 ev.device = qp->ibqp.device;
1538 ev.element.qp = &qp->ibqp;
1539 ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
1540 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
1541 }
1542}
1543
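/*
 * Reclaim ack queue entry @n (callers only do this for entries already
 * sent): if it is the current tail, advance the tail past it and return
 * the ack state to idle so the slot can be reused.
 */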
1544static inline void ipath_update_ack_queue(struct ipath_qp *qp, unsigned n)
1545{
1546 unsigned next;
1547
1548 next = n + 1;
1549 if (next > IPATH_MAX_RDMA_ATOMIC)
1550 next = 0;
1551 if (n == qp->s_tail_ack_queue) {
1552 qp->s_tail_ack_queue = next;
1553 qp->s_ack_state = OP(ACKNOWLEDGE);
1554 }
1555}
1556
1557/**
1558 * ipath_rc_rcv - process an incoming RC packet
1559 * @dev: the device this packet came in on
1560 * @hdr: the header of this packet
1561 * @has_grh: true if the header has a GRH
1562 * @data: the packet data
1563 * @tlen: the packet length
1564 * @qp: the QP for this packet
1565 *
1566 * This is called from ipath_qp_rcv() to process an incoming RC packet
1567 * for the given QP.
1568 * Called at interrupt level.
1569 */
1570void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
1571 int has_grh, void *data, u32 tlen, struct ipath_qp *qp)
1572{
1573 struct ipath_other_headers *ohdr;
1574 u32 opcode;
1575 u32 hdrsize;
1576 u32 psn;
1577 u32 pad;
1578 struct ib_wc wc;
1579 u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
1580 int diff;
1581 struct ib_reth *reth;
1582 int header_in_data;
1583 unsigned long flags;
1584
1585 /* Validate the SLID. See Ch. 9.6.1.5 */
1586 if (unlikely(be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid))
1587 goto done;
1588
1589 /* Check for GRH */
1590 if (!has_grh) {
1591 ohdr = &hdr->u.oth;
1592 hdrsize = 8 + 12; /* LRH + BTH */
1593 psn = be32_to_cpu(ohdr->bth[2]);
1594 header_in_data = 0;
1595 } else {
1596 ohdr = &hdr->u.l.oth;
1597 hdrsize = 8 + 40 + 12; /* LRH + GRH + BTH */
1598 /*
1599 * The header with GRH is 60 bytes and the core driver sets
1600 * the eager header buffer size to 56 bytes so the last 4
 1601		 * bytes of the BTH header (PSN) are in the data buffer.
1602 */
1603 header_in_data = dev->dd->ipath_rcvhdrentsize == 16;
1604 if (header_in_data) {
1605 psn = be32_to_cpu(((__be32 *) data)[0]);
1606 data += sizeof(__be32);
1607 } else
1608 psn = be32_to_cpu(ohdr->bth[2]);
1609 }
1610
1611 /*
1612 * Process responses (ACKs) before anything else. Note that the
1613 * packet sequence number will be for something in the send work
1614 * queue rather than the expected receive packet sequence number.
1615 * In other words, this QP is the requester.
1616 */
1617 opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
1618 if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
1619 opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
1620 ipath_rc_rcv_resp(dev, ohdr, data, tlen, qp, opcode, psn,
1621 hdrsize, pmtu, header_in_data);
1622 goto done;
1623 }
1624
1625 /* Compute 24 bits worth of difference. */
1626 diff = ipath_cmp24(psn, qp->r_psn);
1627 if (unlikely(diff)) {
1628 if (ipath_rc_rcv_error(dev, ohdr, data, qp, opcode,
1629 psn, diff, header_in_data))
1630 goto done;
1631 goto send_ack;
1632 }
1633
1634 /* Check for opcode sequence errors. */
1635 switch (qp->r_state) {
1636 case OP(SEND_FIRST):
1637 case OP(SEND_MIDDLE):
1638 if (opcode == OP(SEND_MIDDLE) ||
1639 opcode == OP(SEND_LAST) ||
1640 opcode == OP(SEND_LAST_WITH_IMMEDIATE))
1641 break;
1642 goto nack_inv;
1643
1644 case OP(RDMA_WRITE_FIRST):
1645 case OP(RDMA_WRITE_MIDDLE):
1646 if (opcode == OP(RDMA_WRITE_MIDDLE) ||
1647 opcode == OP(RDMA_WRITE_LAST) ||
1648 opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
1649 break;
1650 goto nack_inv;
1651
1652 default:
1653 if (opcode == OP(SEND_MIDDLE) ||
1654 opcode == OP(SEND_LAST) ||
1655 opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
1656 opcode == OP(RDMA_WRITE_MIDDLE) ||
1657 opcode == OP(RDMA_WRITE_LAST) ||
1658 opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
1659 goto nack_inv;
1660 /*
1661 * Note that it is up to the requester to not send a new
1662 * RDMA read or atomic operation before receiving an ACK
1663 * for the previous operation.
1664 */
1665 break;
1666 }
1667
1668 memset(&wc, 0, sizeof wc);
1669
1670 /* OK, process the packet. */
1671 switch (opcode) {
1672 case OP(SEND_FIRST):
1673 if (!ipath_get_rwqe(qp, 0))
1674 goto rnr_nak;
1675 qp->r_rcv_len = 0;
1676 /* FALLTHROUGH */
1677 case OP(SEND_MIDDLE):
1678 case OP(RDMA_WRITE_MIDDLE):
1679 send_middle:
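		/* FIRST/MIDDLE packets carry exactly one PMTU of payload; the extra 4 bytes are the ICRC. */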
1680 /* Check for invalid length PMTU or posted rwqe len. */
1681 if (unlikely(tlen != (hdrsize + pmtu + 4)))
1682 goto nack_inv;
1683 qp->r_rcv_len += pmtu;
1684 if (unlikely(qp->r_rcv_len > qp->r_len))
1685 goto nack_inv;
1686 ipath_copy_sge(&qp->r_sge, data, pmtu);
1687 break;
1688
1689 case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
1690 /* consume RWQE */
1691 if (!ipath_get_rwqe(qp, 1))
1692 goto rnr_nak;
1693 goto send_last_imm;
1694
1695 case OP(SEND_ONLY):
1696 case OP(SEND_ONLY_WITH_IMMEDIATE):
1697 if (!ipath_get_rwqe(qp, 0))
1698 goto rnr_nak;
1699 qp->r_rcv_len = 0;
1700 if (opcode == OP(SEND_ONLY))
1701 goto send_last;
1702 /* FALLTHROUGH */
1703 case OP(SEND_LAST_WITH_IMMEDIATE):
1704 send_last_imm:
1705 if (header_in_data) {
1706 wc.ex.imm_data = *(__be32 *) data;
1707 data += sizeof(__be32);
1708 } else {
1709 /* Immediate data comes after BTH */
1710 wc.ex.imm_data = ohdr->u.imm_data;
1711 }
1712 hdrsize += 4;
1713 wc.wc_flags = IB_WC_WITH_IMM;
1714 /* FALLTHROUGH */
1715 case OP(SEND_LAST):
1716 case OP(RDMA_WRITE_LAST):
1717 send_last:
1718 /* Get the number of bytes the message was padded by. */
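		/* The 2-bit PadCnt field sits in bits 21:20 of the first BTH word. */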
1719 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
1720 /* Check for invalid length. */
1721 /* XXX LAST len should be >= 1 */
1722 if (unlikely(tlen < (hdrsize + pad + 4)))
1723 goto nack_inv;
1724 /* Don't count the CRC. */
1725 tlen -= (hdrsize + pad + 4);
1726 wc.byte_len = tlen + qp->r_rcv_len;
1727 if (unlikely(wc.byte_len > qp->r_len))
1728 goto nack_inv;
1729 ipath_copy_sge(&qp->r_sge, data, tlen);
1730 qp->r_msn++;
1731 if (!test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags))
1732 break;
1733 wc.wr_id = qp->r_wr_id;
1734 wc.status = IB_WC_SUCCESS;
1735 if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
1736 opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
1737 wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
1738 else
1739 wc.opcode = IB_WC_RECV;
1740 wc.qp = &qp->ibqp;
1741 wc.src_qp = qp->remote_qpn;
1742 wc.slid = qp->remote_ah_attr.dlid;
1743 wc.sl = qp->remote_ah_attr.sl;
1744 /* Signal completion event if the solicited bit is set. */
1745 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
1746 (ohdr->bth[0] &
1747 cpu_to_be32(1 << 23)) != 0);
1748 break;
1749
1750 case OP(RDMA_WRITE_FIRST):
1751 case OP(RDMA_WRITE_ONLY):
1752 case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
1753 if (unlikely(!(qp->qp_access_flags &
1754 IB_ACCESS_REMOTE_WRITE)))
1755 goto nack_inv;
1756 /* consume RWQE */
1757 /* RETH comes after BTH */
1758 if (!header_in_data)
1759 reth = &ohdr->u.rc.reth;
1760 else {
1761 reth = (struct ib_reth *)data;
1762 data += sizeof(*reth);
1763 }
1764 hdrsize += sizeof(*reth);
1765 qp->r_len = be32_to_cpu(reth->length);
1766 qp->r_rcv_len = 0;
1767 if (qp->r_len != 0) {
1768 u32 rkey = be32_to_cpu(reth->rkey);
1769 u64 vaddr = be64_to_cpu(reth->vaddr);
1770 int ok;
1771
1772 /* Check rkey & NAK */
1773 ok = ipath_rkey_ok(qp, &qp->r_sge,
1774 qp->r_len, vaddr, rkey,
1775 IB_ACCESS_REMOTE_WRITE);
1776 if (unlikely(!ok))
1777 goto nack_acc;
1778 } else {
1779 qp->r_sge.sg_list = NULL;
1780 qp->r_sge.sge.mr = NULL;
1781 qp->r_sge.sge.vaddr = NULL;
1782 qp->r_sge.sge.length = 0;
1783 qp->r_sge.sge.sge_length = 0;
1784 }
1785 if (opcode == OP(RDMA_WRITE_FIRST))
1786 goto send_middle;
1787 else if (opcode == OP(RDMA_WRITE_ONLY))
1788 goto send_last;
1789 if (!ipath_get_rwqe(qp, 1))
1790 goto rnr_nak;
1791 goto send_last_imm;
1792
1793 case OP(RDMA_READ_REQUEST): {
1794 struct ipath_ack_entry *e;
1795 u32 len;
1796 u8 next;
1797
1798 if (unlikely(!(qp->qp_access_flags &
1799 IB_ACCESS_REMOTE_READ)))
1800 goto nack_inv;
1801 next = qp->r_head_ack_queue + 1;
1802 if (next > IPATH_MAX_RDMA_ATOMIC)
1803 next = 0;
1804 spin_lock_irqsave(&qp->s_lock, flags);
1805 /* Double check we can process this while holding the s_lock. */
1806 if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK))
1807 goto unlock;
1808 if (unlikely(next == qp->s_tail_ack_queue)) {
1809 if (!qp->s_ack_queue[next].sent)
1810 goto nack_inv_unlck;
1811 ipath_update_ack_queue(qp, next);
1812 }
1813 e = &qp->s_ack_queue[qp->r_head_ack_queue];
1814 /* RETH comes after BTH */
1815 if (!header_in_data)
1816 reth = &ohdr->u.rc.reth;
1817 else {
1818 reth = (struct ib_reth *)data;
1819 data += sizeof(*reth);
1820 }
1821 len = be32_to_cpu(reth->length);
1822 if (len) {
1823 u32 rkey = be32_to_cpu(reth->rkey);
1824 u64 vaddr = be64_to_cpu(reth->vaddr);
1825 int ok;
1826
1827 /* Check rkey & NAK */
1828 ok = ipath_rkey_ok(qp, &e->rdma_sge, len, vaddr,
1829 rkey, IB_ACCESS_REMOTE_READ);
1830 if (unlikely(!ok))
1831 goto nack_acc_unlck;
1832 /*
1833 * Update the next expected PSN. We add 1 later
1834 * below, so only add the remainder here.
1835 */
1836 if (len > pmtu)
1837 qp->r_psn += (len - 1) / pmtu;
1838 } else {
1839 e->rdma_sge.sg_list = NULL;
1840 e->rdma_sge.num_sge = 0;
1841 e->rdma_sge.sge.mr = NULL;
1842 e->rdma_sge.sge.vaddr = NULL;
1843 e->rdma_sge.sge.length = 0;
1844 e->rdma_sge.sge.sge_length = 0;
1845 }
1846 e->opcode = opcode;
1847 e->sent = 0;
1848 e->psn = psn;
1849 /*
1850 * We need to increment the MSN here instead of when we
1851 * finish sending the result since a duplicate request would
1852 * increment it more than once.
1853 */
1854 qp->r_msn++;
1855 qp->r_psn++;
1856 qp->r_state = opcode;
1857 qp->r_nak_state = 0;
1858 qp->r_head_ack_queue = next;
1859
1860 /* Schedule the send tasklet. */
1861 ipath_schedule_send(qp);
1862
1863 goto unlock;
1864 }
1865
1866 case OP(COMPARE_SWAP):
1867 case OP(FETCH_ADD): {
1868 struct ib_atomic_eth *ateth;
1869 struct ipath_ack_entry *e;
1870 u64 vaddr;
1871 atomic64_t *maddr;
1872 u64 sdata;
1873 u32 rkey;
1874 u8 next;
1875
1876 if (unlikely(!(qp->qp_access_flags &
1877 IB_ACCESS_REMOTE_ATOMIC)))
1878 goto nack_inv;
1879 next = qp->r_head_ack_queue + 1;
1880 if (next > IPATH_MAX_RDMA_ATOMIC)
1881 next = 0;
1882 spin_lock_irqsave(&qp->s_lock, flags);
1883 /* Double check we can process this while holding the s_lock. */
1884 if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK))
1885 goto unlock;
1886 if (unlikely(next == qp->s_tail_ack_queue)) {
1887 if (!qp->s_ack_queue[next].sent)
1888 goto nack_inv_unlck;
1889 ipath_update_ack_queue(qp, next);
1890 }
1891 if (!header_in_data)
1892 ateth = &ohdr->u.atomic_eth;
1893 else
1894 ateth = (struct ib_atomic_eth *)data;
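		/* The atomic ETH carries the 64-bit VA as two big-endian 32-bit words. */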
1895 vaddr = ((u64) be32_to_cpu(ateth->vaddr[0]) << 32) |
1896 be32_to_cpu(ateth->vaddr[1]);
1897 if (unlikely(vaddr & (sizeof(u64) - 1)))
1898 goto nack_inv_unlck;
1899 rkey = be32_to_cpu(ateth->rkey);
1900 /* Check rkey & NAK */
1901 if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge,
1902 sizeof(u64), vaddr, rkey,
1903 IB_ACCESS_REMOTE_ATOMIC)))
1904 goto nack_acc_unlck;
1905 /* Perform atomic OP and save result. */
1906 maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
1907 sdata = be64_to_cpu(ateth->swap_data);
1908 e = &qp->s_ack_queue[qp->r_head_ack_queue];
1909 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
1910 (u64) atomic64_add_return(sdata, maddr) - sdata :
1911 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
1912 be64_to_cpu(ateth->compare_data),
1913 sdata);
1914 e->opcode = opcode;
1915 e->sent = 0;
1916 e->psn = psn & IPATH_PSN_MASK;
1917 qp->r_msn++;
1918 qp->r_psn++;
1919 qp->r_state = opcode;
1920 qp->r_nak_state = 0;
1921 qp->r_head_ack_queue = next;
1922
1923 /* Schedule the send tasklet. */
1924 ipath_schedule_send(qp);
1925
1926 goto unlock;
1927 }
1928
1929 default:
1930 /* NAK unknown opcodes. */
1931 goto nack_inv;
1932 }
1933 qp->r_psn++;
1934 qp->r_state = opcode;
1935 qp->r_ack_psn = psn;
1936 qp->r_nak_state = 0;
1937 /* Send an ACK if requested or required. */
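	/* Bit 31 of the BTH PSN word is the AckReq bit. */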
1938 if (psn & (1 << 31))
1939 goto send_ack;
1940 goto done;
1941
1942rnr_nak:
1943 qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
1944 qp->r_ack_psn = qp->r_psn;
1945 goto send_ack;
1946
1947nack_inv_unlck:
1948 spin_unlock_irqrestore(&qp->s_lock, flags);
1949nack_inv:
1950 ipath_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
1951 qp->r_nak_state = IB_NAK_INVALID_REQUEST;
1952 qp->r_ack_psn = qp->r_psn;
1953 goto send_ack;
1954
1955nack_acc_unlck:
1956 spin_unlock_irqrestore(&qp->s_lock, flags);
1957nack_acc:
1958 ipath_rc_error(qp, IB_WC_LOC_PROT_ERR);
1959 qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
1960 qp->r_ack_psn = qp->r_psn;
1961send_ack:
1962 send_rc_ack(qp);
1963 goto done;
1964
1965unlock:
1966 spin_unlock_irqrestore(&qp->s_lock, flags);
1967done:
1968 return;
1969}
diff --git a/drivers/staging/rdma/ipath/ipath_registers.h b/drivers/staging/rdma/ipath/ipath_registers.h
deleted file mode 100644
index 8f44d0cf3833..000000000000
--- a/drivers/staging/rdma/ipath/ipath_registers.h
+++ /dev/null
@@ -1,512 +0,0 @@
1/*
2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#ifndef _IPATH_REGISTERS_H
35#define _IPATH_REGISTERS_H
36
37/*
38 * This file should only be included by kernel source, and by the diags. It
39 * defines the registers, and their contents, for InfiniPath chips.
40 */
41
42/*
43 * These are the InfiniPath register and buffer bit definitions,
44 * that are visible to software, and needed only by the kernel
45 * and diag code. A few, that are visible to protocol and user
46 * code are in ipath_common.h. Some bits are specific
47 * to a given chip implementation, and have been moved to the
48 * chip-specific source file
49 */
50
51/* kr_revision bits */
52#define INFINIPATH_R_CHIPREVMINOR_MASK 0xFF
53#define INFINIPATH_R_CHIPREVMINOR_SHIFT 0
54#define INFINIPATH_R_CHIPREVMAJOR_MASK 0xFF
55#define INFINIPATH_R_CHIPREVMAJOR_SHIFT 8
56#define INFINIPATH_R_ARCH_MASK 0xFF
57#define INFINIPATH_R_ARCH_SHIFT 16
58#define INFINIPATH_R_SOFTWARE_MASK 0xFF
59#define INFINIPATH_R_SOFTWARE_SHIFT 24
60#define INFINIPATH_R_BOARDID_MASK 0xFF
61#define INFINIPATH_R_BOARDID_SHIFT 32
62
63/* kr_control bits */
64#define INFINIPATH_C_FREEZEMODE 0x00000002
65#define INFINIPATH_C_LINKENABLE 0x00000004
66
67/* kr_sendctrl bits */
68#define INFINIPATH_S_DISARMPIOBUF_SHIFT 16
69#define INFINIPATH_S_UPDTHRESH_SHIFT 24
70#define INFINIPATH_S_UPDTHRESH_MASK 0x1f
71
72#define IPATH_S_ABORT 0
73#define IPATH_S_PIOINTBUFAVAIL 1
74#define IPATH_S_PIOBUFAVAILUPD 2
75#define IPATH_S_PIOENABLE 3
76#define IPATH_S_SDMAINTENABLE 9
77#define IPATH_S_SDMASINGLEDESCRIPTOR 10
78#define IPATH_S_SDMAENABLE 11
79#define IPATH_S_SDMAHALT 12
80#define IPATH_S_DISARM 31
81
82#define INFINIPATH_S_ABORT (1U << IPATH_S_ABORT)
83#define INFINIPATH_S_PIOINTBUFAVAIL (1U << IPATH_S_PIOINTBUFAVAIL)
84#define INFINIPATH_S_PIOBUFAVAILUPD (1U << IPATH_S_PIOBUFAVAILUPD)
85#define INFINIPATH_S_PIOENABLE (1U << IPATH_S_PIOENABLE)
86#define INFINIPATH_S_SDMAINTENABLE (1U << IPATH_S_SDMAINTENABLE)
87#define INFINIPATH_S_SDMASINGLEDESCRIPTOR \
88 (1U << IPATH_S_SDMASINGLEDESCRIPTOR)
89#define INFINIPATH_S_SDMAENABLE (1U << IPATH_S_SDMAENABLE)
90#define INFINIPATH_S_SDMAHALT (1U << IPATH_S_SDMAHALT)
91#define INFINIPATH_S_DISARM (1U << IPATH_S_DISARM)
92
93/* kr_rcvctrl bits that are the same on multiple chips */
94#define INFINIPATH_R_PORTENABLE_SHIFT 0
95#define INFINIPATH_R_QPMAP_ENABLE (1ULL << 38)
96
97/* kr_intstatus, kr_intclear, kr_intmask bits */
98#define INFINIPATH_I_SDMAINT 0x8000000000000000ULL
99#define INFINIPATH_I_SDMADISABLED 0x4000000000000000ULL
100#define INFINIPATH_I_ERROR 0x0000000080000000ULL
101#define INFINIPATH_I_SPIOSENT 0x0000000040000000ULL
102#define INFINIPATH_I_SPIOBUFAVAIL 0x0000000020000000ULL
103#define INFINIPATH_I_GPIO 0x0000000010000000ULL
104#define INFINIPATH_I_JINT 0x0000000004000000ULL
105
106/* kr_errorstatus, kr_errorclear, kr_errormask bits */
107#define INFINIPATH_E_RFORMATERR 0x0000000000000001ULL
108#define INFINIPATH_E_RVCRC 0x0000000000000002ULL
109#define INFINIPATH_E_RICRC 0x0000000000000004ULL
110#define INFINIPATH_E_RMINPKTLEN 0x0000000000000008ULL
111#define INFINIPATH_E_RMAXPKTLEN 0x0000000000000010ULL
112#define INFINIPATH_E_RLONGPKTLEN 0x0000000000000020ULL
113#define INFINIPATH_E_RSHORTPKTLEN 0x0000000000000040ULL
114#define INFINIPATH_E_RUNEXPCHAR 0x0000000000000080ULL
115#define INFINIPATH_E_RUNSUPVL 0x0000000000000100ULL
116#define INFINIPATH_E_REBP 0x0000000000000200ULL
117#define INFINIPATH_E_RIBFLOW 0x0000000000000400ULL
118#define INFINIPATH_E_RBADVERSION 0x0000000000000800ULL
119#define INFINIPATH_E_RRCVEGRFULL 0x0000000000001000ULL
120#define INFINIPATH_E_RRCVHDRFULL 0x0000000000002000ULL
121#define INFINIPATH_E_RBADTID 0x0000000000004000ULL
122#define INFINIPATH_E_RHDRLEN 0x0000000000008000ULL
123#define INFINIPATH_E_RHDR 0x0000000000010000ULL
124#define INFINIPATH_E_RIBLOSTLINK 0x0000000000020000ULL
125#define INFINIPATH_E_SENDSPECIALTRIGGER 0x0000000008000000ULL
126#define INFINIPATH_E_SDMADISABLED 0x0000000010000000ULL
127#define INFINIPATH_E_SMINPKTLEN 0x0000000020000000ULL
128#define INFINIPATH_E_SMAXPKTLEN 0x0000000040000000ULL
129#define INFINIPATH_E_SUNDERRUN 0x0000000080000000ULL
130#define INFINIPATH_E_SPKTLEN 0x0000000100000000ULL
131#define INFINIPATH_E_SDROPPEDSMPPKT 0x0000000200000000ULL
132#define INFINIPATH_E_SDROPPEDDATAPKT 0x0000000400000000ULL
133#define INFINIPATH_E_SPIOARMLAUNCH 0x0000000800000000ULL
134#define INFINIPATH_E_SUNEXPERRPKTNUM 0x0000001000000000ULL
135#define INFINIPATH_E_SUNSUPVL 0x0000002000000000ULL
136#define INFINIPATH_E_SENDBUFMISUSE 0x0000004000000000ULL
137#define INFINIPATH_E_SDMAGENMISMATCH 0x0000008000000000ULL
138#define INFINIPATH_E_SDMAOUTOFBOUND 0x0000010000000000ULL
139#define INFINIPATH_E_SDMATAILOUTOFBOUND 0x0000020000000000ULL
140#define INFINIPATH_E_SDMABASE 0x0000040000000000ULL
141#define INFINIPATH_E_SDMA1STDESC 0x0000080000000000ULL
142#define INFINIPATH_E_SDMARPYTAG 0x0000100000000000ULL
143#define INFINIPATH_E_SDMADWEN 0x0000200000000000ULL
144#define INFINIPATH_E_SDMAMISSINGDW 0x0000400000000000ULL
145#define INFINIPATH_E_SDMAUNEXPDATA 0x0000800000000000ULL
146#define INFINIPATH_E_IBSTATUSCHANGED 0x0001000000000000ULL
147#define INFINIPATH_E_INVALIDADDR 0x0002000000000000ULL
148#define INFINIPATH_E_RESET 0x0004000000000000ULL
149#define INFINIPATH_E_HARDWARE 0x0008000000000000ULL
150#define INFINIPATH_E_SDMADESCADDRMISALIGN 0x0010000000000000ULL
151#define INFINIPATH_E_INVALIDEEPCMD 0x0020000000000000ULL
152
153/*
154 * this is used to print "common" packet errors only when the
155 * __IPATH_ERRPKTDBG bit is set in ipath_debug.
156 */
157#define INFINIPATH_E_PKTERRS ( INFINIPATH_E_SPKTLEN \
158 | INFINIPATH_E_SDROPPEDDATAPKT | INFINIPATH_E_RVCRC \
159 | INFINIPATH_E_RICRC | INFINIPATH_E_RSHORTPKTLEN \
160 | INFINIPATH_E_REBP )
161
162/* Convenience for decoding Send DMA errors */
163#define INFINIPATH_E_SDMAERRS ( \
164 INFINIPATH_E_SDMAGENMISMATCH | INFINIPATH_E_SDMAOUTOFBOUND | \
165 INFINIPATH_E_SDMATAILOUTOFBOUND | INFINIPATH_E_SDMABASE | \
166 INFINIPATH_E_SDMA1STDESC | INFINIPATH_E_SDMARPYTAG | \
167 INFINIPATH_E_SDMADWEN | INFINIPATH_E_SDMAMISSINGDW | \
168 INFINIPATH_E_SDMAUNEXPDATA | \
169 INFINIPATH_E_SDMADESCADDRMISALIGN | \
170 INFINIPATH_E_SDMADISABLED | \
171 INFINIPATH_E_SENDBUFMISUSE)
172
173/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
174/* TXEMEMPARITYERR bit 0: PIObuf, 1: PIOpbc, 2: launchfifo
175 * RXEMEMPARITYERR bit 0: rcvbuf, 1: lookupq, 2: expTID, 3: eagerTID
176 * bit 4: flag buffer, 5: datainfo, 6: header info */
177#define INFINIPATH_HWE_TXEMEMPARITYERR_MASK 0xFULL
178#define INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT 40
179#define INFINIPATH_HWE_RXEMEMPARITYERR_MASK 0x7FULL
180#define INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT 44
181#define INFINIPATH_HWE_IBCBUSTOSPCPARITYERR 0x4000000000000000ULL
182#define INFINIPATH_HWE_IBCBUSFRSPCPARITYERR 0x8000000000000000ULL
183/* txe mem parity errors (shift by INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) */
184#define INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF 0x1ULL
185#define INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC 0x2ULL
186#define INFINIPATH_HWE_TXEMEMPARITYERR_PIOLAUNCHFIFO 0x4ULL
187/* rxe mem parity errors (shift by INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) */
188#define INFINIPATH_HWE_RXEMEMPARITYERR_RCVBUF 0x01ULL
189#define INFINIPATH_HWE_RXEMEMPARITYERR_LOOKUPQ 0x02ULL
190#define INFINIPATH_HWE_RXEMEMPARITYERR_EXPTID 0x04ULL
191#define INFINIPATH_HWE_RXEMEMPARITYERR_EAGERTID 0x08ULL
192#define INFINIPATH_HWE_RXEMEMPARITYERR_FLAGBUF 0x10ULL
193#define INFINIPATH_HWE_RXEMEMPARITYERR_DATAINFO 0x20ULL
194#define INFINIPATH_HWE_RXEMEMPARITYERR_HDRINFO 0x40ULL
195/* waldo specific -- find the rest in ipath_6110.c */
196#define INFINIPATH_HWE_RXDSYNCMEMPARITYERR 0x0000000400000000ULL
197/* 6120/7220 specific -- find the rest in ipath_6120.c and ipath_7220.c */
198#define INFINIPATH_HWE_MEMBISTFAILED 0x0040000000000000ULL
199
200/* kr_hwdiagctrl bits */
201#define INFINIPATH_DC_FORCETXEMEMPARITYERR_MASK 0xFULL
202#define INFINIPATH_DC_FORCETXEMEMPARITYERR_SHIFT 40
203#define INFINIPATH_DC_FORCERXEMEMPARITYERR_MASK 0x7FULL
204#define INFINIPATH_DC_FORCERXEMEMPARITYERR_SHIFT 44
205#define INFINIPATH_DC_FORCERXDSYNCMEMPARITYERR 0x0000000400000000ULL
206#define INFINIPATH_DC_COUNTERDISABLE 0x1000000000000000ULL
207#define INFINIPATH_DC_COUNTERWREN 0x2000000000000000ULL
208#define INFINIPATH_DC_FORCEIBCBUSTOSPCPARITYERR 0x4000000000000000ULL
209#define INFINIPATH_DC_FORCEIBCBUSFRSPCPARITYERR 0x8000000000000000ULL
210
211/* kr_ibcctrl bits */
212#define INFINIPATH_IBCC_FLOWCTRLPERIOD_MASK 0xFFULL
213#define INFINIPATH_IBCC_FLOWCTRLPERIOD_SHIFT 0
214#define INFINIPATH_IBCC_FLOWCTRLWATERMARK_MASK 0xFFULL
215#define INFINIPATH_IBCC_FLOWCTRLWATERMARK_SHIFT 8
216#define INFINIPATH_IBCC_LINKINITCMD_MASK 0x3ULL
217#define INFINIPATH_IBCC_LINKINITCMD_DISABLE 1
218/* cycle through TS1/TS2 till OK */
219#define INFINIPATH_IBCC_LINKINITCMD_POLL 2
220/* wait for TS1, then go on */
221#define INFINIPATH_IBCC_LINKINITCMD_SLEEP 3
222#define INFINIPATH_IBCC_LINKINITCMD_SHIFT 16
223#define INFINIPATH_IBCC_LINKCMD_MASK 0x3ULL
224#define INFINIPATH_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */
225#define INFINIPATH_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */
226#define INFINIPATH_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */
227#define INFINIPATH_IBCC_LINKCMD_SHIFT 18
228#define INFINIPATH_IBCC_MAXPKTLEN_MASK 0x7FFULL
229#define INFINIPATH_IBCC_MAXPKTLEN_SHIFT 20
230#define INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK 0xFULL
231#define INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT 32
232#define INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK 0xFULL
233#define INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT 36
234#define INFINIPATH_IBCC_CREDITSCALE_MASK 0x7ULL
235#define INFINIPATH_IBCC_CREDITSCALE_SHIFT 40
236#define INFINIPATH_IBCC_LOOPBACK 0x8000000000000000ULL
237#define INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE 0x4000000000000000ULL
238
239/* kr_ibcstatus bits */
240#define INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT 0
241#define INFINIPATH_IBCS_LINKSTATE_MASK 0x7
242
243#define INFINIPATH_IBCS_TXREADY 0x40000000
244#define INFINIPATH_IBCS_TXCREDITOK 0x80000000
245/* link training states (shift by
246 INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) */
247#define INFINIPATH_IBCS_LT_STATE_DISABLED 0x00
248#define INFINIPATH_IBCS_LT_STATE_LINKUP 0x01
249#define INFINIPATH_IBCS_LT_STATE_POLLACTIVE 0x02
250#define INFINIPATH_IBCS_LT_STATE_POLLQUIET 0x03
251#define INFINIPATH_IBCS_LT_STATE_SLEEPDELAY 0x04
252#define INFINIPATH_IBCS_LT_STATE_SLEEPQUIET 0x05
253#define INFINIPATH_IBCS_LT_STATE_CFGDEBOUNCE 0x08
254#define INFINIPATH_IBCS_LT_STATE_CFGRCVFCFG 0x09
255#define INFINIPATH_IBCS_LT_STATE_CFGWAITRMT 0x0a
256#define INFINIPATH_IBCS_LT_STATE_CFGIDLE 0x0b
257#define INFINIPATH_IBCS_LT_STATE_RECOVERRETRAIN 0x0c
258#define INFINIPATH_IBCS_LT_STATE_RECOVERWAITRMT 0x0e
259#define INFINIPATH_IBCS_LT_STATE_RECOVERIDLE 0x0f
260/* link state machine states (shift by ibcs_ls_shift) */
261#define INFINIPATH_IBCS_L_STATE_DOWN 0x0
262#define INFINIPATH_IBCS_L_STATE_INIT 0x1
263#define INFINIPATH_IBCS_L_STATE_ARM 0x2
264#define INFINIPATH_IBCS_L_STATE_ACTIVE 0x3
265#define INFINIPATH_IBCS_L_STATE_ACT_DEFER 0x4
266
267
268/* kr_extstatus bits */
269#define INFINIPATH_EXTS_SERDESPLLLOCK 0x1
270#define INFINIPATH_EXTS_GPIOIN_MASK 0xFFFFULL
271#define INFINIPATH_EXTS_GPIOIN_SHIFT 48
272
273/* kr_extctrl bits */
274#define INFINIPATH_EXTC_GPIOINVERT_MASK 0xFFFFULL
275#define INFINIPATH_EXTC_GPIOINVERT_SHIFT 32
276#define INFINIPATH_EXTC_GPIOOE_MASK 0xFFFFULL
277#define INFINIPATH_EXTC_GPIOOE_SHIFT 48
278#define INFINIPATH_EXTC_SERDESENABLE 0x80000000ULL
279#define INFINIPATH_EXTC_SERDESCONNECT 0x40000000ULL
280#define INFINIPATH_EXTC_SERDESENTRUNKING 0x20000000ULL
281#define INFINIPATH_EXTC_SERDESDISRXFIFO 0x10000000ULL
282#define INFINIPATH_EXTC_SERDESENPLPBK1 0x08000000ULL
283#define INFINIPATH_EXTC_SERDESENPLPBK2 0x04000000ULL
284#define INFINIPATH_EXTC_SERDESENENCDEC 0x02000000ULL
285#define INFINIPATH_EXTC_LED1SECPORT_ON 0x00000020ULL
286#define INFINIPATH_EXTC_LED2SECPORT_ON 0x00000010ULL
287#define INFINIPATH_EXTC_LED1PRIPORT_ON 0x00000008ULL
288#define INFINIPATH_EXTC_LED2PRIPORT_ON 0x00000004ULL
289#define INFINIPATH_EXTC_LEDGBLOK_ON 0x00000002ULL
290#define INFINIPATH_EXTC_LEDGBLERR_OFF 0x00000001ULL
291
292/* kr_partitionkey bits */
293#define INFINIPATH_PKEY_SIZE 16
294#define INFINIPATH_PKEY_MASK 0xFFFF
295#define INFINIPATH_PKEY_DEFAULT_PKEY 0xFFFF
296
297/* kr_serdesconfig0 bits */
 298#define INFINIPATH_SERDC0_RESET_MASK	0xfULL	/* overall reset bits */
299#define INFINIPATH_SERDC0_RESET_PLL 0x10000000ULL /* pll reset */
300/* tx idle enables (per lane) */
301#define INFINIPATH_SERDC0_TXIDLE 0xF000ULL
302/* rx detect enables (per lane) */
303#define INFINIPATH_SERDC0_RXDETECT_EN 0xF0000ULL
304/* L1 Power down; use with RXDETECT, Otherwise not used on IB side */
305#define INFINIPATH_SERDC0_L1PWR_DN 0xF0ULL
306
307/* common kr_xgxsconfig bits (or safe in all, even if not implemented) */
308#define INFINIPATH_XGXS_RX_POL_SHIFT 19
309#define INFINIPATH_XGXS_RX_POL_MASK 0xfULL
310
311
312/*
313 * IPATH_PIO_MAXIBHDR is the max IB header size allowed for in our
314 * PIO send buffers. This is well beyond anything currently
315 * defined in the InfiniBand spec.
316 */
317#define IPATH_PIO_MAXIBHDR 128
318
319typedef u64 ipath_err_t;
320
321/* The following change with the type of device, so
322 * need to be part of the ipath_devdata struct, or
323 * we could have problems plugging in devices of
324 * different types (e.g. one HT, one PCIE)
325 * in one system, to be managed by one driver.
 326 * On the other hand, this file may also be included
327 * by other code, so leave the declarations here
328 * temporarily. Minor footprint issue if common-model
329 * linker used, none if C89+ linker used.
330 */
331
332/* mask of defined bits for various registers */
333extern u64 infinipath_i_bitsextant;
334extern ipath_err_t infinipath_e_bitsextant, infinipath_hwe_bitsextant;
335
336/* masks that are different in various chips, or only exist in some chips */
337extern u32 infinipath_i_rcvavail_mask, infinipath_i_rcvurg_mask;
338
339/*
340 * These are the infinipath general register numbers (not offsets).
341 * The kernel registers are used directly, those beyond the kernel
342 * registers are calculated from one of the base registers. The use of
343 * an integer type doesn't allow type-checking as thorough as, say,
344 * an enum but allows for better hiding of chip differences.
345 */
346typedef const u16 ipath_kreg, /* infinipath general registers */
347 ipath_creg, /* infinipath counter registers */
348 ipath_sreg; /* kernel-only, infinipath send registers */
349
350/*
351 * These are the chip registers common to all infinipath chips, and
352 * used both by the kernel and the diagnostics or other user code.
353 * They are all implemented such that 64 bit accesses work.
354 * Some implement no more than 32 bits. Because 64 bit reads
355 * require 2 HT cmds on opteron, we access those with 32 bit
356 * reads for efficiency (they are written as 64 bits, since
357 * the extra 32 bits are nearly free on writes, and it slightly reduces
358 * complexity). The rest are all accessed as 64 bits.
359 */
360struct ipath_kregs {
361 /* These are the 32 bit group */
362 ipath_kreg kr_control;
363 ipath_kreg kr_counterregbase;
364 ipath_kreg kr_intmask;
365 ipath_kreg kr_intstatus;
366 ipath_kreg kr_pagealign;
367 ipath_kreg kr_portcnt;
368 ipath_kreg kr_rcvtidbase;
369 ipath_kreg kr_rcvtidcnt;
370 ipath_kreg kr_rcvegrbase;
371 ipath_kreg kr_rcvegrcnt;
372 ipath_kreg kr_scratch;
373 ipath_kreg kr_sendctrl;
374 ipath_kreg kr_sendpiobufbase;
375 ipath_kreg kr_sendpiobufcnt;
376 ipath_kreg kr_sendpiosize;
377 ipath_kreg kr_sendregbase;
378 ipath_kreg kr_userregbase;
379 /* These are the 64 bit group */
380 ipath_kreg kr_debugport;
381 ipath_kreg kr_debugportselect;
382 ipath_kreg kr_errorclear;
383 ipath_kreg kr_errormask;
384 ipath_kreg kr_errorstatus;
385 ipath_kreg kr_extctrl;
386 ipath_kreg kr_extstatus;
387 ipath_kreg kr_gpio_clear;
388 ipath_kreg kr_gpio_mask;
389 ipath_kreg kr_gpio_out;
390 ipath_kreg kr_gpio_status;
391 ipath_kreg kr_hwdiagctrl;
392 ipath_kreg kr_hwerrclear;
393 ipath_kreg kr_hwerrmask;
394 ipath_kreg kr_hwerrstatus;
395 ipath_kreg kr_ibcctrl;
396 ipath_kreg kr_ibcstatus;
397 ipath_kreg kr_intblocked;
398 ipath_kreg kr_intclear;
399 ipath_kreg kr_interruptconfig;
400 ipath_kreg kr_mdio;
401 ipath_kreg kr_partitionkey;
402 ipath_kreg kr_rcvbthqp;
403 ipath_kreg kr_rcvbufbase;
404 ipath_kreg kr_rcvbufsize;
405 ipath_kreg kr_rcvctrl;
406 ipath_kreg kr_rcvhdrcnt;
407 ipath_kreg kr_rcvhdrentsize;
408 ipath_kreg kr_rcvhdrsize;
409 ipath_kreg kr_rcvintmembase;
410 ipath_kreg kr_rcvintmemsize;
411 ipath_kreg kr_revision;
412 ipath_kreg kr_sendbuffererror;
413 ipath_kreg kr_sendpioavailaddr;
414 ipath_kreg kr_serdesconfig0;
415 ipath_kreg kr_serdesconfig1;
416 ipath_kreg kr_serdesstatus;
417 ipath_kreg kr_txintmembase;
418 ipath_kreg kr_txintmemsize;
419 ipath_kreg kr_xgxsconfig;
420 ipath_kreg kr_ibpllcfg;
421 /* use these two (and the following N ports) only with
422 * ipath_k*_kreg64_port(); not *kreg64() */
423 ipath_kreg kr_rcvhdraddr;
424 ipath_kreg kr_rcvhdrtailaddr;
425
426 /* remaining registers are not present on all types of infinipath
427 chips */
428 ipath_kreg kr_rcvpktledcnt;
429 ipath_kreg kr_pcierbuftestreg0;
430 ipath_kreg kr_pcierbuftestreg1;
431 ipath_kreg kr_pcieq0serdesconfig0;
432 ipath_kreg kr_pcieq0serdesconfig1;
433 ipath_kreg kr_pcieq0serdesstatus;
434 ipath_kreg kr_pcieq1serdesconfig0;
435 ipath_kreg kr_pcieq1serdesconfig1;
436 ipath_kreg kr_pcieq1serdesstatus;
437 ipath_kreg kr_hrtbt_guid;
438 ipath_kreg kr_ibcddrctrl;
439 ipath_kreg kr_ibcddrstatus;
440 ipath_kreg kr_jintreload;
441
442 /* send dma related regs */
443 ipath_kreg kr_senddmabase;
444 ipath_kreg kr_senddmalengen;
445 ipath_kreg kr_senddmatail;
446 ipath_kreg kr_senddmahead;
447 ipath_kreg kr_senddmaheadaddr;
448 ipath_kreg kr_senddmabufmask0;
449 ipath_kreg kr_senddmabufmask1;
450 ipath_kreg kr_senddmabufmask2;
451 ipath_kreg kr_senddmastatus;
452
453 /* SerDes related regs (IBA7220-only) */
454 ipath_kreg kr_ibserdesctrl;
455 ipath_kreg kr_ib_epbacc;
456 ipath_kreg kr_ib_epbtrans;
457 ipath_kreg kr_pcie_epbacc;
458 ipath_kreg kr_pcie_epbtrans;
459 ipath_kreg kr_ib_ddsrxeq;
460};
461
462struct ipath_cregs {
463 ipath_creg cr_badformatcnt;
464 ipath_creg cr_erricrccnt;
465 ipath_creg cr_errlinkcnt;
466 ipath_creg cr_errlpcrccnt;
467 ipath_creg cr_errpkey;
468 ipath_creg cr_errrcvflowctrlcnt;
469 ipath_creg cr_err_rlencnt;
470 ipath_creg cr_errslencnt;
471 ipath_creg cr_errtidfull;
472 ipath_creg cr_errtidvalid;
473 ipath_creg cr_errvcrccnt;
474 ipath_creg cr_ibstatuschange;
475 ipath_creg cr_intcnt;
476 ipath_creg cr_invalidrlencnt;
477 ipath_creg cr_invalidslencnt;
478 ipath_creg cr_lbflowstallcnt;
479 ipath_creg cr_iblinkdowncnt;
480 ipath_creg cr_iblinkerrrecovcnt;
481 ipath_creg cr_ibsymbolerrcnt;
482 ipath_creg cr_pktrcvcnt;
483 ipath_creg cr_pktrcvflowctrlcnt;
484 ipath_creg cr_pktsendcnt;
485 ipath_creg cr_pktsendflowcnt;
486 ipath_creg cr_portovflcnt;
487 ipath_creg cr_rcvebpcnt;
488 ipath_creg cr_rcvovflcnt;
489 ipath_creg cr_rxdroppktcnt;
490 ipath_creg cr_senddropped;
491 ipath_creg cr_sendstallcnt;
492 ipath_creg cr_sendunderruncnt;
493 ipath_creg cr_unsupvlcnt;
494 ipath_creg cr_wordrcvcnt;
495 ipath_creg cr_wordsendcnt;
496 ipath_creg cr_vl15droppedpktcnt;
497 ipath_creg cr_rxotherlocalphyerrcnt;
498 ipath_creg cr_excessbufferovflcnt;
499 ipath_creg cr_locallinkintegrityerrcnt;
500 ipath_creg cr_rxvlerrcnt;
501 ipath_creg cr_rxdlidfltrcnt;
502 ipath_creg cr_psstat;
503 ipath_creg cr_psstart;
504 ipath_creg cr_psinterval;
505 ipath_creg cr_psrcvdatacount;
506 ipath_creg cr_psrcvpktscount;
507 ipath_creg cr_psxmitdatacount;
508 ipath_creg cr_psxmitpktscount;
509 ipath_creg cr_psxmitwaitcount;
510};
511
512#endif /* _IPATH_REGISTERS_H */
diff --git a/drivers/staging/rdma/ipath/ipath_ruc.c b/drivers/staging/rdma/ipath/ipath_ruc.c
deleted file mode 100644
index e541a01f1f61..000000000000
--- a/drivers/staging/rdma/ipath/ipath_ruc.c
+++ /dev/null
@@ -1,733 +0,0 @@
1/*
2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/spinlock.h>
35
36#include "ipath_verbs.h"
37#include "ipath_kernel.h"
38
39/*
40 * Convert the AETH RNR timeout code into the number of milliseconds.
41 */
42const u32 ib_ipath_rnr_table[32] = {
43 656, /* 0 */
44 1, /* 1 */
45 1, /* 2 */
46 1, /* 3 */
47 1, /* 4 */
48 1, /* 5 */
49 1, /* 6 */
50 1, /* 7 */
51 1, /* 8 */
52 1, /* 9 */
53 1, /* A */
54 1, /* B */
55 1, /* C */
56 1, /* D */
57 2, /* E */
58 2, /* F */
59 3, /* 10 */
60 4, /* 11 */
61 6, /* 12 */
62 8, /* 13 */
63 11, /* 14 */
64 16, /* 15 */
65 21, /* 16 */
66 31, /* 17 */
67 41, /* 18 */
68 62, /* 19 */
69 82, /* 1A */
70 123, /* 1B */
71 164, /* 1C */
72 246, /* 1D */
73 328, /* 1E */
74 492 /* 1F */
75};
76
77/**
78 * ipath_insert_rnr_queue - put QP on the RNR timeout list for the device
79 * @qp: the QP
80 *
81 * Called with the QP s_lock held and interrupts disabled.
82 * XXX Use a simple list for now. We might need a priority
83 * queue if we have lots of QPs waiting for RNR timeouts
84 * but that should be rare.
85 */
86void ipath_insert_rnr_queue(struct ipath_qp *qp)
87{
88 struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
89
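	/*
	 * The RNR wait list is delta-encoded: each QP's s_rnr_timeout is
	 * stored relative to the entry before it, so insertion subtracts
	 * the earlier timeouts while walking the list.
	 */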
90 /* We already did a spin_lock_irqsave(), so just use spin_lock */
91 spin_lock(&dev->pending_lock);
92 if (list_empty(&dev->rnrwait))
93 list_add(&qp->timerwait, &dev->rnrwait);
94 else {
95 struct list_head *l = &dev->rnrwait;
96 struct ipath_qp *nqp = list_entry(l->next, struct ipath_qp,
97 timerwait);
98
99 while (qp->s_rnr_timeout >= nqp->s_rnr_timeout) {
100 qp->s_rnr_timeout -= nqp->s_rnr_timeout;
101 l = l->next;
102 if (l->next == &dev->rnrwait) {
103 nqp = NULL;
104 break;
105 }
106 nqp = list_entry(l->next, struct ipath_qp,
107 timerwait);
108 }
109 if (nqp)
110 nqp->s_rnr_timeout -= qp->s_rnr_timeout;
111 list_add(&qp->timerwait, l);
112 }
113 spin_unlock(&dev->pending_lock);
114}
115
116/**
117 * ipath_init_sge - Validate a RWQE and fill in the SGE state
118 * @qp: the QP
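 * @wqe: the receive work request to validate
 * @lengthp: set to the total length of the validated SGEs
 * @ss: the SGE state to fill in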
119 *
120 * Return 1 if OK.
121 */
122int ipath_init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe,
123 u32 *lengthp, struct ipath_sge_state *ss)
124{
125 int i, j, ret;
126 struct ib_wc wc;
127
128 *lengthp = 0;
129 for (i = j = 0; i < wqe->num_sge; i++) {
130 if (wqe->sg_list[i].length == 0)
131 continue;
132 /* Check LKEY */
133 if (!ipath_lkey_ok(qp, j ? &ss->sg_list[j - 1] : &ss->sge,
134 &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
135 goto bad_lkey;
136 *lengthp += wqe->sg_list[i].length;
137 j++;
138 }
139 ss->num_sge = j;
140 ret = 1;
141 goto bail;
142
143bad_lkey:
144 memset(&wc, 0, sizeof(wc));
145 wc.wr_id = wqe->wr_id;
146 wc.status = IB_WC_LOC_PROT_ERR;
147 wc.opcode = IB_WC_RECV;
148 wc.qp = &qp->ibqp;
149 /* Signal solicited completion event. */
150 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
151 ret = 0;
152bail:
153 return ret;
154}
155
156/**
157 * ipath_get_rwqe - copy the next RWQE into the QP's RWQE
158 * @qp: the QP
159 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
160 *
161 * Return 0 if no RWQE is available, otherwise return 1.
162 *
163 * Can be called from interrupt level.
164 */
165int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
166{
167 unsigned long flags;
168 struct ipath_rq *rq;
169 struct ipath_rwq *wq;
170 struct ipath_srq *srq;
171 struct ipath_rwqe *wqe;
172 void (*handler)(struct ib_event *, void *);
173 u32 tail;
174 int ret;
175
176 if (qp->ibqp.srq) {
177 srq = to_isrq(qp->ibqp.srq);
178 handler = srq->ibsrq.event_handler;
179 rq = &srq->rq;
180 } else {
181 srq = NULL;
182 handler = NULL;
183 rq = &qp->r_rq;
184 }
185
186 spin_lock_irqsave(&rq->lock, flags);
187 if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
188 ret = 0;
189 goto unlock;
190 }
191
192 wq = rq->wq;
193 tail = wq->tail;
194 /* Validate tail before using it since it is user writable. */
195 if (tail >= rq->size)
196 tail = 0;
197 do {
198 if (unlikely(tail == wq->head)) {
199 ret = 0;
200 goto unlock;
201 }
202 /* Make sure entry is read after head index is read. */
203 smp_rmb();
204 wqe = get_rwqe_ptr(rq, tail);
205 if (++tail >= rq->size)
206 tail = 0;
207 if (wr_id_only)
208 break;
209 qp->r_sge.sg_list = qp->r_sg_list;
210 } while (!ipath_init_sge(qp, wqe, &qp->r_len, &qp->r_sge));
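	/*
	 * The loop above skips RWQEs whose LKEYs fail validation:
	 * ipath_init_sge() posts an error completion for them and returns 0.
	 */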
211 qp->r_wr_id = wqe->wr_id;
212 wq->tail = tail;
213
214 ret = 1;
215 set_bit(IPATH_R_WRID_VALID, &qp->r_aflags);
216 if (handler) {
217 u32 n;
218
219 /*
220 * validate head pointer value and compute
221 * the number of remaining WQEs.
222 */
223 n = wq->head;
224 if (n >= rq->size)
225 n = 0;
226 if (n < tail)
227 n += rq->size - tail;
228 else
229 n -= tail;
230 if (n < srq->limit) {
231 struct ib_event ev;
232
233 srq->limit = 0;
234 spin_unlock_irqrestore(&rq->lock, flags);
235 ev.device = qp->ibqp.device;
236 ev.element.srq = qp->ibqp.srq;
237 ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
238 handler(&ev, srq->ibsrq.srq_context);
239 goto bail;
240 }
241 }
242unlock:
243 spin_unlock_irqrestore(&rq->lock, flags);
244bail:
245 return ret;
246}
247
248/**
 249 * ipath_ruc_loopback - handle UC and RC loopback requests
250 * @sqp: the sending QP
251 *
252 * This is called from ipath_do_send() to
253 * forward a WQE addressed to the same HCA.
254 * Note that although we are single threaded due to the tasklet, we still
255 * have to protect against post_send(). We don't have to worry about
256 * receive interrupts since this is a connected protocol and all packets
257 * will pass through here.
258 */
259static void ipath_ruc_loopback(struct ipath_qp *sqp)
260{
261 struct ipath_ibdev *dev = to_idev(sqp->ibqp.device);
262 struct ipath_qp *qp;
263 struct ipath_swqe *wqe;
264 struct ipath_sge *sge;
265 unsigned long flags;
266 struct ib_wc wc;
267 u64 sdata;
268 atomic64_t *maddr;
269 enum ib_wc_status send_status;
270
271 /*
272 * Note that we check the responder QP state after
273 * checking the requester's state.
274 */
275 qp = ipath_lookup_qpn(&dev->qp_table, sqp->remote_qpn);
276
277 spin_lock_irqsave(&sqp->s_lock, flags);
278
279 /* Return if we are already busy processing a work request. */
280 if ((sqp->s_flags & (IPATH_S_BUSY | IPATH_S_ANY_WAIT)) ||
281 !(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_OR_FLUSH_SEND))
282 goto unlock;
283
284 sqp->s_flags |= IPATH_S_BUSY;
285
286again:
287 if (sqp->s_last == sqp->s_head)
288 goto clr_busy;
289 wqe = get_swqe_ptr(sqp, sqp->s_last);
290
 291	/* Return if it is not OK to start a new work request. */
292 if (!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_NEXT_SEND_OK)) {
293 if (!(ib_ipath_state_ops[sqp->state] & IPATH_FLUSH_SEND))
294 goto clr_busy;
295 /* We are in the error state, flush the work request. */
296 send_status = IB_WC_WR_FLUSH_ERR;
297 goto flush_send;
298 }
299
300 /*
301 * We can rely on the entry not changing without the s_lock
302 * being held until we update s_last.
303 * We increment s_cur to indicate s_last is in progress.
304 */
305 if (sqp->s_last == sqp->s_cur) {
306 if (++sqp->s_cur >= sqp->s_size)
307 sqp->s_cur = 0;
308 }
309 spin_unlock_irqrestore(&sqp->s_lock, flags);
310
311 if (!qp || !(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
312 dev->n_pkt_drops++;
313 /*
314 * For RC, the requester would timeout and retry so
315 * shortcut the timeouts and just signal too many retries.
316 */
317 if (sqp->ibqp.qp_type == IB_QPT_RC)
318 send_status = IB_WC_RETRY_EXC_ERR;
319 else
320 send_status = IB_WC_SUCCESS;
321 goto serr;
322 }
323
324 memset(&wc, 0, sizeof wc);
325 send_status = IB_WC_SUCCESS;
326
327 sqp->s_sge.sge = wqe->sg_list[0];
328 sqp->s_sge.sg_list = wqe->sg_list + 1;
329 sqp->s_sge.num_sge = wqe->wr.num_sge;
330 sqp->s_len = wqe->length;
331 switch (wqe->wr.opcode) {
332 case IB_WR_SEND_WITH_IMM:
333 wc.wc_flags = IB_WC_WITH_IMM;
334 wc.ex.imm_data = wqe->wr.ex.imm_data;
335 /* FALLTHROUGH */
336 case IB_WR_SEND:
337 if (!ipath_get_rwqe(qp, 0))
338 goto rnr_nak;
339 break;
340
341 case IB_WR_RDMA_WRITE_WITH_IMM:
342 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
343 goto inv_err;
344 wc.wc_flags = IB_WC_WITH_IMM;
345 wc.ex.imm_data = wqe->wr.ex.imm_data;
346 if (!ipath_get_rwqe(qp, 1))
347 goto rnr_nak;
348 /* FALLTHROUGH */
349 case IB_WR_RDMA_WRITE:
350 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
351 goto inv_err;
352 if (wqe->length == 0)
353 break;
354 if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, wqe->length,
355 wqe->rdma_wr.remote_addr,
356 wqe->rdma_wr.rkey,
357 IB_ACCESS_REMOTE_WRITE)))
358 goto acc_err;
359 break;
360
361 case IB_WR_RDMA_READ:
362 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
363 goto inv_err;
364 if (unlikely(!ipath_rkey_ok(qp, &sqp->s_sge, wqe->length,
365 wqe->rdma_wr.remote_addr,
366 wqe->rdma_wr.rkey,
367 IB_ACCESS_REMOTE_READ)))
368 goto acc_err;
369 qp->r_sge.sge = wqe->sg_list[0];
370 qp->r_sge.sg_list = wqe->sg_list + 1;
371 qp->r_sge.num_sge = wqe->wr.num_sge;
372 break;
373
374 case IB_WR_ATOMIC_CMP_AND_SWP:
375 case IB_WR_ATOMIC_FETCH_AND_ADD:
376 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
377 goto inv_err;
378 if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, sizeof(u64),
379 wqe->atomic_wr.remote_addr,
380 wqe->atomic_wr.rkey,
381 IB_ACCESS_REMOTE_ATOMIC)))
382 goto acc_err;
383 /* Perform atomic OP and save result. */
384 maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
385 sdata = wqe->atomic_wr.compare_add;
386 *(u64 *) sqp->s_sge.sge.vaddr =
387 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
388 (u64) atomic64_add_return(sdata, maddr) - sdata :
389 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
390 sdata, wqe->atomic_wr.swap);
391 goto send_comp;
392
393 default:
394 send_status = IB_WC_LOC_QP_OP_ERR;
395 goto serr;
396 }
397
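	/*
	 * Copy the payload through the local SGE lists one contiguous chunk
	 * at a time, advancing the source SGE across its memory-region
	 * segments as needed.
	 */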
398 sge = &sqp->s_sge.sge;
399 while (sqp->s_len) {
400 u32 len = sqp->s_len;
401
402 if (len > sge->length)
403 len = sge->length;
404 if (len > sge->sge_length)
405 len = sge->sge_length;
406 BUG_ON(len == 0);
407 ipath_copy_sge(&qp->r_sge, sge->vaddr, len);
408 sge->vaddr += len;
409 sge->length -= len;
410 sge->sge_length -= len;
411 if (sge->sge_length == 0) {
412 if (--sqp->s_sge.num_sge)
413 *sge = *sqp->s_sge.sg_list++;
414 } else if (sge->length == 0 && sge->mr != NULL) {
415 if (++sge->n >= IPATH_SEGSZ) {
416 if (++sge->m >= sge->mr->mapsz)
417 break;
418 sge->n = 0;
419 }
420 sge->vaddr =
421 sge->mr->map[sge->m]->segs[sge->n].vaddr;
422 sge->length =
423 sge->mr->map[sge->m]->segs[sge->n].length;
424 }
425 sqp->s_len -= len;
426 }
427
428 if (!test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags))
429 goto send_comp;
430
431 if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
432 wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
433 else
434 wc.opcode = IB_WC_RECV;
435 wc.wr_id = qp->r_wr_id;
436 wc.status = IB_WC_SUCCESS;
437 wc.byte_len = wqe->length;
438 wc.qp = &qp->ibqp;
439 wc.src_qp = qp->remote_qpn;
440 wc.slid = qp->remote_ah_attr.dlid;
441 wc.sl = qp->remote_ah_attr.sl;
442 wc.port_num = 1;
443 /* Signal completion event if the solicited bit is set. */
444 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
445 wqe->wr.send_flags & IB_SEND_SOLICITED);
446
447send_comp:
448 spin_lock_irqsave(&sqp->s_lock, flags);
449flush_send:
450 sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
451 ipath_send_complete(sqp, wqe, send_status);
452 goto again;
453
454rnr_nak:
455 /* Handle RNR NAK */
456 if (qp->ibqp.qp_type == IB_QPT_UC)
457 goto send_comp;
458 /*
459 * Note: we don't need the s_lock held since the BUSY flag
460 * makes this single threaded.
461 */
462 if (sqp->s_rnr_retry == 0) {
463 send_status = IB_WC_RNR_RETRY_EXC_ERR;
464 goto serr;
465 }
466 if (sqp->s_rnr_retry_cnt < 7)
467 sqp->s_rnr_retry--;
468 spin_lock_irqsave(&sqp->s_lock, flags);
469 if (!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_RECV_OK))
470 goto clr_busy;
471 sqp->s_flags |= IPATH_S_WAITING;
472 dev->n_rnr_naks++;
473 sqp->s_rnr_timeout = ib_ipath_rnr_table[qp->r_min_rnr_timer];
474 ipath_insert_rnr_queue(sqp);
475 goto clr_busy;
476
477inv_err:
478 send_status = IB_WC_REM_INV_REQ_ERR;
479 wc.status = IB_WC_LOC_QP_OP_ERR;
480 goto err;
481
482acc_err:
483 send_status = IB_WC_REM_ACCESS_ERR;
484 wc.status = IB_WC_LOC_PROT_ERR;
485err:
486 /* responder goes to error state */
487 ipath_rc_error(qp, wc.status);
488
489serr:
490 spin_lock_irqsave(&sqp->s_lock, flags);
491 ipath_send_complete(sqp, wqe, send_status);
492 if (sqp->ibqp.qp_type == IB_QPT_RC) {
493 int lastwqe = ipath_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
494
495 sqp->s_flags &= ~IPATH_S_BUSY;
496 spin_unlock_irqrestore(&sqp->s_lock, flags);
497 if (lastwqe) {
498 struct ib_event ev;
499
500 ev.device = sqp->ibqp.device;
501 ev.element.qp = &sqp->ibqp;
502 ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
503 sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
504 }
505 goto done;
506 }
507clr_busy:
508 sqp->s_flags &= ~IPATH_S_BUSY;
509unlock:
510 spin_unlock_irqrestore(&sqp->s_lock, flags);
511done:
512 if (qp && atomic_dec_and_test(&qp->refcount))
513 wake_up(&qp->wait);
514}
515
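/*
 * Ask the chip for a PIO buffer-available interrupt so a stalled send can
 * be retried; only needed when send DMA is absent or for SMI QPs, which
 * are sent by PIO.
 */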
516static void want_buffer(struct ipath_devdata *dd, struct ipath_qp *qp)
517{
518 if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA) ||
519 qp->ibqp.qp_type == IB_QPT_SMI) {
520 unsigned long flags;
521
522 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
523 dd->ipath_sendctrl |= INFINIPATH_S_PIOINTBUFAVAIL;
524 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
525 dd->ipath_sendctrl);
526 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
527 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
528 }
529}
530
531/**
532 * ipath_no_bufs_available - tell the layer driver we need buffers
533 * @qp: the QP that caused the problem
534 * @dev: the device we ran out of buffers on
535 *
536 * Called when we run out of PIO buffers.
537 * If we are now in the error state, return zero to flush the
538 * send work request.
539 */
540static int ipath_no_bufs_available(struct ipath_qp *qp,
541 struct ipath_ibdev *dev)
542{
543 unsigned long flags;
544 int ret = 1;
545
546 /*
547 * Note that as soon as want_buffer() is called and
548 * possibly before it returns, ipath_ib_piobufavail()
549 * could be called. Therefore, put QP on the piowait list before
550 * enabling the PIO avail interrupt.
551 */
552 spin_lock_irqsave(&qp->s_lock, flags);
553 if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) {
554 dev->n_piowait++;
555 qp->s_flags |= IPATH_S_WAITING;
556 qp->s_flags &= ~IPATH_S_BUSY;
557 spin_lock(&dev->pending_lock);
558 if (list_empty(&qp->piowait))
559 list_add_tail(&qp->piowait, &dev->piowait);
560 spin_unlock(&dev->pending_lock);
561 } else
562 ret = 0;
563 spin_unlock_irqrestore(&qp->s_lock, flags);
564 if (ret)
565 want_buffer(dev->dd, qp);
566 return ret;
567}
568
569/**
570 * ipath_make_grh - construct a GRH header
571 * @dev: a pointer to the ipath device
572 * @hdr: a pointer to the GRH header being constructed
573 * @grh: the global route address to send to
574 * @hwords: the number of 32 bit words of header being sent
575 * @nwords: the number of 32 bit words of data being sent
576 *
577 * Return the size of the header in 32 bit words.
578 */
579u32 ipath_make_grh(struct ipath_ibdev *dev, struct ib_grh *hdr,
580 struct ib_global_route *grh, u32 hwords, u32 nwords)
581{
582 hdr->version_tclass_flow =
583 cpu_to_be32((6 << 28) |
584 (grh->traffic_class << 20) |
585 grh->flow_label);
586 hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
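	/*
	 * PayLen counts the bytes that follow the GRH: the headers beyond
	 * the 2-word LRH (the GRH itself is not yet included in hwords),
	 * the padded payload, and the ICRC.
	 */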
587 /* next_hdr is defined by C8-7 in ch. 8.4.1 */
588 hdr->next_hdr = 0x1B;
589 hdr->hop_limit = grh->hop_limit;
590 /* The SGID is 32-bit aligned. */
591 hdr->sgid.global.subnet_prefix = dev->gid_prefix;
592 hdr->sgid.global.interface_id = dev->dd->ipath_guid;
593 hdr->dgid = grh->dgid;
594
595 /* GRH header size in 32-bit words. */
596 return sizeof(struct ib_grh) / sizeof(u32);
597}
598
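/*
 * Build the LRH (plus a GRH when the remote AH requests one) and the three
 * BTH words for a UC/RC packet from the QP's cached address information;
 * bth0 supplies the opcode/flag bits and bth2 the PSN.
 */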
599void ipath_make_ruc_header(struct ipath_ibdev *dev, struct ipath_qp *qp,
600 struct ipath_other_headers *ohdr,
601 u32 bth0, u32 bth2)
602{
603 u16 lrh0;
604 u32 nwords;
605 u32 extra_bytes;
606
607 /* Construct the header. */
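	/* -size & 3 is the pad needed to round the payload up to a 4-byte boundary; nwords is the padded payload in 32-bit words. */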
608 extra_bytes = -qp->s_cur_size & 3;
609 nwords = (qp->s_cur_size + extra_bytes) >> 2;
610 lrh0 = IPATH_LRH_BTH;
611 if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
612 qp->s_hdrwords += ipath_make_grh(dev, &qp->s_hdr.u.l.grh,
613 &qp->remote_ah_attr.grh,
614 qp->s_hdrwords, nwords);
615 lrh0 = IPATH_LRH_GRH;
616 }
617 lrh0 |= qp->remote_ah_attr.sl << 4;
618 qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
619 qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
620 qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
621 qp->s_hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid |
622 qp->remote_ah_attr.src_path_bits);
623 bth0 |= ipath_get_pkey(dev->dd, qp->s_pkey_index);
624 bth0 |= extra_bytes << 20;
625 ohdr->bth[0] = cpu_to_be32(bth0 | (1 << 22));
626 ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
627 ohdr->bth[2] = cpu_to_be32(bth2);
628}
629
630/**
631 * ipath_do_send - perform a send on a QP
632 * @data: contains a pointer to the QP
633 *
634 * Process entries in the send work queue until credit or queue is
635 * exhausted. Only allow one CPU to send a packet per QP (tasklet).
636 * Otherwise, two threads could send packets out of order.
637 */
638void ipath_do_send(unsigned long data)
639{
640 struct ipath_qp *qp = (struct ipath_qp *)data;
641 struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
642 int (*make_req)(struct ipath_qp *qp);
643 unsigned long flags;
644
645 if ((qp->ibqp.qp_type == IB_QPT_RC ||
646 qp->ibqp.qp_type == IB_QPT_UC) &&
647 qp->remote_ah_attr.dlid == dev->dd->ipath_lid) {
648 ipath_ruc_loopback(qp);
649 goto bail;
650 }
651
652 if (qp->ibqp.qp_type == IB_QPT_RC)
653 make_req = ipath_make_rc_req;
654 else if (qp->ibqp.qp_type == IB_QPT_UC)
655 make_req = ipath_make_uc_req;
656 else
657 make_req = ipath_make_ud_req;
658
659 spin_lock_irqsave(&qp->s_lock, flags);
660
661 /* Return if we are already busy processing a work request. */
662 if ((qp->s_flags & (IPATH_S_BUSY | IPATH_S_ANY_WAIT)) ||
663 !(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_OR_FLUSH_SEND)) {
664 spin_unlock_irqrestore(&qp->s_lock, flags);
665 goto bail;
666 }
667
668 qp->s_flags |= IPATH_S_BUSY;
669
670 spin_unlock_irqrestore(&qp->s_lock, flags);
671
672again:
673 /* Check for a constructed packet to be sent. */
674 if (qp->s_hdrwords != 0) {
675 /*
676 * If no PIO bufs are available, return. An interrupt will
677 * call ipath_ib_piobufavail() when one is available.
678 */
679 if (ipath_verbs_send(qp, &qp->s_hdr, qp->s_hdrwords,
680 qp->s_cur_sge, qp->s_cur_size)) {
681 if (ipath_no_bufs_available(qp, dev))
682 goto bail;
683 }
684 dev->n_unicast_xmit++;
685 /* Record that we sent the packet and s_hdr is empty. */
686 qp->s_hdrwords = 0;
687 }
688
689 if (make_req(qp))
690 goto again;
691
692bail:;
693}
694
695/*
696 * This should be called with s_lock held.
697 */
698void ipath_send_complete(struct ipath_qp *qp, struct ipath_swqe *wqe,
699 enum ib_wc_status status)
700{
701 u32 old_last, last;
702
703 if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_OR_FLUSH_SEND))
704 return;
705
706 /* See ch. 11.2.4.1 and 10.7.3.1 */
707 if (!(qp->s_flags & IPATH_S_SIGNAL_REQ_WR) ||
708 (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
709 status != IB_WC_SUCCESS) {
710 struct ib_wc wc;
711
712 memset(&wc, 0, sizeof wc);
713 wc.wr_id = wqe->wr.wr_id;
714 wc.status = status;
715 wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
716 wc.qp = &qp->ibqp;
717 if (status == IB_WC_SUCCESS)
718 wc.byte_len = wqe->length;
719 ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc,
720 status != IB_WC_SUCCESS);
721 }
722
723 old_last = last = qp->s_last;
724 if (++last >= qp->s_size)
725 last = 0;
726 qp->s_last = last;
727 if (qp->s_cur == old_last)
728 qp->s_cur = last;
729 if (qp->s_tail == old_last)
730 qp->s_tail = last;
731 if (qp->state == IB_QPS_SQD && last == qp->s_cur)
732 qp->s_draining = 0;
733}
diff --git a/drivers/staging/rdma/ipath/ipath_sdma.c b/drivers/staging/rdma/ipath/ipath_sdma.c
deleted file mode 100644
index 1ffc06abf9da..000000000000
--- a/drivers/staging/rdma/ipath/ipath_sdma.c
+++ /dev/null
@@ -1,818 +0,0 @@
1/*
2 * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/spinlock.h>
34#include <linux/gfp.h>
35
36#include "ipath_kernel.h"
37#include "ipath_verbs.h"
38#include "ipath_common.h"
39
40#define SDMA_DESCQ_SZ PAGE_SIZE /* 256 entries per 4KB page */
41
42static void vl15_watchdog_enq(struct ipath_devdata *dd)
43{
44 /* ipath_sdma_lock must already be held */
45 if (atomic_inc_return(&dd->ipath_sdma_vl15_count) == 1) {
46 unsigned long interval = (HZ + 19) / 20;
47 dd->ipath_sdma_vl15_timer.expires = jiffies + interval;
48 add_timer(&dd->ipath_sdma_vl15_timer);
49 }
50}
51
52static void vl15_watchdog_deq(struct ipath_devdata *dd)
53{
54 /* ipath_sdma_lock must already be held */
55 if (atomic_dec_return(&dd->ipath_sdma_vl15_count) != 0) {
56 unsigned long interval = (HZ + 19) / 20;
57 mod_timer(&dd->ipath_sdma_vl15_timer, jiffies + interval);
58 } else {
59 del_timer(&dd->ipath_sdma_vl15_timer);
60 }
61}
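/*
 * Editorial note: the "(HZ + 19) / 20" interval used by both watchdog
 * helpers above is HZ/20 rounded up, i.e. roughly 50 ms regardless of
 * the configured tick rate (HZ=1000 gives 50 jiffies, HZ=250 gives
 * 13 jiffies, about 52 ms).
 */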
62
63static void vl15_watchdog_timeout(unsigned long opaque)
64{
65 struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
66
67 if (atomic_read(&dd->ipath_sdma_vl15_count) != 0) {
68 ipath_dbg("vl15 watchdog timeout - clearing\n");
69 ipath_cancel_sends(dd, 1);
70 ipath_hol_down(dd);
71 } else {
72 ipath_dbg("vl15 watchdog timeout - "
73 "condition already cleared\n");
74 }
75}
76
77static void unmap_desc(struct ipath_devdata *dd, unsigned head)
78{
79 __le64 *descqp = &dd->ipath_sdma_descq[head].qw[0];
80 u64 desc[2];
81 dma_addr_t addr;
82 size_t len;
83
84 desc[0] = le64_to_cpu(descqp[0]);
85 desc[1] = le64_to_cpu(descqp[1]);
86
87 addr = (desc[1] << 32) | (desc[0] >> 32);
88 len = (desc[0] >> 14) & (0x7ffULL << 2);
89 dma_unmap_single(&dd->pcidev->dev, addr, len, DMA_TO_DEVICE);
90}
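/*
 * Editorial note: the shifts above undo the packing done by
 * make_sdma_desc() later in this file: desc[1] carries
 * SDmaPhyAddr[47:32], the upper half of desc[0] carries
 * SDmaPhyAddr[31:0], and "(desc[0] >> 14) & (0x7ff << 2)" extracts the
 * 11-bit dword-count field already converted to a byte length for
 * dma_unmap_single().
 */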
91
92/*
93 * ipath_sdma_lock should be locked before calling this.
94 */
95int ipath_sdma_make_progress(struct ipath_devdata *dd)
96{
97 struct list_head *lp = NULL;
98 struct ipath_sdma_txreq *txp = NULL;
99 u16 dmahead;
100 u16 start_idx = 0;
101 int progress = 0;
102
103 if (!list_empty(&dd->ipath_sdma_activelist)) {
104 lp = dd->ipath_sdma_activelist.next;
105 txp = list_entry(lp, struct ipath_sdma_txreq, list);
106 start_idx = txp->start_idx;
107 }
108
109 /*
110 * Read the SDMA head register in order to know that the
111 * interrupt clear has been written to the chip.
112 * Otherwise, we may not get an interrupt for the last
113 * descriptor in the queue.
114 */
115 dmahead = (u16)ipath_read_kreg32(dd, dd->ipath_kregs->kr_senddmahead);
116 /* sanity check return value for error handling (chip reset, etc.) */
117 if (dmahead >= dd->ipath_sdma_descq_cnt)
118 goto done;
119
120 while (dd->ipath_sdma_descq_head != dmahead) {
121 if (txp && txp->flags & IPATH_SDMA_TXREQ_F_FREEDESC &&
122 dd->ipath_sdma_descq_head == start_idx) {
123 unmap_desc(dd, dd->ipath_sdma_descq_head);
124 start_idx++;
125 if (start_idx == dd->ipath_sdma_descq_cnt)
126 start_idx = 0;
127 }
128
129 /* increment free count and head */
130 dd->ipath_sdma_descq_removed++;
131 if (++dd->ipath_sdma_descq_head == dd->ipath_sdma_descq_cnt)
132 dd->ipath_sdma_descq_head = 0;
133
134 if (txp && txp->next_descq_idx == dd->ipath_sdma_descq_head) {
135 /* move to notify list */
136 if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
137 vl15_watchdog_deq(dd);
138 list_move_tail(lp, &dd->ipath_sdma_notifylist);
139 if (!list_empty(&dd->ipath_sdma_activelist)) {
140 lp = dd->ipath_sdma_activelist.next;
141 txp = list_entry(lp, struct ipath_sdma_txreq,
142 list);
143 start_idx = txp->start_idx;
144 } else {
145 lp = NULL;
146 txp = NULL;
147 }
148 }
149 progress = 1;
150 }
151
152 if (progress)
153 tasklet_hi_schedule(&dd->ipath_sdma_notify_task);
154
155done:
156 return progress;
157}
158
159static void ipath_sdma_notify(struct ipath_devdata *dd, struct list_head *list)
160{
161 struct ipath_sdma_txreq *txp, *txp_next;
162
163 list_for_each_entry_safe(txp, txp_next, list, list) {
164 list_del_init(&txp->list);
165
166 if (txp->callback)
167 (*txp->callback)(txp->callback_cookie,
168 txp->callback_status);
169 }
170}
171
172static void sdma_notify_taskbody(struct ipath_devdata *dd)
173{
174 unsigned long flags;
175 struct list_head list;
176
177 INIT_LIST_HEAD(&list);
178
179 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
180
181 list_splice_init(&dd->ipath_sdma_notifylist, &list);
182
183 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
184
185 ipath_sdma_notify(dd, &list);
186
187 /*
188 * The IB verbs layer needs to see the callback before getting
189 * the call to ipath_ib_piobufavail() because the callback
190 * handles releasing resources the next send will need.
191 * Otherwise, we could do these calls in
192 * ipath_sdma_make_progress().
193 */
194 ipath_ib_piobufavail(dd->verbs_dev);
195}
196
197static void sdma_notify_task(unsigned long opaque)
198{
199 struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
200
201 if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
202 sdma_notify_taskbody(dd);
203}
204
205static void dump_sdma_state(struct ipath_devdata *dd)
206{
207 unsigned long reg;
208
209 reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmastatus);
210 ipath_cdbg(VERBOSE, "kr_senddmastatus: 0x%016lx\n", reg);
211
212 reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendctrl);
213 ipath_cdbg(VERBOSE, "kr_sendctrl: 0x%016lx\n", reg);
214
215 reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask0);
216 ipath_cdbg(VERBOSE, "kr_senddmabufmask0: 0x%016lx\n", reg);
217
218 reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask1);
219 ipath_cdbg(VERBOSE, "kr_senddmabufmask1: 0x%016lx\n", reg);
220
221 reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask2);
222 ipath_cdbg(VERBOSE, "kr_senddmabufmask2: 0x%016lx\n", reg);
223
224 reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmatail);
225 ipath_cdbg(VERBOSE, "kr_senddmatail: 0x%016lx\n", reg);
226
227 reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmahead);
228 ipath_cdbg(VERBOSE, "kr_senddmahead: 0x%016lx\n", reg);
229}
230
231static void sdma_abort_task(unsigned long opaque)
232{
233 struct ipath_devdata *dd = (struct ipath_devdata *) opaque;
234 u64 status;
235 unsigned long flags;
236
237 if (test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
238 return;
239
240 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
241
242 status = dd->ipath_sdma_status & IPATH_SDMA_ABORT_MASK;
243
244 /* nothing to do */
245 if (status == IPATH_SDMA_ABORT_NONE)
246 goto unlock;
247
248 /* ipath_sdma_abort() is done, waiting for interrupt */
249 if (status == IPATH_SDMA_ABORT_DISARMED) {
250 if (time_before(jiffies, dd->ipath_sdma_abort_intr_timeout))
251 goto resched_noprint;
252 /* give up, intr got lost somewhere */
253 ipath_dbg("give up waiting for SDMADISABLED intr\n");
254 __set_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
255 status = IPATH_SDMA_ABORT_ABORTED;
256 }
257
258 /* everything is stopped, time to clean up and restart */
259 if (status == IPATH_SDMA_ABORT_ABORTED) {
260 struct ipath_sdma_txreq *txp, *txpnext;
261 u64 hwstatus;
262 int notify = 0;
263
264 hwstatus = ipath_read_kreg64(dd,
265 dd->ipath_kregs->kr_senddmastatus);
266
267 if ((hwstatus & (IPATH_SDMA_STATUS_SCORE_BOARD_DRAIN_IN_PROG |
268 IPATH_SDMA_STATUS_ABORT_IN_PROG |
269 IPATH_SDMA_STATUS_INTERNAL_SDMA_ENABLE)) ||
270 !(hwstatus & IPATH_SDMA_STATUS_SCB_EMPTY)) {
271 if (dd->ipath_sdma_reset_wait > 0) {
272 /* not done shutting down sdma */
273 --dd->ipath_sdma_reset_wait;
274 goto resched;
275 }
276 ipath_cdbg(VERBOSE, "gave up waiting for quiescent "
277 "status after SDMA reset, continuing\n");
278 dump_sdma_state(dd);
279 }
280
281 /* dequeue all "sent" requests */
282 list_for_each_entry_safe(txp, txpnext,
283 &dd->ipath_sdma_activelist, list) {
284 txp->callback_status = IPATH_SDMA_TXREQ_S_ABORTED;
285 if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
286 vl15_watchdog_deq(dd);
287 list_move_tail(&txp->list, &dd->ipath_sdma_notifylist);
288 notify = 1;
289 }
290 if (notify)
291 tasklet_hi_schedule(&dd->ipath_sdma_notify_task);
292
293 /* reset our notion of head and tail */
294 dd->ipath_sdma_descq_tail = 0;
295 dd->ipath_sdma_descq_head = 0;
296 dd->ipath_sdma_head_dma[0] = 0;
297 dd->ipath_sdma_generation = 0;
298 dd->ipath_sdma_descq_removed = dd->ipath_sdma_descq_added;
299
300 /* Reset SendDmaLenGen */
301 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen,
302 (u64) dd->ipath_sdma_descq_cnt | (1ULL << 18));
303
304 /* done with sdma state for a bit */
305 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
306
307 /*
308 * Don't restart sdma here (with the exception
309 * below). Wait until link is up to ACTIVE. VL15 MADs
310 * used to bring the link up use PIO, and multiple link
311 * transitions otherwise cause the sdma engine to be
312 * stopped and started multiple times.
313 * The disable is done here, including the shadow,
314 * so the state is kept consistent.
315 * See ipath_restart_sdma() for the actual starting
316 * of sdma.
317 */
318 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
319 dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
320 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
321 dd->ipath_sendctrl);
322 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
323 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
324
325 /* make sure I see next message */
326 dd->ipath_sdma_abort_jiffies = 0;
327
328 /*
329 * Not everything that takes SDMA offline is a link
330 * status change. If the link was up, restart SDMA.
331 */
332 if (dd->ipath_flags & IPATH_LINKACTIVE)
333 ipath_restart_sdma(dd);
334
335 goto done;
336 }
337
338resched:
339 /*
340 * For now, keep spinning.
341 * JAG - it is bad to just have the default here be a loop with
342 * no state change.
343 */
344 if (time_after(jiffies, dd->ipath_sdma_abort_jiffies)) {
345 ipath_dbg("looping with status 0x%08lx\n",
346 dd->ipath_sdma_status);
347 dd->ipath_sdma_abort_jiffies = jiffies + 5 * HZ;
348 }
349resched_noprint:
350 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
351 if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
352 tasklet_hi_schedule(&dd->ipath_sdma_abort_task);
353 return;
354
355unlock:
356 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
357done:
358 return;
359}
360
361/*
362 * This is called from interrupt context.
363 */
364void ipath_sdma_intr(struct ipath_devdata *dd)
365{
366 unsigned long flags;
367
368 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
369
370 (void) ipath_sdma_make_progress(dd);
371
372 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
373}
374
375static int alloc_sdma(struct ipath_devdata *dd)
376{
377 int ret = 0;
378
379 /* Allocate memory for SendDMA descriptor FIFO */
380 dd->ipath_sdma_descq = dma_alloc_coherent(&dd->pcidev->dev,
381 SDMA_DESCQ_SZ, &dd->ipath_sdma_descq_phys, GFP_KERNEL);
382
383 if (!dd->ipath_sdma_descq) {
384 ipath_dev_err(dd, "failed to allocate SendDMA descriptor "
385 "FIFO memory\n");
386 ret = -ENOMEM;
387 goto done;
388 }
389
390 dd->ipath_sdma_descq_cnt =
391 SDMA_DESCQ_SZ / sizeof(struct ipath_sdma_desc);
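/*
 * Editorial note: with two __le64 words per descriptor (16 bytes) and
 * 4 KB pages this works out to the 256 entries mentioned in the
 * SDMA_DESCQ_SZ comment; larger page sizes simply give a
 * proportionally deeper queue.
 */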
392
393 /* Allocate memory for DMA of head register to memory */
394 dd->ipath_sdma_head_dma = dma_alloc_coherent(&dd->pcidev->dev,
395 PAGE_SIZE, &dd->ipath_sdma_head_phys, GFP_KERNEL);
396 if (!dd->ipath_sdma_head_dma) {
397 ipath_dev_err(dd, "failed to allocate SendDMA head memory\n");
398 ret = -ENOMEM;
399 goto cleanup_descq;
400 }
401 dd->ipath_sdma_head_dma[0] = 0;
402
403 setup_timer(&dd->ipath_sdma_vl15_timer, vl15_watchdog_timeout,
404 (unsigned long)dd);
405
406 atomic_set(&dd->ipath_sdma_vl15_count, 0);
407
408 goto done;
409
410cleanup_descq:
411 dma_free_coherent(&dd->pcidev->dev, SDMA_DESCQ_SZ,
412 (void *)dd->ipath_sdma_descq, dd->ipath_sdma_descq_phys);
413 dd->ipath_sdma_descq = NULL;
414 dd->ipath_sdma_descq_phys = 0;
415done:
416 return ret;
417}
418
419int setup_sdma(struct ipath_devdata *dd)
420{
421 int ret = 0;
422 unsigned i, n;
423 u64 tmp64;
424 u64 senddmabufmask[3] = { 0 };
425 unsigned long flags;
426
427 ret = alloc_sdma(dd);
428 if (ret)
429 goto done;
430
431 if (!dd->ipath_sdma_descq) {
432 ipath_dev_err(dd, "SendDMA memory not allocated\n");
433 goto done;
434 }
435
436 /*
437 * Set initial status as if we had been up, then gone down.
438 * This lets initial start on transition to ACTIVE be the
439 * same as restart after link flap.
440 */
441 dd->ipath_sdma_status = IPATH_SDMA_ABORT_ABORTED;
442 dd->ipath_sdma_abort_jiffies = 0;
443 dd->ipath_sdma_generation = 0;
444 dd->ipath_sdma_descq_tail = 0;
445 dd->ipath_sdma_descq_head = 0;
446 dd->ipath_sdma_descq_removed = 0;
447 dd->ipath_sdma_descq_added = 0;
448
449 /* Set SendDmaBase */
450 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabase,
451 dd->ipath_sdma_descq_phys);
452 /* Set SendDmaLenGen */
453 tmp64 = dd->ipath_sdma_descq_cnt;
454 tmp64 |= 1<<18; /* enable generation checking */
455 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen, tmp64);
456 /* Set SendDmaTail */
457 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail,
458 dd->ipath_sdma_descq_tail);
459 /* Set SendDmaHeadAddr */
460 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmaheadaddr,
461 dd->ipath_sdma_head_phys);
462
463 /*
464 * Reserve all the former "kernel" piobufs, using high number range
465 * so we get as many 4K buffers as possible
466 */
467 n = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
468 i = dd->ipath_lastport_piobuf + dd->ipath_pioreserved;
469 ipath_chg_pioavailkernel(dd, i, n - i, 0);
470 for (; i < n; ++i) {
471 unsigned word = i / 64;
472 unsigned bit = i & 63;
473 BUG_ON(word >= 3);
474 senddmabufmask[word] |= 1ULL << bit;
475 }
476 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask0,
477 senddmabufmask[0]);
478 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask1,
479 senddmabufmask[1]);
480 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask2,
481 senddmabufmask[2]);
482
483 INIT_LIST_HEAD(&dd->ipath_sdma_activelist);
484 INIT_LIST_HEAD(&dd->ipath_sdma_notifylist);
485
486 tasklet_init(&dd->ipath_sdma_notify_task, sdma_notify_task,
487 (unsigned long) dd);
488 tasklet_init(&dd->ipath_sdma_abort_task, sdma_abort_task,
489 (unsigned long) dd);
490
491 /*
492 * There is no point turning on SDMA here, as the link is probably
493 * not yet ACTIVE. Just mark it RUNNING and enable the interrupt,
494 * and let ipath_restart_sdma() actually enable it on the link
495 * transition to ACTIVE.
496 */
497 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
498 dd->ipath_sendctrl |= INFINIPATH_S_SDMAINTENABLE;
499 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
500 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
501 __set_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status);
502 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
503
504done:
505 return ret;
506}
507
508void teardown_sdma(struct ipath_devdata *dd)
509{
510 struct ipath_sdma_txreq *txp, *txpnext;
511 unsigned long flags;
512 dma_addr_t sdma_head_phys = 0;
513 dma_addr_t sdma_descq_phys = 0;
514 void *sdma_descq = NULL;
515 void *sdma_head_dma = NULL;
516
517 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
518 __clear_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status);
519 __set_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
520 __set_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status);
521 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
522
523 tasklet_kill(&dd->ipath_sdma_abort_task);
524 tasklet_kill(&dd->ipath_sdma_notify_task);
525
526 /* turn off sdma */
527 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
528 dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
529 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
530 dd->ipath_sendctrl);
531 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
532 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
533
534 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
535 /* dequeue all "sent" requests */
536 list_for_each_entry_safe(txp, txpnext, &dd->ipath_sdma_activelist,
537 list) {
538 txp->callback_status = IPATH_SDMA_TXREQ_S_SHUTDOWN;
539 if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
540 vl15_watchdog_deq(dd);
541 list_move_tail(&txp->list, &dd->ipath_sdma_notifylist);
542 }
543 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
544
545 sdma_notify_taskbody(dd);
546
547 del_timer_sync(&dd->ipath_sdma_vl15_timer);
548
549 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
550
551 dd->ipath_sdma_abort_jiffies = 0;
552
553 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabase, 0);
554 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen, 0);
555 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, 0);
556 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmaheadaddr, 0);
557 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask0, 0);
558 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask1, 0);
559 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask2, 0);
560
561 if (dd->ipath_sdma_head_dma) {
562 sdma_head_dma = (void *) dd->ipath_sdma_head_dma;
563 sdma_head_phys = dd->ipath_sdma_head_phys;
564 dd->ipath_sdma_head_dma = NULL;
565 dd->ipath_sdma_head_phys = 0;
566 }
567
568 if (dd->ipath_sdma_descq) {
569 sdma_descq = dd->ipath_sdma_descq;
570 sdma_descq_phys = dd->ipath_sdma_descq_phys;
571 dd->ipath_sdma_descq = NULL;
572 dd->ipath_sdma_descq_phys = 0;
573 }
574
575 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
576
577 if (sdma_head_dma)
578 dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
579 sdma_head_dma, sdma_head_phys);
580
581 if (sdma_descq)
582 dma_free_coherent(&dd->pcidev->dev, SDMA_DESCQ_SZ,
583 sdma_descq, sdma_descq_phys);
584}
585
586/*
587 * [Re]start SDMA, if we use it, and it's not already OK.
588 * This is called on transition to link ACTIVE, either the first or
589 * subsequent times.
590 */
591void ipath_restart_sdma(struct ipath_devdata *dd)
592{
593 unsigned long flags;
594 int needed = 1;
595
596 if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA))
597 goto bail;
598
599 /*
600 * First, make sure we should restart at all: check that we
601 * are "RUNNING" (not in teardown) and that we are not
602 * "SHUTDOWN".
603 */
604 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
605 if (!test_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status)
606 || test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
607 needed = 0;
608 else {
609 __clear_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
610 __clear_bit(IPATH_SDMA_DISARMED, &dd->ipath_sdma_status);
611 __clear_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
612 }
613 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
614 if (!needed) {
615 ipath_dbg("invalid attempt to restart SDMA, status 0x%08lx\n",
616 dd->ipath_sdma_status);
617 goto bail;
618 }
619 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
620 /*
621 * First clear, just to be safe; the chip only performs the
622 * enable on a 0->1 transition.
623 */
624 dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
625 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
626 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
627 dd->ipath_sendctrl |= INFINIPATH_S_SDMAENABLE;
628 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
629 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
630 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
631
632 /* notify upper layers */
633 ipath_ib_piobufavail(dd->verbs_dev);
634
635bail:
636 return;
637}
638
639static inline void make_sdma_desc(struct ipath_devdata *dd,
640 u64 *sdmadesc, u64 addr, u64 dwlen, u64 dwoffset)
641{
642 WARN_ON(addr & 3);
643 /* SDmaPhyAddr[47:32] */
644 sdmadesc[1] = addr >> 32;
645 /* SDmaPhyAddr[31:0] */
646 sdmadesc[0] = (addr & 0xfffffffcULL) << 32;
647 /* SDmaGeneration[1:0] */
648 sdmadesc[0] |= (dd->ipath_sdma_generation & 3ULL) << 30;
649 /* SDmaDwordCount[10:0] */
650 sdmadesc[0] |= (dwlen & 0x7ffULL) << 16;
651 /* SDmaBufOffset[12:2] */
652 sdmadesc[0] |= dwoffset & 0x7ffULL;
653}
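/*
 * Editorial sketch, not part of the original driver: a self-contained
 * userspace illustration of the descriptor layout produced by
 * make_sdma_desc() and of how unmap_desc() earlier in this file
 * recovers the DMA address and byte length from it.  Field positions
 * are taken from the code above; nothing beyond that is assumed.
 */
#include <assert.h>
#include <stdint.h>

static void pack_desc(uint64_t *d, uint64_t addr, uint64_t dwlen,
		      uint64_t dwoffset, uint64_t gen)
{
	d[1] = addr >> 32;			/* SDmaPhyAddr[47:32] */
	d[0] = (addr & 0xfffffffcULL) << 32;	/* SDmaPhyAddr[31:0] */
	d[0] |= (gen & 3ULL) << 30;		/* SDmaGeneration[1:0] */
	d[0] |= (dwlen & 0x7ffULL) << 16;	/* SDmaDwordCount[10:0] */
	d[0] |= dwoffset & 0x7ffULL;		/* SDmaBufOffset[12:2] */
}

int main(void)
{
	uint64_t d[2], addr = 0x12345679104ULL, len;

	pack_desc(d, addr, 64, 5, 2);
	/* same recovery as unmap_desc(): address, then byte length */
	assert(((d[1] << 32) | (d[0] >> 32)) == addr);
	len = (d[0] >> 14) & (0x7ffULL << 2);
	assert(len == 64 * 4);
	return 0;
}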
654
655/*
656 * This function queues one IB packet onto the send DMA queue per call.
657 * The caller is responsible for checking:
658 * 1) The number of send DMA descriptor entries is less than the size of
659 * the descriptor queue.
660 * 2) The IB SGE addresses and lengths are 32-bit aligned
661 * (except possibly the last SGE's length)
662 * 3) The SGE addresses are suitable for passing to dma_map_single().
663 */
664int ipath_sdma_verbs_send(struct ipath_devdata *dd,
665 struct ipath_sge_state *ss, u32 dwords,
666 struct ipath_verbs_txreq *tx)
667{
668
669 unsigned long flags;
670 struct ipath_sge *sge;
671 int ret = 0;
672 u16 tail;
673 __le64 *descqp;
674 u64 sdmadesc[2];
675 u32 dwoffset;
676 dma_addr_t addr;
677
678 if ((tx->map_len + (dwords<<2)) > dd->ipath_ibmaxlen) {
679 ipath_dbg("packet size %X > ibmax %X, fail\n",
680 tx->map_len + (dwords<<2), dd->ipath_ibmaxlen);
681 ret = -EMSGSIZE;
682 goto fail;
683 }
684
685 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
686
687retry:
688 if (unlikely(test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status))) {
689 ret = -EBUSY;
690 goto unlock;
691 }
692
693 if (tx->txreq.sg_count > ipath_sdma_descq_freecnt(dd)) {
694 if (ipath_sdma_make_progress(dd))
695 goto retry;
696 ret = -ENOBUFS;
697 goto unlock;
698 }
699
700 addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
701 tx->map_len, DMA_TO_DEVICE);
702 if (dma_mapping_error(&dd->pcidev->dev, addr))
703 goto ioerr;
704
705 dwoffset = tx->map_len >> 2;
706 make_sdma_desc(dd, sdmadesc, (u64) addr, dwoffset, 0);
707
708 /* SDmaFirstDesc */
709 sdmadesc[0] |= 1ULL << 12;
710 if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_USELARGEBUF)
711 sdmadesc[0] |= 1ULL << 14; /* SDmaUseLargeBuf */
712
713 /* write to the descq */
714 tail = dd->ipath_sdma_descq_tail;
715 descqp = &dd->ipath_sdma_descq[tail].qw[0];
716 *descqp++ = cpu_to_le64(sdmadesc[0]);
717 *descqp++ = cpu_to_le64(sdmadesc[1]);
718
719 if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEDESC)
720 tx->txreq.start_idx = tail;
721
722 /* increment the tail */
723 if (++tail == dd->ipath_sdma_descq_cnt) {
724 tail = 0;
725 descqp = &dd->ipath_sdma_descq[0].qw[0];
726 ++dd->ipath_sdma_generation;
727 }
728
729 sge = &ss->sge;
730 while (dwords) {
731 u32 dw;
732 u32 len;
733
734 len = dwords << 2;
735 if (len > sge->length)
736 len = sge->length;
737 if (len > sge->sge_length)
738 len = sge->sge_length;
739 BUG_ON(len == 0);
740 dw = (len + 3) >> 2;
741 addr = dma_map_single(&dd->pcidev->dev, sge->vaddr, dw << 2,
742 DMA_TO_DEVICE);
743 if (dma_mapping_error(&dd->pcidev->dev, addr))
744 goto unmap;
745 make_sdma_desc(dd, sdmadesc, (u64) addr, dw, dwoffset);
746 /* SDmaUseLargeBuf has to be set in every descriptor */
747 if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_USELARGEBUF)
748 sdmadesc[0] |= 1ULL << 14;
749 /* write to the descq */
750 *descqp++ = cpu_to_le64(sdmadesc[0]);
751 *descqp++ = cpu_to_le64(sdmadesc[1]);
752
753 /* increment the tail */
754 if (++tail == dd->ipath_sdma_descq_cnt) {
755 tail = 0;
756 descqp = &dd->ipath_sdma_descq[0].qw[0];
757 ++dd->ipath_sdma_generation;
758 }
759 sge->vaddr += len;
760 sge->length -= len;
761 sge->sge_length -= len;
762 if (sge->sge_length == 0) {
763 if (--ss->num_sge)
764 *sge = *ss->sg_list++;
765 } else if (sge->length == 0 && sge->mr != NULL) {
766 if (++sge->n >= IPATH_SEGSZ) {
767 if (++sge->m >= sge->mr->mapsz)
768 break;
769 sge->n = 0;
770 }
771 sge->vaddr =
772 sge->mr->map[sge->m]->segs[sge->n].vaddr;
773 sge->length =
774 sge->mr->map[sge->m]->segs[sge->n].length;
775 }
776
777 dwoffset += dw;
778 dwords -= dw;
779 }
780
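/*
 * Editorial note: tail now indexes the next free slot, so the last
 * descriptor actually written is the entry just before it.  Back
 * descqp up by one descriptor (two qwords) to set SDmaLastDesc there;
 * if tail wrapped to 0, the last entry written was the final one in
 * the ring, hence the rebase past the end of the queue first.
 */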
781 if (!tail)
782 descqp = &dd->ipath_sdma_descq[dd->ipath_sdma_descq_cnt].qw[0];
783 descqp -= 2;
784 /* SDmaLastDesc */
785 descqp[0] |= cpu_to_le64(1ULL << 11);
786 if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_INTREQ) {
787 /* SDmaIntReq */
788 descqp[0] |= cpu_to_le64(1ULL << 15);
789 }
790
791 /* Commit writes to memory and advance the tail on the chip */
792 wmb();
793 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, tail);
794
795 tx->txreq.next_descq_idx = tail;
796 tx->txreq.callback_status = IPATH_SDMA_TXREQ_S_OK;
797 dd->ipath_sdma_descq_tail = tail;
798 dd->ipath_sdma_descq_added += tx->txreq.sg_count;
799 list_add_tail(&tx->txreq.list, &dd->ipath_sdma_activelist);
800 if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_VL15)
801 vl15_watchdog_enq(dd);
802 goto unlock;
803
804unmap:
805 while (tail != dd->ipath_sdma_descq_tail) {
806 if (!tail)
807 tail = dd->ipath_sdma_descq_cnt - 1;
808 else
809 tail--;
810 unmap_desc(dd, tail);
811 }
812ioerr:
813 ret = -EIO;
814unlock:
815 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
816fail:
817 return ret;
818}
diff --git a/drivers/staging/rdma/ipath/ipath_srq.c b/drivers/staging/rdma/ipath/ipath_srq.c
deleted file mode 100644
index 26271984b717..000000000000
--- a/drivers/staging/rdma/ipath/ipath_srq.c
+++ /dev/null
@@ -1,380 +0,0 @@
1/*
2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/err.h>
35#include <linux/slab.h>
36#include <linux/vmalloc.h>
37
38#include "ipath_verbs.h"
39
40/**
41 * ipath_post_srq_receive - post a receive on a shared receive queue
42 * @ibsrq: the SRQ to post the receive on
43 * @wr: the list of work requests to post
44 * @bad_wr: the first WR to cause a problem is put here
45 *
46 * This may be called from interrupt context.
47 */
48int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
49 struct ib_recv_wr **bad_wr)
50{
51 struct ipath_srq *srq = to_isrq(ibsrq);
52 struct ipath_rwq *wq;
53 unsigned long flags;
54 int ret;
55
56 for (; wr; wr = wr->next) {
57 struct ipath_rwqe *wqe;
58 u32 next;
59 int i;
60
61 if ((unsigned) wr->num_sge > srq->rq.max_sge) {
62 *bad_wr = wr;
63 ret = -EINVAL;
64 goto bail;
65 }
66
67 spin_lock_irqsave(&srq->rq.lock, flags);
68 wq = srq->rq.wq;
69 next = wq->head + 1;
70 if (next >= srq->rq.size)
71 next = 0;
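/*
 * Editorial note: one slot is deliberately left unused so that
 * head == tail means "empty"; if advancing head would land on tail,
 * the ring is full.  This is also why ipath_create_srq() sizes the
 * queue as max_wr + 1.
 */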
72 if (next == wq->tail) {
73 spin_unlock_irqrestore(&srq->rq.lock, flags);
74 *bad_wr = wr;
75 ret = -ENOMEM;
76 goto bail;
77 }
78
79 wqe = get_rwqe_ptr(&srq->rq, wq->head);
80 wqe->wr_id = wr->wr_id;
81 wqe->num_sge = wr->num_sge;
82 for (i = 0; i < wr->num_sge; i++)
83 wqe->sg_list[i] = wr->sg_list[i];
84 /* Make sure queue entry is written before the head index. */
85 smp_wmb();
86 wq->head = next;
87 spin_unlock_irqrestore(&srq->rq.lock, flags);
88 }
89 ret = 0;
90
91bail:
92 return ret;
93}
94
95/**
96 * ipath_create_srq - create a shared receive queue
97 * @ibpd: the protection domain of the SRQ to create
98 * @srq_init_attr: the attributes of the SRQ
99 * @udata: data from libipathverbs when creating a user SRQ
100 */
101struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
102 struct ib_srq_init_attr *srq_init_attr,
103 struct ib_udata *udata)
104{
105 struct ipath_ibdev *dev = to_idev(ibpd->device);
106 struct ipath_srq *srq;
107 u32 sz;
108 struct ib_srq *ret;
109
110 if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
111 ret = ERR_PTR(-ENOSYS);
112 goto done;
113 }
114
115 if (srq_init_attr->attr.max_wr == 0) {
116 ret = ERR_PTR(-EINVAL);
117 goto done;
118 }
119
120 if ((srq_init_attr->attr.max_sge > ib_ipath_max_srq_sges) ||
121 (srq_init_attr->attr.max_wr > ib_ipath_max_srq_wrs)) {
122 ret = ERR_PTR(-EINVAL);
123 goto done;
124 }
125
126 srq = kmalloc(sizeof(*srq), GFP_KERNEL);
127 if (!srq) {
128 ret = ERR_PTR(-ENOMEM);
129 goto done;
130 }
131
132 /*
133 * Need to use vmalloc() if we want to support large #s of entries.
134 */
135 srq->rq.size = srq_init_attr->attr.max_wr + 1;
136 srq->rq.max_sge = srq_init_attr->attr.max_sge;
137 sz = sizeof(struct ib_sge) * srq->rq.max_sge +
138 sizeof(struct ipath_rwqe);
139 srq->rq.wq = vmalloc_user(sizeof(struct ipath_rwq) + srq->rq.size * sz);
140 if (!srq->rq.wq) {
141 ret = ERR_PTR(-ENOMEM);
142 goto bail_srq;
143 }
144
145 /*
146 * Return the address of the RWQ as the offset to mmap.
147 * See ipath_mmap() for details.
148 */
149 if (udata && udata->outlen >= sizeof(__u64)) {
150 int err;
151 u32 s = sizeof(struct ipath_rwq) + srq->rq.size * sz;
152
153 srq->ip =
154 ipath_create_mmap_info(dev, s,
155 ibpd->uobject->context,
156 srq->rq.wq);
157 if (!srq->ip) {
158 ret = ERR_PTR(-ENOMEM);
159 goto bail_wq;
160 }
161
162 err = ib_copy_to_udata(udata, &srq->ip->offset,
163 sizeof(srq->ip->offset));
164 if (err) {
165 ret = ERR_PTR(err);
166 goto bail_ip;
167 }
168 } else
169 srq->ip = NULL;
170
171 /*
172 * ib_create_srq() will initialize srq->ibsrq.
173 */
174 spin_lock_init(&srq->rq.lock);
175 srq->rq.wq->head = 0;
176 srq->rq.wq->tail = 0;
177 srq->limit = srq_init_attr->attr.srq_limit;
178
179 spin_lock(&dev->n_srqs_lock);
180 if (dev->n_srqs_allocated == ib_ipath_max_srqs) {
181 spin_unlock(&dev->n_srqs_lock);
182 ret = ERR_PTR(-ENOMEM);
183 goto bail_ip;
184 }
185
186 dev->n_srqs_allocated++;
187 spin_unlock(&dev->n_srqs_lock);
188
189 if (srq->ip) {
190 spin_lock_irq(&dev->pending_lock);
191 list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps);
192 spin_unlock_irq(&dev->pending_lock);
193 }
194
195 ret = &srq->ibsrq;
196 goto done;
197
198bail_ip:
199 kfree(srq->ip);
200bail_wq:
201 vfree(srq->rq.wq);
202bail_srq:
203 kfree(srq);
204done:
205 return ret;
206}
207
208/**
209 * ipath_modify_srq - modify a shared receive queue
210 * @ibsrq: the SRQ to modify
211 * @attr: the new attributes of the SRQ
212 * @attr_mask: indicates which attributes to modify
213 * @udata: user data for ipathverbs.so
214 */
215int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
216 enum ib_srq_attr_mask attr_mask,
217 struct ib_udata *udata)
218{
219 struct ipath_srq *srq = to_isrq(ibsrq);
220 struct ipath_rwq *wq;
221 int ret = 0;
222
223 if (attr_mask & IB_SRQ_MAX_WR) {
224 struct ipath_rwq *owq;
225 struct ipath_rwqe *p;
226 u32 sz, size, n, head, tail;
227
228 /* Check that the requested sizes are below the limits. */
229 if ((attr->max_wr > ib_ipath_max_srq_wrs) ||
230 ((attr_mask & IB_SRQ_LIMIT) ?
231 attr->srq_limit : srq->limit) > attr->max_wr) {
232 ret = -EINVAL;
233 goto bail;
234 }
235
236 sz = sizeof(struct ipath_rwqe) +
237 srq->rq.max_sge * sizeof(struct ib_sge);
238 size = attr->max_wr + 1;
239 wq = vmalloc_user(sizeof(struct ipath_rwq) + size * sz);
240 if (!wq) {
241 ret = -ENOMEM;
242 goto bail;
243 }
244
245 /* Check that we can write the offset to mmap. */
246 if (udata && udata->inlen >= sizeof(__u64)) {
247 __u64 offset_addr;
248 __u64 offset = 0;
249
250 ret = ib_copy_from_udata(&offset_addr, udata,
251 sizeof(offset_addr));
252 if (ret)
253 goto bail_free;
254 udata->outbuf =
255 (void __user *) (unsigned long) offset_addr;
256 ret = ib_copy_to_udata(udata, &offset,
257 sizeof(offset));
258 if (ret)
259 goto bail_free;
260 }
261
262 spin_lock_irq(&srq->rq.lock);
263 /*
264 * validate head pointer value and compute
265 * the number of remaining WQEs.
266 */
267 owq = srq->rq.wq;
268 head = owq->head;
269 if (head >= srq->rq.size)
270 head = 0;
271 tail = owq->tail;
272 if (tail >= srq->rq.size)
273 tail = 0;
274 n = head;
275 if (n < tail)
276 n += srq->rq.size - tail;
277 else
278 n -= tail;
279 if (size <= n) {
280 ret = -EINVAL;
281 goto bail_unlock;
282 }
283 n = 0;
284 p = wq->wq;
285 while (tail != head) {
286 struct ipath_rwqe *wqe;
287 int i;
288
289 wqe = get_rwqe_ptr(&srq->rq, tail);
290 p->wr_id = wqe->wr_id;
291 p->num_sge = wqe->num_sge;
292 for (i = 0; i < wqe->num_sge; i++)
293 p->sg_list[i] = wqe->sg_list[i];
294 n++;
295 p = (struct ipath_rwqe *)((char *) p + sz);
296 if (++tail >= srq->rq.size)
297 tail = 0;
298 }
299 srq->rq.wq = wq;
300 srq->rq.size = size;
301 wq->head = n;
302 wq->tail = 0;
303 if (attr_mask & IB_SRQ_LIMIT)
304 srq->limit = attr->srq_limit;
305 spin_unlock_irq(&srq->rq.lock);
306
307 vfree(owq);
308
309 if (srq->ip) {
310 struct ipath_mmap_info *ip = srq->ip;
311 struct ipath_ibdev *dev = to_idev(srq->ibsrq.device);
312 u32 s = sizeof(struct ipath_rwq) + size * sz;
313
314 ipath_update_mmap_info(dev, ip, s, wq);
315
316 /*
317 * Return the offset to mmap.
318 * See ipath_mmap() for details.
319 */
320 if (udata && udata->inlen >= sizeof(__u64)) {
321 ret = ib_copy_to_udata(udata, &ip->offset,
322 sizeof(ip->offset));
323 if (ret)
324 goto bail;
325 }
326
327 spin_lock_irq(&dev->pending_lock);
328 if (list_empty(&ip->pending_mmaps))
329 list_add(&ip->pending_mmaps,
330 &dev->pending_mmaps);
331 spin_unlock_irq(&dev->pending_lock);
332 }
333 } else if (attr_mask & IB_SRQ_LIMIT) {
334 spin_lock_irq(&srq->rq.lock);
335 if (attr->srq_limit >= srq->rq.size)
336 ret = -EINVAL;
337 else
338 srq->limit = attr->srq_limit;
339 spin_unlock_irq(&srq->rq.lock);
340 }
341 goto bail;
342
343bail_unlock:
344 spin_unlock_irq(&srq->rq.lock);
345bail_free:
346 vfree(wq);
347bail:
348 return ret;
349}
350
351int ipath_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
352{
353 struct ipath_srq *srq = to_isrq(ibsrq);
354
355 attr->max_wr = srq->rq.size - 1;
356 attr->max_sge = srq->rq.max_sge;
357 attr->srq_limit = srq->limit;
358 return 0;
359}
360
361/**
362 * ipath_destroy_srq - destroy a shared receive queue
363 * @ibsrq: the SRQ to destroy
364 */
365int ipath_destroy_srq(struct ib_srq *ibsrq)
366{
367 struct ipath_srq *srq = to_isrq(ibsrq);
368 struct ipath_ibdev *dev = to_idev(ibsrq->device);
369
370 spin_lock(&dev->n_srqs_lock);
371 dev->n_srqs_allocated--;
372 spin_unlock(&dev->n_srqs_lock);
373 if (srq->ip)
374 kref_put(&srq->ip->ref, ipath_release_mmap_info);
375 else
376 vfree(srq->rq.wq);
377 kfree(srq);
378
379 return 0;
380}
diff --git a/drivers/staging/rdma/ipath/ipath_stats.c b/drivers/staging/rdma/ipath/ipath_stats.c
deleted file mode 100644
index f63e143e3292..000000000000
--- a/drivers/staging/rdma/ipath/ipath_stats.c
+++ /dev/null
@@ -1,347 +0,0 @@
1/*
2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include "ipath_kernel.h"
35
36struct infinipath_stats ipath_stats;
37
38/**
39 * ipath_snap_cntr - snapshot a chip counter
40 * @dd: the infinipath device
41 * @creg: the counter to snapshot
42 *
43 * Called from the stats timer and from user counter reads, to deal with
44 * counters that wrap in "human time". The words sent and received and
45 * the packets sent and received are all that we worry about. For now,
46 * at least, we don't worry about error counters, because if they wrap
47 * that quickly we probably don't care. We may eventually just make this
48 * handle all the counters. Word counters can wrap in about 20 seconds
49 * of full-bandwidth traffic, packet counters in a few hours.
50 */
51
52u64 ipath_snap_cntr(struct ipath_devdata *dd, ipath_creg creg)
53{
54 u32 val, reg64 = 0;
55 u64 val64;
56 unsigned long t0, t1;
57 u64 ret;
58
59 t0 = jiffies;
60 /* If fast-increment counters are only 32 bits, snapshot them,
61 * and maintain them as 64-bit values in the driver. */
62 if (!(dd->ipath_flags & IPATH_32BITCOUNTERS) &&
63 (creg == dd->ipath_cregs->cr_wordsendcnt ||
64 creg == dd->ipath_cregs->cr_wordrcvcnt ||
65 creg == dd->ipath_cregs->cr_pktsendcnt ||
66 creg == dd->ipath_cregs->cr_pktrcvcnt)) {
67 val64 = ipath_read_creg(dd, creg);
68 val = val64 == ~0ULL ? ~0U : 0;
69 reg64 = 1;
70 } else /* val64 just to keep gcc quiet... */
71 val64 = val = ipath_read_creg32(dd, creg);
72 /*
73 * See if a second has passed. This is just a way to detect things
74 * that are quite broken. Normally this should take just a few
75 * cycles (the check is for long enough that we don't care if we get
76 * pre-empted). An Opteron HT O read timeout is 4 seconds with
77 * normal NB values.
78 */
79 t1 = jiffies;
80 if (time_before(t0 + HZ, t1) && val == -1) {
81 ipath_dev_err(dd, "Error! Read counter 0x%x timed out\n",
82 creg);
83 ret = 0ULL;
84 goto bail;
85 }
86 if (reg64) {
87 ret = val64;
88 goto bail;
89 }
90
91 if (creg == dd->ipath_cregs->cr_wordsendcnt) {
92 if (val != dd->ipath_lastsword) {
93 dd->ipath_sword += val - dd->ipath_lastsword;
94 dd->ipath_lastsword = val;
95 }
96 val64 = dd->ipath_sword;
97 } else if (creg == dd->ipath_cregs->cr_wordrcvcnt) {
98 if (val != dd->ipath_lastrword) {
99 dd->ipath_rword += val - dd->ipath_lastrword;
100 dd->ipath_lastrword = val;
101 }
102 val64 = dd->ipath_rword;
103 } else if (creg == dd->ipath_cregs->cr_pktsendcnt) {
104 if (val != dd->ipath_lastspkts) {
105 dd->ipath_spkts += val - dd->ipath_lastspkts;
106 dd->ipath_lastspkts = val;
107 }
108 val64 = dd->ipath_spkts;
109 } else if (creg == dd->ipath_cregs->cr_pktrcvcnt) {
110 if (val != dd->ipath_lastrpkts) {
111 dd->ipath_rpkts += val - dd->ipath_lastrpkts;
112 dd->ipath_lastrpkts = val;
113 }
114 val64 = dd->ipath_rpkts;
115 } else if (creg == dd->ipath_cregs->cr_ibsymbolerrcnt) {
116 if (dd->ibdeltainprog)
117 val64 -= val64 - dd->ibsymsnap;
118 val64 -= dd->ibsymdelta;
119 } else if (creg == dd->ipath_cregs->cr_iblinkerrrecovcnt) {
120 if (dd->ibdeltainprog)
121 val64 -= val64 - dd->iblnkerrsnap;
122 val64 -= dd->iblnkerrdelta;
123 } else
124 val64 = (u64) val;
125
126 ret = val64;
127
128bail:
129 return ret;
130}
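/*
 * Editorial sketch, not part of the original driver: the delta
 * accumulation above ("dd->ipath_sword += val - dd->ipath_lastsword")
 * stays correct across a 32-bit counter wrap when both operands are
 * 32-bit, since the subtraction is then done modulo 2^32 before being
 * added to the 64-bit running total.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t total = 0;
	uint32_t last = 0xfffffff0u;	/* snapshot just before the wrap */
	uint32_t now = 0x00000010u;	/* snapshot just after the wrap */

	total += (uint32_t)(now - last);	/* 0x10 - 0xfffffff0 == 0x20 mod 2^32 */
	assert(total == 0x20);
	return 0;
}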
131
132/**
133 * ipath_qcheck - print delta of egrfull/hdrqfull errors for kernel ports
134 * @dd: the infinipath device
135 *
136 * Print the delta of egrfull/hdrqfull errors for kernel ports no more than
137 * every 5 seconds. User processes are printed at close, but the kernel never
138 * closes, so... Kept as a separate routine so it may be called from other
139 * places someday, and so the function name printed by _IPATH_INFO is meaningful.
140 */
141static void ipath_qcheck(struct ipath_devdata *dd)
142{
143 static u64 last_tot_hdrqfull;
144 struct ipath_portdata *pd = dd->ipath_pd[0];
145 size_t blen = 0;
146 char buf[128];
147 u32 hdrqtail;
148
149 *buf = 0;
150 if (pd->port_hdrqfull != dd->ipath_p0_hdrqfull) {
151 blen = snprintf(buf, sizeof buf, "port 0 hdrqfull %u",
152 pd->port_hdrqfull -
153 dd->ipath_p0_hdrqfull);
154 dd->ipath_p0_hdrqfull = pd->port_hdrqfull;
155 }
156 if (ipath_stats.sps_etidfull != dd->ipath_last_tidfull) {
157 blen += snprintf(buf + blen, sizeof buf - blen,
158 "%srcvegrfull %llu",
159 blen ? ", " : "",
160 (unsigned long long)
161 (ipath_stats.sps_etidfull -
162 dd->ipath_last_tidfull));
163 dd->ipath_last_tidfull = ipath_stats.sps_etidfull;
164 }
165
166 /*
167 * This is actually the number of hdrq-full interrupts, not actual
168 * events, but at the moment that's mostly what I'm interested in.
169 * The actual count, etc. is in the counters, if needed. For production
170 * users this won't ordinarily be printed.
171 */
172
173 if ((ipath_debug & (__IPATH_PKTDBG | __IPATH_DBG)) &&
174 ipath_stats.sps_hdrqfull != last_tot_hdrqfull) {
175 blen += snprintf(buf + blen, sizeof buf - blen,
176 "%shdrqfull %llu (all ports)",
177 blen ? ", " : "",
178 (unsigned long long)
179 (ipath_stats.sps_hdrqfull -
180 last_tot_hdrqfull));
181 last_tot_hdrqfull = ipath_stats.sps_hdrqfull;
182 }
183 if (blen)
184 ipath_dbg("%s\n", buf);
185
186 hdrqtail = ipath_get_hdrqtail(pd);
187 if (pd->port_head != hdrqtail) {
188 if (dd->ipath_lastport0rcv_cnt ==
189 ipath_stats.sps_port0pkts) {
190 ipath_cdbg(PKT, "missing rcv interrupts? "
191 "port0 hd=%x tl=%x; port0pkts %llx; write"
192 " hd (w/intr)\n",
193 pd->port_head, hdrqtail,
194 (unsigned long long)
195 ipath_stats.sps_port0pkts);
196 ipath_write_ureg(dd, ur_rcvhdrhead, hdrqtail |
197 dd->ipath_rhdrhead_intr_off, pd->port_port);
198 }
199 dd->ipath_lastport0rcv_cnt = ipath_stats.sps_port0pkts;
200 }
201}
202
203static void ipath_chk_errormask(struct ipath_devdata *dd)
204{
205 static u32 fixed;
206 u32 ctrl;
207 unsigned long errormask;
208 unsigned long hwerrs;
209
210 if (!dd->ipath_errormask || !(dd->ipath_flags & IPATH_INITTED))
211 return;
212
213 errormask = ipath_read_kreg64(dd, dd->ipath_kregs->kr_errormask);
214
215 if (errormask == dd->ipath_errormask)
216 return;
217 fixed++;
218
219 hwerrs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus);
220 ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control);
221
222 ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
223 dd->ipath_errormask);
224
225 if ((hwerrs & dd->ipath_hwerrmask) ||
226 (ctrl & INFINIPATH_C_FREEZEMODE)) {
227 /* force re-interrupt of pending events, just in case */
228 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear, 0ULL);
229 ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, 0ULL);
230 ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, 0ULL);
231 dev_info(&dd->pcidev->dev,
232 "errormask fixed(%u) %lx -> %lx, ctrl %x hwerr %lx\n",
233 fixed, errormask, (unsigned long)dd->ipath_errormask,
234 ctrl, hwerrs);
235 } else
236 ipath_dbg("errormask fixed(%u) %lx -> %lx, no freeze\n",
237 fixed, errormask,
238 (unsigned long)dd->ipath_errormask);
239}
240
241
242/**
243 * ipath_get_faststats - get word counters from chip before they overflow
244 * @opaque - contains a pointer to the infinipath device ipath_devdata
245 *
246 * called from add_timer
247 */
248void ipath_get_faststats(unsigned long opaque)
249{
250 struct ipath_devdata *dd = (struct ipath_devdata *) opaque;
251 int i;
252 static unsigned cnt;
253 unsigned long flags;
254 u64 traffic_wds;
255
256 /*
257 * don't access the chip while running diags, or memory diags can
258 * fail
259 */
260 if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_INITTED) ||
261 ipath_diag_inuse)
262 /* but re-arm the timer for the diags case; it won't hurt otherwise */
263 goto done;
264
265 /*
266 * We now try to maintain an "active timer", based on traffic
267 * exceeding a threshold, so we need to check the word-counts
268 * even if they are 64-bit.
269 */
270 traffic_wds = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt) +
271 ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
272 spin_lock_irqsave(&dd->ipath_eep_st_lock, flags);
273 traffic_wds -= dd->ipath_traffic_wds;
274 dd->ipath_traffic_wds += traffic_wds;
275 if (traffic_wds >= IPATH_TRAFFIC_ACTIVE_THRESHOLD)
276 atomic_add(5, &dd->ipath_active_time); /* S/B #define */
277 spin_unlock_irqrestore(&dd->ipath_eep_st_lock, flags);
278
279 if (dd->ipath_flags & IPATH_32BITCOUNTERS) {
280 ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
281 ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
282 }
283
284 ipath_qcheck(dd);
285
286 /*
287 * Deal with repeat-error suppression. It doesn't really matter if the
288 * last error was almost a full interval ago, or just a few usecs
289 * ago; we still won't get more than 2 per interval. We may want
290 * longer intervals for this eventually; that could be done with a
291 * modulus, a counter or a separate timer. Also see the code in
292 * ipath_handle_errors() and ipath_handle_hwerrors().
293 */
294
295 if (dd->ipath_lasterror)
296 dd->ipath_lasterror = 0;
297 if (dd->ipath_lasthwerror)
298 dd->ipath_lasthwerror = 0;
299 if (dd->ipath_maskederrs
300 && time_after(jiffies, dd->ipath_unmasktime)) {
301 char ebuf[256];
302 int iserr;
303 iserr = ipath_decode_err(dd, ebuf, sizeof ebuf,
304 dd->ipath_maskederrs);
305 if (dd->ipath_maskederrs &
306 ~(INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
307 INFINIPATH_E_PKTERRS))
308 ipath_dev_err(dd, "Re-enabling masked errors "
309 "(%s)\n", ebuf);
310 else {
311 /*
312 * rcvegrfull and rcvhdrqfull are "normal", for some
313 * types of processes (mostly benchmarks) that send
314 * huge numbers of messages, while not processing
315 * them. So only complain about these at debug
316 * level.
317 */
318 if (iserr)
319 ipath_dbg(
320 "Re-enabling queue full errors (%s)\n",
321 ebuf);
322 else
323 ipath_cdbg(ERRPKT, "Re-enabling packet"
324 " problem interrupt (%s)\n", ebuf);
325 }
326
327 /* re-enable masked errors */
328 dd->ipath_errormask |= dd->ipath_maskederrs;
329 ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
330 dd->ipath_errormask);
331 dd->ipath_maskederrs = 0;
332 }
333
334 /* limit qfull messages to ~one per minute per port */
335 if ((++cnt & 0x10)) {
336 for (i = (int) dd->ipath_cfgports; --i >= 0; ) {
337 struct ipath_portdata *pd = dd->ipath_pd[i];
338
339 if (pd && pd->port_lastrcvhdrqtail != -1)
340 pd->port_lastrcvhdrqtail = -1;
341 }
342 }
343
344 ipath_chk_errormask(dd);
345done:
346 mod_timer(&dd->ipath_stats_timer, jiffies + HZ * 5);
347}
diff --git a/drivers/staging/rdma/ipath/ipath_sysfs.c b/drivers/staging/rdma/ipath/ipath_sysfs.c
deleted file mode 100644
index b12b1f6caf59..000000000000
--- a/drivers/staging/rdma/ipath/ipath_sysfs.c
+++ /dev/null
@@ -1,1237 +0,0 @@
1/*
2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/ctype.h>
35#include <linux/stat.h>
36
37#include "ipath_kernel.h"
38#include "ipath_verbs.h"
39#include "ipath_common.h"
40
41/**
42 * ipath_parse_ushort - parse an unsigned short value in an arbitrary base
43 * @str: the string containing the number
44 * @valp: where to put the result
45 *
46 * Returns the number of bytes consumed, or a negative value on error.
47 */
48int ipath_parse_ushort(const char *str, unsigned short *valp)
49{
50 unsigned long val;
51 char *end;
52 int ret;
53
54 if (!isdigit(str[0])) {
55 ret = -EINVAL;
56 goto bail;
57 }
58
59 val = simple_strtoul(str, &end, 0);
60
61 if (val > 0xffff) {
62 ret = -EINVAL;
63 goto bail;
64 }
65
66 *valp = val;
67
68 ret = end + 1 - str;
69 if (ret == 0)
70 ret = -EINVAL;
71
72bail:
73 return ret;
74}
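/*
 * Editorial sketch, not part of the original driver: a userspace
 * illustration of the return-value arithmetic above.  For a sysfs
 * write of "123\n", the parse leaves end pointing at the newline, so
 * end + 1 - str is 4, the full buffer length that the store_*()
 * callbacks below hand back to sysfs.
 */
#include <assert.h>
#include <stdlib.h>

int main(void)
{
	const char *str = "123\n";
	char *end;

	assert(strtoul(str, &end, 0) == 123);
	assert(end + 1 - str == 4);
	return 0;
}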
75
76static ssize_t show_version(struct device_driver *dev, char *buf)
77{
78 /* The string printed here is already newline-terminated. */
79 return scnprintf(buf, PAGE_SIZE, "%s", ib_ipath_version);
80}
81
82static ssize_t show_num_units(struct device_driver *dev, char *buf)
83{
84 return scnprintf(buf, PAGE_SIZE, "%d\n",
85 ipath_count_units(NULL, NULL, NULL));
86}
87
88static ssize_t show_status(struct device *dev,
89 struct device_attribute *attr,
90 char *buf)
91{
92 struct ipath_devdata *dd = dev_get_drvdata(dev);
93 ssize_t ret;
94
95 if (!dd->ipath_statusp) {
96 ret = -EINVAL;
97 goto bail;
98 }
99
100 ret = scnprintf(buf, PAGE_SIZE, "0x%llx\n",
101 (unsigned long long) *(dd->ipath_statusp));
102
103bail:
104 return ret;
105}
106
107static const char *ipath_status_str[] = {
108 "Initted",
109 "Disabled",
110 "Admin_Disabled",
111 "", /* This used to be the old "OIB_SMA" status. */
112 "", /* This used to be the old "SMA" status. */
113 "Present",
114 "IB_link_up",
115 "IB_configured",
116 "NoIBcable",
117 "Fatal_Hardware_Error",
118 NULL,
119};
120
121static ssize_t show_status_str(struct device *dev,
122 struct device_attribute *attr,
123 char *buf)
124{
125 struct ipath_devdata *dd = dev_get_drvdata(dev);
126 int i, any;
127 u64 s;
128 ssize_t ret;
129
130 if (!dd->ipath_statusp) {
131 ret = -EINVAL;
132 goto bail;
133 }
134
135 s = *(dd->ipath_statusp);
136 *buf = '\0';
137 for (any = i = 0; s && ipath_status_str[i]; i++) {
138 if (s & 1) {
139 if (any && strlcat(buf, " ", PAGE_SIZE) >=
140 PAGE_SIZE)
141 /* overflow */
142 break;
143 if (strlcat(buf, ipath_status_str[i],
144 PAGE_SIZE) >= PAGE_SIZE)
145 break;
146 any = 1;
147 }
148 s >>= 1;
149 }
150 if (any)
151 strlcat(buf, "\n", PAGE_SIZE);
152
153 ret = strlen(buf);
154
155bail:
156 return ret;
157}
158
159static ssize_t show_boardversion(struct device *dev,
160 struct device_attribute *attr,
161 char *buf)
162{
163 struct ipath_devdata *dd = dev_get_drvdata(dev);
164 /* The string printed here is already newline-terminated. */
165 return scnprintf(buf, PAGE_SIZE, "%s", dd->ipath_boardversion);
166}
167
168static ssize_t show_localbus_info(struct device *dev,
169 struct device_attribute *attr,
170 char *buf)
171{
172 struct ipath_devdata *dd = dev_get_drvdata(dev);
173 /* The string printed here is already newline-terminated. */
174 return scnprintf(buf, PAGE_SIZE, "%s", dd->ipath_lbus_info);
175}
176
177static ssize_t show_lmc(struct device *dev,
178 struct device_attribute *attr,
179 char *buf)
180{
181 struct ipath_devdata *dd = dev_get_drvdata(dev);
182
183 return scnprintf(buf, PAGE_SIZE, "%u\n", dd->ipath_lmc);
184}
185
186static ssize_t store_lmc(struct device *dev,
187 struct device_attribute *attr,
188 const char *buf,
189 size_t count)
190{
191 struct ipath_devdata *dd = dev_get_drvdata(dev);
192 u16 lmc = 0;
193 int ret;
194
195 ret = ipath_parse_ushort(buf, &lmc);
196 if (ret < 0)
197 goto invalid;
198
199 if (lmc > 7) {
200 ret = -EINVAL;
201 goto invalid;
202 }
203
204 ipath_set_lid(dd, dd->ipath_lid, lmc);
205
206 goto bail;
207invalid:
208 ipath_dev_err(dd, "attempt to set invalid LMC %u\n", lmc);
209bail:
210 return ret;
211}
212
213static ssize_t show_lid(struct device *dev,
214 struct device_attribute *attr,
215 char *buf)
216{
217 struct ipath_devdata *dd = dev_get_drvdata(dev);
218
219 return scnprintf(buf, PAGE_SIZE, "0x%x\n", dd->ipath_lid);
220}
221
222static ssize_t store_lid(struct device *dev,
223 struct device_attribute *attr,
224 const char *buf,
225 size_t count)
226{
227 struct ipath_devdata *dd = dev_get_drvdata(dev);
228 u16 lid = 0;
229 int ret;
230
231 ret = ipath_parse_ushort(buf, &lid);
232 if (ret < 0)
233 goto invalid;
234
235 if (lid == 0 || lid >= IPATH_MULTICAST_LID_BASE) {
236 ret = -EINVAL;
237 goto invalid;
238 }
239
240 ipath_set_lid(dd, lid, dd->ipath_lmc);
241
242 goto bail;
243invalid:
244 ipath_dev_err(dd, "attempt to set invalid LID 0x%x\n", lid);
245bail:
246 return ret;
247}
248
249static ssize_t show_mlid(struct device *dev,
250 struct device_attribute *attr,
251 char *buf)
252{
253 struct ipath_devdata *dd = dev_get_drvdata(dev);
254
255 return scnprintf(buf, PAGE_SIZE, "0x%x\n", dd->ipath_mlid);
256}
257
258static ssize_t store_mlid(struct device *dev,
259 struct device_attribute *attr,
260 const char *buf,
261 size_t count)
262{
263 struct ipath_devdata *dd = dev_get_drvdata(dev);
264 u16 mlid;
265 int ret;
266
267 ret = ipath_parse_ushort(buf, &mlid);
268 if (ret < 0 || mlid < IPATH_MULTICAST_LID_BASE)
269 goto invalid;
270
271 dd->ipath_mlid = mlid;
272
273 goto bail;
274invalid:
275 ipath_dev_err(dd, "attempt to set invalid MLID\n");
276bail:
277 return ret;
278}
279
280static ssize_t show_guid(struct device *dev,
281 struct device_attribute *attr,
282 char *buf)
283{
284 struct ipath_devdata *dd = dev_get_drvdata(dev);
285 u8 *guid;
286
287	guid = (u8 *)&dd->ipath_guid;
288
289 return scnprintf(buf, PAGE_SIZE,
290 "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
291 guid[0], guid[1], guid[2], guid[3],
292 guid[4], guid[5], guid[6], guid[7]);
293}
294
295static ssize_t store_guid(struct device *dev,
296 struct device_attribute *attr,
297 const char *buf,
298 size_t count)
299{
300 struct ipath_devdata *dd = dev_get_drvdata(dev);
301 ssize_t ret;
302 unsigned short guid[8];
303 __be64 new_guid;
304 u8 *ng;
305 int i;
306
307 if (sscanf(buf, "%hx:%hx:%hx:%hx:%hx:%hx:%hx:%hx",
308 &guid[0], &guid[1], &guid[2], &guid[3],
309 &guid[4], &guid[5], &guid[6], &guid[7]) != 8)
310 goto invalid;
311
312 ng = (u8 *) &new_guid;
313
314 for (i = 0; i < 8; i++) {
315 if (guid[i] > 0xff)
316 goto invalid;
317 ng[i] = guid[i];
318 }
319
320 if (new_guid == 0)
321 goto invalid;
322
323 dd->ipath_guid = new_guid;
324 dd->ipath_nguid = 1;
325 if (dd->verbs_dev)
326 dd->verbs_dev->ibdev.node_guid = new_guid;
327
328 ret = strlen(buf);
329 goto bail;
330
331invalid:
332 ipath_dev_err(dd, "attempt to set invalid GUID\n");
333 ret = -EINVAL;
334
335bail:
336 return ret;
337}
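/*
 * store_guid() expects eight colon-separated hex bytes; for example,
 * writing "00:11:75:00:00:12:34:56" (an arbitrary illustrative value)
 * sets the node GUID to that byte sequence. An all-zero GUID is rejected.
 */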
338
339static ssize_t show_nguid(struct device *dev,
340 struct device_attribute *attr,
341 char *buf)
342{
343 struct ipath_devdata *dd = dev_get_drvdata(dev);
344
345 return scnprintf(buf, PAGE_SIZE, "%u\n", dd->ipath_nguid);
346}
347
348static ssize_t show_nports(struct device *dev,
349 struct device_attribute *attr,
350 char *buf)
351{
352 struct ipath_devdata *dd = dev_get_drvdata(dev);
353
354 /* Return the number of user ports available. */
355 return scnprintf(buf, PAGE_SIZE, "%u\n", dd->ipath_cfgports - 1);
356}
357
358static ssize_t show_serial(struct device *dev,
359 struct device_attribute *attr,
360 char *buf)
361{
362 struct ipath_devdata *dd = dev_get_drvdata(dev);
363
364 buf[sizeof dd->ipath_serial] = '\0';
365 memcpy(buf, dd->ipath_serial, sizeof dd->ipath_serial);
366 strcat(buf, "\n");
367 return strlen(buf);
368}
369
370static ssize_t show_unit(struct device *dev,
371 struct device_attribute *attr,
372 char *buf)
373{
374 struct ipath_devdata *dd = dev_get_drvdata(dev);
375
376 return scnprintf(buf, PAGE_SIZE, "%u\n", dd->ipath_unit);
377}
378
379static ssize_t show_jint_max_packets(struct device *dev,
380 struct device_attribute *attr,
381 char *buf)
382{
383 struct ipath_devdata *dd = dev_get_drvdata(dev);
384
385 return scnprintf(buf, PAGE_SIZE, "%hu\n", dd->ipath_jint_max_packets);
386}
387
388static ssize_t store_jint_max_packets(struct device *dev,
389 struct device_attribute *attr,
390 const char *buf,
391 size_t count)
392{
393 struct ipath_devdata *dd = dev_get_drvdata(dev);
394 u16 v = 0;
395 int ret;
396
397 ret = ipath_parse_ushort(buf, &v);
398 if (ret < 0)
399 ipath_dev_err(dd, "invalid jint_max_packets.\n");
400 else
401 dd->ipath_f_config_jint(dd, dd->ipath_jint_idle_ticks, v);
402
403 return ret;
404}
405
406static ssize_t show_jint_idle_ticks(struct device *dev,
407 struct device_attribute *attr,
408 char *buf)
409{
410 struct ipath_devdata *dd = dev_get_drvdata(dev);
411
412 return scnprintf(buf, PAGE_SIZE, "%hu\n", dd->ipath_jint_idle_ticks);
413}
414
415static ssize_t store_jint_idle_ticks(struct device *dev,
416 struct device_attribute *attr,
417 const char *buf,
418 size_t count)
419{
420 struct ipath_devdata *dd = dev_get_drvdata(dev);
421 u16 v = 0;
422 int ret;
423
424 ret = ipath_parse_ushort(buf, &v);
425 if (ret < 0)
426 ipath_dev_err(dd, "invalid jint_idle_ticks.\n");
427 else
428 dd->ipath_f_config_jint(dd, v, dd->ipath_jint_max_packets);
429
430 return ret;
431}
432
433#define DEVICE_COUNTER(name, attr) \
434 static ssize_t show_counter_##name(struct device *dev, \
435 struct device_attribute *attr, \
436 char *buf) \
437 { \
438 struct ipath_devdata *dd = dev_get_drvdata(dev); \
439 return scnprintf(\
440 buf, PAGE_SIZE, "%llu\n", (unsigned long long) \
441 ipath_snap_cntr( \
442 dd, offsetof(struct infinipath_counters, \
443 attr) / sizeof(u64))); \
444 } \
445 static DEVICE_ATTR(name, S_IRUGO, show_counter_##name, NULL);
446
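Each DEVICE_COUNTER() line below creates a read-only sysfs attribute whose show routine snapshots one field of struct infinipath_counters. As a rough sketch of what the preprocessor produces, the first invocation, DEVICE_COUNTER(ib_link_downeds, IBLinkDownedCnt), expands to approximately the following (note that the macro parameter attr also renames the otherwise-unused device_attribute argument):

static ssize_t show_counter_ib_link_downeds(struct device *dev,
					    struct device_attribute *IBLinkDownedCnt,
					    char *buf)
{
	struct ipath_devdata *dd = dev_get_drvdata(dev);
	return scnprintf(buf, PAGE_SIZE, "%llu\n", (unsigned long long)
			 ipath_snap_cntr(dd,
					 offsetof(struct infinipath_counters,
						  IBLinkDownedCnt) / sizeof(u64)));
}
static DEVICE_ATTR(ib_link_downeds, S_IRUGO, show_counter_ib_link_downeds, NULL);

Each of these attributes is then collected under the device's "counters" sysfs directory via dev_counter_attr_group below.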
447DEVICE_COUNTER(ib_link_downeds, IBLinkDownedCnt);
448DEVICE_COUNTER(ib_link_err_recoveries, IBLinkErrRecoveryCnt);
449DEVICE_COUNTER(ib_status_changes, IBStatusChangeCnt);
450DEVICE_COUNTER(ib_symbol_errs, IBSymbolErrCnt);
451DEVICE_COUNTER(lb_flow_stalls, LBFlowStallCnt);
452DEVICE_COUNTER(lb_ints, LBIntCnt);
453DEVICE_COUNTER(rx_bad_formats, RxBadFormatCnt);
454DEVICE_COUNTER(rx_buf_ovfls, RxBufOvflCnt);
455DEVICE_COUNTER(rx_data_pkts, RxDataPktCnt);
456DEVICE_COUNTER(rx_dropped_pkts, RxDroppedPktCnt);
457DEVICE_COUNTER(rx_dwords, RxDwordCnt);
458DEVICE_COUNTER(rx_ebps, RxEBPCnt);
459DEVICE_COUNTER(rx_flow_ctrl_errs, RxFlowCtrlErrCnt);
460DEVICE_COUNTER(rx_flow_pkts, RxFlowPktCnt);
461DEVICE_COUNTER(rx_icrc_errs, RxICRCErrCnt);
462DEVICE_COUNTER(rx_len_errs, RxLenErrCnt);
463DEVICE_COUNTER(rx_link_problems, RxLinkProblemCnt);
464DEVICE_COUNTER(rx_lpcrc_errs, RxLPCRCErrCnt);
465DEVICE_COUNTER(rx_max_min_len_errs, RxMaxMinLenErrCnt);
466DEVICE_COUNTER(rx_p0_hdr_egr_ovfls, RxP0HdrEgrOvflCnt);
467DEVICE_COUNTER(rx_p1_hdr_egr_ovfls, RxP1HdrEgrOvflCnt);
468DEVICE_COUNTER(rx_p2_hdr_egr_ovfls, RxP2HdrEgrOvflCnt);
469DEVICE_COUNTER(rx_p3_hdr_egr_ovfls, RxP3HdrEgrOvflCnt);
470DEVICE_COUNTER(rx_p4_hdr_egr_ovfls, RxP4HdrEgrOvflCnt);
471DEVICE_COUNTER(rx_p5_hdr_egr_ovfls, RxP5HdrEgrOvflCnt);
472DEVICE_COUNTER(rx_p6_hdr_egr_ovfls, RxP6HdrEgrOvflCnt);
473DEVICE_COUNTER(rx_p7_hdr_egr_ovfls, RxP7HdrEgrOvflCnt);
474DEVICE_COUNTER(rx_p8_hdr_egr_ovfls, RxP8HdrEgrOvflCnt);
475DEVICE_COUNTER(rx_pkey_mismatches, RxPKeyMismatchCnt);
476DEVICE_COUNTER(rx_tid_full_errs, RxTIDFullErrCnt);
477DEVICE_COUNTER(rx_tid_valid_errs, RxTIDValidErrCnt);
478DEVICE_COUNTER(rx_vcrc_errs, RxVCRCErrCnt);
479DEVICE_COUNTER(tx_data_pkts, TxDataPktCnt);
480DEVICE_COUNTER(tx_dropped_pkts, TxDroppedPktCnt);
481DEVICE_COUNTER(tx_dwords, TxDwordCnt);
482DEVICE_COUNTER(tx_flow_pkts, TxFlowPktCnt);
483DEVICE_COUNTER(tx_flow_stalls, TxFlowStallCnt);
484DEVICE_COUNTER(tx_len_errs, TxLenErrCnt);
485DEVICE_COUNTER(tx_max_min_len_errs, TxMaxMinLenErrCnt);
486DEVICE_COUNTER(tx_underruns, TxUnderrunCnt);
487DEVICE_COUNTER(tx_unsup_vl_errs, TxUnsupVLErrCnt);
488
489static struct attribute *dev_counter_attributes[] = {
490 &dev_attr_ib_link_downeds.attr,
491 &dev_attr_ib_link_err_recoveries.attr,
492 &dev_attr_ib_status_changes.attr,
493 &dev_attr_ib_symbol_errs.attr,
494 &dev_attr_lb_flow_stalls.attr,
495 &dev_attr_lb_ints.attr,
496 &dev_attr_rx_bad_formats.attr,
497 &dev_attr_rx_buf_ovfls.attr,
498 &dev_attr_rx_data_pkts.attr,
499 &dev_attr_rx_dropped_pkts.attr,
500 &dev_attr_rx_dwords.attr,
501 &dev_attr_rx_ebps.attr,
502 &dev_attr_rx_flow_ctrl_errs.attr,
503 &dev_attr_rx_flow_pkts.attr,
504 &dev_attr_rx_icrc_errs.attr,
505 &dev_attr_rx_len_errs.attr,
506 &dev_attr_rx_link_problems.attr,
507 &dev_attr_rx_lpcrc_errs.attr,
508 &dev_attr_rx_max_min_len_errs.attr,
509 &dev_attr_rx_p0_hdr_egr_ovfls.attr,
510 &dev_attr_rx_p1_hdr_egr_ovfls.attr,
511 &dev_attr_rx_p2_hdr_egr_ovfls.attr,
512 &dev_attr_rx_p3_hdr_egr_ovfls.attr,
513 &dev_attr_rx_p4_hdr_egr_ovfls.attr,
514 &dev_attr_rx_p5_hdr_egr_ovfls.attr,
515 &dev_attr_rx_p6_hdr_egr_ovfls.attr,
516 &dev_attr_rx_p7_hdr_egr_ovfls.attr,
517 &dev_attr_rx_p8_hdr_egr_ovfls.attr,
518 &dev_attr_rx_pkey_mismatches.attr,
519 &dev_attr_rx_tid_full_errs.attr,
520 &dev_attr_rx_tid_valid_errs.attr,
521 &dev_attr_rx_vcrc_errs.attr,
522 &dev_attr_tx_data_pkts.attr,
523 &dev_attr_tx_dropped_pkts.attr,
524 &dev_attr_tx_dwords.attr,
525 &dev_attr_tx_flow_pkts.attr,
526 &dev_attr_tx_flow_stalls.attr,
527 &dev_attr_tx_len_errs.attr,
528 &dev_attr_tx_max_min_len_errs.attr,
529 &dev_attr_tx_underruns.attr,
530 &dev_attr_tx_unsup_vl_errs.attr,
531 NULL
532};
533
534static struct attribute_group dev_counter_attr_group = {
535 .name = "counters",
536 .attrs = dev_counter_attributes
537};
538
539static ssize_t store_reset(struct device *dev,
540 struct device_attribute *attr,
541 const char *buf,
542 size_t count)
543{
544 struct ipath_devdata *dd = dev_get_drvdata(dev);
545 int ret;
546
547 if (count < 5 || memcmp(buf, "reset", 5)) {
548 ret = -EINVAL;
549 goto bail;
550 }
551
552 if (dd->ipath_flags & IPATH_DISABLED) {
553 /*
554	 * Post-reset init would re-enable interrupts, etc.,
555	 * so don't allow a reset on disabled devices. -EINVAL
556	 * is not a perfect error code, but it is about the best choice.
557 */
558		dev_info(dev, "Unit %d is disabled, can't reset\n",
559 dd->ipath_unit);
560 ret = -EINVAL;
561 goto bail;
562 }
563 ret = ipath_reset_device(dd->ipath_unit);
564bail:
565	return ret < 0 ? ret : count;
566}
567
568static ssize_t store_link_state(struct device *dev,
569 struct device_attribute *attr,
570 const char *buf,
571 size_t count)
572{
573 struct ipath_devdata *dd = dev_get_drvdata(dev);
574 int ret, r;
575 u16 state;
576
577 ret = ipath_parse_ushort(buf, &state);
578 if (ret < 0)
579 goto invalid;
580
581 r = ipath_set_linkstate(dd, state);
582 if (r < 0) {
583 ret = r;
584 goto bail;
585 }
586
587 goto bail;
588invalid:
589 ipath_dev_err(dd, "attempt to set invalid link state\n");
590bail:
591 return ret;
592}
593
594static ssize_t show_mtu(struct device *dev,
595 struct device_attribute *attr,
596 char *buf)
597{
598 struct ipath_devdata *dd = dev_get_drvdata(dev);
599 return scnprintf(buf, PAGE_SIZE, "%u\n", dd->ipath_ibmtu);
600}
601
602static ssize_t store_mtu(struct device *dev,
603 struct device_attribute *attr,
604 const char *buf,
605 size_t count)
606{
607 struct ipath_devdata *dd = dev_get_drvdata(dev);
608 ssize_t ret;
609 u16 mtu = 0;
610 int r;
611
612 ret = ipath_parse_ushort(buf, &mtu);
613 if (ret < 0)
614 goto invalid;
615
616 r = ipath_set_mtu(dd, mtu);
617 if (r < 0)
618 ret = r;
619
620 goto bail;
621invalid:
622 ipath_dev_err(dd, "attempt to set invalid MTU\n");
623bail:
624 return ret;
625}
626
627static ssize_t show_enabled(struct device *dev,
628 struct device_attribute *attr,
629 char *buf)
630{
631 struct ipath_devdata *dd = dev_get_drvdata(dev);
632 return scnprintf(buf, PAGE_SIZE, "%u\n",
633 (dd->ipath_flags & IPATH_DISABLED) ? 0 : 1);
634}
635
636static ssize_t store_enabled(struct device *dev,
637 struct device_attribute *attr,
638 const char *buf,
639 size_t count)
640{
641 struct ipath_devdata *dd = dev_get_drvdata(dev);
642 ssize_t ret;
643 u16 enable = 0;
644
645 ret = ipath_parse_ushort(buf, &enable);
646 if (ret < 0) {
647		ipath_dev_err(dd, "attempt to write non-numeric value to enable\n");
648 goto bail;
649 }
650
651 if (enable) {
652 if (!(dd->ipath_flags & IPATH_DISABLED))
653 goto bail;
654
655 dev_info(dev, "Enabling unit %d\n", dd->ipath_unit);
656 /* same as post-reset */
657 ret = ipath_init_chip(dd, 1);
658 if (ret)
659 ipath_dev_err(dd, "Failed to enable unit %d\n",
660 dd->ipath_unit);
661 else {
662 dd->ipath_flags &= ~IPATH_DISABLED;
663 *dd->ipath_statusp &= ~IPATH_STATUS_ADMIN_DISABLED;
664 }
665 } else if (!(dd->ipath_flags & IPATH_DISABLED)) {
666 dev_info(dev, "Disabling unit %d\n", dd->ipath_unit);
667 ipath_shutdown_device(dd);
668 dd->ipath_flags |= IPATH_DISABLED;
669 *dd->ipath_statusp |= IPATH_STATUS_ADMIN_DISABLED;
670 }
671
672bail:
673 return ret;
674}
675
676static ssize_t store_rx_pol_inv(struct device *dev,
677 struct device_attribute *attr,
678 const char *buf,
679 size_t count)
680{
681 struct ipath_devdata *dd = dev_get_drvdata(dev);
682 int ret, r;
683 u16 val;
684
685 ret = ipath_parse_ushort(buf, &val);
686 if (ret < 0)
687 goto invalid;
688
689 r = ipath_set_rx_pol_inv(dd, val);
690 if (r < 0) {
691 ret = r;
692 goto bail;
693 }
694
695 goto bail;
696invalid:
697 ipath_dev_err(dd, "attempt to set invalid Rx Polarity invert\n");
698bail:
699 return ret;
700}
701
702static ssize_t store_led_override(struct device *dev,
703 struct device_attribute *attr,
704 const char *buf,
705 size_t count)
706{
707 struct ipath_devdata *dd = dev_get_drvdata(dev);
708 int ret;
709 u16 val;
710
711 ret = ipath_parse_ushort(buf, &val);
712 if (ret > 0)
713 ipath_set_led_override(dd, val);
714 else
715 ipath_dev_err(dd, "attempt to set invalid LED override\n");
716 return ret;
717}
718
719static ssize_t show_logged_errs(struct device *dev,
720 struct device_attribute *attr,
721 char *buf)
722{
723 struct ipath_devdata *dd = dev_get_drvdata(dev);
724 int idx, count;
725
726 /* force consistency with actual EEPROM */
727 if (ipath_update_eeprom_log(dd) != 0)
728 return -ENXIO;
729
730 count = 0;
731 for (idx = 0; idx < IPATH_EEP_LOG_CNT; ++idx) {
732 count += scnprintf(buf + count, PAGE_SIZE - count, "%d%c",
733 dd->ipath_eep_st_errs[idx],
734 idx == (IPATH_EEP_LOG_CNT - 1) ? '\n' : ' ');
735 }
736
737 return count;
738}
739
740/*
741 * New sysfs entries to control various IB config. These all turn into
742 * accesses via ipath_f_get/set_ib_cfg.
743 *
744 * Get/Set heartbeat enable. Bitwise OR of 1=enabled, 2=auto.
745 */
746static ssize_t show_hrtbt_enb(struct device *dev,
747 struct device_attribute *attr,
748 char *buf)
749{
750 struct ipath_devdata *dd = dev_get_drvdata(dev);
751 int ret;
752
753 ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_HRTBT);
754 if (ret >= 0)
755 ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
756 return ret;
757}
758
759static ssize_t store_hrtbt_enb(struct device *dev,
760 struct device_attribute *attr,
761 const char *buf,
762 size_t count)
763{
764 struct ipath_devdata *dd = dev_get_drvdata(dev);
765 int ret, r;
766 u16 val;
767
768 ret = ipath_parse_ushort(buf, &val);
769 if (ret >= 0 && val > 3)
770 ret = -EINVAL;
771 if (ret < 0) {
772 ipath_dev_err(dd, "attempt to set invalid Heartbeat enable\n");
773 goto bail;
774 }
775
776 /*
777 * Set the "intentional" heartbeat enable per either of
778 * "Enable" and "Auto", as these are normally set together.
779 * This bit is consulted when leaving loopback mode,
780 * because entering loopback mode overrides it and automatically
781 * disables heartbeat.
782 */
783 r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT, val);
784 if (r < 0)
785 ret = r;
786 else if (val == IPATH_IB_HRTBT_OFF)
787 dd->ipath_flags |= IPATH_NO_HRTBT;
788 else
789 dd->ipath_flags &= ~IPATH_NO_HRTBT;
790
791bail:
792 return ret;
793}
794
795/*
796 * Get/Set Link-widths enabled. Bitwise OR of 1=1x, 2=4x (this is human/IB
797 * centric, _not_ the particular encoding of any given chip).
798 */
799static ssize_t show_lwid_enb(struct device *dev,
800 struct device_attribute *attr,
801 char *buf)
802{
803 struct ipath_devdata *dd = dev_get_drvdata(dev);
804 int ret;
805
806 ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_LWID_ENB);
807 if (ret >= 0)
808 ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
809 return ret;
810}
811
812static ssize_t store_lwid_enb(struct device *dev,
813 struct device_attribute *attr,
814 const char *buf,
815 size_t count)
816{
817 struct ipath_devdata *dd = dev_get_drvdata(dev);
818 int ret, r;
819 u16 val;
820
821 ret = ipath_parse_ushort(buf, &val);
822 if (ret >= 0 && (val == 0 || val > 3))
823 ret = -EINVAL;
824 if (ret < 0) {
825 ipath_dev_err(dd,
826 "attempt to set invalid Link Width (enable)\n");
827 goto bail;
828 }
829
830 r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_LWID_ENB, val);
831 if (r < 0)
832 ret = r;
833
834bail:
835 return ret;
836}
837
838/* Get current link width */
839static ssize_t show_lwid(struct device *dev,
840 struct device_attribute *attr,
841 char *buf)
842
843{
844 struct ipath_devdata *dd = dev_get_drvdata(dev);
845 int ret;
846
847 ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_LWID);
848 if (ret >= 0)
849 ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
850 return ret;
851}
852
853/*
854 * Get/Set Link-speeds enabled. Bitwise OR of 1=SDR, 2=DDR.
855 */
856static ssize_t show_spd_enb(struct device *dev,
857 struct device_attribute *attr,
858 char *buf)
859{
860 struct ipath_devdata *dd = dev_get_drvdata(dev);
861 int ret;
862
863 ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_SPD_ENB);
864 if (ret >= 0)
865 ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
866 return ret;
867}
868
869static ssize_t store_spd_enb(struct device *dev,
870 struct device_attribute *attr,
871 const char *buf,
872 size_t count)
873{
874 struct ipath_devdata *dd = dev_get_drvdata(dev);
875 int ret, r;
876 u16 val;
877
878 ret = ipath_parse_ushort(buf, &val);
879 if (ret >= 0 && (val == 0 || val > (IPATH_IB_SDR | IPATH_IB_DDR)))
880 ret = -EINVAL;
881 if (ret < 0) {
882 ipath_dev_err(dd,
883 "attempt to set invalid Link Speed (enable)\n");
884 goto bail;
885 }
886
887 r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_SPD_ENB, val);
888 if (r < 0)
889 ret = r;
890
891bail:
892 return ret;
893}
894
895/* Get current link speed */
896static ssize_t show_spd(struct device *dev,
897 struct device_attribute *attr,
898 char *buf)
899{
900 struct ipath_devdata *dd = dev_get_drvdata(dev);
901 int ret;
902
903 ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_SPD);
904 if (ret >= 0)
905 ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
906 return ret;
907}
908
909/*
910 * Get/Set RX polarity-invert enable. 0=no, 1=yes.
911 */
912static ssize_t show_rx_polinv_enb(struct device *dev,
913 struct device_attribute *attr,
914 char *buf)
915{
916 struct ipath_devdata *dd = dev_get_drvdata(dev);
917 int ret;
918
919 ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_RXPOL_ENB);
920 if (ret >= 0)
921 ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
922 return ret;
923}
924
925static ssize_t store_rx_polinv_enb(struct device *dev,
926 struct device_attribute *attr,
927 const char *buf,
928 size_t count)
929{
930 struct ipath_devdata *dd = dev_get_drvdata(dev);
931 int ret, r;
932 u16 val;
933
934 ret = ipath_parse_ushort(buf, &val);
935	if (ret < 0 || val > 1) {
936 ipath_dev_err(dd,
937 "attempt to set invalid Rx Polarity (enable)\n");
938 ret = -EINVAL;
939 goto bail;
940 }
941
942 r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_RXPOL_ENB, val);
943 if (r < 0)
944 ret = r;
945
946bail:
947 return ret;
948}
949
950/*
951 * Get/Set RX lane-reversal enable. 0=no, 1=yes.
952 */
953static ssize_t show_lanerev_enb(struct device *dev,
954 struct device_attribute *attr,
955 char *buf)
956{
957 struct ipath_devdata *dd = dev_get_drvdata(dev);
958 int ret;
959
960 ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_LREV_ENB);
961 if (ret >= 0)
962 ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
963 return ret;
964}
965
966static ssize_t store_lanerev_enb(struct device *dev,
967 struct device_attribute *attr,
968 const char *buf,
969 size_t count)
970{
971 struct ipath_devdata *dd = dev_get_drvdata(dev);
972 int ret, r;
973 u16 val;
974
975 ret = ipath_parse_ushort(buf, &val);
976	if (ret < 0 || val > 1) {
977 ret = -EINVAL;
978 ipath_dev_err(dd,
979 "attempt to set invalid Lane reversal (enable)\n");
980 goto bail;
981 }
982
983 r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_LREV_ENB, val);
984 if (r < 0)
985 ret = r;
986
987bail:
988 return ret;
989}
990
991static DRIVER_ATTR(num_units, S_IRUGO, show_num_units, NULL);
992static DRIVER_ATTR(version, S_IRUGO, show_version, NULL);
993
994static struct attribute *driver_attributes[] = {
995 &driver_attr_num_units.attr,
996 &driver_attr_version.attr,
997 NULL
998};
999
1000static struct attribute_group driver_attr_group = {
1001 .attrs = driver_attributes
1002};
1003
1004static ssize_t store_tempsense(struct device *dev,
1005 struct device_attribute *attr,
1006 const char *buf,
1007 size_t count)
1008{
1009 struct ipath_devdata *dd = dev_get_drvdata(dev);
1010 int ret, stat;
1011 u16 val;
1012
1013 ret = ipath_parse_ushort(buf, &val);
1014 if (ret <= 0) {
1015 ipath_dev_err(dd, "attempt to set invalid tempsense config\n");
1016 goto bail;
1017 }
1018 /* If anything but the highest limit, enable T_CRIT_A "interrupt" */
1019 stat = ipath_tempsense_write(dd, 9, (val == 0x7f7f) ? 0x80 : 0);
1020 if (stat) {
1021 ipath_dev_err(dd, "Unable to set tempsense config\n");
1022 ret = -1;
1023 goto bail;
1024 }
1025 stat = ipath_tempsense_write(dd, 0xB, (u8) (val & 0xFF));
1026 if (stat) {
1027 ipath_dev_err(dd, "Unable to set local Tcrit\n");
1028 ret = -1;
1029 goto bail;
1030 }
1031 stat = ipath_tempsense_write(dd, 0xD, (u8) (val >> 8));
1032 if (stat) {
1033 ipath_dev_err(dd, "Unable to set remote Tcrit\n");
1034 ret = -1;
1035 goto bail;
1036 }
1037
1038bail:
1039 return ret;
1040}
1041
1042/*
1043 * Dump the tempsense registers, mostly in decimal, to make shell scripting easier.
1044 */
1045static ssize_t show_tempsense(struct device *dev,
1046 struct device_attribute *attr,
1047 char *buf)
1048{
1049 struct ipath_devdata *dd = dev_get_drvdata(dev);
1050 int ret;
1051 int idx;
1052 u8 regvals[8];
1053
1054 ret = -ENXIO;
1055 for (idx = 0; idx < 8; ++idx) {
1056 if (idx == 6)
1057 continue;
1058 ret = ipath_tempsense_read(dd, idx);
1059 if (ret < 0)
1060 break;
1061 regvals[idx] = ret;
1062 }
1063 if (idx == 8)
1064 ret = scnprintf(buf, PAGE_SIZE, "%d %d %02X %02X %d %d\n",
1065 *(signed char *)(regvals),
1066 *(signed char *)(regvals + 1),
1067 regvals[2], regvals[3],
1068 *(signed char *)(regvals + 5),
1069 *(signed char *)(regvals + 7));
1070 return ret;
1071}
1072
1073const struct attribute_group *ipath_driver_attr_groups[] = {
1074 &driver_attr_group,
1075 NULL,
1076};
1077
1078static DEVICE_ATTR(guid, S_IWUSR | S_IRUGO, show_guid, store_guid);
1079static DEVICE_ATTR(lmc, S_IWUSR | S_IRUGO, show_lmc, store_lmc);
1080static DEVICE_ATTR(lid, S_IWUSR | S_IRUGO, show_lid, store_lid);
1081static DEVICE_ATTR(link_state, S_IWUSR, NULL, store_link_state);
1082static DEVICE_ATTR(mlid, S_IWUSR | S_IRUGO, show_mlid, store_mlid);
1083static DEVICE_ATTR(mtu, S_IWUSR | S_IRUGO, show_mtu, store_mtu);
1084static DEVICE_ATTR(enabled, S_IWUSR | S_IRUGO, show_enabled, store_enabled);
1085static DEVICE_ATTR(nguid, S_IRUGO, show_nguid, NULL);
1086static DEVICE_ATTR(nports, S_IRUGO, show_nports, NULL);
1087static DEVICE_ATTR(reset, S_IWUSR, NULL, store_reset);
1088static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
1089static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
1090static DEVICE_ATTR(status_str, S_IRUGO, show_status_str, NULL);
1091static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
1092static DEVICE_ATTR(unit, S_IRUGO, show_unit, NULL);
1093static DEVICE_ATTR(rx_pol_inv, S_IWUSR, NULL, store_rx_pol_inv);
1094static DEVICE_ATTR(led_override, S_IWUSR, NULL, store_led_override);
1095static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL);
1096static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL);
1097static DEVICE_ATTR(jint_max_packets, S_IWUSR | S_IRUGO,
1098 show_jint_max_packets, store_jint_max_packets);
1099static DEVICE_ATTR(jint_idle_ticks, S_IWUSR | S_IRUGO,
1100 show_jint_idle_ticks, store_jint_idle_ticks);
1101static DEVICE_ATTR(tempsense, S_IWUSR | S_IRUGO,
1102 show_tempsense, store_tempsense);
1103
1104static struct attribute *dev_attributes[] = {
1105 &dev_attr_guid.attr,
1106 &dev_attr_lmc.attr,
1107 &dev_attr_lid.attr,
1108 &dev_attr_link_state.attr,
1109 &dev_attr_mlid.attr,
1110 &dev_attr_mtu.attr,
1111 &dev_attr_nguid.attr,
1112 &dev_attr_nports.attr,
1113 &dev_attr_serial.attr,
1114 &dev_attr_status.attr,
1115 &dev_attr_status_str.attr,
1116 &dev_attr_boardversion.attr,
1117 &dev_attr_unit.attr,
1118 &dev_attr_enabled.attr,
1119 &dev_attr_rx_pol_inv.attr,
1120 &dev_attr_led_override.attr,
1121 &dev_attr_logged_errors.attr,
1122 &dev_attr_tempsense.attr,
1123 &dev_attr_localbus_info.attr,
1124 NULL
1125};
1126
1127static struct attribute_group dev_attr_group = {
1128 .attrs = dev_attributes
1129};
1130
1131static DEVICE_ATTR(hrtbt_enable, S_IWUSR | S_IRUGO, show_hrtbt_enb,
1132 store_hrtbt_enb);
1133static DEVICE_ATTR(link_width_enable, S_IWUSR | S_IRUGO, show_lwid_enb,
1134 store_lwid_enb);
1135static DEVICE_ATTR(link_width, S_IRUGO, show_lwid, NULL);
1136static DEVICE_ATTR(link_speed_enable, S_IWUSR | S_IRUGO, show_spd_enb,
1137 store_spd_enb);
1138static DEVICE_ATTR(link_speed, S_IRUGO, show_spd, NULL);
1139static DEVICE_ATTR(rx_pol_inv_enable, S_IWUSR | S_IRUGO, show_rx_polinv_enb,
1140 store_rx_polinv_enb);
1141static DEVICE_ATTR(rx_lane_rev_enable, S_IWUSR | S_IRUGO, show_lanerev_enb,
1142 store_lanerev_enb);
1143
1144static struct attribute *dev_ibcfg_attributes[] = {
1145 &dev_attr_hrtbt_enable.attr,
1146 &dev_attr_link_width_enable.attr,
1147 &dev_attr_link_width.attr,
1148 &dev_attr_link_speed_enable.attr,
1149 &dev_attr_link_speed.attr,
1150 &dev_attr_rx_pol_inv_enable.attr,
1151 &dev_attr_rx_lane_rev_enable.attr,
1152 NULL
1153};
1154
1155static struct attribute_group dev_ibcfg_attr_group = {
1156 .attrs = dev_ibcfg_attributes
1157};
1158
1159/**
1160 * ipath_expose_reset - create a device reset file
1161 * @dev: the device structure
1162 *
1163 * Only expose a file that lets us reset the device after someone
1164 * enters diag mode. A device reset is quite likely to crash the
1165 * machine entirely, so we don't want to normally make it
1166 * available.
1167 *
1168 * Called with ipath_mutex held.
1169 */
1170int ipath_expose_reset(struct device *dev)
1171{
1172 static int exposed;
1173 int ret;
1174
1175 if (!exposed) {
1176 ret = device_create_file(dev, &dev_attr_reset);
1177 exposed = 1;
1178 } else {
1179 ret = 0;
1180 }
1181
1182 return ret;
1183}
1184
1185int ipath_device_create_group(struct device *dev, struct ipath_devdata *dd)
1186{
1187 int ret;
1188
1189 ret = sysfs_create_group(&dev->kobj, &dev_attr_group);
1190 if (ret)
1191 goto bail;
1192
1193 ret = sysfs_create_group(&dev->kobj, &dev_counter_attr_group);
1194 if (ret)
1195 goto bail_attrs;
1196
1197 if (dd->ipath_flags & IPATH_HAS_MULT_IB_SPEED) {
1198 ret = device_create_file(dev, &dev_attr_jint_idle_ticks);
1199 if (ret)
1200 goto bail_counter;
1201 ret = device_create_file(dev, &dev_attr_jint_max_packets);
1202 if (ret)
1203 goto bail_idle;
1204
1205 ret = sysfs_create_group(&dev->kobj, &dev_ibcfg_attr_group);
1206 if (ret)
1207 goto bail_max;
1208 }
1209
1210 return 0;
1211
1212bail_max:
1213 device_remove_file(dev, &dev_attr_jint_max_packets);
1214bail_idle:
1215 device_remove_file(dev, &dev_attr_jint_idle_ticks);
1216bail_counter:
1217 sysfs_remove_group(&dev->kobj, &dev_counter_attr_group);
1218bail_attrs:
1219 sysfs_remove_group(&dev->kobj, &dev_attr_group);
1220bail:
1221 return ret;
1222}
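/*
 * Note that the error paths above unwind in reverse order of creation,
 * so a failure partway through leaves no stale sysfs entries behind.
 */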
1223
1224void ipath_device_remove_group(struct device *dev, struct ipath_devdata *dd)
1225{
1226 sysfs_remove_group(&dev->kobj, &dev_counter_attr_group);
1227
1228 if (dd->ipath_flags & IPATH_HAS_MULT_IB_SPEED) {
1229 sysfs_remove_group(&dev->kobj, &dev_ibcfg_attr_group);
1230 device_remove_file(dev, &dev_attr_jint_idle_ticks);
1231 device_remove_file(dev, &dev_attr_jint_max_packets);
1232 }
1233
1234 sysfs_remove_group(&dev->kobj, &dev_attr_group);
1235
1236 device_remove_file(dev, &dev_attr_reset);
1237}
diff --git a/drivers/staging/rdma/ipath/ipath_uc.c b/drivers/staging/rdma/ipath/ipath_uc.c
deleted file mode 100644
index 0246b30280b9..000000000000
--- a/drivers/staging/rdma/ipath/ipath_uc.c
+++ /dev/null
@@ -1,547 +0,0 @@
1/*
2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include "ipath_verbs.h"
35#include "ipath_kernel.h"
36
37/* cut down ridiculously long IB macro names */
38#define OP(x) IB_OPCODE_UC_##x
39
40/**
41 * ipath_make_uc_req - construct a request packet (SEND, RDMA write)
42 * @qp: a pointer to the QP
43 *
44 * Return 1 if constructed; otherwise, return 0.
45 */
46int ipath_make_uc_req(struct ipath_qp *qp)
47{
48 struct ipath_other_headers *ohdr;
49 struct ipath_swqe *wqe;
50 unsigned long flags;
51 u32 hwords;
52 u32 bth0;
53 u32 len;
54 u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
55 int ret = 0;
56
57 spin_lock_irqsave(&qp->s_lock, flags);
58
59 if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)) {
60 if (!(ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND))
61 goto bail;
62 /* We are in the error state, flush the work request. */
63 if (qp->s_last == qp->s_head)
64 goto bail;
65 /* If DMAs are in progress, we can't flush immediately. */
66 if (atomic_read(&qp->s_dma_busy)) {
67 qp->s_flags |= IPATH_S_WAIT_DMA;
68 goto bail;
69 }
70 wqe = get_swqe_ptr(qp, qp->s_last);
71 ipath_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
72 goto done;
73 }
74
75 ohdr = &qp->s_hdr.u.oth;
76 if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
77 ohdr = &qp->s_hdr.u.l.oth;
78
79 /* header size in 32-bit words LRH+BTH = (8+12)/4. */
80 hwords = 5;
81 bth0 = 1 << 22; /* Set M bit */
82
83 /* Get the next send request. */
84 wqe = get_swqe_ptr(qp, qp->s_cur);
85 qp->s_wqe = NULL;
86 switch (qp->s_state) {
87 default:
88 if (!(ib_ipath_state_ops[qp->state] &
89 IPATH_PROCESS_NEXT_SEND_OK))
90 goto bail;
91 /* Check if send work queue is empty. */
92 if (qp->s_cur == qp->s_head)
93 goto bail;
94 /*
95 * Start a new request.
96 */
97 qp->s_psn = wqe->psn = qp->s_next_psn;
98 qp->s_sge.sge = wqe->sg_list[0];
99 qp->s_sge.sg_list = wqe->sg_list + 1;
100 qp->s_sge.num_sge = wqe->wr.num_sge;
101 qp->s_len = len = wqe->length;
102 switch (wqe->wr.opcode) {
103 case IB_WR_SEND:
104 case IB_WR_SEND_WITH_IMM:
105 if (len > pmtu) {
106 qp->s_state = OP(SEND_FIRST);
107 len = pmtu;
108 break;
109 }
110 if (wqe->wr.opcode == IB_WR_SEND)
111 qp->s_state = OP(SEND_ONLY);
112 else {
113 qp->s_state =
114 OP(SEND_ONLY_WITH_IMMEDIATE);
115 /* Immediate data comes after the BTH */
116 ohdr->u.imm_data = wqe->wr.ex.imm_data;
117 hwords += 1;
118 }
119 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
120 bth0 |= 1 << 23;
121 qp->s_wqe = wqe;
122 if (++qp->s_cur >= qp->s_size)
123 qp->s_cur = 0;
124 break;
125
126 case IB_WR_RDMA_WRITE:
127 case IB_WR_RDMA_WRITE_WITH_IMM:
128 ohdr->u.rc.reth.vaddr =
129 cpu_to_be64(wqe->rdma_wr.remote_addr);
130 ohdr->u.rc.reth.rkey =
131 cpu_to_be32(wqe->rdma_wr.rkey);
132 ohdr->u.rc.reth.length = cpu_to_be32(len);
133 hwords += sizeof(struct ib_reth) / 4;
134 if (len > pmtu) {
135 qp->s_state = OP(RDMA_WRITE_FIRST);
136 len = pmtu;
137 break;
138 }
139 if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
140 qp->s_state = OP(RDMA_WRITE_ONLY);
141 else {
142 qp->s_state =
143 OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
144 /* Immediate data comes after the RETH */
145 ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
146 hwords += 1;
147 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
148 bth0 |= 1 << 23;
149 }
150 qp->s_wqe = wqe;
151 if (++qp->s_cur >= qp->s_size)
152 qp->s_cur = 0;
153 break;
154
155 default:
156 goto bail;
157 }
158 break;
159
160 case OP(SEND_FIRST):
161 qp->s_state = OP(SEND_MIDDLE);
162 /* FALLTHROUGH */
163 case OP(SEND_MIDDLE):
164 len = qp->s_len;
165 if (len > pmtu) {
166 len = pmtu;
167 break;
168 }
169 if (wqe->wr.opcode == IB_WR_SEND)
170 qp->s_state = OP(SEND_LAST);
171 else {
172 qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
173 /* Immediate data comes after the BTH */
174 ohdr->u.imm_data = wqe->wr.ex.imm_data;
175 hwords += 1;
176 }
177 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
178 bth0 |= 1 << 23;
179 qp->s_wqe = wqe;
180 if (++qp->s_cur >= qp->s_size)
181 qp->s_cur = 0;
182 break;
183
184 case OP(RDMA_WRITE_FIRST):
185 qp->s_state = OP(RDMA_WRITE_MIDDLE);
186 /* FALLTHROUGH */
187 case OP(RDMA_WRITE_MIDDLE):
188 len = qp->s_len;
189 if (len > pmtu) {
190 len = pmtu;
191 break;
192 }
193 if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
194 qp->s_state = OP(RDMA_WRITE_LAST);
195 else {
196 qp->s_state =
197 OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
198 /* Immediate data comes after the BTH */
199 ohdr->u.imm_data = wqe->wr.ex.imm_data;
200 hwords += 1;
201 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
202 bth0 |= 1 << 23;
203 }
204 qp->s_wqe = wqe;
205 if (++qp->s_cur >= qp->s_size)
206 qp->s_cur = 0;
207 break;
208 }
209 qp->s_len -= len;
210 qp->s_hdrwords = hwords;
211 qp->s_cur_sge = &qp->s_sge;
212 qp->s_cur_size = len;
213 ipath_make_ruc_header(to_idev(qp->ibqp.device),
214 qp, ohdr, bth0 | (qp->s_state << 24),
215 qp->s_next_psn++ & IPATH_PSN_MASK);
216done:
217 ret = 1;
218 goto unlock;
219
220bail:
221 qp->s_flags &= ~IPATH_S_BUSY;
222unlock:
223 spin_unlock_irqrestore(&qp->s_lock, flags);
224 return ret;
225}
226
227/**
228 * ipath_uc_rcv - handle an incoming UC packet
229 * @dev: the device the packet came in on
230 * @hdr: the header of the packet
231 * @has_grh: true if the packet has a GRH
232 * @data: the packet data
233 * @tlen: the length of the packet
234 * @qp: the QP for this packet.
235 *
236 * This is called from ipath_qp_rcv() to process an incoming UC packet
237 * for the given QP.
238 * Called at interrupt level.
239 */
240void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
241 int has_grh, void *data, u32 tlen, struct ipath_qp *qp)
242{
243 struct ipath_other_headers *ohdr;
244 int opcode;
245 u32 hdrsize;
246 u32 psn;
247 u32 pad;
248 struct ib_wc wc;
249 u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
250 struct ib_reth *reth;
251 int header_in_data;
252
253 /* Validate the SLID. See Ch. 9.6.1.5 */
254 if (unlikely(be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid))
255 goto done;
256
257 /* Check for GRH */
258 if (!has_grh) {
259 ohdr = &hdr->u.oth;
260 hdrsize = 8 + 12; /* LRH + BTH */
261 psn = be32_to_cpu(ohdr->bth[2]);
262 header_in_data = 0;
263 } else {
264 ohdr = &hdr->u.l.oth;
265 hdrsize = 8 + 40 + 12; /* LRH + GRH + BTH */
266 /*
267 * The header with GRH is 60 bytes and the
268 * core driver sets the eager header buffer
269 * size to 56 bytes so the last 4 bytes of
270		 * the BTH header (the PSN) are in the data buffer.
271 */
272 header_in_data = dev->dd->ipath_rcvhdrentsize == 16;
273 if (header_in_data) {
274 psn = be32_to_cpu(((__be32 *) data)[0]);
275 data += sizeof(__be32);
276 } else
277 psn = be32_to_cpu(ohdr->bth[2]);
278 }
279 /*
280	 * The opcode is in the low byte when it's in network order
281 * (top byte when in host order).
282 */
283 opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
284
285 memset(&wc, 0, sizeof wc);
286
287	/* Compare the PSN versus the expected PSN. */
288 if (unlikely(ipath_cmp24(psn, qp->r_psn) != 0)) {
289 /*
290 * Handle a sequence error.
291 * Silently drop any current message.
292 */
293 qp->r_psn = psn;
294 inv:
295 qp->r_state = OP(SEND_LAST);
296 switch (opcode) {
297 case OP(SEND_FIRST):
298 case OP(SEND_ONLY):
299 case OP(SEND_ONLY_WITH_IMMEDIATE):
300 goto send_first;
301
302 case OP(RDMA_WRITE_FIRST):
303 case OP(RDMA_WRITE_ONLY):
304 case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
305 goto rdma_first;
306
307 default:
308 dev->n_pkt_drops++;
309 goto done;
310 }
311 }
312
313 /* Check for opcode sequence errors. */
314 switch (qp->r_state) {
315 case OP(SEND_FIRST):
316 case OP(SEND_MIDDLE):
317 if (opcode == OP(SEND_MIDDLE) ||
318 opcode == OP(SEND_LAST) ||
319 opcode == OP(SEND_LAST_WITH_IMMEDIATE))
320 break;
321 goto inv;
322
323 case OP(RDMA_WRITE_FIRST):
324 case OP(RDMA_WRITE_MIDDLE):
325 if (opcode == OP(RDMA_WRITE_MIDDLE) ||
326 opcode == OP(RDMA_WRITE_LAST) ||
327 opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
328 break;
329 goto inv;
330
331 default:
332 if (opcode == OP(SEND_FIRST) ||
333 opcode == OP(SEND_ONLY) ||
334 opcode == OP(SEND_ONLY_WITH_IMMEDIATE) ||
335 opcode == OP(RDMA_WRITE_FIRST) ||
336 opcode == OP(RDMA_WRITE_ONLY) ||
337 opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
338 break;
339 goto inv;
340 }
341
342 /* OK, process the packet. */
343 switch (opcode) {
344 case OP(SEND_FIRST):
345 case OP(SEND_ONLY):
346 case OP(SEND_ONLY_WITH_IMMEDIATE):
347 send_first:
348 if (qp->r_flags & IPATH_R_REUSE_SGE) {
349 qp->r_flags &= ~IPATH_R_REUSE_SGE;
350 qp->r_sge = qp->s_rdma_read_sge;
351 } else if (!ipath_get_rwqe(qp, 0)) {
352 dev->n_pkt_drops++;
353 goto done;
354 }
355 /* Save the WQE so we can reuse it in case of an error. */
356 qp->s_rdma_read_sge = qp->r_sge;
357 qp->r_rcv_len = 0;
358 if (opcode == OP(SEND_ONLY))
359 goto send_last;
360 else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE))
361 goto send_last_imm;
362 /* FALLTHROUGH */
363 case OP(SEND_MIDDLE):
364 /* Check for invalid length PMTU or posted rwqe len. */
365 if (unlikely(tlen != (hdrsize + pmtu + 4))) {
366 qp->r_flags |= IPATH_R_REUSE_SGE;
367 dev->n_pkt_drops++;
368 goto done;
369 }
370 qp->r_rcv_len += pmtu;
371 if (unlikely(qp->r_rcv_len > qp->r_len)) {
372 qp->r_flags |= IPATH_R_REUSE_SGE;
373 dev->n_pkt_drops++;
374 goto done;
375 }
376 ipath_copy_sge(&qp->r_sge, data, pmtu);
377 break;
378
379 case OP(SEND_LAST_WITH_IMMEDIATE):
380 send_last_imm:
381 if (header_in_data) {
382 wc.ex.imm_data = *(__be32 *) data;
383 data += sizeof(__be32);
384 } else {
385 /* Immediate data comes after BTH */
386 wc.ex.imm_data = ohdr->u.imm_data;
387 }
388 hdrsize += 4;
389 wc.wc_flags = IB_WC_WITH_IMM;
390 /* FALLTHROUGH */
391 case OP(SEND_LAST):
392 send_last:
393 /* Get the number of bytes the message was padded by. */
394 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
395 /* Check for invalid length. */
396 /* XXX LAST len should be >= 1 */
397 if (unlikely(tlen < (hdrsize + pad + 4))) {
398 qp->r_flags |= IPATH_R_REUSE_SGE;
399 dev->n_pkt_drops++;
400 goto done;
401 }
402 /* Don't count the CRC. */
403 tlen -= (hdrsize + pad + 4);
404 wc.byte_len = tlen + qp->r_rcv_len;
405 if (unlikely(wc.byte_len > qp->r_len)) {
406 qp->r_flags |= IPATH_R_REUSE_SGE;
407 dev->n_pkt_drops++;
408 goto done;
409 }
410 wc.opcode = IB_WC_RECV;
411 last_imm:
412 ipath_copy_sge(&qp->r_sge, data, tlen);
413 wc.wr_id = qp->r_wr_id;
414 wc.status = IB_WC_SUCCESS;
415 wc.qp = &qp->ibqp;
416 wc.src_qp = qp->remote_qpn;
417 wc.slid = qp->remote_ah_attr.dlid;
418 wc.sl = qp->remote_ah_attr.sl;
419 /* Signal completion event if the solicited bit is set. */
420 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
421 (ohdr->bth[0] &
422 cpu_to_be32(1 << 23)) != 0);
423 break;
424
425 case OP(RDMA_WRITE_FIRST):
426 case OP(RDMA_WRITE_ONLY):
427 case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE): /* consume RWQE */
428 rdma_first:
429 /* RETH comes after BTH */
430 if (!header_in_data)
431 reth = &ohdr->u.rc.reth;
432 else {
433 reth = (struct ib_reth *)data;
434 data += sizeof(*reth);
435 }
436 hdrsize += sizeof(*reth);
437 qp->r_len = be32_to_cpu(reth->length);
438 qp->r_rcv_len = 0;
439 if (qp->r_len != 0) {
440 u32 rkey = be32_to_cpu(reth->rkey);
441 u64 vaddr = be64_to_cpu(reth->vaddr);
442 int ok;
443
444 /* Check rkey */
445 ok = ipath_rkey_ok(qp, &qp->r_sge, qp->r_len,
446 vaddr, rkey,
447 IB_ACCESS_REMOTE_WRITE);
448 if (unlikely(!ok)) {
449 dev->n_pkt_drops++;
450 goto done;
451 }
452 } else {
453 qp->r_sge.sg_list = NULL;
454 qp->r_sge.sge.mr = NULL;
455 qp->r_sge.sge.vaddr = NULL;
456 qp->r_sge.sge.length = 0;
457 qp->r_sge.sge.sge_length = 0;
458 }
459 if (unlikely(!(qp->qp_access_flags &
460 IB_ACCESS_REMOTE_WRITE))) {
461 dev->n_pkt_drops++;
462 goto done;
463 }
464 if (opcode == OP(RDMA_WRITE_ONLY))
465 goto rdma_last;
466 else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
467 goto rdma_last_imm;
468 /* FALLTHROUGH */
469 case OP(RDMA_WRITE_MIDDLE):
470 /* Check for invalid length PMTU or posted rwqe len. */
471 if (unlikely(tlen != (hdrsize + pmtu + 4))) {
472 dev->n_pkt_drops++;
473 goto done;
474 }
475 qp->r_rcv_len += pmtu;
476 if (unlikely(qp->r_rcv_len > qp->r_len)) {
477 dev->n_pkt_drops++;
478 goto done;
479 }
480 ipath_copy_sge(&qp->r_sge, data, pmtu);
481 break;
482
483 case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
484 rdma_last_imm:
485 if (header_in_data) {
486 wc.ex.imm_data = *(__be32 *) data;
487 data += sizeof(__be32);
488 } else {
489 /* Immediate data comes after BTH */
490 wc.ex.imm_data = ohdr->u.imm_data;
491 }
492 hdrsize += 4;
493 wc.wc_flags = IB_WC_WITH_IMM;
494
495 /* Get the number of bytes the message was padded by. */
496 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
497 /* Check for invalid length. */
498 /* XXX LAST len should be >= 1 */
499 if (unlikely(tlen < (hdrsize + pad + 4))) {
500 dev->n_pkt_drops++;
501 goto done;
502 }
503 /* Don't count the CRC. */
504 tlen -= (hdrsize + pad + 4);
505 if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) {
506 dev->n_pkt_drops++;
507 goto done;
508 }
509 if (qp->r_flags & IPATH_R_REUSE_SGE)
510 qp->r_flags &= ~IPATH_R_REUSE_SGE;
511 else if (!ipath_get_rwqe(qp, 1)) {
512 dev->n_pkt_drops++;
513 goto done;
514 }
515 wc.byte_len = qp->r_len;
516 wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
517 goto last_imm;
518
519 case OP(RDMA_WRITE_LAST):
520 rdma_last:
521 /* Get the number of bytes the message was padded by. */
522 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
523 /* Check for invalid length. */
524 /* XXX LAST len should be >= 1 */
525 if (unlikely(tlen < (hdrsize + pad + 4))) {
526 dev->n_pkt_drops++;
527 goto done;
528 }
529 /* Don't count the CRC. */
530 tlen -= (hdrsize + pad + 4);
531 if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) {
532 dev->n_pkt_drops++;
533 goto done;
534 }
535 ipath_copy_sge(&qp->r_sge, data, tlen);
536 break;
537
538 default:
539 /* Drop packet for unknown opcodes. */
540 dev->n_pkt_drops++;
541 goto done;
542 }
543 qp->r_psn++;
544 qp->r_state = opcode;
545done:
546 return;
547}
diff --git a/drivers/staging/rdma/ipath/ipath_ud.c b/drivers/staging/rdma/ipath/ipath_ud.c
deleted file mode 100644
index 385d9410a51e..000000000000
--- a/drivers/staging/rdma/ipath/ipath_ud.c
+++ /dev/null
@@ -1,579 +0,0 @@
1/*
2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <rdma/ib_smi.h>
35
36#include "ipath_verbs.h"
37#include "ipath_kernel.h"
38
39/**
40 * ipath_ud_loopback - handle send on loopback QPs
41 * @sqp: the sending QP
42 * @swqe: the send work request
43 *
44 * This is called from ipath_make_ud_req() to forward a WQE addressed
45 * to the same HCA.
46 * Note that the receive interrupt handler may be calling ipath_ud_rcv()
47 * while this is being called.
48 */
49static void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_swqe *swqe)
50{
51 struct ipath_ibdev *dev = to_idev(sqp->ibqp.device);
52 struct ipath_qp *qp;
53 struct ib_ah_attr *ah_attr;
54 unsigned long flags;
55 struct ipath_rq *rq;
56 struct ipath_srq *srq;
57 struct ipath_sge_state rsge;
58 struct ipath_sge *sge;
59 struct ipath_rwq *wq;
60 struct ipath_rwqe *wqe;
61 void (*handler)(struct ib_event *, void *);
62 struct ib_wc wc;
63 u32 tail;
64 u32 rlen;
65 u32 length;
66
67 qp = ipath_lookup_qpn(&dev->qp_table, swqe->ud_wr.remote_qpn);
68 if (!qp || !(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
69 dev->n_pkt_drops++;
70 goto done;
71 }
72
73 /*
74 * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
75 * Qkeys with the high order bit set mean use the
76 * qkey from the QP context instead of the WR (see 10.2.5).
77 */
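	/*
	 * The (int) cast below makes a qkey with the high-order bit set
	 * test as negative, so the sending QP's own qkey is used instead.
	 */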
78 if (unlikely(qp->ibqp.qp_num &&
79 ((int) swqe->ud_wr.remote_qkey < 0 ?
80 sqp->qkey : swqe->ud_wr.remote_qkey) != qp->qkey)) {
81 /* XXX OK to lose a count once in a while. */
82 dev->qkey_violations++;
83 dev->n_pkt_drops++;
84 goto drop;
85 }
86
87 /*
88 * A GRH is expected to precede the data even if not
89 * present on the wire.
90 */
91 length = swqe->length;
92 memset(&wc, 0, sizeof wc);
93 wc.byte_len = length + sizeof(struct ib_grh);
94
95 if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
96 wc.wc_flags = IB_WC_WITH_IMM;
97 wc.ex.imm_data = swqe->wr.ex.imm_data;
98 }
99
100 /*
101	 * This would be a lot simpler if we could call ipath_get_rwqe(),
102	 * but that uses state shared with the receive interrupt handler,
103	 * so we would need to lock out receive interrupts while doing
104 * local loopback.
105 */
106 if (qp->ibqp.srq) {
107 srq = to_isrq(qp->ibqp.srq);
108 handler = srq->ibsrq.event_handler;
109 rq = &srq->rq;
110 } else {
111 srq = NULL;
112 handler = NULL;
113 rq = &qp->r_rq;
114 }
115
116 /*
117 * Get the next work request entry to find where to put the data.
118 * Note that it is safe to drop the lock after changing rq->tail
119 * since ipath_post_receive() won't fill the empty slot.
120 */
121 spin_lock_irqsave(&rq->lock, flags);
122 wq = rq->wq;
123 tail = wq->tail;
124 /* Validate tail before using it since it is user writable. */
125 if (tail >= rq->size)
126 tail = 0;
127 if (unlikely(tail == wq->head)) {
128 spin_unlock_irqrestore(&rq->lock, flags);
129 dev->n_pkt_drops++;
130 goto drop;
131 }
132 wqe = get_rwqe_ptr(rq, tail);
133 rsge.sg_list = qp->r_ud_sg_list;
134 if (!ipath_init_sge(qp, wqe, &rlen, &rsge)) {
135 spin_unlock_irqrestore(&rq->lock, flags);
136 dev->n_pkt_drops++;
137 goto drop;
138 }
139 /* Silently drop packets which are too big. */
140 if (wc.byte_len > rlen) {
141 spin_unlock_irqrestore(&rq->lock, flags);
142 dev->n_pkt_drops++;
143 goto drop;
144 }
145 if (++tail >= rq->size)
146 tail = 0;
147 wq->tail = tail;
148 wc.wr_id = wqe->wr_id;
149 if (handler) {
150 u32 n;
151
152 /*
153 * validate head pointer value and compute
154 * the number of remaining WQEs.
155 */
156 n = wq->head;
157 if (n >= rq->size)
158 n = 0;
159 if (n < tail)
160 n += rq->size - tail;
161 else
162 n -= tail;
163 if (n < srq->limit) {
164 struct ib_event ev;
165
166 srq->limit = 0;
167 spin_unlock_irqrestore(&rq->lock, flags);
168 ev.device = qp->ibqp.device;
169 ev.element.srq = qp->ibqp.srq;
170 ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
171 handler(&ev, srq->ibsrq.srq_context);
172 } else
173 spin_unlock_irqrestore(&rq->lock, flags);
174 } else
175 spin_unlock_irqrestore(&rq->lock, flags);
176
177 ah_attr = &to_iah(swqe->ud_wr.ah)->attr;
178 if (ah_attr->ah_flags & IB_AH_GRH) {
179 ipath_copy_sge(&rsge, &ah_attr->grh, sizeof(struct ib_grh));
180 wc.wc_flags |= IB_WC_GRH;
181 } else
182 ipath_skip_sge(&rsge, sizeof(struct ib_grh));
183 sge = swqe->sg_list;
184 while (length) {
185 u32 len = sge->length;
186
187 if (len > length)
188 len = length;
189 if (len > sge->sge_length)
190 len = sge->sge_length;
191 BUG_ON(len == 0);
192 ipath_copy_sge(&rsge, sge->vaddr, len);
193 sge->vaddr += len;
194 sge->length -= len;
195 sge->sge_length -= len;
196 if (sge->sge_length == 0) {
197 if (--swqe->wr.num_sge)
198 sge++;
199 } else if (sge->length == 0 && sge->mr != NULL) {
200 if (++sge->n >= IPATH_SEGSZ) {
201 if (++sge->m >= sge->mr->mapsz)
202 break;
203 sge->n = 0;
204 }
205 sge->vaddr =
206 sge->mr->map[sge->m]->segs[sge->n].vaddr;
207 sge->length =
208 sge->mr->map[sge->m]->segs[sge->n].length;
209 }
210 length -= len;
211 }
212 wc.status = IB_WC_SUCCESS;
213 wc.opcode = IB_WC_RECV;
214 wc.qp = &qp->ibqp;
215 wc.src_qp = sqp->ibqp.qp_num;
216 /* XXX do we know which pkey matched? Only needed for GSI. */
217 wc.pkey_index = 0;
218 wc.slid = dev->dd->ipath_lid |
219 (ah_attr->src_path_bits &
220 ((1 << dev->dd->ipath_lmc) - 1));
221 wc.sl = ah_attr->sl;
222 wc.dlid_path_bits =
223 ah_attr->dlid & ((1 << dev->dd->ipath_lmc) - 1);
224 wc.port_num = 1;
225 /* Signal completion event if the solicited bit is set. */
226 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
227 swqe->ud_wr.wr.send_flags & IB_SEND_SOLICITED);
228drop:
229 if (atomic_dec_and_test(&qp->refcount))
230 wake_up(&qp->wait);
231done:;
232}
233
234/**
235 * ipath_make_ud_req - construct a UD request packet
236 * @qp: the QP
237 *
238 * Return 1 if constructed; otherwise, return 0.
239 */
240int ipath_make_ud_req(struct ipath_qp *qp)
241{
242 struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
243 struct ipath_other_headers *ohdr;
244 struct ib_ah_attr *ah_attr;
245 struct ipath_swqe *wqe;
246 unsigned long flags;
247 u32 nwords;
248 u32 extra_bytes;
249 u32 bth0;
250 u16 lrh0;
251 u16 lid;
252 int ret = 0;
253 int next_cur;
254
255 spin_lock_irqsave(&qp->s_lock, flags);
256
257 if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_NEXT_SEND_OK)) {
258 if (!(ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND))
259 goto bail;
260 /* We are in the error state, flush the work request. */
261 if (qp->s_last == qp->s_head)
262 goto bail;
263 /* If DMAs are in progress, we can't flush immediately. */
264 if (atomic_read(&qp->s_dma_busy)) {
265 qp->s_flags |= IPATH_S_WAIT_DMA;
266 goto bail;
267 }
268 wqe = get_swqe_ptr(qp, qp->s_last);
269 ipath_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
270 goto done;
271 }
272
273 if (qp->s_cur == qp->s_head)
274 goto bail;
275
276 wqe = get_swqe_ptr(qp, qp->s_cur);
277 next_cur = qp->s_cur + 1;
278 if (next_cur >= qp->s_size)
279 next_cur = 0;
280
281 /* Construct the header. */
282 ah_attr = &to_iah(wqe->ud_wr.ah)->attr;
283 if (ah_attr->dlid >= IPATH_MULTICAST_LID_BASE) {
284 if (ah_attr->dlid != IPATH_PERMISSIVE_LID)
285 dev->n_multicast_xmit++;
286 else
287 dev->n_unicast_xmit++;
288 } else {
289 dev->n_unicast_xmit++;
290 lid = ah_attr->dlid & ~((1 << dev->dd->ipath_lmc) - 1);
291 if (unlikely(lid == dev->dd->ipath_lid)) {
292 /*
293 * If DMAs are in progress, we can't generate
294 * a completion for the loopback packet since
295 * it would be out of order.
296 * XXX Instead of waiting, we could queue a
297 * zero length descriptor so we get a callback.
298 */
299 if (atomic_read(&qp->s_dma_busy)) {
300 qp->s_flags |= IPATH_S_WAIT_DMA;
301 goto bail;
302 }
303 qp->s_cur = next_cur;
304 spin_unlock_irqrestore(&qp->s_lock, flags);
305 ipath_ud_loopback(qp, wqe);
306 spin_lock_irqsave(&qp->s_lock, flags);
307 ipath_send_complete(qp, wqe, IB_WC_SUCCESS);
308 goto done;
309 }
310 }
311
312 qp->s_cur = next_cur;
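	/*
	 * -length & 3 is the number of pad bytes needed to round the
	 * payload up to a 4-byte boundary; nwords is then the padded
	 * payload length in 32-bit words.
	 */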
313 extra_bytes = -wqe->length & 3;
314 nwords = (wqe->length + extra_bytes) >> 2;
315
316 /* header size in 32-bit words LRH+BTH+DETH = (8+12+8)/4. */
317 qp->s_hdrwords = 7;
318 qp->s_cur_size = wqe->length;
319 qp->s_cur_sge = &qp->s_sge;
320 qp->s_dmult = ah_attr->static_rate;
321 qp->s_wqe = wqe;
322 qp->s_sge.sge = wqe->sg_list[0];
323 qp->s_sge.sg_list = wqe->sg_list + 1;
324 qp->s_sge.num_sge = wqe->ud_wr.wr.num_sge;
325
326 if (ah_attr->ah_flags & IB_AH_GRH) {
327 /* Header size in 32-bit words. */
328 qp->s_hdrwords += ipath_make_grh(dev, &qp->s_hdr.u.l.grh,
329 &ah_attr->grh,
330 qp->s_hdrwords, nwords);
331 lrh0 = IPATH_LRH_GRH;
332 ohdr = &qp->s_hdr.u.l.oth;
333 /*
334 * Don't worry about sending to locally attached multicast
335	 * QPs; the spec leaves the behavior in that case unspecified.
336 */
337 } else {
338 /* Header size in 32-bit words. */
339 lrh0 = IPATH_LRH_BTH;
340 ohdr = &qp->s_hdr.u.oth;
341 }
342 if (wqe->ud_wr.wr.opcode == IB_WR_SEND_WITH_IMM) {
343 qp->s_hdrwords++;
344 ohdr->u.ud.imm_data = wqe->ud_wr.wr.ex.imm_data;
345 bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
346 } else
347 bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
348 lrh0 |= ah_attr->sl << 4;
349 if (qp->ibqp.qp_type == IB_QPT_SMI)
350 lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
351 qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
352 qp->s_hdr.lrh[1] = cpu_to_be16(ah_attr->dlid); /* DEST LID */
353 qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords +
354 SIZE_OF_CRC);
355 lid = dev->dd->ipath_lid;
356 if (lid) {
357 lid |= ah_attr->src_path_bits &
358 ((1 << dev->dd->ipath_lmc) - 1);
359 qp->s_hdr.lrh[3] = cpu_to_be16(lid);
360 } else
361 qp->s_hdr.lrh[3] = IB_LID_PERMISSIVE;
362 if (wqe->ud_wr.wr.send_flags & IB_SEND_SOLICITED)
363 bth0 |= 1 << 23;
364 bth0 |= extra_bytes << 20;
365 bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? IPATH_DEFAULT_P_KEY :
366 ipath_get_pkey(dev->dd, qp->s_pkey_index);
367 ohdr->bth[0] = cpu_to_be32(bth0);
368 /*
369 * Use the multicast QP if the destination LID is a multicast LID.
370 */
371 ohdr->bth[1] = ah_attr->dlid >= IPATH_MULTICAST_LID_BASE &&
372 ah_attr->dlid != IPATH_PERMISSIVE_LID ?
373 cpu_to_be32(IPATH_MULTICAST_QPN) :
374 cpu_to_be32(wqe->ud_wr.remote_qpn);
375 ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & IPATH_PSN_MASK);
376 /*
377 * Qkeys with the high order bit set mean use the
378 * qkey from the QP context instead of the WR (see 10.2.5).
379 */
380 ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->ud_wr.remote_qkey < 0 ?
381 qp->qkey : wqe->ud_wr.remote_qkey);
382 ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
383
384done:
385 ret = 1;
386 goto unlock;
387
388bail:
389 qp->s_flags &= ~IPATH_S_BUSY;
390unlock:
391 spin_unlock_irqrestore(&qp->s_lock, flags);
392 return ret;
393}
394
395/**
396 * ipath_ud_rcv - receive an incoming UD packet
397 * @dev: the device the packet came in on
398 * @hdr: the packet header
399 * @has_grh: true if the packet has a GRH
400 * @data: the packet data
401 * @tlen: the packet length
402 * @qp: the QP the packet came on
403 *
404 * This is called from ipath_qp_rcv() to process an incoming UD packet
405 * for the given QP.
406 * Called at interrupt level.
407 */
408void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
409 int has_grh, void *data, u32 tlen, struct ipath_qp *qp)
410{
411 struct ipath_other_headers *ohdr;
412 int opcode;
413 u32 hdrsize;
414 u32 pad;
415 struct ib_wc wc;
416 u32 qkey;
417 u32 src_qp;
418 u16 dlid;
419 int header_in_data;
420
421 /* Check for GRH */
422 if (!has_grh) {
423 ohdr = &hdr->u.oth;
424 hdrsize = 8 + 12 + 8; /* LRH + BTH + DETH */
425 qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
426 src_qp = be32_to_cpu(ohdr->u.ud.deth[1]);
427 header_in_data = 0;
428 } else {
429 ohdr = &hdr->u.l.oth;
430 hdrsize = 8 + 40 + 12 + 8; /* LRH + GRH + BTH + DETH */
431 /*
432 * The header with GRH is 68 bytes and the core driver sets
433 * the eager header buffer size to 56 bytes so the last 12
434		 * bytes of the IB header are in the data buffer (worked out after this function).
435 */
436 header_in_data = dev->dd->ipath_rcvhdrentsize == 16;
437 if (header_in_data) {
438 qkey = be32_to_cpu(((__be32 *) data)[1]);
439 src_qp = be32_to_cpu(((__be32 *) data)[2]);
440 data += 12;
441 } else {
442 qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
443 src_qp = be32_to_cpu(ohdr->u.ud.deth[1]);
444 }
445 }
446 src_qp &= IPATH_QPN_MASK;
447
448 /*
449 * Check that the permissive LID is only used on QP0
450 * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
451 */
452 if (qp->ibqp.qp_num) {
453 if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
454 hdr->lrh[3] == IB_LID_PERMISSIVE)) {
455 dev->n_pkt_drops++;
456 goto bail;
457 }
458 if (unlikely(qkey != qp->qkey)) {
459 /* XXX OK to lose a count once in a while. */
460 dev->qkey_violations++;
461 dev->n_pkt_drops++;
462 goto bail;
463 }
464 } else if (hdr->lrh[1] == IB_LID_PERMISSIVE ||
465 hdr->lrh[3] == IB_LID_PERMISSIVE) {
466 struct ib_smp *smp = (struct ib_smp *) data;
467
468 if (smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
469 dev->n_pkt_drops++;
470 goto bail;
471 }
472 }
473
474 /*
475	 * The opcode is in the low byte when it's in network order
476 * (top byte when in host order).
477 */
478 opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
479 if (qp->ibqp.qp_num > 1 &&
480 opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
481 if (header_in_data) {
482 wc.ex.imm_data = *(__be32 *) data;
483 data += sizeof(__be32);
484 } else
485 wc.ex.imm_data = ohdr->u.ud.imm_data;
486 wc.wc_flags = IB_WC_WITH_IMM;
487 hdrsize += sizeof(u32);
488 } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
489 wc.ex.imm_data = 0;
490 wc.wc_flags = 0;
491 } else {
492 dev->n_pkt_drops++;
493 goto bail;
494 }
495
496 /* Get the number of bytes the message was padded by. */
497 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
498 if (unlikely(tlen < (hdrsize + pad + 4))) {
499 /* Drop incomplete packets. */
500 dev->n_pkt_drops++;
501 goto bail;
502 }
503 tlen -= hdrsize + pad + 4;
504
505 /* Drop invalid MAD packets (see 13.5.3.1). */
506 if (unlikely((qp->ibqp.qp_num == 0 &&
507 (tlen != 256 ||
508 (be16_to_cpu(hdr->lrh[0]) >> 12) != 15)) ||
509 (qp->ibqp.qp_num == 1 &&
510 (tlen != 256 ||
511 (be16_to_cpu(hdr->lrh[0]) >> 12) == 15)))) {
512 dev->n_pkt_drops++;
513 goto bail;
514 }
515
516 /*
517 * A GRH is expected to precede the data even if not
518 * present on the wire.
519 */
520 wc.byte_len = tlen + sizeof(struct ib_grh);
521
522 /*
523 * Get the next work request entry to find where to put the data.
524 */
525 if (qp->r_flags & IPATH_R_REUSE_SGE)
526 qp->r_flags &= ~IPATH_R_REUSE_SGE;
527 else if (!ipath_get_rwqe(qp, 0)) {
528 /*
529 * Count VL15 packets dropped due to no receive buffer.
530		 * Otherwise, count them as buffer overruns, since the HW can
531		 * usually receive packets even if there are
532 * no QPs with posted receive buffers.
533 */
534 if (qp->ibqp.qp_num == 0)
535 dev->n_vl15_dropped++;
536 else
537 dev->rcv_errors++;
538 goto bail;
539 }
540 /* Silently drop packets which are too big. */
541 if (wc.byte_len > qp->r_len) {
542 qp->r_flags |= IPATH_R_REUSE_SGE;
543 dev->n_pkt_drops++;
544 goto bail;
545 }
546 if (has_grh) {
547 ipath_copy_sge(&qp->r_sge, &hdr->u.l.grh,
548 sizeof(struct ib_grh));
549 wc.wc_flags |= IB_WC_GRH;
550 } else
551 ipath_skip_sge(&qp->r_sge, sizeof(struct ib_grh));
552 ipath_copy_sge(&qp->r_sge, data,
553 wc.byte_len - sizeof(struct ib_grh));
554 if (!test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags))
555 goto bail;
556 wc.wr_id = qp->r_wr_id;
557 wc.status = IB_WC_SUCCESS;
558 wc.opcode = IB_WC_RECV;
559 wc.vendor_err = 0;
560 wc.qp = &qp->ibqp;
561 wc.src_qp = src_qp;
562 /* XXX do we know which pkey matched? Only needed for GSI. */
563 wc.pkey_index = 0;
564 wc.slid = be16_to_cpu(hdr->lrh[3]);
565 wc.sl = (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF;
566 dlid = be16_to_cpu(hdr->lrh[1]);
567 /*
568 * Save the LMC lower bits if the destination LID is a unicast LID.
569 */
570 wc.dlid_path_bits = dlid >= IPATH_MULTICAST_LID_BASE ? 0 :
571 dlid & ((1 << dev->dd->ipath_lmc) - 1);
572 wc.port_num = 1;
573 /* Signal completion event if the solicited bit is set. */
574 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
575 (ohdr->bth[0] &
576 cpu_to_be32(1 << 23)) != 0);
577
578bail:;
579}
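
The header_in_data arithmetic referenced inside ipath_ud_rcv() is worth spelling out. The sizes come from the comments in the function itself; the byte-level layout below is a reading of the code above, not something stated by the original authors.

	/*
	 * LRH (8) + GRH (40) + BTH (12) + DETH (8) = 68 bytes of header,
	 * but the eager header buffer holds only 56 bytes, so the last
	 * 68 - 56 = 12 bytes (the final BTH word plus both DETH words)
	 * land at the start of the data buffer: data[0] is bth[2],
	 * data[1] is deth[0] (the qkey) and data[2] is deth[1] (the
	 * source QP), which is why the code reads them there and then
	 * advances data by 12 bytes.
	 */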
diff --git a/drivers/staging/rdma/ipath/ipath_user_pages.c b/drivers/staging/rdma/ipath/ipath_user_pages.c
deleted file mode 100644
index d29b4daf61f8..000000000000
--- a/drivers/staging/rdma/ipath/ipath_user_pages.c
+++ /dev/null
@@ -1,228 +0,0 @@
1/*
2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/mm.h>
35#include <linux/device.h>
36#include <linux/slab.h>
37
38#include "ipath_kernel.h"
39
40static void __ipath_release_user_pages(struct page **p, size_t num_pages,
41 int dirty)
42{
43 size_t i;
44
45 for (i = 0; i < num_pages; i++) {
46 ipath_cdbg(MM, "%lu/%lu put_page %p\n", (unsigned long) i,
47 (unsigned long) num_pages, p[i]);
48 if (dirty)
49 set_page_dirty_lock(p[i]);
50 put_page(p[i]);
51 }
52}
53
54/* call with current->mm->mmap_sem held */
55static int __ipath_get_user_pages(unsigned long start_page, size_t num_pages,
56 struct page **p)
57{
58 unsigned long lock_limit;
59 size_t got;
60 int ret;
61
62 lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
63
64 if (num_pages > lock_limit) {
65 ret = -ENOMEM;
66 goto bail;
67 }
68
69 ipath_cdbg(VERBOSE, "pin %lx pages from vaddr %lx\n",
70 (unsigned long) num_pages, start_page);
71
72 for (got = 0; got < num_pages; got += ret) {
73 ret = get_user_pages(current, current->mm,
74 start_page + got * PAGE_SIZE,
75 num_pages - got, 1, 1,
76 p + got, NULL);
77 if (ret < 0)
78 goto bail_release;
79 }
80
81 current->mm->pinned_vm += num_pages;
82
83 ret = 0;
84 goto bail;
85
86bail_release:
87 __ipath_release_user_pages(p, got, 0);
88bail:
89 return ret;
90}
91
92/**
93 * ipath_map_page - a safety wrapper around pci_map_page()
94 *
95 * A dma_addr of all 0's is interpreted by the chip as "disabled".
96 * Unfortunately, it can also be a valid dma_addr returned on some
97 * architectures.
98 *
99 * The powerpc iommu assigns dma_addrs in ascending order, so we don't
100 * have to bother with retries or mapping a dummy page to ensure we
101 * don't just get the same mapping again.
102 *
103 * I'm sure we won't be so lucky with other IOMMUs, so FIXME.
104 */
105dma_addr_t ipath_map_page(struct pci_dev *hwdev, struct page *page,
106 unsigned long offset, size_t size, int direction)
107{
108 dma_addr_t phys;
109
110 phys = pci_map_page(hwdev, page, offset, size, direction);
111
112 if (phys == 0) {
113 pci_unmap_page(hwdev, phys, size, direction);
114 phys = pci_map_page(hwdev, page, offset, size, direction);
115 /*
116 * FIXME: If we get 0 again, we should keep this page,
117 * map another, then free the 0 page.
118 */
119 }
120
121 return phys;
122}
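
The kernel-doc above explains why a dma_addr of 0 must never be handed to the chip. A minimal caller-side sketch follows, under the assumption that a zero result is still possible after the single retry; the helper name, the receive-buffer use case and the PCI_DMA_FROMDEVICE direction are illustrative, not taken from the original driver.

static int example_map_rcvbuf(struct pci_dev *pdev, struct page *page,
			      dma_addr_t *out)
{
	dma_addr_t busaddr;

	/* ipath_map_page() already retries once if the mapping came back 0 */
	busaddr = ipath_map_page(pdev, page, 0, PAGE_SIZE,
				 PCI_DMA_FROMDEVICE);
	if (!busaddr)		/* the chip would treat 0 as "disabled" */
		return -ENOMEM;

	*out = busaddr;
	return 0;
}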
123
124/**
125 * ipath_map_single - a safety wrapper around pci_map_single()
126 *
127 * Same idea as ipath_map_page().
128 */
129dma_addr_t ipath_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
130 int direction)
131{
132 dma_addr_t phys;
133
134 phys = pci_map_single(hwdev, ptr, size, direction);
135
136 if (phys == 0) {
137 pci_unmap_single(hwdev, phys, size, direction);
138 phys = pci_map_single(hwdev, ptr, size, direction);
139 /*
140 * FIXME: If we get 0 again, we should keep this page,
141 * map another, then free the 0 page.
142 */
143 }
144
145 return phys;
146}
147
148/**
149 * ipath_get_user_pages - lock user pages into memory
150 * @start_page: the start page
151 * @num_pages: the number of pages
152 * @p: the output page structures
153 *
154 * This function takes a given start page (page aligned user virtual
155 * address) and pins it and the following specified number of pages. For
156 * now, num_pages is always 1, but that will probably change at some point
157 * (because the caller is doing expected sends on a single virtually
158 * buffer, so we can do all pages at once).
159 */
160int ipath_get_user_pages(unsigned long start_page, size_t num_pages,
161 struct page **p)
162{
163 int ret;
164
165 down_write(&current->mm->mmap_sem);
166
167 ret = __ipath_get_user_pages(start_page, num_pages, p);
168
169 up_write(&current->mm->mmap_sem);
170
171 return ret;
172}
173
174void ipath_release_user_pages(struct page **p, size_t num_pages)
175{
176 down_write(&current->mm->mmap_sem);
177
178 __ipath_release_user_pages(p, num_pages, 1);
179
180 current->mm->pinned_vm -= num_pages;
181
182 up_write(&current->mm->mmap_sem);
183}
184
185struct ipath_user_pages_work {
186 struct work_struct work;
187 struct mm_struct *mm;
188 unsigned long num_pages;
189};
190
191static void user_pages_account(struct work_struct *_work)
192{
193 struct ipath_user_pages_work *work =
194 container_of(_work, struct ipath_user_pages_work, work);
195
196 down_write(&work->mm->mmap_sem);
197 work->mm->pinned_vm -= work->num_pages;
198 up_write(&work->mm->mmap_sem);
199 mmput(work->mm);
200 kfree(work);
201}
202
203void ipath_release_user_pages_on_close(struct page **p, size_t num_pages)
204{
205 struct ipath_user_pages_work *work;
206 struct mm_struct *mm;
207
208 __ipath_release_user_pages(p, num_pages, 1);
209
210 mm = get_task_mm(current);
211 if (!mm)
212 return;
213
214 work = kmalloc(sizeof(*work), GFP_KERNEL);
215 if (!work)
216 goto bail_mm;
217
218 INIT_WORK(&work->work, user_pages_account);
219 work->mm = mm;
220 work->num_pages = num_pages;
221
222 queue_work(ib_wq, &work->work);
223 return;
224
225bail_mm:
226 mmput(mm);
227 return;
228}
diff --git a/drivers/staging/rdma/ipath/ipath_user_sdma.c b/drivers/staging/rdma/ipath/ipath_user_sdma.c
deleted file mode 100644
index 8c12e3cccc58..000000000000
--- a/drivers/staging/rdma/ipath/ipath_user_sdma.c
+++ /dev/null
@@ -1,874 +0,0 @@
1/*
2 * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/mm.h>
33#include <linux/types.h>
34#include <linux/device.h>
35#include <linux/dmapool.h>
36#include <linux/slab.h>
37#include <linux/list.h>
38#include <linux/highmem.h>
39#include <linux/io.h>
40#include <linux/uio.h>
41#include <linux/rbtree.h>
42#include <linux/spinlock.h>
43#include <linux/delay.h>
44
45#include "ipath_kernel.h"
46#include "ipath_user_sdma.h"
47
48/* minimum size of header */
49#define IPATH_USER_SDMA_MIN_HEADER_LENGTH 64
50/* expected size of headers (for dma_pool) */
51#define IPATH_USER_SDMA_EXP_HEADER_LENGTH 64
52/* length mask in PBC (lower 11 bits) */
53#define IPATH_PBC_LENGTH_MASK ((1 << 11) - 1)
54
55struct ipath_user_sdma_pkt {
56 u8 naddr; /* dimension of addr (1..3) ... */
57 u32 counter; /* sdma pkts queued counter for this entry */
58 u64 added; /* global descq number of entries */
59
60 struct {
61 u32 offset; /* offset for kvaddr, addr */
62 u32 length; /* length in page */
63 u8 put_page; /* should we put_page? */
64 u8 dma_mapped; /* is page dma_mapped? */
65 struct page *page; /* may be NULL (coherent mem) */
66 void *kvaddr; /* FIXME: only for pio hack */
67 dma_addr_t addr;
68 } addr[4]; /* max pages, any more and we coalesce */
69 struct list_head list; /* list element */
70};
71
72struct ipath_user_sdma_queue {
73 /*
74 * pkts sent to dma engine are queued on this
75 * list head. the type of the elements of this
76 * list are struct ipath_user_sdma_pkt...
77 */
78 struct list_head sent;
79
80 /* headers with expected length are allocated from here... */
81 char header_cache_name[64];
82 struct dma_pool *header_cache;
83
84 /* packets are allocated from the slab cache... */
85 char pkt_slab_name[64];
86 struct kmem_cache *pkt_slab;
87
88 /* as packets go on the queued queue, they are counted... */
89 u32 counter;
90 u32 sent_counter;
91
92 /* dma page table */
93 struct rb_root dma_pages_root;
94
95 /* protect everything above... */
96 struct mutex lock;
97};
98
99struct ipath_user_sdma_queue *
100ipath_user_sdma_queue_create(struct device *dev, int unit, int port, int sport)
101{
102 struct ipath_user_sdma_queue *pq =
103 kmalloc(sizeof(struct ipath_user_sdma_queue), GFP_KERNEL);
104
105 if (!pq)
106 goto done;
107
108 pq->counter = 0;
109 pq->sent_counter = 0;
110 INIT_LIST_HEAD(&pq->sent);
111
112 mutex_init(&pq->lock);
113
114 snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name),
115 "ipath-user-sdma-pkts-%u-%02u.%02u", unit, port, sport);
116 pq->pkt_slab = kmem_cache_create(pq->pkt_slab_name,
117 sizeof(struct ipath_user_sdma_pkt),
118 0, 0, NULL);
119
120 if (!pq->pkt_slab)
121 goto err_kfree;
122
123 snprintf(pq->header_cache_name, sizeof(pq->header_cache_name),
124 "ipath-user-sdma-headers-%u-%02u.%02u", unit, port, sport);
125 pq->header_cache = dma_pool_create(pq->header_cache_name,
126 dev,
127 IPATH_USER_SDMA_EXP_HEADER_LENGTH,
128 4, 0);
129 if (!pq->header_cache)
130 goto err_slab;
131
132 pq->dma_pages_root = RB_ROOT;
133
134 goto done;
135
136err_slab:
137 kmem_cache_destroy(pq->pkt_slab);
138err_kfree:
139 kfree(pq);
140 pq = NULL;
141
142done:
143 return pq;
144}
145
146static void ipath_user_sdma_init_frag(struct ipath_user_sdma_pkt *pkt,
147 int i, size_t offset, size_t len,
148 int put_page, int dma_mapped,
149 struct page *page,
150 void *kvaddr, dma_addr_t dma_addr)
151{
152 pkt->addr[i].offset = offset;
153 pkt->addr[i].length = len;
154 pkt->addr[i].put_page = put_page;
155 pkt->addr[i].dma_mapped = dma_mapped;
156 pkt->addr[i].page = page;
157 pkt->addr[i].kvaddr = kvaddr;
158 pkt->addr[i].addr = dma_addr;
159}
160
161static void ipath_user_sdma_init_header(struct ipath_user_sdma_pkt *pkt,
162 u32 counter, size_t offset,
163 size_t len, int dma_mapped,
164 struct page *page,
165 void *kvaddr, dma_addr_t dma_addr)
166{
167 pkt->naddr = 1;
168 pkt->counter = counter;
169 ipath_user_sdma_init_frag(pkt, 0, offset, len, 0, dma_mapped, page,
170 kvaddr, dma_addr);
171}
172
173/* too many pages in the iovec: coalesce them into a single page */
174static int ipath_user_sdma_coalesce(const struct ipath_devdata *dd,
175 struct ipath_user_sdma_pkt *pkt,
176 const struct iovec *iov,
177 unsigned long niov) {
178 int ret = 0;
179 struct page *page = alloc_page(GFP_KERNEL);
180 void *mpage_save;
181 char *mpage;
182 int i;
183 int len = 0;
184 dma_addr_t dma_addr;
185
186 if (!page) {
187 ret = -ENOMEM;
188 goto done;
189 }
190
191 mpage = kmap(page);
192 mpage_save = mpage;
193 for (i = 0; i < niov; i++) {
194 int cfur;
195
196 cfur = copy_from_user(mpage,
197 iov[i].iov_base, iov[i].iov_len);
198 if (cfur) {
199 ret = -EFAULT;
200 goto free_unmap;
201 }
202
203 mpage += iov[i].iov_len;
204 len += iov[i].iov_len;
205 }
206
207 dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,
208 DMA_TO_DEVICE);
209 if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
210 ret = -ENOMEM;
211 goto free_unmap;
212 }
213
214 ipath_user_sdma_init_frag(pkt, 1, 0, len, 0, 1, page, mpage_save,
215 dma_addr);
216 pkt->naddr = 2;
217
218 goto done;
219
220free_unmap:
221 kunmap(page);
222 __free_page(page);
223done:
224 return ret;
225}
226
227/* how many pages in this iovec element? */
228static int ipath_user_sdma_num_pages(const struct iovec *iov)
229{
230 const unsigned long addr = (unsigned long) iov->iov_base;
231 const unsigned long len = iov->iov_len;
232 const unsigned long spage = addr & PAGE_MASK;
233 const unsigned long epage = (addr + len - 1) & PAGE_MASK;
234
235 return 1 + ((epage - spage) >> PAGE_SHIFT);
236}
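
A quick worked example of the page-count arithmetic above, assuming 4 KiB pages (PAGE_SHIFT == 12); the numbers are purely illustrative:

	/*
	 * iov_base = 0x1ff0, iov_len = 0x40  ->  bytes 0x1ff0..0x202f
	 * spage = 0x1000, epage = 0x2000
	 * pages = 1 + ((0x2000 - 0x1000) >> 12) = 2
	 */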
237
238/* truncate length to page boundary */
239static int ipath_user_sdma_page_length(unsigned long addr, unsigned long len)
240{
241 const unsigned long offset = offset_in_page(addr);
242
243 return ((offset + len) > PAGE_SIZE) ? (PAGE_SIZE - offset) : len;
244}
245
246static void ipath_user_sdma_free_pkt_frag(struct device *dev,
247 struct ipath_user_sdma_queue *pq,
248 struct ipath_user_sdma_pkt *pkt,
249 int frag)
250{
251 const int i = frag;
252
253 if (pkt->addr[i].page) {
254 if (pkt->addr[i].dma_mapped)
255 dma_unmap_page(dev,
256 pkt->addr[i].addr,
257 pkt->addr[i].length,
258 DMA_TO_DEVICE);
259
260 if (pkt->addr[i].kvaddr)
261 kunmap(pkt->addr[i].page);
262
263 if (pkt->addr[i].put_page)
264 put_page(pkt->addr[i].page);
265 else
266 __free_page(pkt->addr[i].page);
267 } else if (pkt->addr[i].kvaddr)
268 /* free coherent mem from cache... */
269 dma_pool_free(pq->header_cache,
270 pkt->addr[i].kvaddr, pkt->addr[i].addr);
271}
272
273/* return number of pages pinned... */
274static int ipath_user_sdma_pin_pages(const struct ipath_devdata *dd,
275 struct ipath_user_sdma_pkt *pkt,
276 unsigned long addr, int tlen, int npages)
277{
278 struct page *pages[2];
279 int j;
280 int ret;
281
282 ret = get_user_pages_fast(addr, npages, 0, pages);
283 if (ret != npages) {
284 int i;
285
286 for (i = 0; i < ret; i++)
287 put_page(pages[i]);
288
289 ret = -ENOMEM;
290 goto done;
291 }
292
293 for (j = 0; j < npages; j++) {
294 /* map the pages... */
295 const int flen =
296 ipath_user_sdma_page_length(addr, tlen);
297 dma_addr_t dma_addr =
298 dma_map_page(&dd->pcidev->dev,
299 pages[j], 0, flen, DMA_TO_DEVICE);
300 unsigned long fofs = offset_in_page(addr);
301
302 if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
303 ret = -ENOMEM;
304 goto done;
305 }
306
307 ipath_user_sdma_init_frag(pkt, pkt->naddr, fofs, flen, 1, 1,
308 pages[j], kmap(pages[j]),
309 dma_addr);
310
311 pkt->naddr++;
312 addr += flen;
313 tlen -= flen;
314 }
315
316done:
317 return ret;
318}
319
320static int ipath_user_sdma_pin_pkt(const struct ipath_devdata *dd,
321 struct ipath_user_sdma_queue *pq,
322 struct ipath_user_sdma_pkt *pkt,
323 const struct iovec *iov,
324 unsigned long niov)
325{
326 int ret = 0;
327 unsigned long idx;
328
329 for (idx = 0; idx < niov; idx++) {
330 const int npages = ipath_user_sdma_num_pages(iov + idx);
331 const unsigned long addr = (unsigned long) iov[idx].iov_base;
332
333 ret = ipath_user_sdma_pin_pages(dd, pkt,
334 addr, iov[idx].iov_len,
335 npages);
336 if (ret < 0)
337 goto free_pkt;
338 }
339
340 goto done;
341
342free_pkt:
343 for (idx = 0; idx < pkt->naddr; idx++)
344 ipath_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx);
345
346done:
347 return ret;
348}
349
350static int ipath_user_sdma_init_payload(const struct ipath_devdata *dd,
351 struct ipath_user_sdma_queue *pq,
352 struct ipath_user_sdma_pkt *pkt,
353 const struct iovec *iov,
354 unsigned long niov, int npages)
355{
356 int ret = 0;
357
358 if (npages >= ARRAY_SIZE(pkt->addr))
359 ret = ipath_user_sdma_coalesce(dd, pkt, iov, niov);
360 else
361 ret = ipath_user_sdma_pin_pkt(dd, pq, pkt, iov, niov);
362
363 return ret;
364}
365
366/* free a packet list */
367static void ipath_user_sdma_free_pkt_list(struct device *dev,
368 struct ipath_user_sdma_queue *pq,
369 struct list_head *list)
370{
371 struct ipath_user_sdma_pkt *pkt, *pkt_next;
372
373 list_for_each_entry_safe(pkt, pkt_next, list, list) {
374 int i;
375
376 for (i = 0; i < pkt->naddr; i++)
377 ipath_user_sdma_free_pkt_frag(dev, pq, pkt, i);
378
379 kmem_cache_free(pq->pkt_slab, pkt);
380 }
381}
382
383/*
384 * copy headers, coalesce etc -- pq->lock must be held
385 *
386 * we queue all the packets on 'list' and return the number of
387 * iovec entries consumed. the list must be empty initially,
388 * since we clean it out if there is an error...
389 */
390static int ipath_user_sdma_queue_pkts(const struct ipath_devdata *dd,
391 struct ipath_user_sdma_queue *pq,
392 struct list_head *list,
393 const struct iovec *iov,
394 unsigned long niov,
395 int maxpkts)
396{
397 unsigned long idx = 0;
398 int ret = 0;
399 int npkts = 0;
400 struct page *page = NULL;
401 __le32 *pbc;
402 dma_addr_t dma_addr;
403 struct ipath_user_sdma_pkt *pkt = NULL;
404 size_t len;
405 size_t nw;
406 u32 counter = pq->counter;
407 int dma_mapped = 0;
408
409 while (idx < niov && npkts < maxpkts) {
410 const unsigned long addr = (unsigned long) iov[idx].iov_base;
411 const unsigned long idx_save = idx;
412 unsigned pktnw;
413 unsigned pktnwc;
414 int nfrags = 0;
415 int npages = 0;
416 int cfur;
417
418 dma_mapped = 0;
419 len = iov[idx].iov_len;
420 nw = len >> 2;
421 page = NULL;
422
423 pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL);
424 if (!pkt) {
425 ret = -ENOMEM;
426 goto free_list;
427 }
428
429 if (len < IPATH_USER_SDMA_MIN_HEADER_LENGTH ||
430 len > PAGE_SIZE || len & 3 || addr & 3) {
431 ret = -EINVAL;
432 goto free_pkt;
433 }
434
435 if (len == IPATH_USER_SDMA_EXP_HEADER_LENGTH)
436 pbc = dma_pool_alloc(pq->header_cache, GFP_KERNEL,
437 &dma_addr);
438 else
439 pbc = NULL;
440
441 if (!pbc) {
442 page = alloc_page(GFP_KERNEL);
443 if (!page) {
444 ret = -ENOMEM;
445 goto free_pkt;
446 }
447 pbc = kmap(page);
448 }
449
450 cfur = copy_from_user(pbc, iov[idx].iov_base, len);
451 if (cfur) {
452 ret = -EFAULT;
453 goto free_pbc;
454 }
455
456 /*
457 * this assignment is a bit strange. it's because the
458		 * pbc counts the number of 32 bit words in the full
459 * packet _except_ the first word of the pbc itself...
460 */
461 pktnwc = nw - 1;
462
463 /*
464 * pktnw computation yields the number of 32 bit words
465 * that the caller has indicated in the PBC. note that
466 * this is one less than the total number of words that
467 * goes to the send DMA engine as the first 32 bit word
468 * of the PBC itself is not counted. Armed with this count,
469 * we can verify that the packet is consistent with the
470 * iovec lengths.
471		 * iovec lengths (a worked example follows this function).
472 pktnw = le32_to_cpu(*pbc) & IPATH_PBC_LENGTH_MASK;
473 if (pktnw < pktnwc || pktnw > pktnwc + (PAGE_SIZE >> 2)) {
474 ret = -EINVAL;
475 goto free_pbc;
476 }
477
478
479 idx++;
480 while (pktnwc < pktnw && idx < niov) {
481 const size_t slen = iov[idx].iov_len;
482 const unsigned long faddr =
483 (unsigned long) iov[idx].iov_base;
484
485 if (slen & 3 || faddr & 3 || !slen ||
486 slen > PAGE_SIZE) {
487 ret = -EINVAL;
488 goto free_pbc;
489 }
490
491 npages++;
492 if ((faddr & PAGE_MASK) !=
493 ((faddr + slen - 1) & PAGE_MASK))
494 npages++;
495
496 pktnwc += slen >> 2;
497 idx++;
498 nfrags++;
499 }
500
501 if (pktnwc != pktnw) {
502 ret = -EINVAL;
503 goto free_pbc;
504 }
505
506 if (page) {
507 dma_addr = dma_map_page(&dd->pcidev->dev,
508 page, 0, len, DMA_TO_DEVICE);
509 if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
510 ret = -ENOMEM;
511 goto free_pbc;
512 }
513
514 dma_mapped = 1;
515 }
516
517 ipath_user_sdma_init_header(pkt, counter, 0, len, dma_mapped,
518 page, pbc, dma_addr);
519
520 if (nfrags) {
521 ret = ipath_user_sdma_init_payload(dd, pq, pkt,
522 iov + idx_save + 1,
523 nfrags, npages);
524 if (ret < 0)
525 goto free_pbc_dma;
526 }
527
528 counter++;
529 npkts++;
530
531 list_add_tail(&pkt->list, list);
532 }
533
534 ret = idx;
535 goto done;
536
537free_pbc_dma:
538 if (dma_mapped)
539 dma_unmap_page(&dd->pcidev->dev, dma_addr, len, DMA_TO_DEVICE);
540free_pbc:
541 if (page) {
542 kunmap(page);
543 __free_page(page);
544 } else
545 dma_pool_free(pq->header_cache, pbc, dma_addr);
546free_pkt:
547 kmem_cache_free(pq->pkt_slab, pkt);
548free_list:
549 ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list);
550done:
551 return ret;
552}
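
As promised above, a worked example of the PBC word-count check; the numbers are illustrative and assume the usual 64-byte header iovec:

	/*
	 * first iovec: len = 64 bytes  ->  nw = 64 >> 2 = 16 dwords
	 * pktnwc = nw - 1 = 15         (the first PBC word is not counted)
	 * if the PBC's low 11 bits say pktnw = 79, the payload iovecs must
	 * contribute 79 - 15 = 64 more dwords (256 bytes) before
	 * pktnwc == pktnw, otherwise the request fails with -EINVAL.
	 */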
553
554static void ipath_user_sdma_set_complete_counter(struct ipath_user_sdma_queue *pq,
555 u32 c)
556{
557 pq->sent_counter = c;
558}
559
560/* try to clean out queue -- needs pq->lock */
561static int ipath_user_sdma_queue_clean(const struct ipath_devdata *dd,
562 struct ipath_user_sdma_queue *pq)
563{
564 struct list_head free_list;
565 struct ipath_user_sdma_pkt *pkt;
566 struct ipath_user_sdma_pkt *pkt_prev;
567 int ret = 0;
568
569 INIT_LIST_HEAD(&free_list);
570
571 list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
572 s64 descd = dd->ipath_sdma_descq_removed - pkt->added;
573
574 if (descd < 0)
575 break;
576
577 list_move_tail(&pkt->list, &free_list);
578
579 /* one more packet cleaned */
580 ret++;
581 }
582
583 if (!list_empty(&free_list)) {
584 u32 counter;
585
586 pkt = list_entry(free_list.prev,
587 struct ipath_user_sdma_pkt, list);
588 counter = pkt->counter;
589
590 ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
591 ipath_user_sdma_set_complete_counter(pq, counter);
592 }
593
594 return ret;
595}
596
597void ipath_user_sdma_queue_destroy(struct ipath_user_sdma_queue *pq)
598{
599 if (!pq)
600 return;
601
602 kmem_cache_destroy(pq->pkt_slab);
603 dma_pool_destroy(pq->header_cache);
604 kfree(pq);
605}
606
607/* clean descriptor queue, returns > 0 if some elements cleaned */
608static int ipath_user_sdma_hwqueue_clean(struct ipath_devdata *dd)
609{
610 int ret;
611 unsigned long flags;
612
613 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
614 ret = ipath_sdma_make_progress(dd);
615 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
616
617 return ret;
618}
619
620/* we're in close, drain packets so that we can clean up successfully... */
621void ipath_user_sdma_queue_drain(struct ipath_devdata *dd,
622 struct ipath_user_sdma_queue *pq)
623{
624 int i;
625
626 if (!pq)
627 return;
628
629 for (i = 0; i < 100; i++) {
630 mutex_lock(&pq->lock);
631 if (list_empty(&pq->sent)) {
632 mutex_unlock(&pq->lock);
633 break;
634 }
635 ipath_user_sdma_hwqueue_clean(dd);
636 ipath_user_sdma_queue_clean(dd, pq);
637 mutex_unlock(&pq->lock);
638 msleep(10);
639 }
640
641 if (!list_empty(&pq->sent)) {
642 struct list_head free_list;
643
644 printk(KERN_INFO "drain: lists not empty: forcing!\n");
645 INIT_LIST_HEAD(&free_list);
646 mutex_lock(&pq->lock);
647 list_splice_init(&pq->sent, &free_list);
648 ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
649 mutex_unlock(&pq->lock);
650 }
651}
652
653static inline __le64 ipath_sdma_make_desc0(struct ipath_devdata *dd,
654 u64 addr, u64 dwlen, u64 dwoffset)
655{
656 return cpu_to_le64(/* SDmaPhyAddr[31:0] */
657 ((addr & 0xfffffffcULL) << 32) |
658 /* SDmaGeneration[1:0] */
659 ((dd->ipath_sdma_generation & 3ULL) << 30) |
660 /* SDmaDwordCount[10:0] */
661 ((dwlen & 0x7ffULL) << 16) |
662 /* SDmaBufOffset[12:2] */
663 (dwoffset & 0x7ffULL));
664}
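
For anyone staring at a descriptor dump, the packing above is easy to invert. The helper below is a hypothetical sketch (its name is an assumption; it is not part of the original driver) that simply undoes the field layout commented in ipath_sdma_make_desc0():

static inline void example_decode_desc0(__le64 desc, u32 *dwoffset,
					u32 *dwlen, u32 *generation)
{
	u64 v = le64_to_cpu(desc);

	*dwoffset   = v & 0x7ff;		/* SDmaBufOffset[12:2] */
	*dwlen      = (v >> 16) & 0x7ff;	/* SDmaDwordCount[10:0] */
	*generation = (v >> 30) & 3;		/* SDmaGeneration[1:0] */
	/* the low 32 address bits are (v >> 32) & 0xfffffffc */
}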
665
666static inline __le64 ipath_sdma_make_first_desc0(__le64 descq)
667{
668 return descq | cpu_to_le64(1ULL << 12);
669}
670
671static inline __le64 ipath_sdma_make_last_desc0(__le64 descq)
672{
673 /* last */ /* dma head */
674 return descq | cpu_to_le64(1ULL << 11 | 1ULL << 13);
675}
676
677static inline __le64 ipath_sdma_make_desc1(u64 addr)
678{
679 /* SDmaPhyAddr[47:32] */
680 return cpu_to_le64(addr >> 32);
681}
682
683static void ipath_user_sdma_send_frag(struct ipath_devdata *dd,
684 struct ipath_user_sdma_pkt *pkt, int idx,
685 unsigned ofs, u16 tail)
686{
687 const u64 addr = (u64) pkt->addr[idx].addr +
688 (u64) pkt->addr[idx].offset;
689 const u64 dwlen = (u64) pkt->addr[idx].length / 4;
690 __le64 *descqp;
691 __le64 descq0;
692
693 descqp = &dd->ipath_sdma_descq[tail].qw[0];
694
695 descq0 = ipath_sdma_make_desc0(dd, addr, dwlen, ofs);
696 if (idx == 0)
697 descq0 = ipath_sdma_make_first_desc0(descq0);
698 if (idx == pkt->naddr - 1)
699 descq0 = ipath_sdma_make_last_desc0(descq0);
700
701 descqp[0] = descq0;
702 descqp[1] = ipath_sdma_make_desc1(addr);
703}
704
705/* pq->lock must be held, get packets on the wire... */
706static int ipath_user_sdma_push_pkts(struct ipath_devdata *dd,
707 struct ipath_user_sdma_queue *pq,
708 struct list_head *pktlist)
709{
710 int ret = 0;
711 unsigned long flags;
712 u16 tail;
713
714 if (list_empty(pktlist))
715 return 0;
716
717 if (unlikely(!(dd->ipath_flags & IPATH_LINKACTIVE)))
718 return -ECOMM;
719
720 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
721
722 if (unlikely(dd->ipath_sdma_status & IPATH_SDMA_ABORT_MASK)) {
723 ret = -ECOMM;
724 goto unlock;
725 }
726
727 tail = dd->ipath_sdma_descq_tail;
728 while (!list_empty(pktlist)) {
729 struct ipath_user_sdma_pkt *pkt =
730 list_entry(pktlist->next, struct ipath_user_sdma_pkt,
731 list);
732 int i;
733 unsigned ofs = 0;
734 u16 dtail = tail;
735
736 if (pkt->naddr > ipath_sdma_descq_freecnt(dd))
737 goto unlock_check_tail;
738
739 for (i = 0; i < pkt->naddr; i++) {
740 ipath_user_sdma_send_frag(dd, pkt, i, ofs, tail);
741 ofs += pkt->addr[i].length >> 2;
742
743 if (++tail == dd->ipath_sdma_descq_cnt) {
744 tail = 0;
745 ++dd->ipath_sdma_generation;
746 }
747 }
748
749 if ((ofs<<2) > dd->ipath_ibmaxlen) {
750 ipath_dbg("packet size %X > ibmax %X, fail\n",
751 ofs<<2, dd->ipath_ibmaxlen);
752 ret = -EMSGSIZE;
753 goto unlock;
754 }
755
756 /*
757 * if the packet is >= 2KB mtu equivalent, we have to use
758 * the large buffers, and have to mark each descriptor as
759 * part of a large buffer packet.
760 */
761 if (ofs >= IPATH_SMALLBUF_DWORDS) {
762 for (i = 0; i < pkt->naddr; i++) {
763 dd->ipath_sdma_descq[dtail].qw[0] |=
764 cpu_to_le64(1ULL << 14);
765 if (++dtail == dd->ipath_sdma_descq_cnt)
766 dtail = 0;
767 }
768 }
769
770 dd->ipath_sdma_descq_added += pkt->naddr;
771 pkt->added = dd->ipath_sdma_descq_added;
772 list_move_tail(&pkt->list, &pq->sent);
773 ret++;
774 }
775
776unlock_check_tail:
777 /* advance the tail on the chip if necessary */
778 if (dd->ipath_sdma_descq_tail != tail) {
779 wmb();
780 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, tail);
781 dd->ipath_sdma_descq_tail = tail;
782 }
783
784unlock:
785 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
786
787 return ret;
788}
789
790int ipath_user_sdma_writev(struct ipath_devdata *dd,
791 struct ipath_user_sdma_queue *pq,
792 const struct iovec *iov,
793 unsigned long dim)
794{
795 int ret = 0;
796 struct list_head list;
797 int npkts = 0;
798
799 INIT_LIST_HEAD(&list);
800
801 mutex_lock(&pq->lock);
802
803 if (dd->ipath_sdma_descq_added != dd->ipath_sdma_descq_removed) {
804 ipath_user_sdma_hwqueue_clean(dd);
805 ipath_user_sdma_queue_clean(dd, pq);
806 }
807
808 while (dim) {
809 const int mxp = 8;
810
811 ret = ipath_user_sdma_queue_pkts(dd, pq, &list, iov, dim, mxp);
812 if (ret <= 0)
813 goto done_unlock;
814 else {
815 dim -= ret;
816 iov += ret;
817 }
818
819 /* force packets onto the sdma hw queue... */
820 if (!list_empty(&list)) {
821 /*
822 * lazily clean hw queue. the 4 is a guess of about
823 * how many sdma descriptors a packet will take (it
824 * doesn't have to be perfect).
825 */
826 if (ipath_sdma_descq_freecnt(dd) < ret * 4) {
827 ipath_user_sdma_hwqueue_clean(dd);
828 ipath_user_sdma_queue_clean(dd, pq);
829 }
830
831 ret = ipath_user_sdma_push_pkts(dd, pq, &list);
832 if (ret < 0)
833 goto done_unlock;
834 else {
835 npkts += ret;
836 pq->counter += ret;
837
838 if (!list_empty(&list))
839 goto done_unlock;
840 }
841 }
842 }
843
844done_unlock:
845 if (!list_empty(&list))
846 ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list);
847 mutex_unlock(&pq->lock);
848
849 return (ret < 0) ? ret : npkts;
850}
851
852int ipath_user_sdma_make_progress(struct ipath_devdata *dd,
853 struct ipath_user_sdma_queue *pq)
854{
855 int ret = 0;
856
857 mutex_lock(&pq->lock);
858 ipath_user_sdma_hwqueue_clean(dd);
859 ret = ipath_user_sdma_queue_clean(dd, pq);
860 mutex_unlock(&pq->lock);
861
862 return ret;
863}
864
865u32 ipath_user_sdma_complete_counter(const struct ipath_user_sdma_queue *pq)
866{
867 return pq->sent_counter;
868}
869
870u32 ipath_user_sdma_inflight_counter(struct ipath_user_sdma_queue *pq)
871{
872 return pq->counter;
873}
874
diff --git a/drivers/staging/rdma/ipath/ipath_user_sdma.h b/drivers/staging/rdma/ipath/ipath_user_sdma.h
deleted file mode 100644
index fc76316c4a58..000000000000
--- a/drivers/staging/rdma/ipath/ipath_user_sdma.h
+++ /dev/null
@@ -1,52 +0,0 @@
1/*
2 * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/device.h>
33
34struct ipath_user_sdma_queue;
35
36struct ipath_user_sdma_queue *
37ipath_user_sdma_queue_create(struct device *dev, int unit, int port, int sport);
38void ipath_user_sdma_queue_destroy(struct ipath_user_sdma_queue *pq);
39
40int ipath_user_sdma_writev(struct ipath_devdata *dd,
41 struct ipath_user_sdma_queue *pq,
42 const struct iovec *iov,
43 unsigned long dim);
44
45int ipath_user_sdma_make_progress(struct ipath_devdata *dd,
46 struct ipath_user_sdma_queue *pq);
47
48void ipath_user_sdma_queue_drain(struct ipath_devdata *dd,
49 struct ipath_user_sdma_queue *pq);
50
51u32 ipath_user_sdma_complete_counter(const struct ipath_user_sdma_queue *pq);
52u32 ipath_user_sdma_inflight_counter(struct ipath_user_sdma_queue *pq);
diff --git a/drivers/staging/rdma/ipath/ipath_verbs.c b/drivers/staging/rdma/ipath/ipath_verbs.c
deleted file mode 100644
index 53f9dcab180d..000000000000
--- a/drivers/staging/rdma/ipath/ipath_verbs.c
+++ /dev/null
@@ -1,2376 +0,0 @@
1/*
2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <rdma/ib_mad.h>
35#include <rdma/ib_user_verbs.h>
36#include <linux/io.h>
37#include <linux/slab.h>
38#include <linux/module.h>
39#include <linux/utsname.h>
40#include <linux/rculist.h>
41
42#include "ipath_kernel.h"
43#include "ipath_verbs.h"
44#include "ipath_common.h"
45
46static unsigned int ib_ipath_qp_table_size = 251;
47module_param_named(qp_table_size, ib_ipath_qp_table_size, uint, S_IRUGO);
48MODULE_PARM_DESC(qp_table_size, "QP table size");
49
50unsigned int ib_ipath_lkey_table_size = 12;
51module_param_named(lkey_table_size, ib_ipath_lkey_table_size, uint,
52 S_IRUGO);
53MODULE_PARM_DESC(lkey_table_size,
54 "LKEY table size in bits (2^n, 1 <= n <= 23)");
55
56static unsigned int ib_ipath_max_pds = 0xFFFF;
57module_param_named(max_pds, ib_ipath_max_pds, uint, S_IWUSR | S_IRUGO);
58MODULE_PARM_DESC(max_pds,
59 "Maximum number of protection domains to support");
60
61static unsigned int ib_ipath_max_ahs = 0xFFFF;
62module_param_named(max_ahs, ib_ipath_max_ahs, uint, S_IWUSR | S_IRUGO);
63MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");
64
65unsigned int ib_ipath_max_cqes = 0x2FFFF;
66module_param_named(max_cqes, ib_ipath_max_cqes, uint, S_IWUSR | S_IRUGO);
67MODULE_PARM_DESC(max_cqes,
68 "Maximum number of completion queue entries to support");
69
70unsigned int ib_ipath_max_cqs = 0x1FFFF;
71module_param_named(max_cqs, ib_ipath_max_cqs, uint, S_IWUSR | S_IRUGO);
72MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");
73
74unsigned int ib_ipath_max_qp_wrs = 0x3FFF;
75module_param_named(max_qp_wrs, ib_ipath_max_qp_wrs, uint,
76 S_IWUSR | S_IRUGO);
77MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");
78
79unsigned int ib_ipath_max_qps = 16384;
80module_param_named(max_qps, ib_ipath_max_qps, uint, S_IWUSR | S_IRUGO);
81MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");
82
83unsigned int ib_ipath_max_sges = 0x60;
84module_param_named(max_sges, ib_ipath_max_sges, uint, S_IWUSR | S_IRUGO);
85MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");
86
87unsigned int ib_ipath_max_mcast_grps = 16384;
88module_param_named(max_mcast_grps, ib_ipath_max_mcast_grps, uint,
89 S_IWUSR | S_IRUGO);
90MODULE_PARM_DESC(max_mcast_grps,
91 "Maximum number of multicast groups to support");
92
93unsigned int ib_ipath_max_mcast_qp_attached = 16;
94module_param_named(max_mcast_qp_attached, ib_ipath_max_mcast_qp_attached,
95 uint, S_IWUSR | S_IRUGO);
96MODULE_PARM_DESC(max_mcast_qp_attached,
97 "Maximum number of attached QPs to support");
98
99unsigned int ib_ipath_max_srqs = 1024;
100module_param_named(max_srqs, ib_ipath_max_srqs, uint, S_IWUSR | S_IRUGO);
101MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");
102
103unsigned int ib_ipath_max_srq_sges = 128;
104module_param_named(max_srq_sges, ib_ipath_max_srq_sges,
105 uint, S_IWUSR | S_IRUGO);
106MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");
107
108unsigned int ib_ipath_max_srq_wrs = 0x1FFFF;
109module_param_named(max_srq_wrs, ib_ipath_max_srq_wrs,
110 uint, S_IWUSR | S_IRUGO);
111MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");
112
113static unsigned int ib_ipath_disable_sma;
114module_param_named(disable_sma, ib_ipath_disable_sma, uint, S_IWUSR | S_IRUGO);
115MODULE_PARM_DESC(disable_sma, "Disable the SMA");
116
117/*
118 * Note that it is OK to post send work requests in the SQE and ERR
119 * states; ipath_do_send() will process them and generate error
120 * completions as per IB 1.2 C10-96.
121 */
122const int ib_ipath_state_ops[IB_QPS_ERR + 1] = {
123 [IB_QPS_RESET] = 0,
124 [IB_QPS_INIT] = IPATH_POST_RECV_OK,
125 [IB_QPS_RTR] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK,
126 [IB_QPS_RTS] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK |
127 IPATH_POST_SEND_OK | IPATH_PROCESS_SEND_OK |
128 IPATH_PROCESS_NEXT_SEND_OK,
129 [IB_QPS_SQD] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK |
130 IPATH_POST_SEND_OK | IPATH_PROCESS_SEND_OK,
131 [IB_QPS_SQE] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK |
132 IPATH_POST_SEND_OK | IPATH_FLUSH_SEND,
133 [IB_QPS_ERR] = IPATH_POST_RECV_OK | IPATH_FLUSH_RECV |
134 IPATH_POST_SEND_OK | IPATH_FLUSH_SEND,
135};
136
137struct ipath_ucontext {
138 struct ib_ucontext ibucontext;
139};
140
141static inline struct ipath_ucontext *to_iucontext(struct ib_ucontext
142 *ibucontext)
143{
144 return container_of(ibucontext, struct ipath_ucontext, ibucontext);
145}
146
147/*
148 * Translate ib_wr_opcode into ib_wc_opcode.
149 */
150const enum ib_wc_opcode ib_ipath_wc_opcode[] = {
151 [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
152 [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
153 [IB_WR_SEND] = IB_WC_SEND,
154 [IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
155 [IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
156 [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
157 [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
158};
159
160/*
161 * System image GUID.
162 */
163static __be64 sys_image_guid;
164
165/**
166 * ipath_copy_sge - copy data to SGE memory
167 * @ss: the SGE state
168 * @data: the data to copy
169 * @length: the length of the data
170 */
171void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length)
172{
173 struct ipath_sge *sge = &ss->sge;
174
175 while (length) {
176 u32 len = sge->length;
177
178 if (len > length)
179 len = length;
180 if (len > sge->sge_length)
181 len = sge->sge_length;
182 BUG_ON(len == 0);
183 memcpy(sge->vaddr, data, len);
184 sge->vaddr += len;
185 sge->length -= len;
186 sge->sge_length -= len;
187 if (sge->sge_length == 0) {
188 if (--ss->num_sge)
189 *sge = *ss->sg_list++;
190 } else if (sge->length == 0 && sge->mr != NULL) {
191 if (++sge->n >= IPATH_SEGSZ) {
192 if (++sge->m >= sge->mr->mapsz)
193 break;
194 sge->n = 0;
195 }
196 sge->vaddr =
197 sge->mr->map[sge->m]->segs[sge->n].vaddr;
198 sge->length =
199 sge->mr->map[sge->m]->segs[sge->n].length;
200 }
201 data += len;
202 length -= len;
203 }
204}
205
206/**
207 * ipath_skip_sge - skip over SGE memory - XXX almost dup of prev func
208 * @ss: the SGE state
209 * @length: the number of bytes to skip
210 */
211void ipath_skip_sge(struct ipath_sge_state *ss, u32 length)
212{
213 struct ipath_sge *sge = &ss->sge;
214
215 while (length) {
216 u32 len = sge->length;
217
218 if (len > length)
219 len = length;
220 if (len > sge->sge_length)
221 len = sge->sge_length;
222 BUG_ON(len == 0);
223 sge->vaddr += len;
224 sge->length -= len;
225 sge->sge_length -= len;
226 if (sge->sge_length == 0) {
227 if (--ss->num_sge)
228 *sge = *ss->sg_list++;
229 } else if (sge->length == 0 && sge->mr != NULL) {
230 if (++sge->n >= IPATH_SEGSZ) {
231 if (++sge->m >= sge->mr->mapsz)
232 break;
233 sge->n = 0;
234 }
235 sge->vaddr =
236 sge->mr->map[sge->m]->segs[sge->n].vaddr;
237 sge->length =
238 sge->mr->map[sge->m]->segs[sge->n].length;
239 }
240 length -= len;
241 }
242}
243
244/*
245 * Count the number of DMA descriptors needed to send length bytes of data.
246 * Don't modify the ipath_sge_state to get the count.
247 * Return zero if any of the segments is not aligned.
248 */
249static u32 ipath_count_sge(struct ipath_sge_state *ss, u32 length)
250{
251 struct ipath_sge *sg_list = ss->sg_list;
252 struct ipath_sge sge = ss->sge;
253 u8 num_sge = ss->num_sge;
254 u32 ndesc = 1; /* count the header */
255
256 while (length) {
257 u32 len = sge.length;
258
259 if (len > length)
260 len = length;
261 if (len > sge.sge_length)
262 len = sge.sge_length;
263 BUG_ON(len == 0);
264 if (((long) sge.vaddr & (sizeof(u32) - 1)) ||
265 (len != length && (len & (sizeof(u32) - 1)))) {
266 ndesc = 0;
267 break;
268 }
269 ndesc++;
270 sge.vaddr += len;
271 sge.length -= len;
272 sge.sge_length -= len;
273 if (sge.sge_length == 0) {
274 if (--num_sge)
275 sge = *sg_list++;
276 } else if (sge.length == 0 && sge.mr != NULL) {
277 if (++sge.n >= IPATH_SEGSZ) {
278 if (++sge.m >= sge.mr->mapsz)
279 break;
280 sge.n = 0;
281 }
282 sge.vaddr =
283 sge.mr->map[sge.m]->segs[sge.n].vaddr;
284 sge.length =
285 sge.mr->map[sge.m]->segs[sge.n].length;
286 }
287 length -= len;
288 }
289 return ndesc;
290}
291
292/*
293 * Copy from the SGEs to the data buffer.
294 */
295static void ipath_copy_from_sge(void *data, struct ipath_sge_state *ss,
296 u32 length)
297{
298 struct ipath_sge *sge = &ss->sge;
299
300 while (length) {
301 u32 len = sge->length;
302
303 if (len > length)
304 len = length;
305 if (len > sge->sge_length)
306 len = sge->sge_length;
307 BUG_ON(len == 0);
308 memcpy(data, sge->vaddr, len);
309 sge->vaddr += len;
310 sge->length -= len;
311 sge->sge_length -= len;
312 if (sge->sge_length == 0) {
313 if (--ss->num_sge)
314 *sge = *ss->sg_list++;
315 } else if (sge->length == 0 && sge->mr != NULL) {
316 if (++sge->n >= IPATH_SEGSZ) {
317 if (++sge->m >= sge->mr->mapsz)
318 break;
319 sge->n = 0;
320 }
321 sge->vaddr =
322 sge->mr->map[sge->m]->segs[sge->n].vaddr;
323 sge->length =
324 sge->mr->map[sge->m]->segs[sge->n].length;
325 }
326 data += len;
327 length -= len;
328 }
329}
330
331/**
332 * ipath_post_one_send - post one RC, UC, or UD send work request
333 * @qp: the QP to post on
334 * @wr: the work request to send
335 */
336static int ipath_post_one_send(struct ipath_qp *qp, struct ib_send_wr *wr)
337{
338 struct ipath_swqe *wqe;
339 u32 next;
340 int i;
341 int j;
342 int acc;
343 int ret;
344 unsigned long flags;
345 struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd;
346
347 spin_lock_irqsave(&qp->s_lock, flags);
348
349 if (qp->ibqp.qp_type != IB_QPT_SMI &&
350 !(dd->ipath_flags & IPATH_LINKACTIVE)) {
351 ret = -ENETDOWN;
352 goto bail;
353 }
354
355 /* Check that state is OK to post send. */
356 if (unlikely(!(ib_ipath_state_ops[qp->state] & IPATH_POST_SEND_OK)))
357 goto bail_inval;
358
359 /* IB spec says that num_sge == 0 is OK. */
360 if (wr->num_sge > qp->s_max_sge)
361 goto bail_inval;
362
363 /*
364	 * Don't allow RDMA reads or atomic operations on UC QPs,
365	 * or undefined opcodes.
366 * Make sure buffer is large enough to hold the result for atomics.
367 */
368 if (qp->ibqp.qp_type == IB_QPT_UC) {
369 if ((unsigned) wr->opcode >= IB_WR_RDMA_READ)
370 goto bail_inval;
371 } else if (qp->ibqp.qp_type == IB_QPT_UD) {
372 /* Check UD opcode */
373 if (wr->opcode != IB_WR_SEND &&
374 wr->opcode != IB_WR_SEND_WITH_IMM)
375 goto bail_inval;
376 /* Check UD destination address PD */
377 if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
378 goto bail_inval;
379 } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD)
380 goto bail_inval;
381 else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
382 (wr->num_sge == 0 ||
383 wr->sg_list[0].length < sizeof(u64) ||
384 wr->sg_list[0].addr & (sizeof(u64) - 1)))
385 goto bail_inval;
386 else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic)
387 goto bail_inval;
388
389 next = qp->s_head + 1;
390 if (next >= qp->s_size)
391 next = 0;
392 if (next == qp->s_last) {
393 ret = -ENOMEM;
394 goto bail;
395 }
396
397 wqe = get_swqe_ptr(qp, qp->s_head);
398
399 if (qp->ibqp.qp_type != IB_QPT_UC &&
400 qp->ibqp.qp_type != IB_QPT_RC)
401 memcpy(&wqe->ud_wr, ud_wr(wr), sizeof(wqe->ud_wr));
402 else if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
403 wr->opcode == IB_WR_RDMA_WRITE ||
404 wr->opcode == IB_WR_RDMA_READ)
405 memcpy(&wqe->rdma_wr, rdma_wr(wr), sizeof(wqe->rdma_wr));
406 else if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
407 wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
408 memcpy(&wqe->atomic_wr, atomic_wr(wr), sizeof(wqe->atomic_wr));
409 else
410 memcpy(&wqe->wr, wr, sizeof(wqe->wr));
411
412 wqe->length = 0;
413 if (wr->num_sge) {
414 acc = wr->opcode >= IB_WR_RDMA_READ ?
415 IB_ACCESS_LOCAL_WRITE : 0;
416 for (i = 0, j = 0; i < wr->num_sge; i++) {
417 u32 length = wr->sg_list[i].length;
418 int ok;
419
420 if (length == 0)
421 continue;
422 ok = ipath_lkey_ok(qp, &wqe->sg_list[j],
423 &wr->sg_list[i], acc);
424 if (!ok)
425 goto bail_inval;
426 wqe->length += length;
427 j++;
428 }
429 wqe->wr.num_sge = j;
430 }
431 if (qp->ibqp.qp_type == IB_QPT_UC ||
432 qp->ibqp.qp_type == IB_QPT_RC) {
433 if (wqe->length > 0x80000000U)
434 goto bail_inval;
435 } else if (wqe->length > to_idev(qp->ibqp.device)->dd->ipath_ibmtu)
436 goto bail_inval;
437 wqe->ssn = qp->s_ssn++;
438 qp->s_head = next;
439
440 ret = 0;
441 goto bail;
442
443bail_inval:
444 ret = -EINVAL;
445bail:
446 spin_unlock_irqrestore(&qp->s_lock, flags);
447 return ret;
448}
449
450/**
451 * ipath_post_send - post a send on a QP
452 * @ibqp: the QP to post the send on
453 * @wr: the list of work requests to post
454 * @bad_wr: the first bad WR is put here
455 *
456 * This may be called from interrupt context.
457 */
458static int ipath_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
459 struct ib_send_wr **bad_wr)
460{
461 struct ipath_qp *qp = to_iqp(ibqp);
462 int err = 0;
463
464 for (; wr; wr = wr->next) {
465 err = ipath_post_one_send(qp, wr);
466 if (err) {
467 *bad_wr = wr;
468 goto bail;
469 }
470 }
471
472 /* Try to do the send work in the caller's context. */
473 ipath_do_send((unsigned long) qp);
474
475bail:
476 return err;
477}
478
479/**
480 * ipath_post_receive - post a receive on a QP
481 * @ibqp: the QP to post the receive on
482 * @wr: the WR to post
483 * @bad_wr: the first bad WR is put here
484 *
485 * This may be called from interrupt context.
486 */
487static int ipath_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
488 struct ib_recv_wr **bad_wr)
489{
490 struct ipath_qp *qp = to_iqp(ibqp);
491 struct ipath_rwq *wq = qp->r_rq.wq;
492 unsigned long flags;
493 int ret;
494
495 /* Check that state is OK to post receive. */
496 if (!(ib_ipath_state_ops[qp->state] & IPATH_POST_RECV_OK) || !wq) {
497 *bad_wr = wr;
498 ret = -EINVAL;
499 goto bail;
500 }
501
502 for (; wr; wr = wr->next) {
503 struct ipath_rwqe *wqe;
504 u32 next;
505 int i;
506
507 if ((unsigned) wr->num_sge > qp->r_rq.max_sge) {
508 *bad_wr = wr;
509 ret = -EINVAL;
510 goto bail;
511 }
512
513 spin_lock_irqsave(&qp->r_rq.lock, flags);
514 next = wq->head + 1;
515 if (next >= qp->r_rq.size)
516 next = 0;
517 if (next == wq->tail) {
518 spin_unlock_irqrestore(&qp->r_rq.lock, flags);
519 *bad_wr = wr;
520 ret = -ENOMEM;
521 goto bail;
522 }
523
524 wqe = get_rwqe_ptr(&qp->r_rq, wq->head);
525 wqe->wr_id = wr->wr_id;
526 wqe->num_sge = wr->num_sge;
527 for (i = 0; i < wr->num_sge; i++)
528 wqe->sg_list[i] = wr->sg_list[i];
529 /* Make sure queue entry is written before the head index. */
530 smp_wmb();
531 wq->head = next;
532 spin_unlock_irqrestore(&qp->r_rq.lock, flags);
533 }
534 ret = 0;
535
536bail:
537 return ret;
538}
539
540/**
541 * ipath_qp_rcv - process an incoming packet on a QP
542 * @dev: the device the packet came on
543 * @hdr: the packet header
544 * @has_grh: true if the packet has a GRH
545 * @data: the packet data
546 * @tlen: the packet length
547 * @qp: the QP the packet came on
548 *
549 * This is called from ipath_ib_rcv() to process an incoming packet
550 * for the given QP.
551 * Called at interrupt level.
552 */
553static void ipath_qp_rcv(struct ipath_ibdev *dev,
554 struct ipath_ib_header *hdr, int has_grh,
555 void *data, u32 tlen, struct ipath_qp *qp)
556{
557 /* Check for valid receive state. */
558 if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
559 dev->n_pkt_drops++;
560 return;
561 }
562
563 switch (qp->ibqp.qp_type) {
564 case IB_QPT_SMI:
565 case IB_QPT_GSI:
566 if (ib_ipath_disable_sma)
567 break;
568 /* FALLTHROUGH */
569 case IB_QPT_UD:
570 ipath_ud_rcv(dev, hdr, has_grh, data, tlen, qp);
571 break;
572
573 case IB_QPT_RC:
574 ipath_rc_rcv(dev, hdr, has_grh, data, tlen, qp);
575 break;
576
577 case IB_QPT_UC:
578 ipath_uc_rcv(dev, hdr, has_grh, data, tlen, qp);
579 break;
580
581 default:
582 break;
583 }
584}
585
586/**
587 * ipath_ib_rcv - process an incoming packet
588 * @arg: the device pointer
589 * @rhdr: the header of the packet
590 * @data: the packet data
591 * @tlen: the packet length
592 *
593 * This is called from ipath_kreceive() to process an incoming packet at
594 * interrupt level. Tlen is the length of the header + data + CRC in bytes.
595 */
596void ipath_ib_rcv(struct ipath_ibdev *dev, void *rhdr, void *data,
597 u32 tlen)
598{
599 struct ipath_ib_header *hdr = rhdr;
600 struct ipath_other_headers *ohdr;
601 struct ipath_qp *qp;
602 u32 qp_num;
603 int lnh;
604 u8 opcode;
605 u16 lid;
606
607 if (unlikely(dev == NULL))
608 goto bail;
609
610 if (unlikely(tlen < 24)) { /* LRH+BTH+CRC */
611 dev->rcv_errors++;
612 goto bail;
613 }
614
615 /* Check for a valid destination LID (see ch. 7.11.1). */
616 lid = be16_to_cpu(hdr->lrh[1]);
617 if (lid < IPATH_MULTICAST_LID_BASE) {
618 lid &= ~((1 << dev->dd->ipath_lmc) - 1);
619 if (unlikely(lid != dev->dd->ipath_lid)) {
620 dev->rcv_errors++;
621 goto bail;
622 }
623 }
624
625 /* Check for GRH */
626 lnh = be16_to_cpu(hdr->lrh[0]) & 3;
627 if (lnh == IPATH_LRH_BTH)
628 ohdr = &hdr->u.oth;
629 else if (lnh == IPATH_LRH_GRH)
630 ohdr = &hdr->u.l.oth;
631 else {
632 dev->rcv_errors++;
633 goto bail;
634 }
635
636 opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0x7f;
637 dev->opstats[opcode].n_bytes += tlen;
638 dev->opstats[opcode].n_packets++;
639
640 /* Get the destination QP number. */
641 qp_num = be32_to_cpu(ohdr->bth[1]) & IPATH_QPN_MASK;
642 if (qp_num == IPATH_MULTICAST_QPN) {
643 struct ipath_mcast *mcast;
644 struct ipath_mcast_qp *p;
645
646 if (lnh != IPATH_LRH_GRH) {
647 dev->n_pkt_drops++;
648 goto bail;
649 }
650 mcast = ipath_mcast_find(&hdr->u.l.grh.dgid);
651 if (mcast == NULL) {
652 dev->n_pkt_drops++;
653 goto bail;
654 }
655 dev->n_multicast_rcv++;
656 list_for_each_entry_rcu(p, &mcast->qp_list, list)
657 ipath_qp_rcv(dev, hdr, 1, data, tlen, p->qp);
658 /*
659 * Notify ipath_multicast_detach() if it is waiting for us
660 * to finish.
661 */
662 if (atomic_dec_return(&mcast->refcount) <= 1)
663 wake_up(&mcast->wait);
664 } else {
665 qp = ipath_lookup_qpn(&dev->qp_table, qp_num);
666 if (qp) {
667 dev->n_unicast_rcv++;
668 ipath_qp_rcv(dev, hdr, lnh == IPATH_LRH_GRH, data,
669 tlen, qp);
670 /*
671 * Notify ipath_destroy_qp() if it is waiting
672 * for us to finish.
673 */
674 if (atomic_dec_and_test(&qp->refcount))
675 wake_up(&qp->wait);
676 } else
677 dev->n_pkt_drops++;
678 }
679
680bail:;
681}
682
683/**
684 * ipath_ib_timer - verbs timer
685 * @arg: the device pointer
686 *
687 * This is called from ipath_do_rcv_timer() at interrupt level to check for
688 * QPs which need retransmits and to collect performance numbers.
689 */
690static void ipath_ib_timer(struct ipath_ibdev *dev)
691{
692 struct ipath_qp *resend = NULL;
693 struct ipath_qp *rnr = NULL;
694 struct list_head *last;
695 struct ipath_qp *qp;
696 unsigned long flags;
697
698 if (dev == NULL)
699 return;
700
701 spin_lock_irqsave(&dev->pending_lock, flags);
702 /* Start filling the next pending queue. */
703 if (++dev->pending_index >= ARRAY_SIZE(dev->pending))
704 dev->pending_index = 0;
705	/* Save any requests still in the new queue; they have timed out. */
706 last = &dev->pending[dev->pending_index];
707 while (!list_empty(last)) {
708 qp = list_entry(last->next, struct ipath_qp, timerwait);
709 list_del_init(&qp->timerwait);
710 qp->timer_next = resend;
711 resend = qp;
712 atomic_inc(&qp->refcount);
713 }
714 last = &dev->rnrwait;
715 if (!list_empty(last)) {
716 qp = list_entry(last->next, struct ipath_qp, timerwait);
717 if (--qp->s_rnr_timeout == 0) {
718 do {
719 list_del_init(&qp->timerwait);
720 qp->timer_next = rnr;
721 rnr = qp;
722 atomic_inc(&qp->refcount);
723 if (list_empty(last))
724 break;
725 qp = list_entry(last->next, struct ipath_qp,
726 timerwait);
727 } while (qp->s_rnr_timeout == 0);
728 }
729 }
730 /*
731 * We should only be in the started state if pma_sample_start != 0
732 */
733 if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_STARTED &&
734 --dev->pma_sample_start == 0) {
735 dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
736 ipath_snapshot_counters(dev->dd, &dev->ipath_sword,
737 &dev->ipath_rword,
738 &dev->ipath_spkts,
739 &dev->ipath_rpkts,
740 &dev->ipath_xmit_wait);
741 }
742 if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_RUNNING) {
743 if (dev->pma_sample_interval == 0) {
744 u64 ta, tb, tc, td, te;
745
746 dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE;
747 ipath_snapshot_counters(dev->dd, &ta, &tb,
748 &tc, &td, &te);
749
750 dev->ipath_sword = ta - dev->ipath_sword;
751 dev->ipath_rword = tb - dev->ipath_rword;
752 dev->ipath_spkts = tc - dev->ipath_spkts;
753 dev->ipath_rpkts = td - dev->ipath_rpkts;
754 dev->ipath_xmit_wait = te - dev->ipath_xmit_wait;
755 } else {
756 dev->pma_sample_interval--;
757 }
758 }
759 spin_unlock_irqrestore(&dev->pending_lock, flags);
760
761 /* XXX What if timer fires again while this is running? */
762 while (resend != NULL) {
763 qp = resend;
764 resend = qp->timer_next;
765
766 spin_lock_irqsave(&qp->s_lock, flags);
767 if (qp->s_last != qp->s_tail &&
768 ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) {
769 dev->n_timeouts++;
770 ipath_restart_rc(qp, qp->s_last_psn + 1);
771 }
772 spin_unlock_irqrestore(&qp->s_lock, flags);
773
774 /* Notify ipath_destroy_qp() if it is waiting. */
775 if (atomic_dec_and_test(&qp->refcount))
776 wake_up(&qp->wait);
777 }
778 while (rnr != NULL) {
779 qp = rnr;
780 rnr = qp->timer_next;
781
782 spin_lock_irqsave(&qp->s_lock, flags);
783 if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)
784 ipath_schedule_send(qp);
785 spin_unlock_irqrestore(&qp->s_lock, flags);
786
787 /* Notify ipath_destroy_qp() if it is waiting. */
788 if (atomic_dec_and_test(&qp->refcount))
789 wake_up(&qp->wait);
790 }
791}
792
793static void update_sge(struct ipath_sge_state *ss, u32 length)
794{
795 struct ipath_sge *sge = &ss->sge;
796
797 sge->vaddr += length;
798 sge->length -= length;
799 sge->sge_length -= length;
800 if (sge->sge_length == 0) {
801 if (--ss->num_sge)
802 *sge = *ss->sg_list++;
803 } else if (sge->length == 0 && sge->mr != NULL) {
804 if (++sge->n >= IPATH_SEGSZ) {
805 if (++sge->m >= sge->mr->mapsz)
806 return;
807 sge->n = 0;
808 }
809 sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
810 sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
811 }
812}
813
814#ifdef __LITTLE_ENDIAN
815static inline u32 get_upper_bits(u32 data, u32 shift)
816{
817 return data >> shift;
818}
819
820static inline u32 set_upper_bits(u32 data, u32 shift)
821{
822 return data << shift;
823}
824
825static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
826{
827 data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
828 data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
829 return data;
830}
831#else
832static inline u32 get_upper_bits(u32 data, u32 shift)
833{
834 return data << shift;
835}
836
837static inline u32 set_upper_bits(u32 data, u32 shift)
838{
839 return data >> shift;
840}
841
842static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
843{
844 data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
845 data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
846 return data;
847}
848#endif
849
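To make the byte-lane arithmetic of the helpers above concrete, here is a small standalone little-endian check (not part of the driver; the values are chosen purely for illustration). clear_upper_bytes() keeps the low n bytes of a word and positions them at byte offset off, while set_upper_bits()/get_upper_bits() split a source word across two output words when the copy is misaligned by 'extra' bytes.

/* Standalone little-endian sketch of the copy_io() helpers. */
#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint32_t v = 0xAABBCCDD;

	/* clear_upper_bytes(v, 2, 1): keep the low 2 bytes of v (0xCCDD)
	 * and place them at byte offset 1, clearing everything else. */
	uint32_t kept = (v << ((4 - 2) * 8)) >> ((4 - 2 - 1) * 8);
	assert(kept == 0x00CCDD00);

	/* With a 1-byte misalignment (extra == 1), a source word is split:
	 * set_upper_bits() puts its low 3 bytes at offsets 1..3 of the
	 * current output word, get_upper_bits() carries the remaining high
	 * byte into offset 0 of the next one. */
	uint32_t lo = v << (1 * 8);	/* set_upper_bits(v, 8)  -> 0xBBCCDD00 */
	uint32_t hi = v >> (3 * 8);	/* get_upper_bits(v, 24) -> 0x000000AA */
	assert(lo == 0xBBCCDD00 && hi == 0x000000AA);

	return 0;
}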
850static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
851 u32 length, unsigned flush_wc)
852{
853 u32 extra = 0;
854 u32 data = 0;
855 u32 last;
856
857 while (1) {
858 u32 len = ss->sge.length;
859 u32 off;
860
861 if (len > length)
862 len = length;
863 if (len > ss->sge.sge_length)
864 len = ss->sge.sge_length;
865 BUG_ON(len == 0);
866 /* If the source address is not aligned, try to align it. */
867 off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
868 if (off) {
869 u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
870 ~(sizeof(u32) - 1));
871 u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
872 u32 y;
873
874 y = sizeof(u32) - off;
875 if (len > y)
876 len = y;
877 if (len + extra >= sizeof(u32)) {
878 data |= set_upper_bits(v, extra *
879 BITS_PER_BYTE);
880 len = sizeof(u32) - extra;
881 if (len == length) {
882 last = data;
883 break;
884 }
885 __raw_writel(data, piobuf);
886 piobuf++;
887 extra = 0;
888 data = 0;
889 } else {
890 /* Clear unused upper bytes */
891 data |= clear_upper_bytes(v, len, extra);
892 if (len == length) {
893 last = data;
894 break;
895 }
896 extra += len;
897 }
898 } else if (extra) {
899 /* Source address is aligned. */
900 u32 *addr = (u32 *) ss->sge.vaddr;
901 int shift = extra * BITS_PER_BYTE;
902 int ushift = 32 - shift;
903 u32 l = len;
904
905 while (l >= sizeof(u32)) {
906 u32 v = *addr;
907
908 data |= set_upper_bits(v, shift);
909 __raw_writel(data, piobuf);
910 data = get_upper_bits(v, ushift);
911 piobuf++;
912 addr++;
913 l -= sizeof(u32);
914 }
915 /*
916			 * We still have 'l' (< 4) bytes of this chunk left over.
917 */
918 if (l) {
919 u32 v = *addr;
920
921 if (l + extra >= sizeof(u32)) {
922 data |= set_upper_bits(v, shift);
923 len -= l + extra - sizeof(u32);
924 if (len == length) {
925 last = data;
926 break;
927 }
928 __raw_writel(data, piobuf);
929 piobuf++;
930 extra = 0;
931 data = 0;
932 } else {
933 /* Clear unused upper bytes */
934 data |= clear_upper_bytes(v, l,
935 extra);
936 if (len == length) {
937 last = data;
938 break;
939 }
940 extra += l;
941 }
942 } else if (len == length) {
943 last = data;
944 break;
945 }
946 } else if (len == length) {
947 u32 w;
948
949 /*
950 * Need to round up for the last dword in the
951 * packet.
952 */
953 w = (len + 3) >> 2;
954 __iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
955 piobuf += w - 1;
956 last = ((u32 *) ss->sge.vaddr)[w - 1];
957 break;
958 } else {
959 u32 w = len >> 2;
960
961 __iowrite32_copy(piobuf, ss->sge.vaddr, w);
962 piobuf += w;
963
964 extra = len & (sizeof(u32) - 1);
965 if (extra) {
966 u32 v = ((u32 *) ss->sge.vaddr)[w];
967
968 /* Clear unused upper bytes */
969 data = clear_upper_bytes(v, extra, 0);
970 }
971 }
972 update_sge(ss, len);
973 length -= len;
974 }
975 /* Update address before sending packet. */
976 update_sge(ss, length);
977 if (flush_wc) {
978		/* must flush everything before the trigger word */
979 ipath_flush_wc();
980 __raw_writel(last, piobuf);
981 /* be sure trigger word is written */
982 ipath_flush_wc();
983 } else
984 __raw_writel(last, piobuf);
985}
986
987/*
988 * Convert IB rate to delay multiplier.
989 */
990unsigned ipath_ib_rate_to_mult(enum ib_rate rate)
991{
992 switch (rate) {
993 case IB_RATE_2_5_GBPS: return 8;
994 case IB_RATE_5_GBPS: return 4;
995 case IB_RATE_10_GBPS: return 2;
996 case IB_RATE_20_GBPS: return 1;
997 default: return 0;
998 }
999}
1000
1001/*
1002 * Convert delay multiplier to IB rate
1003 */
1004static enum ib_rate ipath_mult_to_ib_rate(unsigned mult)
1005{
1006 switch (mult) {
1007 case 8: return IB_RATE_2_5_GBPS;
1008 case 4: return IB_RATE_5_GBPS;
1009 case 2: return IB_RATE_10_GBPS;
1010 case 1: return IB_RATE_20_GBPS;
1011 default: return IB_RATE_PORT_CURRENT;
1012 }
1013}
1014
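A quick sanity sketch of how the two conversions above relate (not driver code; it only calls the helpers defined in this file): the multiplier is simply 20 Gbps divided by the rate, so the four supported rates round-trip exactly and anything else falls back to 0 and IB_RATE_PORT_CURRENT.

/* Sketch only: the rate <-> multiplier helpers are inverses of each other. */
static void __maybe_unused sketch_rate_roundtrip(void)
{
	unsigned mult = ipath_ib_rate_to_mult(IB_RATE_10_GBPS);	/* == 2 */

	BUG_ON(ipath_mult_to_ib_rate(mult) != IB_RATE_10_GBPS);
	/* Unsupported rates degrade gracefully: */
	BUG_ON(ipath_ib_rate_to_mult(IB_RATE_30_GBPS) != 0);
	BUG_ON(ipath_mult_to_ib_rate(0) != IB_RATE_PORT_CURRENT);
}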
1015static inline struct ipath_verbs_txreq *get_txreq(struct ipath_ibdev *dev)
1016{
1017 struct ipath_verbs_txreq *tx = NULL;
1018 unsigned long flags;
1019
1020 spin_lock_irqsave(&dev->pending_lock, flags);
1021 if (!list_empty(&dev->txreq_free)) {
1022 struct list_head *l = dev->txreq_free.next;
1023
1024 list_del(l);
1025 tx = list_entry(l, struct ipath_verbs_txreq, txreq.list);
1026 }
1027 spin_unlock_irqrestore(&dev->pending_lock, flags);
1028 return tx;
1029}
1030
1031static inline void put_txreq(struct ipath_ibdev *dev,
1032 struct ipath_verbs_txreq *tx)
1033{
1034 unsigned long flags;
1035
1036 spin_lock_irqsave(&dev->pending_lock, flags);
1037 list_add(&tx->txreq.list, &dev->txreq_free);
1038 spin_unlock_irqrestore(&dev->pending_lock, flags);
1039}
1040
1041static void sdma_complete(void *cookie, int status)
1042{
1043 struct ipath_verbs_txreq *tx = cookie;
1044 struct ipath_qp *qp = tx->qp;
1045 struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
1046 unsigned long flags;
1047 enum ib_wc_status ibs = status == IPATH_SDMA_TXREQ_S_OK ?
1048 IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR;
1049
1050 if (atomic_dec_and_test(&qp->s_dma_busy)) {
1051 spin_lock_irqsave(&qp->s_lock, flags);
1052 if (tx->wqe)
1053 ipath_send_complete(qp, tx->wqe, ibs);
1054 if ((ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND &&
1055 qp->s_last != qp->s_head) ||
1056 (qp->s_flags & IPATH_S_WAIT_DMA))
1057 ipath_schedule_send(qp);
1058 spin_unlock_irqrestore(&qp->s_lock, flags);
1059 wake_up(&qp->wait_dma);
1060 } else if (tx->wqe) {
1061 spin_lock_irqsave(&qp->s_lock, flags);
1062 ipath_send_complete(qp, tx->wqe, ibs);
1063 spin_unlock_irqrestore(&qp->s_lock, flags);
1064 }
1065
1066 if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF)
1067 kfree(tx->txreq.map_addr);
1068 put_txreq(dev, tx);
1069
1070 if (atomic_dec_and_test(&qp->refcount))
1071 wake_up(&qp->wait);
1072}
1073
1074static void decrement_dma_busy(struct ipath_qp *qp)
1075{
1076 unsigned long flags;
1077
1078 if (atomic_dec_and_test(&qp->s_dma_busy)) {
1079 spin_lock_irqsave(&qp->s_lock, flags);
1080 if ((ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND &&
1081 qp->s_last != qp->s_head) ||
1082 (qp->s_flags & IPATH_S_WAIT_DMA))
1083 ipath_schedule_send(qp);
1084 spin_unlock_irqrestore(&qp->s_lock, flags);
1085 wake_up(&qp->wait_dma);
1086 }
1087}
1088
1089/*
1090 * Compute the number of clock cycles of delay before sending the next packet.
1091 * The multipliers reflect the number of clocks for the fastest rate so
1092 * one tick at 4xDDR is 8 ticks at 1xSDR.
1093 * If the destination port will take longer to receive a packet than
1094 * the outgoing link can send it, we need to delay sending the next packet
1095 * by the difference in time it takes the receiver to receive and the sender
1096 * to send this packet.
1097 * Note that this delay is always correct for UC and RC but not always
1098 * optimal for UD. For UD, the destination HCA can be different for each
1099 * packet, in which case, we could send packets to a different destination
1100 * while "waiting" for the delay. The overhead for doing this without
1101 * HW support is more than just paying the cost of delaying some packets
1102 * unnecessarily.
1103 */
1104static inline unsigned ipath_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult)
1105{
1106 return (rcv_mult > snd_mult) ?
1107 (plen * (rcv_mult - snd_mult) + 1) >> 1 : 0;
1108}
1109
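A worked instance of the formula above, with illustrative numbers only: a packet of plen = 532 dwords (roughly a 2 KB MTU payload plus headers) sent from a 4xDDR link (snd_mult = 1) to a receiver running at 1xSDR (rcv_mult = 8). The result lands in qp->s_pkt_delay and is folded into the PBC control word of the following packet, as the send paths below show.

/* Illustration only: ipath_pkt_delay(532, 1, 8) step by step. */
static unsigned sketch_pkt_delay(void)
{
	u32 plen = 532;			/* ~2 KB MTU packet, in dwords     */
	u8 snd_mult = 1;		/* sender at 4xDDR (20 Gbps)       */
	u8 rcv_mult = 8;		/* receiver at 1xSDR (2.5 Gbps)    */

	/* 532 * 7 + 1 = 3725; halved with round-up => 1862 ticks of delay. */
	return (plen * (rcv_mult - snd_mult) + 1) >> 1;
}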
1110static int ipath_verbs_send_dma(struct ipath_qp *qp,
1111 struct ipath_ib_header *hdr, u32 hdrwords,
1112 struct ipath_sge_state *ss, u32 len,
1113 u32 plen, u32 dwords)
1114{
1115 struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
1116 struct ipath_devdata *dd = dev->dd;
1117 struct ipath_verbs_txreq *tx;
1118 u32 *piobuf;
1119 u32 control;
1120 u32 ndesc;
1121 int ret;
1122
1123 tx = qp->s_tx;
1124 if (tx) {
1125 qp->s_tx = NULL;
1126 /* resend previously constructed packet */
1127 atomic_inc(&qp->s_dma_busy);
1128 ret = ipath_sdma_verbs_send(dd, tx->ss, tx->len, tx);
1129 if (ret) {
1130 qp->s_tx = tx;
1131 decrement_dma_busy(qp);
1132 }
1133 goto bail;
1134 }
1135
1136 tx = get_txreq(dev);
1137 if (!tx) {
1138 ret = -EBUSY;
1139 goto bail;
1140 }
1141
1142 /*
1143 * Get the saved delay count we computed for the previous packet
1144 * and save the delay count for this packet to be used next time
1145 * we get here.
1146 */
1147 control = qp->s_pkt_delay;
1148 qp->s_pkt_delay = ipath_pkt_delay(plen, dd->delay_mult, qp->s_dmult);
1149
1150 tx->qp = qp;
1151 atomic_inc(&qp->refcount);
1152 tx->wqe = qp->s_wqe;
1153 tx->txreq.callback = sdma_complete;
1154 tx->txreq.callback_cookie = tx;
1155 tx->txreq.flags = IPATH_SDMA_TXREQ_F_HEADTOHOST |
1156 IPATH_SDMA_TXREQ_F_INTREQ | IPATH_SDMA_TXREQ_F_FREEDESC;
1157 if (plen + 1 >= IPATH_SMALLBUF_DWORDS)
1158 tx->txreq.flags |= IPATH_SDMA_TXREQ_F_USELARGEBUF;
1159
1160 /* VL15 packets bypass credit check */
1161 if ((be16_to_cpu(hdr->lrh[0]) >> 12) == 15) {
1162 control |= 1ULL << 31;
1163 tx->txreq.flags |= IPATH_SDMA_TXREQ_F_VL15;
1164 }
1165
1166 if (len) {
1167 /*
1168 * Don't try to DMA if it takes more descriptors than
1169 * the queue holds.
1170 */
1171 ndesc = ipath_count_sge(ss, len);
1172 if (ndesc >= dd->ipath_sdma_descq_cnt)
1173 ndesc = 0;
1174 } else
1175 ndesc = 1;
1176 if (ndesc) {
1177 tx->hdr.pbc[0] = cpu_to_le32(plen);
1178 tx->hdr.pbc[1] = cpu_to_le32(control);
1179 memcpy(&tx->hdr.hdr, hdr, hdrwords << 2);
1180 tx->txreq.sg_count = ndesc;
1181 tx->map_len = (hdrwords + 2) << 2;
1182 tx->txreq.map_addr = &tx->hdr;
1183 atomic_inc(&qp->s_dma_busy);
1184 ret = ipath_sdma_verbs_send(dd, ss, dwords, tx);
1185 if (ret) {
1186 /* save ss and length in dwords */
1187 tx->ss = ss;
1188 tx->len = dwords;
1189 qp->s_tx = tx;
1190 decrement_dma_busy(qp);
1191 }
1192 goto bail;
1193 }
1194
1195 /* Allocate a buffer and copy the header and payload to it. */
1196 tx->map_len = (plen + 1) << 2;
1197 piobuf = kmalloc(tx->map_len, GFP_ATOMIC);
1198 if (unlikely(piobuf == NULL)) {
1199 ret = -EBUSY;
1200 goto err_tx;
1201 }
1202 tx->txreq.map_addr = piobuf;
1203 tx->txreq.flags |= IPATH_SDMA_TXREQ_F_FREEBUF;
1204 tx->txreq.sg_count = 1;
1205
1206 *piobuf++ = (__force u32) cpu_to_le32(plen);
1207 *piobuf++ = (__force u32) cpu_to_le32(control);
1208 memcpy(piobuf, hdr, hdrwords << 2);
1209 ipath_copy_from_sge(piobuf + hdrwords, ss, len);
1210
1211 atomic_inc(&qp->s_dma_busy);
1212 ret = ipath_sdma_verbs_send(dd, NULL, 0, tx);
1213 /*
1214 * If we couldn't queue the DMA request, save the info
1215 * and try again later rather than destroying the
1216 * buffer and undoing the side effects of the copy.
1217 */
1218 if (ret) {
1219 tx->ss = NULL;
1220 tx->len = 0;
1221 qp->s_tx = tx;
1222 decrement_dma_busy(qp);
1223 }
1224 dev->n_unaligned++;
1225 goto bail;
1226
1227err_tx:
1228 if (atomic_dec_and_test(&qp->refcount))
1229 wake_up(&qp->wait);
1230 put_txreq(dev, tx);
1231bail:
1232 return ret;
1233}
1234
1235static int ipath_verbs_send_pio(struct ipath_qp *qp,
1236 struct ipath_ib_header *ibhdr, u32 hdrwords,
1237 struct ipath_sge_state *ss, u32 len,
1238 u32 plen, u32 dwords)
1239{
1240 struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd;
1241 u32 *hdr = (u32 *) ibhdr;
1242 u32 __iomem *piobuf;
1243 unsigned flush_wc;
1244 u32 control;
1245 int ret;
1246 unsigned long flags;
1247
1248 piobuf = ipath_getpiobuf(dd, plen, NULL);
1249 if (unlikely(piobuf == NULL)) {
1250 ret = -EBUSY;
1251 goto bail;
1252 }
1253
1254 /*
1255 * Get the saved delay count we computed for the previous packet
1256 * and save the delay count for this packet to be used next time
1257 * we get here.
1258 */
1259 control = qp->s_pkt_delay;
1260 qp->s_pkt_delay = ipath_pkt_delay(plen, dd->delay_mult, qp->s_dmult);
1261
1262 /* VL15 packets bypass credit check */
1263 if ((be16_to_cpu(ibhdr->lrh[0]) >> 12) == 15)
1264 control |= 1ULL << 31;
1265
1266 /*
1267 * Write the length to the control qword plus any needed flags.
1268 * We have to flush after the PBC for correctness on some cpus
1269 * or WC buffer can be written out of order.
1270 */
1271 writeq(((u64) control << 32) | plen, piobuf);
1272 piobuf += 2;
1273
1274 flush_wc = dd->ipath_flags & IPATH_PIO_FLUSH_WC;
1275 if (len == 0) {
1276 /*
1277 * If there is just the header portion, must flush before
1278 * writing last word of header for correctness, and after
1279 * the last header word (trigger word).
1280 */
1281 if (flush_wc) {
1282 ipath_flush_wc();
1283 __iowrite32_copy(piobuf, hdr, hdrwords - 1);
1284 ipath_flush_wc();
1285 __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
1286 ipath_flush_wc();
1287 } else
1288 __iowrite32_copy(piobuf, hdr, hdrwords);
1289 goto done;
1290 }
1291
1292 if (flush_wc)
1293 ipath_flush_wc();
1294 __iowrite32_copy(piobuf, hdr, hdrwords);
1295 piobuf += hdrwords;
1296
1297 /* The common case is aligned and contained in one segment. */
1298 if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
1299 !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
1300 u32 *addr = (u32 *) ss->sge.vaddr;
1301
1302 /* Update address before sending packet. */
1303 update_sge(ss, len);
1304 if (flush_wc) {
1305 __iowrite32_copy(piobuf, addr, dwords - 1);
1306			/* must flush everything before the trigger word */
1307 ipath_flush_wc();
1308 __raw_writel(addr[dwords - 1], piobuf + dwords - 1);
1309 /* be sure trigger word is written */
1310 ipath_flush_wc();
1311 } else
1312 __iowrite32_copy(piobuf, addr, dwords);
1313 goto done;
1314 }
1315 copy_io(piobuf, ss, len, flush_wc);
1316done:
1317 if (qp->s_wqe) {
1318 spin_lock_irqsave(&qp->s_lock, flags);
1319 ipath_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
1320 spin_unlock_irqrestore(&qp->s_lock, flags);
1321 }
1322 ret = 0;
1323bail:
1324 return ret;
1325}
1326
1327/**
1328 * ipath_verbs_send - send a packet
1329 * @qp: the QP to send on
1330 * @hdr: the packet header
1331 * @hdrwords: the number of 32-bit words in the header
1332 * @ss: the SGE to send
1333 * @len: the length of the packet in bytes
1334 */
1335int ipath_verbs_send(struct ipath_qp *qp, struct ipath_ib_header *hdr,
1336 u32 hdrwords, struct ipath_sge_state *ss, u32 len)
1337{
1338 struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd;
1339 u32 plen;
1340 int ret;
1341 u32 dwords = (len + 3) >> 2;
1342
1343 /*
1344 * Calculate the send buffer trigger address.
1345 * The +1 counts for the pbc control dword following the pbc length.
1346 */
1347 plen = hdrwords + dwords + 1;
1348
1349 /*
1350 * VL15 packets (IB_QPT_SMI) will always use PIO, so we
1351 * can defer SDMA restart until link goes ACTIVE without
1352 * worrying about just how we got there.
1353 */
1354 if (qp->ibqp.qp_type == IB_QPT_SMI ||
1355 !(dd->ipath_flags & IPATH_HAS_SEND_DMA))
1356 ret = ipath_verbs_send_pio(qp, hdr, hdrwords, ss, len,
1357 plen, dwords);
1358 else
1359 ret = ipath_verbs_send_dma(qp, hdr, hdrwords, ss, len,
1360 plen, dwords);
1361
1362 return ret;
1363}
1364
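To make the trigger-length arithmetic in ipath_verbs_send() concrete, here is a hypothetical example; the header size assumes a UD send without GRH (LRH + BTH + DETH = 28 bytes = 7 dwords), which is an assumption for illustration, not a value taken from the driver.

/* Illustration only: length words for a hypothetical 1000-byte UD send. */
static u32 sketch_plen(void)
{
	u32 len      = 1000;			/* payload bytes                  */
	u32 hdrwords = 7;			/* LRH + BTH + DETH = 28 bytes    */
	u32 dwords   = (len + 3) >> 2;		/* == 250, payload in dwords      */

	return hdrwords + dwords + 1;		/* == 258; +1 = PBC control dword */
}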
1365int ipath_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
1366 u64 *rwords, u64 *spkts, u64 *rpkts,
1367 u64 *xmit_wait)
1368{
1369 int ret;
1370
1371 if (!(dd->ipath_flags & IPATH_INITTED)) {
1372 /* no hardware, freeze, etc. */
1373 ret = -EINVAL;
1374 goto bail;
1375 }
1376 *swords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
1377 *rwords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
1378 *spkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
1379 *rpkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
1380 *xmit_wait = ipath_snap_cntr(dd, dd->ipath_cregs->cr_sendstallcnt);
1381
1382 ret = 0;
1383
1384bail:
1385 return ret;
1386}
1387
1388/**
1389 * ipath_get_counters - get various chip counters
1390 * @dd: the infinipath device
1391 * @cntrs: counters are placed here
1392 *
1393 * Return the counters needed by recv_pma_get_portcounters().
1394 */
1395int ipath_get_counters(struct ipath_devdata *dd,
1396 struct ipath_verbs_counters *cntrs)
1397{
1398 struct ipath_cregs const *crp = dd->ipath_cregs;
1399 int ret;
1400
1401 if (!(dd->ipath_flags & IPATH_INITTED)) {
1402 /* no hardware, freeze, etc. */
1403 ret = -EINVAL;
1404 goto bail;
1405 }
1406 cntrs->symbol_error_counter =
1407 ipath_snap_cntr(dd, crp->cr_ibsymbolerrcnt);
1408 cntrs->link_error_recovery_counter =
1409 ipath_snap_cntr(dd, crp->cr_iblinkerrrecovcnt);
1410 /*
1411 * The link downed counter counts when the other side downs the
1412 * connection. We add in the number of times we downed the link
1413 * due to local link integrity errors to compensate.
1414 */
1415 cntrs->link_downed_counter =
1416 ipath_snap_cntr(dd, crp->cr_iblinkdowncnt);
1417 cntrs->port_rcv_errors =
1418 ipath_snap_cntr(dd, crp->cr_rxdroppktcnt) +
1419 ipath_snap_cntr(dd, crp->cr_rcvovflcnt) +
1420 ipath_snap_cntr(dd, crp->cr_portovflcnt) +
1421 ipath_snap_cntr(dd, crp->cr_err_rlencnt) +
1422 ipath_snap_cntr(dd, crp->cr_invalidrlencnt) +
1423 ipath_snap_cntr(dd, crp->cr_errlinkcnt) +
1424 ipath_snap_cntr(dd, crp->cr_erricrccnt) +
1425 ipath_snap_cntr(dd, crp->cr_errvcrccnt) +
1426 ipath_snap_cntr(dd, crp->cr_errlpcrccnt) +
1427 ipath_snap_cntr(dd, crp->cr_badformatcnt) +
1428 dd->ipath_rxfc_unsupvl_errs;
1429 if (crp->cr_rxotherlocalphyerrcnt)
1430 cntrs->port_rcv_errors +=
1431 ipath_snap_cntr(dd, crp->cr_rxotherlocalphyerrcnt);
1432 if (crp->cr_rxvlerrcnt)
1433 cntrs->port_rcv_errors +=
1434 ipath_snap_cntr(dd, crp->cr_rxvlerrcnt);
1435 cntrs->port_rcv_remphys_errors =
1436 ipath_snap_cntr(dd, crp->cr_rcvebpcnt);
1437 cntrs->port_xmit_discards = ipath_snap_cntr(dd, crp->cr_unsupvlcnt);
1438 cntrs->port_xmit_data = ipath_snap_cntr(dd, crp->cr_wordsendcnt);
1439 cntrs->port_rcv_data = ipath_snap_cntr(dd, crp->cr_wordrcvcnt);
1440 cntrs->port_xmit_packets = ipath_snap_cntr(dd, crp->cr_pktsendcnt);
1441 cntrs->port_rcv_packets = ipath_snap_cntr(dd, crp->cr_pktrcvcnt);
1442 cntrs->local_link_integrity_errors =
1443 crp->cr_locallinkintegrityerrcnt ?
1444 ipath_snap_cntr(dd, crp->cr_locallinkintegrityerrcnt) :
1445 ((dd->ipath_flags & IPATH_GPIO_ERRINTRS) ?
1446 dd->ipath_lli_errs : dd->ipath_lli_errors);
1447 cntrs->excessive_buffer_overrun_errors =
1448 crp->cr_excessbufferovflcnt ?
1449 ipath_snap_cntr(dd, crp->cr_excessbufferovflcnt) :
1450 dd->ipath_overrun_thresh_errs;
1451 cntrs->vl15_dropped = crp->cr_vl15droppedpktcnt ?
1452 ipath_snap_cntr(dd, crp->cr_vl15droppedpktcnt) : 0;
1453
1454 ret = 0;
1455
1456bail:
1457 return ret;
1458}
1459
1460/**
1461 * ipath_ib_piobufavail - callback when a PIO buffer is available
1462 * @dev: the device pointer
1463 *
1464 * This is called from ipath_intr() at interrupt level when a PIO buffer is
1465 * available after ipath_verbs_send() returned an error that no buffers were
1466 * available. Return 1 if we consumed all the PIO buffers and we still have
1467 * QPs waiting for buffers (for now, just restart the send tasklet and
1468 * return zero).
1469 */
1470int ipath_ib_piobufavail(struct ipath_ibdev *dev)
1471{
1472 struct list_head *list;
1473 struct ipath_qp *qplist;
1474 struct ipath_qp *qp;
1475 unsigned long flags;
1476
1477 if (dev == NULL)
1478 goto bail;
1479
1480 list = &dev->piowait;
1481 qplist = NULL;
1482
1483 spin_lock_irqsave(&dev->pending_lock, flags);
1484 while (!list_empty(list)) {
1485 qp = list_entry(list->next, struct ipath_qp, piowait);
1486 list_del_init(&qp->piowait);
1487 qp->pio_next = qplist;
1488 qplist = qp;
1489 atomic_inc(&qp->refcount);
1490 }
1491 spin_unlock_irqrestore(&dev->pending_lock, flags);
1492
1493 while (qplist != NULL) {
1494 qp = qplist;
1495 qplist = qp->pio_next;
1496
1497 spin_lock_irqsave(&qp->s_lock, flags);
1498 if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)
1499 ipath_schedule_send(qp);
1500 spin_unlock_irqrestore(&qp->s_lock, flags);
1501
1502 /* Notify ipath_destroy_qp() if it is waiting. */
1503 if (atomic_dec_and_test(&qp->refcount))
1504 wake_up(&qp->wait);
1505 }
1506
1507bail:
1508 return 0;
1509}
1510
1511static int ipath_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
1512 struct ib_udata *uhw)
1513{
1514 struct ipath_ibdev *dev = to_idev(ibdev);
1515
1516 if (uhw->inlen || uhw->outlen)
1517 return -EINVAL;
1518
1519 memset(props, 0, sizeof(*props));
1520
1521 props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
1522 IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
1523 IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
1524 IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
1525 props->page_size_cap = PAGE_SIZE;
1526 props->vendor_id =
1527 IPATH_SRC_OUI_1 << 16 | IPATH_SRC_OUI_2 << 8 | IPATH_SRC_OUI_3;
1528 props->vendor_part_id = dev->dd->ipath_deviceid;
1529 props->hw_ver = dev->dd->ipath_pcirev;
1530
1531 props->sys_image_guid = dev->sys_image_guid;
1532
1533 props->max_mr_size = ~0ull;
1534 props->max_qp = ib_ipath_max_qps;
1535 props->max_qp_wr = ib_ipath_max_qp_wrs;
1536 props->max_sge = ib_ipath_max_sges;
1537 props->max_sge_rd = ib_ipath_max_sges;
1538 props->max_cq = ib_ipath_max_cqs;
1539 props->max_ah = ib_ipath_max_ahs;
1540 props->max_cqe = ib_ipath_max_cqes;
1541 props->max_mr = dev->lk_table.max;
1542 props->max_fmr = dev->lk_table.max;
1543 props->max_map_per_fmr = 32767;
1544 props->max_pd = ib_ipath_max_pds;
1545 props->max_qp_rd_atom = IPATH_MAX_RDMA_ATOMIC;
1546 props->max_qp_init_rd_atom = 255;
1547 /* props->max_res_rd_atom */
1548 props->max_srq = ib_ipath_max_srqs;
1549 props->max_srq_wr = ib_ipath_max_srq_wrs;
1550 props->max_srq_sge = ib_ipath_max_srq_sges;
1551 /* props->local_ca_ack_delay */
1552 props->atomic_cap = IB_ATOMIC_GLOB;
1553 props->max_pkeys = ipath_get_npkeys(dev->dd);
1554 props->max_mcast_grp = ib_ipath_max_mcast_grps;
1555 props->max_mcast_qp_attach = ib_ipath_max_mcast_qp_attached;
1556 props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
1557 props->max_mcast_grp;
1558
1559 return 0;
1560}
1561
1562const u8 ipath_cvt_physportstate[32] = {
1563 [INFINIPATH_IBCS_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
1564 [INFINIPATH_IBCS_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
1565 [INFINIPATH_IBCS_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
1566 [INFINIPATH_IBCS_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
1567 [INFINIPATH_IBCS_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
1568 [INFINIPATH_IBCS_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
1569 [INFINIPATH_IBCS_LT_STATE_CFGDEBOUNCE] =
1570 IB_PHYSPORTSTATE_CFG_TRAIN,
1571 [INFINIPATH_IBCS_LT_STATE_CFGRCVFCFG] =
1572 IB_PHYSPORTSTATE_CFG_TRAIN,
1573 [INFINIPATH_IBCS_LT_STATE_CFGWAITRMT] =
1574 IB_PHYSPORTSTATE_CFG_TRAIN,
1575 [INFINIPATH_IBCS_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_TRAIN,
1576 [INFINIPATH_IBCS_LT_STATE_RECOVERRETRAIN] =
1577 IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
1578 [INFINIPATH_IBCS_LT_STATE_RECOVERWAITRMT] =
1579 IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
1580 [INFINIPATH_IBCS_LT_STATE_RECOVERIDLE] =
1581 IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
1582 [0x10] = IB_PHYSPORTSTATE_CFG_TRAIN,
1583 [0x11] = IB_PHYSPORTSTATE_CFG_TRAIN,
1584 [0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
1585 [0x13] = IB_PHYSPORTSTATE_CFG_TRAIN,
1586 [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
1587 [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
1588 [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
1589 [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
1590};
1591
1592u32 ipath_get_cr_errpkey(struct ipath_devdata *dd)
1593{
1594 return ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey);
1595}
1596
1597static int ipath_query_port(struct ib_device *ibdev,
1598 u8 port, struct ib_port_attr *props)
1599{
1600 struct ipath_ibdev *dev = to_idev(ibdev);
1601 struct ipath_devdata *dd = dev->dd;
1602 enum ib_mtu mtu;
1603 u16 lid = dd->ipath_lid;
1604 u64 ibcstat;
1605
1606 memset(props, 0, sizeof(*props));
1607 props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE);
1608 props->lmc = dd->ipath_lmc;
1609 props->sm_lid = dev->sm_lid;
1610 props->sm_sl = dev->sm_sl;
1611 ibcstat = dd->ipath_lastibcstat;
1612 /* map LinkState to IB portinfo values. */
1613 props->state = ipath_ib_linkstate(dd, ibcstat) + 1;
1614
1615 /* See phys_state_show() */
1616 props->phys_state = /* MEA: assumes shift == 0 */
1617 ipath_cvt_physportstate[dd->ipath_lastibcstat &
1618 dd->ibcs_lts_mask];
1619 props->port_cap_flags = dev->port_cap_flags;
1620 props->gid_tbl_len = 1;
1621 props->max_msg_sz = 0x80000000;
1622 props->pkey_tbl_len = ipath_get_npkeys(dd);
1623 props->bad_pkey_cntr = ipath_get_cr_errpkey(dd) -
1624 dev->z_pkey_violations;
1625 props->qkey_viol_cntr = dev->qkey_violations;
1626 props->active_width = dd->ipath_link_width_active;
1627 /* See rate_show() */
1628 props->active_speed = dd->ipath_link_speed_active;
1629 props->max_vl_num = 1; /* VLCap = VL0 */
1630 props->init_type_reply = 0;
1631
1632 props->max_mtu = ipath_mtu4096 ? IB_MTU_4096 : IB_MTU_2048;
1633 switch (dd->ipath_ibmtu) {
1634 case 4096:
1635 mtu = IB_MTU_4096;
1636 break;
1637 case 2048:
1638 mtu = IB_MTU_2048;
1639 break;
1640 case 1024:
1641 mtu = IB_MTU_1024;
1642 break;
1643 case 512:
1644 mtu = IB_MTU_512;
1645 break;
1646 case 256:
1647 mtu = IB_MTU_256;
1648 break;
1649 default:
1650 mtu = IB_MTU_2048;
1651 }
1652 props->active_mtu = mtu;
1653 props->subnet_timeout = dev->subnet_timeout;
1654
1655 return 0;
1656}
1657
1658static int ipath_modify_device(struct ib_device *device,
1659 int device_modify_mask,
1660 struct ib_device_modify *device_modify)
1661{
1662 int ret;
1663
1664 if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
1665 IB_DEVICE_MODIFY_NODE_DESC)) {
1666 ret = -EOPNOTSUPP;
1667 goto bail;
1668 }
1669
1670 if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC)
1671 memcpy(device->node_desc, device_modify->node_desc, 64);
1672
1673 if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
1674 to_idev(device)->sys_image_guid =
1675 cpu_to_be64(device_modify->sys_image_guid);
1676
1677 ret = 0;
1678
1679bail:
1680 return ret;
1681}
1682
1683static int ipath_modify_port(struct ib_device *ibdev,
1684 u8 port, int port_modify_mask,
1685 struct ib_port_modify *props)
1686{
1687 struct ipath_ibdev *dev = to_idev(ibdev);
1688
1689 dev->port_cap_flags |= props->set_port_cap_mask;
1690 dev->port_cap_flags &= ~props->clr_port_cap_mask;
1691 if (port_modify_mask & IB_PORT_SHUTDOWN)
1692 ipath_set_linkstate(dev->dd, IPATH_IB_LINKDOWN);
1693 if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
1694 dev->qkey_violations = 0;
1695 return 0;
1696}
1697
1698static int ipath_query_gid(struct ib_device *ibdev, u8 port,
1699 int index, union ib_gid *gid)
1700{
1701 struct ipath_ibdev *dev = to_idev(ibdev);
1702 int ret;
1703
1704 if (index >= 1) {
1705 ret = -EINVAL;
1706 goto bail;
1707 }
1708 gid->global.subnet_prefix = dev->gid_prefix;
1709 gid->global.interface_id = dev->dd->ipath_guid;
1710
1711 ret = 0;
1712
1713bail:
1714 return ret;
1715}
1716
1717static struct ib_pd *ipath_alloc_pd(struct ib_device *ibdev,
1718 struct ib_ucontext *context,
1719 struct ib_udata *udata)
1720{
1721 struct ipath_ibdev *dev = to_idev(ibdev);
1722 struct ipath_pd *pd;
1723 struct ib_pd *ret;
1724
1725 /*
1726 * This is actually totally arbitrary. Some correctness tests
1727 * assume there's a maximum number of PDs that can be allocated.
1728	 * The hardware imposes no such limit, but the tests fail if we
1729	 * allow more allocations than the maximum we report, so enforce it here.
1730 */
1731
1732 pd = kmalloc(sizeof *pd, GFP_KERNEL);
1733 if (!pd) {
1734 ret = ERR_PTR(-ENOMEM);
1735 goto bail;
1736 }
1737
1738 spin_lock(&dev->n_pds_lock);
1739 if (dev->n_pds_allocated == ib_ipath_max_pds) {
1740 spin_unlock(&dev->n_pds_lock);
1741 kfree(pd);
1742 ret = ERR_PTR(-ENOMEM);
1743 goto bail;
1744 }
1745
1746 dev->n_pds_allocated++;
1747 spin_unlock(&dev->n_pds_lock);
1748
1749 /* ib_alloc_pd() will initialize pd->ibpd. */
1750 pd->user = udata != NULL;
1751
1752 ret = &pd->ibpd;
1753
1754bail:
1755 return ret;
1756}
1757
1758static int ipath_dealloc_pd(struct ib_pd *ibpd)
1759{
1760 struct ipath_pd *pd = to_ipd(ibpd);
1761 struct ipath_ibdev *dev = to_idev(ibpd->device);
1762
1763 spin_lock(&dev->n_pds_lock);
1764 dev->n_pds_allocated--;
1765 spin_unlock(&dev->n_pds_lock);
1766
1767 kfree(pd);
1768
1769 return 0;
1770}
1771
1772/**
1773 * ipath_create_ah - create an address handle
1774 * @pd: the protection domain
1775 * @ah_attr: the attributes of the AH
1776 *
1777 * This may be called from interrupt context.
1778 */
1779static struct ib_ah *ipath_create_ah(struct ib_pd *pd,
1780 struct ib_ah_attr *ah_attr)
1781{
1782 struct ipath_ah *ah;
1783 struct ib_ah *ret;
1784 struct ipath_ibdev *dev = to_idev(pd->device);
1785 unsigned long flags;
1786
1787 /* A multicast address requires a GRH (see ch. 8.4.1). */
1788 if (ah_attr->dlid >= IPATH_MULTICAST_LID_BASE &&
1789 ah_attr->dlid != IPATH_PERMISSIVE_LID &&
1790 !(ah_attr->ah_flags & IB_AH_GRH)) {
1791 ret = ERR_PTR(-EINVAL);
1792 goto bail;
1793 }
1794
1795 if (ah_attr->dlid == 0) {
1796 ret = ERR_PTR(-EINVAL);
1797 goto bail;
1798 }
1799
1800 if (ah_attr->port_num < 1 ||
1801 ah_attr->port_num > pd->device->phys_port_cnt) {
1802 ret = ERR_PTR(-EINVAL);
1803 goto bail;
1804 }
1805
1806 ah = kmalloc(sizeof *ah, GFP_ATOMIC);
1807 if (!ah) {
1808 ret = ERR_PTR(-ENOMEM);
1809 goto bail;
1810 }
1811
1812 spin_lock_irqsave(&dev->n_ahs_lock, flags);
1813 if (dev->n_ahs_allocated == ib_ipath_max_ahs) {
1814 spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
1815 kfree(ah);
1816 ret = ERR_PTR(-ENOMEM);
1817 goto bail;
1818 }
1819
1820 dev->n_ahs_allocated++;
1821 spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
1822
1823 /* ib_create_ah() will initialize ah->ibah. */
1824 ah->attr = *ah_attr;
1825 ah->attr.static_rate = ipath_ib_rate_to_mult(ah_attr->static_rate);
1826
1827 ret = &ah->ibah;
1828
1829bail:
1830 return ret;
1831}
1832
1833/**
1834 * ipath_destroy_ah - destroy an address handle
1835 * @ibah: the AH to destroy
1836 *
1837 * This may be called from interrupt context.
1838 */
1839static int ipath_destroy_ah(struct ib_ah *ibah)
1840{
1841 struct ipath_ibdev *dev = to_idev(ibah->device);
1842 struct ipath_ah *ah = to_iah(ibah);
1843 unsigned long flags;
1844
1845 spin_lock_irqsave(&dev->n_ahs_lock, flags);
1846 dev->n_ahs_allocated--;
1847 spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
1848
1849 kfree(ah);
1850
1851 return 0;
1852}
1853
1854static int ipath_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
1855{
1856 struct ipath_ah *ah = to_iah(ibah);
1857
1858 *ah_attr = ah->attr;
1859 ah_attr->static_rate = ipath_mult_to_ib_rate(ah->attr.static_rate);
1860
1861 return 0;
1862}
1863
1864/**
1865 * ipath_get_npkeys - return the size of the PKEY table for port 0
1866 * @dd: the infinipath device
1867 */
1868unsigned ipath_get_npkeys(struct ipath_devdata *dd)
1869{
1870 return ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys);
1871}
1872
1873/**
1874 * ipath_get_pkey - return the indexed PKEY from the port PKEY table
1875 * @dd: the infinipath device
1876 * @index: the PKEY index
1877 */
1878unsigned ipath_get_pkey(struct ipath_devdata *dd, unsigned index)
1879{
1880 unsigned ret;
1881
1882 /* always a kernel port, no locking needed */
1883 if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys))
1884 ret = 0;
1885 else
1886 ret = dd->ipath_pd[0]->port_pkeys[index];
1887
1888 return ret;
1889}
1890
1891static int ipath_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
1892 u16 *pkey)
1893{
1894 struct ipath_ibdev *dev = to_idev(ibdev);
1895 int ret;
1896
1897 if (index >= ipath_get_npkeys(dev->dd)) {
1898 ret = -EINVAL;
1899 goto bail;
1900 }
1901
1902 *pkey = ipath_get_pkey(dev->dd, index);
1903 ret = 0;
1904
1905bail:
1906 return ret;
1907}
1908
1909/**
1910 * ipath_alloc_ucontext - allocate a ucontext
1911 * @ibdev: the infiniband device
1912 * @udata: not used by the InfiniPath driver
1913 */
1914
1915static struct ib_ucontext *ipath_alloc_ucontext(struct ib_device *ibdev,
1916 struct ib_udata *udata)
1917{
1918 struct ipath_ucontext *context;
1919 struct ib_ucontext *ret;
1920
1921 context = kmalloc(sizeof *context, GFP_KERNEL);
1922 if (!context) {
1923 ret = ERR_PTR(-ENOMEM);
1924 goto bail;
1925 }
1926
1927 ret = &context->ibucontext;
1928
1929bail:
1930 return ret;
1931}
1932
1933static int ipath_dealloc_ucontext(struct ib_ucontext *context)
1934{
1935 kfree(to_iucontext(context));
1936 return 0;
1937}
1938
1939static int ipath_verbs_register_sysfs(struct ib_device *dev);
1940
1941static void __verbs_timer(unsigned long arg)
1942{
1943 struct ipath_devdata *dd = (struct ipath_devdata *) arg;
1944
1945 /* Handle verbs layer timeouts. */
1946 ipath_ib_timer(dd->verbs_dev);
1947
1948 mod_timer(&dd->verbs_timer, jiffies + 1);
1949}
1950
1951static int enable_timer(struct ipath_devdata *dd)
1952{
1953 /*
1954 * Early chips had a design flaw where the chip and kernel idea
1955 * of the tail register don't always agree, and therefore we won't
1956 * get an interrupt on the next packet received.
1957 * If the board supports per packet receive interrupts, use it.
1958 * Otherwise, the timer function periodically checks for packets
1959 * to cover this case.
1960 * Either way, the timer is needed for verbs layer related
1961 * processing.
1962 */
1963 if (dd->ipath_flags & IPATH_GPIO_INTR) {
1964 ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect,
1965 0x2074076542310ULL);
1966 /* Enable GPIO bit 2 interrupt */
1967 dd->ipath_gpio_mask |= (u64) (1 << IPATH_GPIO_PORT0_BIT);
1968 ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
1969 dd->ipath_gpio_mask);
1970 }
1971
1972 setup_timer(&dd->verbs_timer, __verbs_timer, (unsigned long)dd);
1973
1974 dd->verbs_timer.expires = jiffies + 1;
1975 add_timer(&dd->verbs_timer);
1976
1977 return 0;
1978}
1979
1980static int disable_timer(struct ipath_devdata *dd)
1981{
1982	/* Undo the GPIO interrupt setup done by enable_timer() */
1983 if (dd->ipath_flags & IPATH_GPIO_INTR) {
1984 /* Disable GPIO bit 2 interrupt */
1985 dd->ipath_gpio_mask &= ~((u64) (1 << IPATH_GPIO_PORT0_BIT));
1986 ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
1987 dd->ipath_gpio_mask);
1988 /*
1989 * We might want to undo changes to debugportselect,
1990 * but how?
1991 */
1992 }
1993
1994 del_timer_sync(&dd->verbs_timer);
1995
1996 return 0;
1997}
1998
1999static int ipath_port_immutable(struct ib_device *ibdev, u8 port_num,
2000 struct ib_port_immutable *immutable)
2001{
2002 struct ib_port_attr attr;
2003 int err;
2004
2005 err = ipath_query_port(ibdev, port_num, &attr);
2006 if (err)
2007 return err;
2008
2009 immutable->pkey_tbl_len = attr.pkey_tbl_len;
2010 immutable->gid_tbl_len = attr.gid_tbl_len;
2011 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
2012 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
2013
2014 return 0;
2015}
2016
2017/**
2018 * ipath_register_ib_device - register our device with the infiniband core
2019 * @dd: the device data structure
2020 * Returns 0 on success or a negative errno; the ipath_ibdev is stored in dd->verbs_dev.
2021 */
2022int ipath_register_ib_device(struct ipath_devdata *dd)
2023{
2024 struct ipath_verbs_counters cntrs;
2025 struct ipath_ibdev *idev;
2026 struct ib_device *dev;
2027 struct ipath_verbs_txreq *tx;
2028 unsigned i;
2029 int ret;
2030
2031 idev = (struct ipath_ibdev *)ib_alloc_device(sizeof *idev);
2032 if (idev == NULL) {
2033 ret = -ENOMEM;
2034 goto bail;
2035 }
2036
2037 dev = &idev->ibdev;
2038
2039 if (dd->ipath_sdma_descq_cnt) {
2040 tx = kmalloc_array(dd->ipath_sdma_descq_cnt, sizeof *tx,
2041 GFP_KERNEL);
2042 if (tx == NULL) {
2043 ret = -ENOMEM;
2044 goto err_tx;
2045 }
2046 } else
2047 tx = NULL;
2048 idev->txreq_bufs = tx;
2049
2050 /* Only need to initialize non-zero fields. */
2051 spin_lock_init(&idev->n_pds_lock);
2052 spin_lock_init(&idev->n_ahs_lock);
2053 spin_lock_init(&idev->n_cqs_lock);
2054 spin_lock_init(&idev->n_qps_lock);
2055 spin_lock_init(&idev->n_srqs_lock);
2056 spin_lock_init(&idev->n_mcast_grps_lock);
2057
2058 spin_lock_init(&idev->qp_table.lock);
2059 spin_lock_init(&idev->lk_table.lock);
2060 idev->sm_lid = be16_to_cpu(IB_LID_PERMISSIVE);
2061 /* Set the prefix to the default value (see ch. 4.1.1) */
2062 idev->gid_prefix = cpu_to_be64(0xfe80000000000000ULL);
2063
2064 ret = ipath_init_qp_table(idev, ib_ipath_qp_table_size);
2065 if (ret)
2066 goto err_qp;
2067
2068 /*
2069 * The top ib_ipath_lkey_table_size bits are used to index the
2070 * table. The lower 8 bits can be owned by the user (copied from
2071 * the LKEY). The remaining bits act as a generation number or tag.
2072 */
2073 idev->lk_table.max = 1 << ib_ipath_lkey_table_size;
2074 idev->lk_table.table = kcalloc(idev->lk_table.max,
2075 sizeof(*idev->lk_table.table),
2076 GFP_KERNEL);
2077 if (idev->lk_table.table == NULL) {
2078 ret = -ENOMEM;
2079 goto err_lk;
2080 }
2081 INIT_LIST_HEAD(&idev->pending_mmaps);
2082 spin_lock_init(&idev->pending_lock);
2083 idev->mmap_offset = PAGE_SIZE;
2084 spin_lock_init(&idev->mmap_offset_lock);
2085 INIT_LIST_HEAD(&idev->pending[0]);
2086 INIT_LIST_HEAD(&idev->pending[1]);
2087 INIT_LIST_HEAD(&idev->pending[2]);
2088 INIT_LIST_HEAD(&idev->piowait);
2089 INIT_LIST_HEAD(&idev->rnrwait);
2090 INIT_LIST_HEAD(&idev->txreq_free);
2091 idev->pending_index = 0;
2092 idev->port_cap_flags =
2093 IB_PORT_SYS_IMAGE_GUID_SUP | IB_PORT_CLIENT_REG_SUP;
2094 if (dd->ipath_flags & IPATH_HAS_LINK_LATENCY)
2095 idev->port_cap_flags |= IB_PORT_LINK_LATENCY_SUP;
2096 idev->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
2097 idev->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
2098 idev->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
2099 idev->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
2100 idev->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
2101
2102 /* Snapshot current HW counters to "clear" them. */
2103 ipath_get_counters(dd, &cntrs);
2104 idev->z_symbol_error_counter = cntrs.symbol_error_counter;
2105 idev->z_link_error_recovery_counter =
2106 cntrs.link_error_recovery_counter;
2107 idev->z_link_downed_counter = cntrs.link_downed_counter;
2108 idev->z_port_rcv_errors = cntrs.port_rcv_errors;
2109 idev->z_port_rcv_remphys_errors =
2110 cntrs.port_rcv_remphys_errors;
2111 idev->z_port_xmit_discards = cntrs.port_xmit_discards;
2112 idev->z_port_xmit_data = cntrs.port_xmit_data;
2113 idev->z_port_rcv_data = cntrs.port_rcv_data;
2114 idev->z_port_xmit_packets = cntrs.port_xmit_packets;
2115 idev->z_port_rcv_packets = cntrs.port_rcv_packets;
2116 idev->z_local_link_integrity_errors =
2117 cntrs.local_link_integrity_errors;
2118 idev->z_excessive_buffer_overrun_errors =
2119 cntrs.excessive_buffer_overrun_errors;
2120 idev->z_vl15_dropped = cntrs.vl15_dropped;
2121
2122 for (i = 0; i < dd->ipath_sdma_descq_cnt; i++, tx++)
2123 list_add(&tx->txreq.list, &idev->txreq_free);
2124
2125 /*
2126 * The system image GUID is supposed to be the same for all
2127 * IB HCAs in a single system but since there can be other
2128 * device types in the system, we can't be sure this is unique.
2129 */
2130 if (!sys_image_guid)
2131 sys_image_guid = dd->ipath_guid;
2132 idev->sys_image_guid = sys_image_guid;
2133 idev->ib_unit = dd->ipath_unit;
2134 idev->dd = dd;
2135
2136 strlcpy(dev->name, "ipath%d", IB_DEVICE_NAME_MAX);
2137 dev->owner = THIS_MODULE;
2138 dev->node_guid = dd->ipath_guid;
2139 dev->uverbs_abi_ver = IPATH_UVERBS_ABI_VERSION;
2140 dev->uverbs_cmd_mask =
2141 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
2142 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
2143 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
2144 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
2145 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
2146 (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
2147 (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
2148 (1ull << IB_USER_VERBS_CMD_QUERY_AH) |
2149 (1ull << IB_USER_VERBS_CMD_REG_MR) |
2150 (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
2151 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
2152 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
2153 (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
2154 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
2155 (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
2156 (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
2157 (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
2158 (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
2159 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
2160 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
2161 (1ull << IB_USER_VERBS_CMD_POST_SEND) |
2162 (1ull << IB_USER_VERBS_CMD_POST_RECV) |
2163 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
2164 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
2165 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
2166 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
2167 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
2168 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
2169 (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
2170 dev->node_type = RDMA_NODE_IB_CA;
2171 dev->phys_port_cnt = 1;
2172 dev->num_comp_vectors = 1;
2173 dev->dma_device = &dd->pcidev->dev;
2174 dev->query_device = ipath_query_device;
2175 dev->modify_device = ipath_modify_device;
2176 dev->query_port = ipath_query_port;
2177 dev->modify_port = ipath_modify_port;
2178 dev->query_pkey = ipath_query_pkey;
2179 dev->query_gid = ipath_query_gid;
2180 dev->alloc_ucontext = ipath_alloc_ucontext;
2181 dev->dealloc_ucontext = ipath_dealloc_ucontext;
2182 dev->alloc_pd = ipath_alloc_pd;
2183 dev->dealloc_pd = ipath_dealloc_pd;
2184 dev->create_ah = ipath_create_ah;
2185 dev->destroy_ah = ipath_destroy_ah;
2186 dev->query_ah = ipath_query_ah;
2187 dev->create_srq = ipath_create_srq;
2188 dev->modify_srq = ipath_modify_srq;
2189 dev->query_srq = ipath_query_srq;
2190 dev->destroy_srq = ipath_destroy_srq;
2191 dev->create_qp = ipath_create_qp;
2192 dev->modify_qp = ipath_modify_qp;
2193 dev->query_qp = ipath_query_qp;
2194 dev->destroy_qp = ipath_destroy_qp;
2195 dev->post_send = ipath_post_send;
2196 dev->post_recv = ipath_post_receive;
2197 dev->post_srq_recv = ipath_post_srq_receive;
2198 dev->create_cq = ipath_create_cq;
2199 dev->destroy_cq = ipath_destroy_cq;
2200 dev->resize_cq = ipath_resize_cq;
2201 dev->poll_cq = ipath_poll_cq;
2202 dev->req_notify_cq = ipath_req_notify_cq;
2203 dev->get_dma_mr = ipath_get_dma_mr;
2204 dev->reg_user_mr = ipath_reg_user_mr;
2205 dev->dereg_mr = ipath_dereg_mr;
2206 dev->alloc_fmr = ipath_alloc_fmr;
2207 dev->map_phys_fmr = ipath_map_phys_fmr;
2208 dev->unmap_fmr = ipath_unmap_fmr;
2209 dev->dealloc_fmr = ipath_dealloc_fmr;
2210 dev->attach_mcast = ipath_multicast_attach;
2211 dev->detach_mcast = ipath_multicast_detach;
2212 dev->process_mad = ipath_process_mad;
2213 dev->mmap = ipath_mmap;
2214 dev->dma_ops = &ipath_dma_mapping_ops;
2215 dev->get_port_immutable = ipath_port_immutable;
2216
2217 snprintf(dev->node_desc, sizeof(dev->node_desc),
2218 IPATH_IDSTR " %s", init_utsname()->nodename);
2219
2220 ret = ib_register_device(dev, NULL);
2221 if (ret)
2222 goto err_reg;
2223
2224 ret = ipath_verbs_register_sysfs(dev);
2225 if (ret)
2226 goto err_class;
2227
2228 enable_timer(dd);
2229
2230 goto bail;
2231
2232err_class:
2233 ib_unregister_device(dev);
2234err_reg:
2235 kfree(idev->lk_table.table);
2236err_lk:
2237 kfree(idev->qp_table.table);
2238err_qp:
2239 kfree(idev->txreq_bufs);
2240err_tx:
2241 ib_dealloc_device(dev);
2242 ipath_dev_err(dd, "cannot register verbs: %d!\n", -ret);
2243 idev = NULL;
2244
2245bail:
2246 dd->verbs_dev = idev;
2247 return ret;
2248}
2249
2250void ipath_unregister_ib_device(struct ipath_ibdev *dev)
2251{
2252 struct ib_device *ibdev = &dev->ibdev;
2253 u32 qps_inuse;
2254
2255 ib_unregister_device(ibdev);
2256
2257 disable_timer(dev->dd);
2258
2259 if (!list_empty(&dev->pending[0]) ||
2260 !list_empty(&dev->pending[1]) ||
2261 !list_empty(&dev->pending[2]))
2262 ipath_dev_err(dev->dd, "pending list not empty!\n");
2263 if (!list_empty(&dev->piowait))
2264 ipath_dev_err(dev->dd, "piowait list not empty!\n");
2265 if (!list_empty(&dev->rnrwait))
2266 ipath_dev_err(dev->dd, "rnrwait list not empty!\n");
2267 if (!ipath_mcast_tree_empty())
2268 ipath_dev_err(dev->dd, "multicast table memory leak!\n");
2269 /*
2270 * Note that ipath_unregister_ib_device() can be called before all
2271 * the QPs are destroyed!
2272 */
2273 qps_inuse = ipath_free_all_qps(&dev->qp_table);
2274 if (qps_inuse)
2275 ipath_dev_err(dev->dd, "QP memory leak! %u still in use\n",
2276 qps_inuse);
2277 kfree(dev->qp_table.table);
2278 kfree(dev->lk_table.table);
2279 kfree(dev->txreq_bufs);
2280 ib_dealloc_device(ibdev);
2281}
2282
2283static ssize_t show_rev(struct device *device, struct device_attribute *attr,
2284 char *buf)
2285{
2286 struct ipath_ibdev *dev =
2287 container_of(device, struct ipath_ibdev, ibdev.dev);
2288
2289 return sprintf(buf, "%x\n", dev->dd->ipath_pcirev);
2290}
2291
2292static ssize_t show_hca(struct device *device, struct device_attribute *attr,
2293 char *buf)
2294{
2295 struct ipath_ibdev *dev =
2296 container_of(device, struct ipath_ibdev, ibdev.dev);
2297 int ret;
2298
2299 ret = dev->dd->ipath_f_get_boardname(dev->dd, buf, 128);
2300 if (ret < 0)
2301 goto bail;
2302 strcat(buf, "\n");
2303 ret = strlen(buf);
2304
2305bail:
2306 return ret;
2307}
2308
2309static ssize_t show_stats(struct device *device, struct device_attribute *attr,
2310 char *buf)
2311{
2312 struct ipath_ibdev *dev =
2313 container_of(device, struct ipath_ibdev, ibdev.dev);
2314 int i;
2315 int len;
2316
2317 len = sprintf(buf,
2318 "RC resends %d\n"
2319 "RC no QACK %d\n"
2320 "RC ACKs %d\n"
2321 "RC SEQ NAKs %d\n"
2322 "RC RDMA seq %d\n"
2323 "RC RNR NAKs %d\n"
2324 "RC OTH NAKs %d\n"
2325 "RC timeouts %d\n"
2326 "RC RDMA dup %d\n"
2327 "piobuf wait %d\n"
2328 "unaligned %d\n"
2329 "PKT drops %d\n"
2330 "WQE errs %d\n",
2331 dev->n_rc_resends, dev->n_rc_qacks, dev->n_rc_acks,
2332 dev->n_seq_naks, dev->n_rdma_seq, dev->n_rnr_naks,
2333 dev->n_other_naks, dev->n_timeouts,
2334 dev->n_rdma_dup_busy, dev->n_piowait, dev->n_unaligned,
2335 dev->n_pkt_drops, dev->n_wqe_errs);
2336 for (i = 0; i < ARRAY_SIZE(dev->opstats); i++) {
2337 const struct ipath_opcode_stats *si = &dev->opstats[i];
2338
2339 if (!si->n_packets && !si->n_bytes)
2340 continue;
2341 len += sprintf(buf + len, "%02x %llu/%llu\n", i,
2342 (unsigned long long) si->n_packets,
2343 (unsigned long long) si->n_bytes);
2344 }
2345 return len;
2346}
2347
2348static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
2349static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
2350static DEVICE_ATTR(board_id, S_IRUGO, show_hca, NULL);
2351static DEVICE_ATTR(stats, S_IRUGO, show_stats, NULL);
2352
2353static struct device_attribute *ipath_class_attributes[] = {
2354 &dev_attr_hw_rev,
2355 &dev_attr_hca_type,
2356 &dev_attr_board_id,
2357 &dev_attr_stats
2358};
2359
2360static int ipath_verbs_register_sysfs(struct ib_device *dev)
2361{
2362 int i;
2363 int ret;
2364
2365 for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i) {
2366 ret = device_create_file(&dev->dev,
2367 ipath_class_attributes[i]);
2368 if (ret)
2369 goto bail;
2370 }
2371 return 0;
2372bail:
2373 for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i)
2374 device_remove_file(&dev->dev, ipath_class_attributes[i]);
2375 return ret;
2376}
diff --git a/drivers/staging/rdma/ipath/ipath_verbs.h b/drivers/staging/rdma/ipath/ipath_verbs.h
deleted file mode 100644
index 6c70a89667a9..000000000000
--- a/drivers/staging/rdma/ipath/ipath_verbs.h
+++ /dev/null
@@ -1,941 +0,0 @@
1/*
2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#ifndef IPATH_VERBS_H
35#define IPATH_VERBS_H
36
37#include <linux/types.h>
38#include <linux/spinlock.h>
39#include <linux/kernel.h>
40#include <linux/interrupt.h>
41#include <linux/kref.h>
42#include <rdma/ib_pack.h>
43#include <rdma/ib_user_verbs.h>
44
45#include "ipath_kernel.h"
46
47#define IPATH_MAX_RDMA_ATOMIC 4
48
49#define QPN_MAX (1 << 24)
50#define QPNMAP_ENTRIES (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
51
52/*
53 * Increment this value if any changes that break userspace ABI
54 * compatibility are made.
55 */
56#define IPATH_UVERBS_ABI_VERSION 2
57
58/*
59 * Define an ib_cq_notify value that is not valid so we know when CQ
60 * notifications are armed.
61 */
62#define IB_CQ_NONE (IB_CQ_NEXT_COMP + 1)
63
64/* AETH NAK opcode values */
65#define IB_RNR_NAK 0x20
66#define IB_NAK_PSN_ERROR 0x60
67#define IB_NAK_INVALID_REQUEST 0x61
68#define IB_NAK_REMOTE_ACCESS_ERROR 0x62
69#define IB_NAK_REMOTE_OPERATIONAL_ERROR 0x63
70#define IB_NAK_INVALID_RD_REQUEST 0x64
71
72/* Flags for checking QP state (see ib_ipath_state_ops[]) */
73#define IPATH_POST_SEND_OK 0x01
74#define IPATH_POST_RECV_OK 0x02
75#define IPATH_PROCESS_RECV_OK 0x04
76#define IPATH_PROCESS_SEND_OK 0x08
77#define IPATH_PROCESS_NEXT_SEND_OK 0x10
78#define IPATH_FLUSH_SEND 0x20
79#define IPATH_FLUSH_RECV 0x40
80#define IPATH_PROCESS_OR_FLUSH_SEND \
81 (IPATH_PROCESS_SEND_OK | IPATH_FLUSH_SEND)
82
83/* IB Performance Manager status values */
84#define IB_PMA_SAMPLE_STATUS_DONE 0x00
85#define IB_PMA_SAMPLE_STATUS_STARTED 0x01
86#define IB_PMA_SAMPLE_STATUS_RUNNING 0x02
87
88/* Mandatory IB performance counter select values. */
89#define IB_PMA_PORT_XMIT_DATA cpu_to_be16(0x0001)
90#define IB_PMA_PORT_RCV_DATA cpu_to_be16(0x0002)
91#define IB_PMA_PORT_XMIT_PKTS cpu_to_be16(0x0003)
92#define IB_PMA_PORT_RCV_PKTS cpu_to_be16(0x0004)
93#define IB_PMA_PORT_XMIT_WAIT cpu_to_be16(0x0005)
94
95struct ib_reth {
96 __be64 vaddr;
97 __be32 rkey;
98 __be32 length;
99} __attribute__ ((packed));
100
101struct ib_atomic_eth {
102 __be32 vaddr[2]; /* unaligned so access as 2 32-bit words */
103 __be32 rkey;
104 __be64 swap_data;
105 __be64 compare_data;
106} __attribute__ ((packed));
107
108struct ipath_other_headers {
109 __be32 bth[3];
110 union {
111 struct {
112 __be32 deth[2];
113 __be32 imm_data;
114 } ud;
115 struct {
116 struct ib_reth reth;
117 __be32 imm_data;
118 } rc;
119 struct {
120 __be32 aeth;
121 __be32 atomic_ack_eth[2];
122 } at;
123 __be32 imm_data;
124 __be32 aeth;
125 struct ib_atomic_eth atomic_eth;
126 } u;
127} __attribute__ ((packed));
128
129/*
130 * Note that UD packets with a GRH header are 8+40+12+8 = 68 bytes
131 * long (72 w/ imm_data). Only the first 56 bytes of the IB header
132 * will be in the eager header buffer. The remaining 12 or 16 bytes
133 * are in the data buffer.
134 */
135struct ipath_ib_header {
136 __be16 lrh[4];
137 union {
138 struct {
139 struct ib_grh grh;
140 struct ipath_other_headers oth;
141 } l;
142 struct ipath_other_headers oth;
143 } u;
144} __attribute__ ((packed));
145
146struct ipath_pio_header {
147 __le32 pbc[2];
148 struct ipath_ib_header hdr;
149} __attribute__ ((packed));
150
151/*
152 * There is one struct ipath_mcast for each multicast GID.
153 * All attached QPs are then stored as a list of
154 * struct ipath_mcast_qp.
155 */
156struct ipath_mcast_qp {
157 struct list_head list;
158 struct ipath_qp *qp;
159};
160
161struct ipath_mcast {
162 struct rb_node rb_node;
163 union ib_gid mgid;
164 struct list_head qp_list;
165 wait_queue_head_t wait;
166 atomic_t refcount;
167 int n_attached;
168};
169
170/* Protection domain */
171struct ipath_pd {
172 struct ib_pd ibpd;
173 int user; /* non-zero if created from user space */
174};
175
176/* Address Handle */
177struct ipath_ah {
178 struct ib_ah ibah;
179 struct ib_ah_attr attr;
180};
181
182/*
183 * This structure is used by ipath_mmap() to validate an offset
184 * when an mmap() request is made. The vm_area_struct then uses
185 * this as its vm_private_data.
186 */
187struct ipath_mmap_info {
188 struct list_head pending_mmaps;
189 struct ib_ucontext *context;
190 void *obj;
191 __u64 offset;
192 struct kref ref;
193 unsigned size;
194};
195
196/*
197 * This structure is used to contain the head pointer, tail pointer,
198 * and completion queue entries as a single memory allocation so
199 * it can be mmap'ed into user space.
200 */
201struct ipath_cq_wc {
202 u32 head; /* index of next entry to fill */
203 u32 tail; /* index of next ib_poll_cq() entry */
204 union {
205 /* these are actually size ibcq.cqe + 1 */
206 struct ib_uverbs_wc uqueue[0];
207 struct ib_wc kqueue[0];
208 };
209};
210
211/*
212 * The completion queue structure.
213 */
214struct ipath_cq {
215 struct ib_cq ibcq;
216 struct tasklet_struct comptask;
217 spinlock_t lock;
218 u8 notify;
219 u8 triggered;
220 struct ipath_cq_wc *queue;
221 struct ipath_mmap_info *ip;
222};
223
224/*
225 * A segment is a linear region of low physical memory.
226 * XXX Maybe we should use phys addr here and kmap()/kunmap().
227 * Used by the verbs layer.
228 */
229struct ipath_seg {
230 void *vaddr;
231 size_t length;
232};
233
234/* The number of ipath_segs that fit in a page. */
235#define IPATH_SEGSZ (PAGE_SIZE / sizeof (struct ipath_seg))
236
237struct ipath_segarray {
238 struct ipath_seg segs[IPATH_SEGSZ];
239};
240
241struct ipath_mregion {
242 struct ib_pd *pd; /* shares refcnt of ibmr.pd */
243 u64 user_base; /* User's address for this region */
244 u64 iova; /* IB start address of this region */
245 size_t length;
246 u32 lkey;
247 u32 offset; /* offset (bytes) to start of region */
248 int access_flags;
249 u32 max_segs; /* number of ipath_segs in all the arrays */
250 u32 mapsz; /* size of the map array */
251 struct ipath_segarray *map[0]; /* the segments */
252};
253
254/*
255 * These keep track of the copy progress within a memory region.
256 * Used by the verbs layer.
257 */
258struct ipath_sge {
259 struct ipath_mregion *mr;
260 void *vaddr; /* kernel virtual address of segment */
261 u32 sge_length; /* length of the SGE */
262 u32 length; /* remaining length of the segment */
263 u16 m; /* current index: mr->map[m] */
264 u16 n; /* current index: mr->map[m]->segs[n] */
265};
266
267/* Memory region */
268struct ipath_mr {
269 struct ib_mr ibmr;
270 struct ib_umem *umem;
271 struct ipath_mregion mr; /* must be last */
272};
273
274/*
275 * Send work request queue entry.
276 * The size of the sg_list is determined when the QP is created and stored
277 * in qp->s_max_sge.
278 */
279struct ipath_swqe {
280 union {
281 struct ib_send_wr wr; /* don't use wr.sg_list */
282 struct ib_ud_wr ud_wr;
283 struct ib_rdma_wr rdma_wr;
284 struct ib_atomic_wr atomic_wr;
285 };
286
287 u32 psn; /* first packet sequence number */
288 u32 lpsn; /* last packet sequence number */
289 u32 ssn; /* send sequence number */
290 u32 length; /* total length of data in sg_list */
291 struct ipath_sge sg_list[0];
292};
293
294/*
295 * Receive work request queue entry.
296 * The size of the sg_list is determined when the QP (or SRQ) is created
297 * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
298 */
299struct ipath_rwqe {
300 u64 wr_id;
301 u8 num_sge;
302 struct ib_sge sg_list[0];
303};
304
305/*
306 * This structure is used to contain the head pointer, tail pointer,
307 * and receive work queue entries as a single memory allocation so
308 * it can be mmap'ed into user space.
309 * Note that the wq array elements are variable size so you can't
310 * just index into the array to get the N'th element;
311 * use get_rwqe_ptr() instead.
312 */
313struct ipath_rwq {
314 u32 head; /* new work requests posted to the head */
315 u32 tail; /* receives pull requests from here. */
316 struct ipath_rwqe wq[0];
317};
318
319struct ipath_rq {
320 struct ipath_rwq *wq;
321 spinlock_t lock;
322 u32 size; /* size of RWQE array */
323 u8 max_sge;
324};
325
326struct ipath_srq {
327 struct ib_srq ibsrq;
328 struct ipath_rq rq;
329 struct ipath_mmap_info *ip;
330 /* send signal when number of RWQEs < limit */
331 u32 limit;
332};
333
334struct ipath_sge_state {
335 struct ipath_sge *sg_list; /* next SGE to be used if any */
336 struct ipath_sge sge; /* progress state for the current SGE */
337 u8 num_sge;
338 u8 static_rate;
339};
340
341/*
342 * This structure holds the information that the send tasklet needs
343 * to send a RDMA read response or atomic operation.
344 */
345struct ipath_ack_entry {
346 u8 opcode;
347 u8 sent;
348 u32 psn;
349 union {
350 struct ipath_sge_state rdma_sge;
351 u64 atomic_data;
352 };
353};
354
355/*
356 * Variables prefixed with s_ are for the requester (sender).
357 * Variables prefixed with r_ are for the responder (receiver).
358 * Variables prefixed with ack_ are for responder replies.
359 *
360 * Common variables are protected by both r_rq.lock and s_lock, taken in
361 * that order, which only happens in modify_qp() or when changing the QP 'state'.
362 */
363struct ipath_qp {
364 struct ib_qp ibqp;
365 struct ipath_qp *next; /* link list for QPN hash table */
366 struct ipath_qp *timer_next; /* link list for ipath_ib_timer() */
367 struct ipath_qp *pio_next; /* link for ipath_ib_piobufavail() */
368 struct list_head piowait; /* link for wait PIO buf */
369 struct list_head timerwait; /* link for waiting for timeouts */
370 struct ib_ah_attr remote_ah_attr;
371 struct ipath_ib_header s_hdr; /* next packet header to send */
372 atomic_t refcount;
373 wait_queue_head_t wait;
374 wait_queue_head_t wait_dma;
375 struct tasklet_struct s_task;
376 struct ipath_mmap_info *ip;
377 struct ipath_sge_state *s_cur_sge;
378 struct ipath_verbs_txreq *s_tx;
379 struct ipath_sge_state s_sge; /* current send request data */
380 struct ipath_ack_entry s_ack_queue[IPATH_MAX_RDMA_ATOMIC + 1];
381 struct ipath_sge_state s_ack_rdma_sge;
382 struct ipath_sge_state s_rdma_read_sge;
383 struct ipath_sge_state r_sge; /* current receive data */
384 spinlock_t s_lock;
385 atomic_t s_dma_busy;
386 u16 s_pkt_delay;
387 u16 s_hdrwords; /* size of s_hdr in 32 bit words */
388 u32 s_cur_size; /* size of send packet in bytes */
389 u32 s_len; /* total length of s_sge */
390 u32 s_rdma_read_len; /* total length of s_rdma_read_sge */
391 u32 s_next_psn; /* PSN for next request */
392 u32 s_last_psn; /* last response PSN processed */
393 u32 s_psn; /* current packet sequence number */
394 u32 s_ack_rdma_psn; /* PSN for sending RDMA read responses */
395 u32 s_ack_psn; /* PSN for acking sends and RDMA writes */
396 u32 s_rnr_timeout; /* number of milliseconds for RNR timeout */
397 u32 r_ack_psn; /* PSN for next ACK or atomic ACK */
398 u64 r_wr_id; /* ID for current receive WQE */
399 unsigned long r_aflags;
400 u32 r_len; /* total length of r_sge */
401 u32 r_rcv_len; /* receive data len processed */
402 u32 r_psn; /* expected rcv packet sequence number */
403 u32 r_msn; /* message sequence number */
404 u8 state; /* QP state */
405 u8 s_state; /* opcode of last packet sent */
406 u8 s_ack_state; /* opcode of packet to ACK */
407 u8 s_nak_state; /* non-zero if NAK is pending */
408 u8 r_state; /* opcode of last packet received */
409 u8 r_nak_state; /* non-zero if NAK is pending */
410 u8 r_min_rnr_timer; /* retry timeout value for RNR NAKs */
411 u8 r_flags;
412 u8 r_max_rd_atomic; /* max number of RDMA read/atomic to receive */
413 u8 r_head_ack_queue; /* index into s_ack_queue[] */
414 u8 qp_access_flags;
415 u8 s_max_sge; /* size of s_wq->sg_list */
416 u8 s_retry_cnt; /* number of times to retry */
417 u8 s_rnr_retry_cnt;
418 u8 s_retry; /* requester retry counter */
419 u8 s_rnr_retry; /* requester RNR retry counter */
420 u8 s_pkey_index; /* PKEY index to use */
421 u8 s_max_rd_atomic; /* max number of RDMA read/atomic to send */
422 u8 s_num_rd_atomic; /* number of RDMA read/atomic pending */
423 u8 s_tail_ack_queue; /* index into s_ack_queue[] */
424 u8 s_flags;
425 u8 s_dmult;
426 u8 s_draining;
427 u8 timeout; /* Timeout for this QP */
428 enum ib_mtu path_mtu;
429 u32 remote_qpn;
430 u32 qkey; /* QKEY for this QP (for UD or RD) */
431 u32 s_size; /* send work queue size */
432 u32 s_head; /* new entries added here */
433 u32 s_tail; /* next entry to process */
434 u32 s_cur; /* current work queue entry */
435 u32 s_last; /* last un-ACK'ed entry */
436 u32 s_ssn; /* SSN of tail entry */
437 u32 s_lsn; /* limit sequence number (credit) */
438 struct ipath_swqe *s_wq; /* send work queue */
439 struct ipath_swqe *s_wqe;
440 struct ipath_sge *r_ud_sg_list;
441 struct ipath_rq r_rq; /* receive work queue */
442 struct ipath_sge r_sg_list[0]; /* verified SGEs */
443};
444
445/*
446 * Atomic bit definitions for r_aflags.
447 */
448#define IPATH_R_WRID_VALID 0
449
450/*
451 * Bit definitions for r_flags.
452 */
453#define IPATH_R_REUSE_SGE 0x01
454#define IPATH_R_RDMAR_SEQ 0x02
455
456/*
457 * Bit definitions for s_flags.
458 *
459 * IPATH_S_FENCE_PENDING - waiting for all prior RDMA read or atomic SWQEs
460 * before processing the next SWQE
461 * IPATH_S_RDMAR_PENDING - waiting for any RDMA read or atomic SWQEs
462 * before processing the next SWQE
463 * IPATH_S_WAITING - waiting for RNR timeout or send buffer available.
464 * IPATH_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
465 * IPATH_S_WAIT_DMA - waiting for send DMA queue to drain before generating
466 * next send completion entry not via send DMA.
467 */
468#define IPATH_S_SIGNAL_REQ_WR 0x01
469#define IPATH_S_FENCE_PENDING 0x02
470#define IPATH_S_RDMAR_PENDING 0x04
471#define IPATH_S_ACK_PENDING 0x08
472#define IPATH_S_BUSY 0x10
473#define IPATH_S_WAITING 0x20
474#define IPATH_S_WAIT_SSN_CREDIT 0x40
475#define IPATH_S_WAIT_DMA 0x80
476
477#define IPATH_S_ANY_WAIT (IPATH_S_FENCE_PENDING | IPATH_S_RDMAR_PENDING | \
478 IPATH_S_WAITING | IPATH_S_WAIT_SSN_CREDIT | IPATH_S_WAIT_DMA)
479
480#define IPATH_PSN_CREDIT 512
481
482/*
483 * Since struct ipath_swqe is not a fixed size, we can't simply index into
484 * struct ipath_qp.s_wq. This function does the array index computation.
485 */
486static inline struct ipath_swqe *get_swqe_ptr(struct ipath_qp *qp,
487 unsigned n)
488{
489 return (struct ipath_swqe *)((char *)qp->s_wq +
490 (sizeof(struct ipath_swqe) +
491 qp->s_max_sge *
492 sizeof(struct ipath_sge)) * n);
493}
494
495/*
496 * Since struct ipath_rwqe is not a fixed size, we can't simply index into
497 * struct ipath_rwq.wq. This function does the array index computation.
498 */
499static inline struct ipath_rwqe *get_rwqe_ptr(struct ipath_rq *rq,
500 unsigned n)
501{
502 return (struct ipath_rwqe *)
503 ((char *) rq->wq->wq +
504 (sizeof(struct ipath_rwqe) +
505 rq->max_sge * sizeof(struct ib_sge)) * n);
506}
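/*
 * A standalone sketch of the stride arithmetic used by get_swqe_ptr() and
 * get_rwqe_ptr() above: each queue element is a fixed header followed by
 * max_sge variable entries, so element n lives at base + n * stride.
 * The demo_* names are hypothetical and for illustration only.
 */
#include <stdlib.h>

struct demo_sge { void *vaddr; size_t length; };

struct demo_wqe {
	unsigned long wr_id;
	unsigned char num_sge;
	struct demo_sge sg_list[];	/* max_sge entries per element */
};

static struct demo_wqe *demo_wqe_ptr(void *base, unsigned max_sge, unsigned n)
{
	size_t stride = sizeof(struct demo_wqe) +
			max_sge * sizeof(struct demo_sge);

	return (struct demo_wqe *)((char *)base + n * stride);
}

int main(void)
{
	unsigned max_sge = 3;
	size_t stride = sizeof(struct demo_wqe) + max_sge * sizeof(struct demo_sge);
	void *base = calloc(8, stride);		/* queue of 8 elements */
	struct demo_wqe *third;

	if (!base)
		return 1;
	third = demo_wqe_ptr(base, max_sge, 2);	/* element index 2 */
	third->num_sge = (unsigned char)max_sge;
	free(base);
	return 0;
}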
507
508/*
509 * QPN-map pages start out as NULL; they get allocated upon
510 * first use and are never deallocated. This way, large
511 * bitmaps are not allocated unless large numbers of QPs are used.
512 */
513struct qpn_map {
514 atomic_t n_free;
515 void *page;
516};
517
518struct ipath_qp_table {
519 spinlock_t lock;
520 u32 last; /* last QP number allocated */
521 u32 max; /* size of the hash table */
522 u32 nmaps; /* size of the map table */
523 struct ipath_qp **table;
524 /* bit map of free numbers */
525 struct qpn_map map[QPNMAP_ENTRIES];
526};
527
528struct ipath_lkey_table {
529 spinlock_t lock;
530 u32 next; /* next unused index (speeds search) */
531 u32 gen; /* generation count */
532 u32 max; /* size of the table */
533 struct ipath_mregion **table;
534};
535
536struct ipath_opcode_stats {
537 u64 n_packets; /* number of packets */
538 u64 n_bytes; /* total number of bytes */
539};
540
541struct ipath_ibdev {
542 struct ib_device ibdev;
543 struct ipath_devdata *dd;
544 struct list_head pending_mmaps;
545 spinlock_t mmap_offset_lock;
546 u32 mmap_offset;
547 int ib_unit; /* This is the device number */
548 u16 sm_lid; /* in host order */
549 u8 sm_sl;
550 u8 mkeyprot;
551 /* non-zero when timer is set */
552 unsigned long mkey_lease_timeout;
553
554 /* The following fields are really per port. */
555 struct ipath_qp_table qp_table;
556 struct ipath_lkey_table lk_table;
557 struct list_head pending[3]; /* FIFO of QPs waiting for ACKs */
558 struct list_head piowait; /* list for wait PIO buf */
559 struct list_head txreq_free;
560 void *txreq_bufs;
561 /* list of QPs waiting for RNR timer */
562 struct list_head rnrwait;
563 spinlock_t pending_lock;
564 __be64 sys_image_guid; /* in network order */
565 __be64 gid_prefix; /* in network order */
566 __be64 mkey;
567
568 u32 n_pds_allocated; /* number of PDs allocated for device */
569 spinlock_t n_pds_lock;
570 u32 n_ahs_allocated; /* number of AHs allocated for device */
571 spinlock_t n_ahs_lock;
572 u32 n_cqs_allocated; /* number of CQs allocated for device */
573 spinlock_t n_cqs_lock;
574 u32 n_qps_allocated; /* number of QPs allocated for device */
575 spinlock_t n_qps_lock;
576 u32 n_srqs_allocated; /* number of SRQs allocated for device */
577 spinlock_t n_srqs_lock;
578 u32 n_mcast_grps_allocated; /* number of mcast groups allocated */
579 spinlock_t n_mcast_grps_lock;
580
581 u64 ipath_sword; /* total dwords sent (sample result) */
582 u64 ipath_rword; /* total dwords received (sample result) */
583 u64 ipath_spkts; /* total packets sent (sample result) */
584 u64 ipath_rpkts; /* total packets received (sample result) */
585 /* # of ticks no data sent (sample result) */
586 u64 ipath_xmit_wait;
587 u64 rcv_errors; /* # of packets with SW detected rcv errs */
588 u64 n_unicast_xmit; /* total unicast packets sent */
589 u64 n_unicast_rcv; /* total unicast packets received */
590 u64 n_multicast_xmit; /* total multicast packets sent */
591 u64 n_multicast_rcv; /* total multicast packets received */
592 u64 z_symbol_error_counter; /* starting count for PMA */
593 u64 z_link_error_recovery_counter; /* starting count for PMA */
594 u64 z_link_downed_counter; /* starting count for PMA */
595 u64 z_port_rcv_errors; /* starting count for PMA */
596 u64 z_port_rcv_remphys_errors; /* starting count for PMA */
597 u64 z_port_xmit_discards; /* starting count for PMA */
598 u64 z_port_xmit_data; /* starting count for PMA */
599 u64 z_port_rcv_data; /* starting count for PMA */
600 u64 z_port_xmit_packets; /* starting count for PMA */
601 u64 z_port_rcv_packets; /* starting count for PMA */
602 u32 z_pkey_violations; /* starting count for PMA */
603 u32 z_local_link_integrity_errors; /* starting count for PMA */
604 u32 z_excessive_buffer_overrun_errors; /* starting count for PMA */
605 u32 z_vl15_dropped; /* starting count for PMA */
606 u32 n_rc_resends;
607 u32 n_rc_acks;
608 u32 n_rc_qacks;
609 u32 n_seq_naks;
610 u32 n_rdma_seq;
611 u32 n_rnr_naks;
612 u32 n_other_naks;
613 u32 n_timeouts;
614 u32 n_pkt_drops;
615 u32 n_vl15_dropped;
616 u32 n_wqe_errs;
617 u32 n_rdma_dup_busy;
618 u32 n_piowait;
619 u32 n_unaligned;
620 u32 port_cap_flags;
621 u32 pma_sample_start;
622 u32 pma_sample_interval;
623 __be16 pma_counter_select[5];
624 u16 pma_tag;
625 u16 qkey_violations;
626 u16 mkey_violations;
627 u16 mkey_lease_period;
628 u16 pending_index; /* which pending queue is active */
629 u8 pma_sample_status;
630 u8 subnet_timeout;
631 u8 vl_high_limit;
632 struct ipath_opcode_stats opstats[128];
633};
634
635struct ipath_verbs_counters {
636 u64 symbol_error_counter;
637 u64 link_error_recovery_counter;
638 u64 link_downed_counter;
639 u64 port_rcv_errors;
640 u64 port_rcv_remphys_errors;
641 u64 port_xmit_discards;
642 u64 port_xmit_data;
643 u64 port_rcv_data;
644 u64 port_xmit_packets;
645 u64 port_rcv_packets;
646 u32 local_link_integrity_errors;
647 u32 excessive_buffer_overrun_errors;
648 u32 vl15_dropped;
649};
650
651struct ipath_verbs_txreq {
652 struct ipath_qp *qp;
653 struct ipath_swqe *wqe;
654 u32 map_len;
655 u32 len;
656 struct ipath_sge_state *ss;
657 struct ipath_pio_header hdr;
658 struct ipath_sdma_txreq txreq;
659};
660
661static inline struct ipath_mr *to_imr(struct ib_mr *ibmr)
662{
663 return container_of(ibmr, struct ipath_mr, ibmr);
664}
665
666static inline struct ipath_pd *to_ipd(struct ib_pd *ibpd)
667{
668 return container_of(ibpd, struct ipath_pd, ibpd);
669}
670
671static inline struct ipath_ah *to_iah(struct ib_ah *ibah)
672{
673 return container_of(ibah, struct ipath_ah, ibah);
674}
675
676static inline struct ipath_cq *to_icq(struct ib_cq *ibcq)
677{
678 return container_of(ibcq, struct ipath_cq, ibcq);
679}
680
681static inline struct ipath_srq *to_isrq(struct ib_srq *ibsrq)
682{
683 return container_of(ibsrq, struct ipath_srq, ibsrq);
684}
685
686static inline struct ipath_qp *to_iqp(struct ib_qp *ibqp)
687{
688 return container_of(ibqp, struct ipath_qp, ibqp);
689}
690
691static inline struct ipath_ibdev *to_idev(struct ib_device *ibdev)
692{
693 return container_of(ibdev, struct ipath_ibdev, ibdev);
694}
695
696/*
697 * This must be called with s_lock held.
698 */
699static inline void ipath_schedule_send(struct ipath_qp *qp)
700{
701 if (qp->s_flags & IPATH_S_ANY_WAIT)
702 qp->s_flags &= ~IPATH_S_ANY_WAIT;
703 if (!(qp->s_flags & IPATH_S_BUSY))
704 tasklet_hi_schedule(&qp->s_task);
705}
706
707int ipath_process_mad(struct ib_device *ibdev,
708 int mad_flags,
709 u8 port_num,
710 const struct ib_wc *in_wc,
711 const struct ib_grh *in_grh,
712 const struct ib_mad_hdr *in, size_t in_mad_size,
713 struct ib_mad_hdr *out, size_t *out_mad_size,
714 u16 *out_mad_pkey_index);
715
716/*
717 * Compare the lower 24 bits of the two values.
718 * Returns an integer less than, equal to, or greater than zero.
719 */
720static inline int ipath_cmp24(u32 a, u32 b)
721{
722 return (((int) a) - ((int) b)) << 8;
723}
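/*
 * A sketch of why the << 8 above gives wrap-safe ordering of 24-bit PSNs:
 * shifting the 32-bit difference left by 8 discards everything above bit 23,
 * so the sign of the result is decided by bit 23 of (a - b) mod 2^24, i.e.
 * by which value is "ahead" on the 24-bit circle.  This variant does the
 * subtraction and shift on unsigned values; the in-kernel version relies on
 * two's-complement int behaviour.  Illustration only, not driver code.
 */
#include <assert.h>
#include <stdint.h>

static int demo_cmp24(uint32_t a, uint32_t b)
{
	return (int32_t)((a - b) << 8);
}

int main(void)
{
	/* 0x000005 is 21 PSNs ahead of 0xFFFFF0 across the 24-bit wrap */
	assert(demo_cmp24(0x000005, 0xFFFFF0) > 0);
	assert(demo_cmp24(0xFFFFF0, 0x000005) < 0);
	assert(demo_cmp24(0x123456, 0x123456) == 0);
	return 0;
}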
724
725struct ipath_mcast *ipath_mcast_find(union ib_gid *mgid);
726
727int ipath_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
728 u64 *rwords, u64 *spkts, u64 *rpkts,
729 u64 *xmit_wait);
730
731int ipath_get_counters(struct ipath_devdata *dd,
732 struct ipath_verbs_counters *cntrs);
733
734int ipath_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
735
736int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
737
738int ipath_mcast_tree_empty(void);
739
740__be32 ipath_compute_aeth(struct ipath_qp *qp);
741
742struct ipath_qp *ipath_lookup_qpn(struct ipath_qp_table *qpt, u32 qpn);
743
744struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
745 struct ib_qp_init_attr *init_attr,
746 struct ib_udata *udata);
747
748int ipath_destroy_qp(struct ib_qp *ibqp);
749
750int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err);
751
752int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
753 int attr_mask, struct ib_udata *udata);
754
755int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
756 int attr_mask, struct ib_qp_init_attr *init_attr);
757
758unsigned ipath_free_all_qps(struct ipath_qp_table *qpt);
759
760int ipath_init_qp_table(struct ipath_ibdev *idev, int size);
761
762void ipath_get_credit(struct ipath_qp *qp, u32 aeth);
763
764unsigned ipath_ib_rate_to_mult(enum ib_rate rate);
765
766int ipath_verbs_send(struct ipath_qp *qp, struct ipath_ib_header *hdr,
767 u32 hdrwords, struct ipath_sge_state *ss, u32 len);
768
769void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length);
770
771void ipath_skip_sge(struct ipath_sge_state *ss, u32 length);
772
773void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
774 int has_grh, void *data, u32 tlen, struct ipath_qp *qp);
775
776void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
777 int has_grh, void *data, u32 tlen, struct ipath_qp *qp);
778
779void ipath_restart_rc(struct ipath_qp *qp, u32 psn);
780
781void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err);
782
783int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr);
784
785void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
786 int has_grh, void *data, u32 tlen, struct ipath_qp *qp);
787
788int ipath_alloc_lkey(struct ipath_lkey_table *rkt,
789 struct ipath_mregion *mr);
790
791void ipath_free_lkey(struct ipath_lkey_table *rkt, u32 lkey);
792
793int ipath_lkey_ok(struct ipath_qp *qp, struct ipath_sge *isge,
794 struct ib_sge *sge, int acc);
795
796int ipath_rkey_ok(struct ipath_qp *qp, struct ipath_sge_state *ss,
797 u32 len, u64 vaddr, u32 rkey, int acc);
798
799int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
800 struct ib_recv_wr **bad_wr);
801
802struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
803 struct ib_srq_init_attr *srq_init_attr,
804 struct ib_udata *udata);
805
806int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
807 enum ib_srq_attr_mask attr_mask,
808 struct ib_udata *udata);
809
810int ipath_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
811
812int ipath_destroy_srq(struct ib_srq *ibsrq);
813
814void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int sig);
815
816int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
817
818struct ib_cq *ipath_create_cq(struct ib_device *ibdev,
819 const struct ib_cq_init_attr *attr,
820 struct ib_ucontext *context,
821 struct ib_udata *udata);
822
823int ipath_destroy_cq(struct ib_cq *ibcq);
824
825int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);
826
827int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
828
829struct ib_mr *ipath_get_dma_mr(struct ib_pd *pd, int acc);
830
831struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
832 u64 virt_addr, int mr_access_flags,
833 struct ib_udata *udata);
834
835int ipath_dereg_mr(struct ib_mr *ibmr);
836
837struct ib_fmr *ipath_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
838 struct ib_fmr_attr *fmr_attr);
839
840int ipath_map_phys_fmr(struct ib_fmr *ibfmr, u64 * page_list,
841 int list_len, u64 iova);
842
843int ipath_unmap_fmr(struct list_head *fmr_list);
844
845int ipath_dealloc_fmr(struct ib_fmr *ibfmr);
846
847void ipath_release_mmap_info(struct kref *ref);
848
849struct ipath_mmap_info *ipath_create_mmap_info(struct ipath_ibdev *dev,
850 u32 size,
851 struct ib_ucontext *context,
852 void *obj);
853
854void ipath_update_mmap_info(struct ipath_ibdev *dev,
855 struct ipath_mmap_info *ip,
856 u32 size, void *obj);
857
858int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
859
860void ipath_insert_rnr_queue(struct ipath_qp *qp);
861
862int ipath_init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe,
863 u32 *lengthp, struct ipath_sge_state *ss);
864
865int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only);
866
867u32 ipath_make_grh(struct ipath_ibdev *dev, struct ib_grh *hdr,
868 struct ib_global_route *grh, u32 hwords, u32 nwords);
869
870void ipath_make_ruc_header(struct ipath_ibdev *dev, struct ipath_qp *qp,
871 struct ipath_other_headers *ohdr,
872 u32 bth0, u32 bth2);
873
874void ipath_do_send(unsigned long data);
875
876void ipath_send_complete(struct ipath_qp *qp, struct ipath_swqe *wqe,
877 enum ib_wc_status status);
878
879int ipath_make_rc_req(struct ipath_qp *qp);
880
881int ipath_make_uc_req(struct ipath_qp *qp);
882
883int ipath_make_ud_req(struct ipath_qp *qp);
884
885int ipath_register_ib_device(struct ipath_devdata *);
886
887void ipath_unregister_ib_device(struct ipath_ibdev *);
888
889void ipath_ib_rcv(struct ipath_ibdev *, void *, void *, u32);
890
891int ipath_ib_piobufavail(struct ipath_ibdev *);
892
893unsigned ipath_get_npkeys(struct ipath_devdata *);
894
895u32 ipath_get_cr_errpkey(struct ipath_devdata *);
896
897unsigned ipath_get_pkey(struct ipath_devdata *, unsigned);
898
899extern const enum ib_wc_opcode ib_ipath_wc_opcode[];
900
901/*
902 * The table below converts HCA-specific LinkTrainingState values to IB
903 * PhysPortState values.
904 */
905extern const u8 ipath_cvt_physportstate[];
906#define IB_PHYSPORTSTATE_SLEEP 1
907#define IB_PHYSPORTSTATE_POLL 2
908#define IB_PHYSPORTSTATE_DISABLED 3
909#define IB_PHYSPORTSTATE_CFG_TRAIN 4
910#define IB_PHYSPORTSTATE_LINKUP 5
911#define IB_PHYSPORTSTATE_LINK_ERR_RECOVER 6
912
913extern const int ib_ipath_state_ops[];
914
915extern unsigned int ib_ipath_lkey_table_size;
916
917extern unsigned int ib_ipath_max_cqes;
918
919extern unsigned int ib_ipath_max_cqs;
920
921extern unsigned int ib_ipath_max_qp_wrs;
922
923extern unsigned int ib_ipath_max_qps;
924
925extern unsigned int ib_ipath_max_sges;
926
927extern unsigned int ib_ipath_max_mcast_grps;
928
929extern unsigned int ib_ipath_max_mcast_qp_attached;
930
931extern unsigned int ib_ipath_max_srqs;
932
933extern unsigned int ib_ipath_max_srq_sges;
934
935extern unsigned int ib_ipath_max_srq_wrs;
936
937extern const u32 ib_ipath_rnr_table[];
938
939extern struct ib_dma_mapping_ops ipath_dma_mapping_ops;
940
941#endif /* IPATH_VERBS_H */
diff --git a/drivers/staging/rdma/ipath/ipath_verbs_mcast.c b/drivers/staging/rdma/ipath/ipath_verbs_mcast.c
deleted file mode 100644
index 72d476fa5b8f..000000000000
--- a/drivers/staging/rdma/ipath/ipath_verbs_mcast.c
+++ /dev/null
@@ -1,363 +0,0 @@
1/*
2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/rculist.h>
35#include <linux/slab.h>
36
37#include "ipath_verbs.h"
38
39/*
40 * Global table of GID to attached QPs.
41 * The table is global to all ipath devices since a send from one QP/device
42 * needs to be locally routed to any locally attached QPs on the same
43 * or different device.
44 */
45static struct rb_root mcast_tree;
46static DEFINE_SPINLOCK(mcast_lock);
47
48/**
49 * ipath_mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct
50 * @qp: the QP to link
51 */
52static struct ipath_mcast_qp *ipath_mcast_qp_alloc(struct ipath_qp *qp)
53{
54 struct ipath_mcast_qp *mqp;
55
56 mqp = kmalloc(sizeof *mqp, GFP_KERNEL);
57 if (!mqp)
58 goto bail;
59
60 mqp->qp = qp;
61 atomic_inc(&qp->refcount);
62
63bail:
64 return mqp;
65}
66
67static void ipath_mcast_qp_free(struct ipath_mcast_qp *mqp)
68{
69 struct ipath_qp *qp = mqp->qp;
70
71 /* Notify ipath_destroy_qp() if it is waiting. */
72 if (atomic_dec_and_test(&qp->refcount))
73 wake_up(&qp->wait);
74
75 kfree(mqp);
76}
77
78/**
79 * ipath_mcast_alloc - allocate the multicast GID structure
80 * @mgid: the multicast GID
81 *
82 * A list of QPs will be attached to this structure.
83 */
84static struct ipath_mcast *ipath_mcast_alloc(union ib_gid *mgid)
85{
86 struct ipath_mcast *mcast;
87
88 mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
89 if (!mcast)
90 goto bail;
91
92 mcast->mgid = *mgid;
93 INIT_LIST_HEAD(&mcast->qp_list);
94 init_waitqueue_head(&mcast->wait);
95 atomic_set(&mcast->refcount, 0);
96 mcast->n_attached = 0;
97
98bail:
99 return mcast;
100}
101
102static void ipath_mcast_free(struct ipath_mcast *mcast)
103{
104 struct ipath_mcast_qp *p, *tmp;
105
106 list_for_each_entry_safe(p, tmp, &mcast->qp_list, list)
107 ipath_mcast_qp_free(p);
108
109 kfree(mcast);
110}
111
112/**
113 * ipath_mcast_find - search the global table for the given multicast GID
114 * @mgid: the multicast GID to search for
115 *
116 * Returns NULL if not found.
117 *
118 * The caller is responsible for decrementing the reference count if found.
119 */
120struct ipath_mcast *ipath_mcast_find(union ib_gid *mgid)
121{
122 struct rb_node *n;
123 unsigned long flags;
124 struct ipath_mcast *mcast;
125
126 spin_lock_irqsave(&mcast_lock, flags);
127 n = mcast_tree.rb_node;
128 while (n) {
129 int ret;
130
131 mcast = rb_entry(n, struct ipath_mcast, rb_node);
132
133 ret = memcmp(mgid->raw, mcast->mgid.raw,
134 sizeof(union ib_gid));
135 if (ret < 0)
136 n = n->rb_left;
137 else if (ret > 0)
138 n = n->rb_right;
139 else {
140 atomic_inc(&mcast->refcount);
141 spin_unlock_irqrestore(&mcast_lock, flags);
142 goto bail;
143 }
144 }
145 spin_unlock_irqrestore(&mcast_lock, flags);
146
147 mcast = NULL;
148
149bail:
150 return mcast;
151}
152
153/**
154 * ipath_mcast_add - insert mcast GID into table and attach QP struct
155 * @mcast: the mcast GID table
156 * @mqp: the QP to attach
157 *
158 * Return zero if both were added. Return EEXIST if the GID was already in
159 * the table but the QP was added. Return ESRCH if the QP was already
160 * attached and neither structure was added. Return ENOMEM if a limit was exceeded.
161 */
162static int ipath_mcast_add(struct ipath_ibdev *dev,
163 struct ipath_mcast *mcast,
164 struct ipath_mcast_qp *mqp)
165{
166 struct rb_node **n = &mcast_tree.rb_node;
167 struct rb_node *pn = NULL;
168 int ret;
169
170 spin_lock_irq(&mcast_lock);
171
172 while (*n) {
173 struct ipath_mcast *tmcast;
174 struct ipath_mcast_qp *p;
175
176 pn = *n;
177 tmcast = rb_entry(pn, struct ipath_mcast, rb_node);
178
179 ret = memcmp(mcast->mgid.raw, tmcast->mgid.raw,
180 sizeof(union ib_gid));
181 if (ret < 0) {
182 n = &pn->rb_left;
183 continue;
184 }
185 if (ret > 0) {
186 n = &pn->rb_right;
187 continue;
188 }
189
190 /* Search the QP list to see if this is already there. */
191 list_for_each_entry_rcu(p, &tmcast->qp_list, list) {
192 if (p->qp == mqp->qp) {
193 ret = ESRCH;
194 goto bail;
195 }
196 }
197 if (tmcast->n_attached == ib_ipath_max_mcast_qp_attached) {
198 ret = ENOMEM;
199 goto bail;
200 }
201
202 tmcast->n_attached++;
203
204 list_add_tail_rcu(&mqp->list, &tmcast->qp_list);
205 ret = EEXIST;
206 goto bail;
207 }
208
209 spin_lock(&dev->n_mcast_grps_lock);
210 if (dev->n_mcast_grps_allocated == ib_ipath_max_mcast_grps) {
211 spin_unlock(&dev->n_mcast_grps_lock);
212 ret = ENOMEM;
213 goto bail;
214 }
215
216 dev->n_mcast_grps_allocated++;
217 spin_unlock(&dev->n_mcast_grps_lock);
218
219 mcast->n_attached++;
220
221 list_add_tail_rcu(&mqp->list, &mcast->qp_list);
222
223 atomic_inc(&mcast->refcount);
224 rb_link_node(&mcast->rb_node, pn, n);
225 rb_insert_color(&mcast->rb_node, &mcast_tree);
226
227 ret = 0;
228
229bail:
230 spin_unlock_irq(&mcast_lock);
231
232 return ret;
233}
234
235int ipath_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
236{
237 struct ipath_qp *qp = to_iqp(ibqp);
238 struct ipath_ibdev *dev = to_idev(ibqp->device);
239 struct ipath_mcast *mcast;
240 struct ipath_mcast_qp *mqp;
241 int ret;
242
243 /*
244 * Allocate data structures since it's better to do this outside of
245 * spin locks and they will most likely be needed.
246 */
247 mcast = ipath_mcast_alloc(gid);
248 if (mcast == NULL) {
249 ret = -ENOMEM;
250 goto bail;
251 }
252 mqp = ipath_mcast_qp_alloc(qp);
253 if (mqp == NULL) {
254 ipath_mcast_free(mcast);
255 ret = -ENOMEM;
256 goto bail;
257 }
258 switch (ipath_mcast_add(dev, mcast, mqp)) {
259 case ESRCH:
260 /* Neither was used: can't attach the same QP twice. */
261 ipath_mcast_qp_free(mqp);
262 ipath_mcast_free(mcast);
263 ret = -EINVAL;
264 goto bail;
265 case EEXIST: /* The mcast wasn't used */
266 ipath_mcast_free(mcast);
267 break;
268 case ENOMEM:
269 /* Exceeded the maximum number of mcast groups. */
270 ipath_mcast_qp_free(mqp);
271 ipath_mcast_free(mcast);
272 ret = -ENOMEM;
273 goto bail;
274 default:
275 break;
276 }
277
278 ret = 0;
279
280bail:
281 return ret;
282}
283
284int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
285{
286 struct ipath_qp *qp = to_iqp(ibqp);
287 struct ipath_ibdev *dev = to_idev(ibqp->device);
288 struct ipath_mcast *mcast = NULL;
289 struct ipath_mcast_qp *p, *tmp;
290 struct rb_node *n;
291 int last = 0;
292 int ret;
293
294 spin_lock_irq(&mcast_lock);
295
296 /* Find the GID in the mcast table. */
297 n = mcast_tree.rb_node;
298 while (1) {
299 if (n == NULL) {
300 spin_unlock_irq(&mcast_lock);
301 ret = -EINVAL;
302 goto bail;
303 }
304
305 mcast = rb_entry(n, struct ipath_mcast, rb_node);
306 ret = memcmp(gid->raw, mcast->mgid.raw,
307 sizeof(union ib_gid));
308 if (ret < 0)
309 n = n->rb_left;
310 else if (ret > 0)
311 n = n->rb_right;
312 else
313 break;
314 }
315
316 /* Search the QP list. */
317 list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) {
318 if (p->qp != qp)
319 continue;
320 /*
321 * We found it, so remove it, but don't poison the forward
322 * link until we are sure there are no list walkers.
323 */
324 list_del_rcu(&p->list);
325 mcast->n_attached--;
326
327 /* If this was the last attached QP, remove the GID too. */
328 if (list_empty(&mcast->qp_list)) {
329 rb_erase(&mcast->rb_node, &mcast_tree);
330 last = 1;
331 }
332 break;
333 }
334
335 spin_unlock_irq(&mcast_lock);
336
337 if (p) {
338 /*
339 * Wait for any list walkers to finish before freeing the
340 * list element.
341 */
342 wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
343 ipath_mcast_qp_free(p);
344 }
345 if (last) {
346 atomic_dec(&mcast->refcount);
347 wait_event(mcast->wait, !atomic_read(&mcast->refcount));
348 ipath_mcast_free(mcast);
349 spin_lock_irq(&dev->n_mcast_grps_lock);
350 dev->n_mcast_grps_allocated--;
351 spin_unlock_irq(&dev->n_mcast_grps_lock);
352 }
353
354 ret = 0;
355
356bail:
357 return ret;
358}
359
360int ipath_mcast_tree_empty(void)
361{
362 return mcast_tree.rb_node == NULL;
363}
diff --git a/drivers/staging/rdma/ipath/ipath_wc_ppc64.c b/drivers/staging/rdma/ipath/ipath_wc_ppc64.c
deleted file mode 100644
index 1a7e20a75149..000000000000
--- a/drivers/staging/rdma/ipath/ipath_wc_ppc64.c
+++ /dev/null
@@ -1,49 +0,0 @@
1/*
2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33/*
34 * This file is conditionally built on PowerPC only. Otherwise weak symbol
35 * versions of the functions exported from here are used.
36 */
37
38#include "ipath_kernel.h"
39
40/**
41 * ipath_enable_wc - enable write combining for MMIO writes to the device
42 * @dd: infinipath device
43 *
44 * Nothing to do on PowerPC, so just return without error.
45 */
46int ipath_enable_wc(struct ipath_devdata *dd)
47{
48 return 0;
49}
diff --git a/drivers/staging/rdma/ipath/ipath_wc_x86_64.c b/drivers/staging/rdma/ipath/ipath_wc_x86_64.c
deleted file mode 100644
index 7b6e4c843e19..000000000000
--- a/drivers/staging/rdma/ipath/ipath_wc_x86_64.c
+++ /dev/null
@@ -1,144 +0,0 @@
1/*
2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34/*
35 * This file is conditionally built on x86_64 only. Otherwise weak symbol
36 * versions of the functions exported from here are used.
37 */
38
39#include <linux/pci.h>
40#include <asm/processor.h>
41
42#include "ipath_kernel.h"
43
44/**
45 * ipath_enable_wc - enable write combining for MMIO writes to the device
46 * @dd: infinipath device
47 *
48 * This routine is x86_64-specific; it twiddles the CPU's MTRRs to enable
49 * write combining.
50 */
51int ipath_enable_wc(struct ipath_devdata *dd)
52{
53 int ret = 0;
54 u64 pioaddr, piolen;
55 unsigned bits;
56 const unsigned long addr = pci_resource_start(dd->pcidev, 0);
57 const size_t len = pci_resource_len(dd->pcidev, 0);
58
59 /*
60 * Set the PIO buffers to be WCCOMB, so we get HT bursts to the
61 * chip. Linux (possibly the hardware) requires it to be on a power
62 * of 2 address matching the length (which has to be a power of 2).
63 * For rev1, that means the base address, for rev2, it will be just
64 * the PIO buffers themselves.
65 * For chips with two sets of buffers, the calculations are
66 * somewhat more complicated; we need to sum, and the piobufbase
67 * register has both offsets, 2K in low 32 bits, 4K in high 32 bits.
68 * The buffers are still packed, so a single range covers both.
69 */
70 if (dd->ipath_piobcnt2k && dd->ipath_piobcnt4k) { /* 2 sizes */
71 unsigned long pio2kbase, pio4kbase;
72 pio2kbase = dd->ipath_piobufbase & 0xffffffffUL;
73 pio4kbase = (dd->ipath_piobufbase >> 32) & 0xffffffffUL;
74 if (pio2kbase < pio4kbase) { /* all, for now */
75 pioaddr = addr + pio2kbase;
76 piolen = pio4kbase - pio2kbase +
77 dd->ipath_piobcnt4k * dd->ipath_4kalign;
78 } else {
79 pioaddr = addr + pio4kbase;
80 piolen = pio2kbase - pio4kbase +
81 dd->ipath_piobcnt2k * dd->ipath_palign;
82 }
83 } else { /* single buffer size (2K, currently) */
84 pioaddr = addr + dd->ipath_piobufbase;
85 piolen = dd->ipath_piobcnt2k * dd->ipath_palign +
86 dd->ipath_piobcnt4k * dd->ipath_4kalign;
87 }
88
89 for (bits = 0; !(piolen & (1ULL << bits)); bits++)
90 /* do nothing */ ;
91
92 if (piolen != (1ULL << bits)) {
93 piolen >>= bits;
94 while (piolen >>= 1)
95 bits++;
96 piolen = 1ULL << (bits + 1);
97 }
98 if (pioaddr & (piolen - 1)) {
99 u64 atmp;
100 ipath_dbg("pioaddr %llx not on right boundary for size "
101 "%llx, fixing\n",
102 (unsigned long long) pioaddr,
103 (unsigned long long) piolen);
104 atmp = pioaddr & ~(piolen - 1);
105 if (atmp < addr || (atmp + piolen) > (addr + len)) {
106 ipath_dev_err(dd, "No way to align address/size "
107 "(%llx/%llx), no WC mtrr\n",
108 (unsigned long long) atmp,
109 (unsigned long long) piolen << 1);
110 ret = -ENODEV;
111 } else {
112 ipath_dbg("changing WC base from %llx to %llx, "
113 "len from %llx to %llx\n",
114 (unsigned long long) pioaddr,
115 (unsigned long long) atmp,
116 (unsigned long long) piolen,
117 (unsigned long long) piolen << 1);
118 pioaddr = atmp;
119 piolen <<= 1;
120 }
121 }
122
123 if (!ret) {
124 dd->wc_cookie = arch_phys_wc_add(pioaddr, piolen);
125 if (dd->wc_cookie < 0) {
126 ipath_dev_err(dd, "Seting mtrr failed on PIO buffers\n");
127 ret = -ENODEV;
128 } else if (dd->wc_cookie == 0)
129 ipath_cdbg(VERBOSE, "Set mtrr for chip to WC not needed\n");
130 else
131 ipath_cdbg(VERBOSE, "Set mtrr for chip to WC\n");
132 }
133
134 return ret;
135}
136
137/**
138 * ipath_disable_wc - disable write combining for MMIO writes to the device
139 * @dd: infinipath device
140 */
141void ipath_disable_wc(struct ipath_devdata *dd)
142{
143 arch_phys_wc_del(dd->wc_cookie);
144}
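The comment in the deleted ipath_enable_wc() above explains the constraint behind its arithmetic: a write-combining (MTRR) range must have a power-of-two length, with the base address aligned to that length. The following is a small standalone sketch of that rule; the helper name is hypothetical, and it illustrates the constraint rather than reimplementing the deleted logic.

#include <assert.h>
#include <stdint.h>

/* Round len up to the next power of two (len > 0). */
static uint64_t wc_len_pow2(uint64_t len)
{
	uint64_t p = 1;

	while (p < len)
		p <<= 1;
	return p;
}

int main(void)
{
	uint64_t len = wc_len_pow2(0x3000);	/* 12K rounds up to 16K */
	uint64_t addr = 0x9000;
	uint64_t aligned = addr & ~(len - 1);	/* align the base down */

	assert(len == 0x4000);
	assert(addr & (len - 1));		/* original base is misaligned */
	assert((aligned & (len - 1)) == 0);	/* realigned base satisfies the rule */
	return 0;
}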
diff --git a/drivers/staging/speakup/Kconfig b/drivers/staging/speakup/Kconfig
index efd6f4560d3e..7e8037e230b8 100644
--- a/drivers/staging/speakup/Kconfig
+++ b/drivers/staging/speakup/Kconfig
@@ -1,7 +1,7 @@
1menu "Speakup console speech" 1menu "Speakup console speech"
2 2
3config SPEAKUP 3config SPEAKUP
4 depends on VT 4 depends on VT && !MN10300
5 tristate "Speakup core" 5 tristate "Speakup core"
6 ---help--- 6 ---help---
7 This is the Speakup screen reader. Think of it as a 7 This is the Speakup screen reader. Think of it as a
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index 63c59bc89b04..30cf973f326d 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -264,8 +264,9 @@ static struct notifier_block vt_notifier_block = {
264 .notifier_call = vt_notifier_call, 264 .notifier_call = vt_notifier_call,
265}; 265};
266 266
267static unsigned char get_attributes(u16 *pos) 267static unsigned char get_attributes(struct vc_data *vc, u16 *pos)
268{ 268{
269 pos = screen_pos(vc, pos - (u16 *)vc->vc_origin, 1);
269 return (u_char) (scr_readw(pos) >> 8); 270 return (u_char) (scr_readw(pos) >> 8);
270} 271}
271 272
@@ -275,7 +276,7 @@ static void speakup_date(struct vc_data *vc)
275 spk_y = spk_cy = vc->vc_y; 276 spk_y = spk_cy = vc->vc_y;
276 spk_pos = spk_cp = vc->vc_pos; 277 spk_pos = spk_cp = vc->vc_pos;
277 spk_old_attr = spk_attr; 278 spk_old_attr = spk_attr;
278 spk_attr = get_attributes((u_short *) spk_pos); 279 spk_attr = get_attributes(vc, (u_short *)spk_pos);
279} 280}
280 281
281static void bleep(u_short val) 282static void bleep(u_short val)
@@ -469,8 +470,12 @@ static u16 get_char(struct vc_data *vc, u16 *pos, u_char *attribs)
469 u16 ch = ' '; 470 u16 ch = ' ';
470 471
471 if (vc && pos) { 472 if (vc && pos) {
472 u16 w = scr_readw(pos); 473 u16 w;
473 u16 c = w & 0xff; 474 u16 c;
475
476 pos = screen_pos(vc, pos - (u16 *)vc->vc_origin, 1);
477 w = scr_readw(pos);
478 c = w & 0xff;
474 479
475 if (w & vc->vc_hi_font_mask) 480 if (w & vc->vc_hi_font_mask)
476 c |= 0x100; 481 c |= 0x100;
@@ -746,7 +751,7 @@ static int get_line(struct vc_data *vc)
746 u_char tmp2; 751 u_char tmp2;
747 752
748 spk_old_attr = spk_attr; 753 spk_old_attr = spk_attr;
749 spk_attr = get_attributes((u_short *) spk_pos); 754 spk_attr = get_attributes(vc, (u_short *)spk_pos);
750 for (i = 0; i < vc->vc_cols; i++) { 755 for (i = 0; i < vc->vc_cols; i++) {
751 buf[i] = (u_char) get_char(vc, (u_short *) tmp, &tmp2); 756 buf[i] = (u_char) get_char(vc, (u_short *) tmp, &tmp2);
752 tmp += 2; 757 tmp += 2;
@@ -811,7 +816,7 @@ static int say_from_to(struct vc_data *vc, u_long from, u_long to,
811 u_short saved_punc_mask = spk_punc_mask; 816 u_short saved_punc_mask = spk_punc_mask;
812 817
813 spk_old_attr = spk_attr; 818 spk_old_attr = spk_attr;
814 spk_attr = get_attributes((u_short *) from); 819 spk_attr = get_attributes(vc, (u_short *)from);
815 while (from < to) { 820 while (from < to) {
816 buf[i++] = (char)get_char(vc, (u_short *) from, &tmp); 821 buf[i++] = (char)get_char(vc, (u_short *) from, &tmp);
817 from += 2; 822 from += 2;
@@ -886,7 +891,7 @@ static int get_sentence_buf(struct vc_data *vc, int read_punc)
886 sentmarks[bn][0] = &sentbuf[bn][0]; 891 sentmarks[bn][0] = &sentbuf[bn][0];
887 i = 0; 892 i = 0;
888 spk_old_attr = spk_attr; 893 spk_old_attr = spk_attr;
889 spk_attr = get_attributes((u_short *) start); 894 spk_attr = get_attributes(vc, (u_short *)start);
890 895
891 while (start < end) { 896 while (start < end) {
892 sentbuf[bn][i] = (char)get_char(vc, (u_short *) start, &tmp); 897 sentbuf[bn][i] = (char)get_char(vc, (u_short *) start, &tmp);
@@ -1585,7 +1590,7 @@ static int count_highlight_color(struct vc_data *vc)
1585 u16 *ptr; 1590 u16 *ptr;
1586 1591
1587 for (ptr = start; ptr < end; ptr++) { 1592 for (ptr = start; ptr < end; ptr++) {
1588 ch = get_attributes(ptr); 1593 ch = get_attributes(vc, ptr);
1589 bg = (ch & 0x70) >> 4; 1594 bg = (ch & 0x70) >> 4;
1590 speakup_console[vc_num]->ht.bgcount[bg]++; 1595 speakup_console[vc_num]->ht.bgcount[bg]++;
1591 } 1596 }
diff --git a/drivers/staging/speakup/selection.c b/drivers/staging/speakup/selection.c
index aa5ab6c80ed4..41ef099b7aa6 100644
--- a/drivers/staging/speakup/selection.c
+++ b/drivers/staging/speakup/selection.c
@@ -142,7 +142,9 @@ static void __speakup_paste_selection(struct work_struct *work)
142 struct tty_ldisc *ld; 142 struct tty_ldisc *ld;
143 DECLARE_WAITQUEUE(wait, current); 143 DECLARE_WAITQUEUE(wait, current);
144 144
145 ld = tty_ldisc_ref_wait(tty); 145 ld = tty_ldisc_ref(tty);
146 if (!ld)
147 goto tty_unref;
146 tty_buffer_lock_exclusive(&vc->port); 148 tty_buffer_lock_exclusive(&vc->port);
147 149
148 add_wait_queue(&vc->paste_wait, &wait); 150 add_wait_queue(&vc->paste_wait, &wait);
@@ -162,6 +164,7 @@ static void __speakup_paste_selection(struct work_struct *work)
162 164
163 tty_buffer_unlock_exclusive(&vc->port); 165 tty_buffer_unlock_exclusive(&vc->port);
164 tty_ldisc_deref(ld); 166 tty_ldisc_deref(ld);
167tty_unref:
165 tty_kref_put(tty); 168 tty_kref_put(tty);
166} 169}
167 170
diff --git a/drivers/staging/speakup/serialio.c b/drivers/staging/speakup/serialio.c
index 3b5835b28128..a5bbb338f275 100644
--- a/drivers/staging/speakup/serialio.c
+++ b/drivers/staging/speakup/serialio.c
@@ -6,6 +6,11 @@
6#include "spk_priv.h" 6#include "spk_priv.h"
7#include "serialio.h" 7#include "serialio.h"
8 8
9#include <linux/serial_core.h>
10/* WARNING: Do not change this to <linux/serial.h> without testing that
11 * SERIAL_PORT_DFNS does get defined to the appropriate value. */
12#include <asm/serial.h>
13
9#ifndef SERIAL_PORT_DFNS 14#ifndef SERIAL_PORT_DFNS
10#define SERIAL_PORT_DFNS 15#define SERIAL_PORT_DFNS
11#endif 16#endif
@@ -23,9 +28,15 @@ const struct old_serial_port *spk_serial_init(int index)
23 int baud = 9600, quot = 0; 28 int baud = 9600, quot = 0;
24 unsigned int cval = 0; 29 unsigned int cval = 0;
25 int cflag = CREAD | HUPCL | CLOCAL | B9600 | CS8; 30 int cflag = CREAD | HUPCL | CLOCAL | B9600 | CS8;
26 const struct old_serial_port *ser = rs_table + index; 31 const struct old_serial_port *ser;
27 int err; 32 int err;
28 33
34 if (index >= ARRAY_SIZE(rs_table)) {
35 pr_info("no port info for ttyS%d\n", index);
36 return NULL;
37 }
38 ser = rs_table + index;
39
29 /* Divisor, bytesize and parity */ 40 /* Divisor, bytesize and parity */
30 quot = ser->baud_base / baud; 41 quot = ser->baud_base / baud;
31 cval = cflag & (CSIZE | CSTOPB); 42 cval = cflag & (CSIZE | CSTOPB);
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 3327c49674d3..713c63d9681b 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -898,7 +898,7 @@ static ssize_t unmap_zeroes_data_store(struct config_item *item,
898 da->unmap_zeroes_data = flag; 898 da->unmap_zeroes_data = flag;
899 pr_debug("dev[%p]: SE Device Thin Provisioning LBPRZ bit: %d\n", 899 pr_debug("dev[%p]: SE Device Thin Provisioning LBPRZ bit: %d\n",
900 da->da_dev, flag); 900 da->da_dev, flag);
901 return 0; 901 return count;
902} 902}
903 903
904/* 904/*
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index cacd97a8cbd0..da457e25717a 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -828,6 +828,50 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
828 return dev; 828 return dev;
829} 829}
830 830
831/*
832 * Check if the underlying struct block_device request_queue supports
833 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
834 * in ATA and we need to set TPE=1
835 */
836bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
837 struct request_queue *q, int block_size)
838{
839 if (!blk_queue_discard(q))
840 return false;
841
842 attrib->max_unmap_lba_count = (q->limits.max_discard_sectors << 9) /
843 block_size;
844 /*
845	 * Currently hardcoded to 1 in Linux/SCSI code.
846 */
847 attrib->max_unmap_block_desc_count = 1;
848 attrib->unmap_granularity = q->limits.discard_granularity / block_size;
849 attrib->unmap_granularity_alignment = q->limits.discard_alignment /
850 block_size;
851 attrib->unmap_zeroes_data = q->limits.discard_zeroes_data;
852 return true;
853}
854EXPORT_SYMBOL(target_configure_unmap_from_queue);
855
856/*
857 * Convert from blocksize advertised to the initiator to the 512 byte
858 * units unconditionally used by the Linux block layer.
859 */
860sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
861{
862 switch (dev->dev_attrib.block_size) {
863 case 4096:
864 return lb << 3;
865 case 2048:
866 return lb << 2;
867 case 1024:
868 return lb << 1;
869 default:
870 return lb;
871 }
872}
873EXPORT_SYMBOL(target_to_linux_sector);
874
831int target_configure_device(struct se_device *dev) 875int target_configure_device(struct se_device *dev)
832{ 876{
833 struct se_hba *hba = dev->se_hba; 877 struct se_hba *hba = dev->se_hba;
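The target_to_linux_sector() helper added above converts an LBA expressed in the block size advertised to the initiator into the 512-byte sectors used by the Linux block layer, which is why it shifts by 3, 2, and 1 bits (multiply by 8, 4, and 2 for 4096-, 2048-, and 1024-byte blocks). A minimal standalone sketch of the same arithmetic, with a hypothetical demo_* name:

#include <assert.h>
#include <stdint.h>

typedef uint64_t sector_t;

/* Advertised-block-size LBA -> 512-byte Linux block layer sectors. */
static sector_t demo_to_linux_sector(uint32_t block_size, sector_t lb)
{
	switch (block_size) {
	case 4096: return lb << 3;	/* 4096 / 512 = 8 */
	case 2048: return lb << 2;	/* 2048 / 512 = 4 */
	case 1024: return lb << 1;	/* 1024 / 512 = 2 */
	default:   return lb;		/* 512-byte blocks pass through */
	}
}

int main(void)
{
	assert(demo_to_linux_sector(4096, 10) == 80);
	assert(demo_to_linux_sector(2048, 10) == 40);
	assert(demo_to_linux_sector(512, 10) == 10);
	return 0;
}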
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index e3195700211a..75f0f08b2a34 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -160,25 +160,11 @@ static int fd_configure_device(struct se_device *dev)
160 " block_device blocks: %llu logical_block_size: %d\n", 160 " block_device blocks: %llu logical_block_size: %d\n",
161 dev_size, div_u64(dev_size, fd_dev->fd_block_size), 161 dev_size, div_u64(dev_size, fd_dev->fd_block_size),
162 fd_dev->fd_block_size); 162 fd_dev->fd_block_size);
163 /* 163
164 * Check if the underlying struct block_device request_queue supports 164 if (target_configure_unmap_from_queue(&dev->dev_attrib, q,
165 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM 165 fd_dev->fd_block_size))
166 * in ATA and we need to set TPE=1
167 */
168 if (blk_queue_discard(q)) {
169 dev->dev_attrib.max_unmap_lba_count =
170 q->limits.max_discard_sectors;
171 /*
172 * Currently hardcoded to 1 in Linux/SCSI code..
173 */
174 dev->dev_attrib.max_unmap_block_desc_count = 1;
175 dev->dev_attrib.unmap_granularity =
176 q->limits.discard_granularity >> 9;
177 dev->dev_attrib.unmap_granularity_alignment =
178 q->limits.discard_alignment;
179 pr_debug("IFILE: BLOCK Discard support available," 166 pr_debug("IFILE: BLOCK Discard support available,"
180 " disabled by default\n"); 167 " disabled by default\n");
181 }
182 /* 168 /*
183 * Enable write same emulation for IBLOCK and use 0xFFFF as 169 * Enable write same emulation for IBLOCK and use 0xFFFF as
184 * the smaller WRITE_SAME(10) only has a two-byte block count. 170 * the smaller WRITE_SAME(10) only has a two-byte block count.
@@ -490,9 +476,12 @@ fd_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
490 if (S_ISBLK(inode->i_mode)) { 476 if (S_ISBLK(inode->i_mode)) {
491 /* The backend is block device, use discard */ 477 /* The backend is block device, use discard */
492 struct block_device *bdev = inode->i_bdev; 478 struct block_device *bdev = inode->i_bdev;
479 struct se_device *dev = cmd->se_dev;
493 480
494 ret = blkdev_issue_discard(bdev, lba, 481 ret = blkdev_issue_discard(bdev,
495 nolb, GFP_KERNEL, 0); 482 target_to_linux_sector(dev, lba),
483 target_to_linux_sector(dev, nolb),
484 GFP_KERNEL, 0);
496 if (ret < 0) { 485 if (ret < 0) {
497 pr_warn("FILEIO: blkdev_issue_discard() failed: %d\n", 486 pr_warn("FILEIO: blkdev_issue_discard() failed: %d\n",
498 ret); 487 ret);
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 5a2899f9f50b..abe4eb997a84 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -121,29 +121,11 @@ static int iblock_configure_device(struct se_device *dev)
121 dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q); 121 dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
122 dev->dev_attrib.hw_queue_depth = q->nr_requests; 122 dev->dev_attrib.hw_queue_depth = q->nr_requests;
123 123
124 /* 124 if (target_configure_unmap_from_queue(&dev->dev_attrib, q,
125 * Check if the underlying struct block_device request_queue supports 125 dev->dev_attrib.hw_block_size))
126 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
127 * in ATA and we need to set TPE=1
128 */
129 if (blk_queue_discard(q)) {
130 dev->dev_attrib.max_unmap_lba_count =
131 q->limits.max_discard_sectors;
132
133 /*
134 * Currently hardcoded to 1 in Linux/SCSI code..
135 */
136 dev->dev_attrib.max_unmap_block_desc_count = 1;
137 dev->dev_attrib.unmap_granularity =
138 q->limits.discard_granularity >> 9;
139 dev->dev_attrib.unmap_granularity_alignment =
140 q->limits.discard_alignment;
141 dev->dev_attrib.unmap_zeroes_data =
142 q->limits.discard_zeroes_data;
143
144 pr_debug("IBLOCK: BLOCK Discard support available," 126 pr_debug("IBLOCK: BLOCK Discard support available,"
145 " disabled by default\n"); 127 " disabled by default\n");
146 } 128
147 /* 129 /*
148 * Enable write same emulation for IBLOCK and use 0xFFFF as 130 * Enable write same emulation for IBLOCK and use 0xFFFF as
149 * the smaller WRITE_SAME(10) only has a two-byte block count. 131 * the smaller WRITE_SAME(10) only has a two-byte block count.
@@ -415,9 +397,13 @@ static sense_reason_t
415iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb) 397iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
416{ 398{
417 struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd; 399 struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
400 struct se_device *dev = cmd->se_dev;
418 int ret; 401 int ret;
419 402
420 ret = blkdev_issue_discard(bdev, lba, nolb, GFP_KERNEL, 0); 403 ret = blkdev_issue_discard(bdev,
404 target_to_linux_sector(dev, lba),
405 target_to_linux_sector(dev, nolb),
406 GFP_KERNEL, 0);
421 if (ret < 0) { 407 if (ret < 0) {
422 pr_err("blkdev_issue_discard() failed: %d\n", ret); 408 pr_err("blkdev_issue_discard() failed: %d\n", ret);
423 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 409 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -433,8 +419,10 @@ iblock_execute_write_same(struct se_cmd *cmd)
433 struct scatterlist *sg; 419 struct scatterlist *sg;
434 struct bio *bio; 420 struct bio *bio;
435 struct bio_list list; 421 struct bio_list list;
436 sector_t block_lba = cmd->t_task_lba; 422 struct se_device *dev = cmd->se_dev;
437 sector_t sectors = sbc_get_write_same_sectors(cmd); 423 sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
424 sector_t sectors = target_to_linux_sector(dev,
425 sbc_get_write_same_sectors(cmd));
438 426
439 if (cmd->prot_op) { 427 if (cmd->prot_op) {
440 pr_err("WRITE_SAME: Protection information with IBLOCK" 428 pr_err("WRITE_SAME: Protection information with IBLOCK"
@@ -648,12 +636,12 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
648 enum dma_data_direction data_direction) 636 enum dma_data_direction data_direction)
649{ 637{
650 struct se_device *dev = cmd->se_dev; 638 struct se_device *dev = cmd->se_dev;
639 sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
651 struct iblock_req *ibr; 640 struct iblock_req *ibr;
652 struct bio *bio, *bio_start; 641 struct bio *bio, *bio_start;
653 struct bio_list list; 642 struct bio_list list;
654 struct scatterlist *sg; 643 struct scatterlist *sg;
655 u32 sg_num = sgl_nents; 644 u32 sg_num = sgl_nents;
656 sector_t block_lba;
657 unsigned bio_cnt; 645 unsigned bio_cnt;
658 int rw = 0; 646 int rw = 0;
659 int i; 647 int i;
@@ -679,24 +667,6 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
679 rw = READ; 667 rw = READ;
680 } 668 }
681 669
682 /*
683 * Convert the blocksize advertised to the initiator to the 512 byte
684 * units unconditionally used by the Linux block layer.
685 */
686 if (dev->dev_attrib.block_size == 4096)
687 block_lba = (cmd->t_task_lba << 3);
688 else if (dev->dev_attrib.block_size == 2048)
689 block_lba = (cmd->t_task_lba << 2);
690 else if (dev->dev_attrib.block_size == 1024)
691 block_lba = (cmd->t_task_lba << 1);
692 else if (dev->dev_attrib.block_size == 512)
693 block_lba = cmd->t_task_lba;
694 else {
695 pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
696 " %u\n", dev->dev_attrib.block_size);
697 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
698 }
699
700 ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL); 670 ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
701 if (!ibr) 671 if (!ibr)
702 goto fail; 672 goto fail;
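The iblock hunks above drop the open-coded 4096/2048/1024/512 shift ladder and route LBAs and sector counts through target_to_linux_sector() instead. The helper itself is not part of this hunk, so the userspace sketch below only illustrates the intended arithmetic (the function name and the shift loop are mine, not the kernel's): scale an LBA expressed in the exported block size down to the 512-byte sectors the Linux block layer works in.

/* gcc -std=c11 -o lba lba.c */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Scale an LBA given in the exported block size (assumed to be a power of
 * two >= 512) to the 512-byte sectors used by the Linux block layer. */
static uint64_t to_linux_sector(uint64_t lba, uint32_t block_size)
{
    unsigned int shift = 0;

    while ((512u << shift) < block_size)
        shift++;
    return lba << shift;
}

int main(void)
{
    assert(to_linux_sector(10, 4096) == 80);  /* 4K blocks: lba << 3 */
    assert(to_linux_sector(10, 2048) == 40);  /* 2K blocks: lba << 2 */
    assert(to_linux_sector(10, 512) == 10);   /* already 512-byte sectors */
    puts("conversion ok");
    return 0;
}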
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index dae0750c2032..db4412fe6b8a 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -141,7 +141,6 @@ void transport_dump_vpd_proto_id(struct t10_vpd *, unsigned char *, int);
141int transport_dump_vpd_assoc(struct t10_vpd *, unsigned char *, int); 141int transport_dump_vpd_assoc(struct t10_vpd *, unsigned char *, int);
142int transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int); 142int transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int);
143int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int); 143int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int);
144bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags);
145void transport_clear_lun_ref(struct se_lun *); 144void transport_clear_lun_ref(struct se_lun *);
146void transport_send_task_abort(struct se_cmd *); 145void transport_send_task_abort(struct se_cmd *);
147sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size); 146sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index fcdcb117c60d..82a663ba9800 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -68,23 +68,25 @@ void core_tmr_release_req(struct se_tmr_req *tmr)
68 68
69 if (dev) { 69 if (dev) {
70 spin_lock_irqsave(&dev->se_tmr_lock, flags); 70 spin_lock_irqsave(&dev->se_tmr_lock, flags);
71 list_del(&tmr->tmr_list); 71 list_del_init(&tmr->tmr_list);
72 spin_unlock_irqrestore(&dev->se_tmr_lock, flags); 72 spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
73 } 73 }
74 74
75 kfree(tmr); 75 kfree(tmr);
76} 76}
77 77
78static void core_tmr_handle_tas_abort( 78static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
79 struct se_node_acl *tmr_nacl,
80 struct se_cmd *cmd,
81 int tas)
82{ 79{
83 bool remove = true; 80 unsigned long flags;
81 bool remove = true, send_tas;
84 /* 82 /*
85 * TASK ABORTED status (TAS) bit support 83 * TASK ABORTED status (TAS) bit support
86 */ 84 */
87 if ((tmr_nacl && (tmr_nacl != cmd->se_sess->se_node_acl)) && tas) { 85 spin_lock_irqsave(&cmd->t_state_lock, flags);
86 send_tas = (cmd->transport_state & CMD_T_TAS);
87 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
88
89 if (send_tas) {
88 remove = false; 90 remove = false;
89 transport_send_task_abort(cmd); 91 transport_send_task_abort(cmd);
90 } 92 }
@@ -107,6 +109,46 @@ static int target_check_cdb_and_preempt(struct list_head *list,
107 return 1; 109 return 1;
108} 110}
109 111
112static bool __target_check_io_state(struct se_cmd *se_cmd,
113 struct se_session *tmr_sess, int tas)
114{
115 struct se_session *sess = se_cmd->se_sess;
116
117 assert_spin_locked(&sess->sess_cmd_lock);
118 WARN_ON_ONCE(!irqs_disabled());
119 /*
120 * If command already reached CMD_T_COMPLETE state within
121 * target_complete_cmd() or CMD_T_FABRIC_STOP due to shutdown,
122 * this se_cmd has been passed to fabric driver and will
123 * not be aborted.
124 *
125 * Otherwise, obtain a local se_cmd->cmd_kref now for TMR
126 * ABORT_TASK + LUN_RESET for CMD_T_ABORTED processing as
127 * long as se_cmd->cmd_kref is still active unless zero.
128 */
129 spin_lock(&se_cmd->t_state_lock);
130 if (se_cmd->transport_state & (CMD_T_COMPLETE | CMD_T_FABRIC_STOP)) {
131 pr_debug("Attempted to abort io tag: %llu already complete or"
132 " fabric stop, skipping\n", se_cmd->tag);
133 spin_unlock(&se_cmd->t_state_lock);
134 return false;
135 }
136 if (sess->sess_tearing_down || se_cmd->cmd_wait_set) {
137 pr_debug("Attempted to abort io tag: %llu already shutdown,"
138 " skipping\n", se_cmd->tag);
139 spin_unlock(&se_cmd->t_state_lock);
140 return false;
141 }
142 se_cmd->transport_state |= CMD_T_ABORTED;
143
144 if ((tmr_sess != se_cmd->se_sess) && tas)
145 se_cmd->transport_state |= CMD_T_TAS;
146
147 spin_unlock(&se_cmd->t_state_lock);
148
149 return kref_get_unless_zero(&se_cmd->cmd_kref);
150}
151
110void core_tmr_abort_task( 152void core_tmr_abort_task(
111 struct se_device *dev, 153 struct se_device *dev,
112 struct se_tmr_req *tmr, 154 struct se_tmr_req *tmr,
@@ -130,34 +172,22 @@ void core_tmr_abort_task(
130 if (tmr->ref_task_tag != ref_tag) 172 if (tmr->ref_task_tag != ref_tag)
131 continue; 173 continue;
132 174
133 if (!kref_get_unless_zero(&se_cmd->cmd_kref))
134 continue;
135
136 printk("ABORT_TASK: Found referenced %s task_tag: %llu\n", 175 printk("ABORT_TASK: Found referenced %s task_tag: %llu\n",
137 se_cmd->se_tfo->get_fabric_name(), ref_tag); 176 se_cmd->se_tfo->get_fabric_name(), ref_tag);
138 177
139 spin_lock(&se_cmd->t_state_lock); 178 if (!__target_check_io_state(se_cmd, se_sess, 0)) {
140 if (se_cmd->transport_state & CMD_T_COMPLETE) {
141 printk("ABORT_TASK: ref_tag: %llu already complete,"
142 " skipping\n", ref_tag);
143 spin_unlock(&se_cmd->t_state_lock);
144 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 179 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
145
146 target_put_sess_cmd(se_cmd); 180 target_put_sess_cmd(se_cmd);
147
148 goto out; 181 goto out;
149 } 182 }
150 se_cmd->transport_state |= CMD_T_ABORTED;
151 spin_unlock(&se_cmd->t_state_lock);
152
153 list_del_init(&se_cmd->se_cmd_list); 183 list_del_init(&se_cmd->se_cmd_list);
154 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 184 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
155 185
156 cancel_work_sync(&se_cmd->work); 186 cancel_work_sync(&se_cmd->work);
157 transport_wait_for_tasks(se_cmd); 187 transport_wait_for_tasks(se_cmd);
158 188
159 target_put_sess_cmd(se_cmd);
160 transport_cmd_finish_abort(se_cmd, true); 189 transport_cmd_finish_abort(se_cmd, true);
190 target_put_sess_cmd(se_cmd);
161 191
162 printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for" 192 printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
163 " ref_tag: %llu\n", ref_tag); 193 " ref_tag: %llu\n", ref_tag);
@@ -178,9 +208,11 @@ static void core_tmr_drain_tmr_list(
178 struct list_head *preempt_and_abort_list) 208 struct list_head *preempt_and_abort_list)
179{ 209{
180 LIST_HEAD(drain_tmr_list); 210 LIST_HEAD(drain_tmr_list);
211 struct se_session *sess;
181 struct se_tmr_req *tmr_p, *tmr_pp; 212 struct se_tmr_req *tmr_p, *tmr_pp;
182 struct se_cmd *cmd; 213 struct se_cmd *cmd;
183 unsigned long flags; 214 unsigned long flags;
215 bool rc;
184 /* 216 /*
185 * Release all pending and outgoing TMRs aside from the received 217 * Release all pending and outgoing TMRs aside from the received
186 * LUN_RESET tmr.. 218 * LUN_RESET tmr..
@@ -206,17 +238,39 @@ static void core_tmr_drain_tmr_list(
206 if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd)) 238 if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
207 continue; 239 continue;
208 240
241 sess = cmd->se_sess;
242 if (WARN_ON_ONCE(!sess))
243 continue;
244
245 spin_lock(&sess->sess_cmd_lock);
209 spin_lock(&cmd->t_state_lock); 246 spin_lock(&cmd->t_state_lock);
210 if (!(cmd->transport_state & CMD_T_ACTIVE)) { 247 if (!(cmd->transport_state & CMD_T_ACTIVE) ||
248 (cmd->transport_state & CMD_T_FABRIC_STOP)) {
211 spin_unlock(&cmd->t_state_lock); 249 spin_unlock(&cmd->t_state_lock);
250 spin_unlock(&sess->sess_cmd_lock);
212 continue; 251 continue;
213 } 252 }
214 if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) { 253 if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
215 spin_unlock(&cmd->t_state_lock); 254 spin_unlock(&cmd->t_state_lock);
255 spin_unlock(&sess->sess_cmd_lock);
216 continue; 256 continue;
217 } 257 }
258 if (sess->sess_tearing_down || cmd->cmd_wait_set) {
259 spin_unlock(&cmd->t_state_lock);
260 spin_unlock(&sess->sess_cmd_lock);
261 continue;
262 }
263 cmd->transport_state |= CMD_T_ABORTED;
218 spin_unlock(&cmd->t_state_lock); 264 spin_unlock(&cmd->t_state_lock);
219 265
266 rc = kref_get_unless_zero(&cmd->cmd_kref);
267 if (!rc) {
268 printk("LUN_RESET TMR: non-zero kref_get_unless_zero\n");
269 spin_unlock(&sess->sess_cmd_lock);
270 continue;
271 }
272 spin_unlock(&sess->sess_cmd_lock);
273
220 list_move_tail(&tmr_p->tmr_list, &drain_tmr_list); 274 list_move_tail(&tmr_p->tmr_list, &drain_tmr_list);
221 } 275 }
222 spin_unlock_irqrestore(&dev->se_tmr_lock, flags); 276 spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
@@ -230,20 +284,26 @@ static void core_tmr_drain_tmr_list(
230 (preempt_and_abort_list) ? "Preempt" : "", tmr_p, 284 (preempt_and_abort_list) ? "Preempt" : "", tmr_p,
231 tmr_p->function, tmr_p->response, cmd->t_state); 285 tmr_p->function, tmr_p->response, cmd->t_state);
232 286
287 cancel_work_sync(&cmd->work);
288 transport_wait_for_tasks(cmd);
289
233 transport_cmd_finish_abort(cmd, 1); 290 transport_cmd_finish_abort(cmd, 1);
291 target_put_sess_cmd(cmd);
234 } 292 }
235} 293}
236 294
237static void core_tmr_drain_state_list( 295static void core_tmr_drain_state_list(
238 struct se_device *dev, 296 struct se_device *dev,
239 struct se_cmd *prout_cmd, 297 struct se_cmd *prout_cmd,
240 struct se_node_acl *tmr_nacl, 298 struct se_session *tmr_sess,
241 int tas, 299 int tas,
242 struct list_head *preempt_and_abort_list) 300 struct list_head *preempt_and_abort_list)
243{ 301{
244 LIST_HEAD(drain_task_list); 302 LIST_HEAD(drain_task_list);
303 struct se_session *sess;
245 struct se_cmd *cmd, *next; 304 struct se_cmd *cmd, *next;
246 unsigned long flags; 305 unsigned long flags;
306 int rc;
247 307
248 /* 308 /*
249 * Complete outstanding commands with TASK_ABORTED SAM status. 309 * Complete outstanding commands with TASK_ABORTED SAM status.
@@ -282,6 +342,16 @@ static void core_tmr_drain_state_list(
282 if (prout_cmd == cmd) 342 if (prout_cmd == cmd)
283 continue; 343 continue;
284 344
345 sess = cmd->se_sess;
346 if (WARN_ON_ONCE(!sess))
347 continue;
348
349 spin_lock(&sess->sess_cmd_lock);
350 rc = __target_check_io_state(cmd, tmr_sess, tas);
351 spin_unlock(&sess->sess_cmd_lock);
352 if (!rc)
353 continue;
354
285 list_move_tail(&cmd->state_list, &drain_task_list); 355 list_move_tail(&cmd->state_list, &drain_task_list);
286 cmd->state_active = false; 356 cmd->state_active = false;
287 } 357 }
@@ -289,7 +359,7 @@ static void core_tmr_drain_state_list(
289 359
290 while (!list_empty(&drain_task_list)) { 360 while (!list_empty(&drain_task_list)) {
291 cmd = list_entry(drain_task_list.next, struct se_cmd, state_list); 361 cmd = list_entry(drain_task_list.next, struct se_cmd, state_list);
292 list_del(&cmd->state_list); 362 list_del_init(&cmd->state_list);
293 363
294 pr_debug("LUN_RESET: %s cmd: %p" 364 pr_debug("LUN_RESET: %s cmd: %p"
295 " ITT/CmdSN: 0x%08llx/0x%08x, i_state: %d, t_state: %d" 365 " ITT/CmdSN: 0x%08llx/0x%08x, i_state: %d, t_state: %d"
@@ -313,16 +383,11 @@ static void core_tmr_drain_state_list(
313 * loop above, but we do it down here given that 383 * loop above, but we do it down here given that
314 * cancel_work_sync may block. 384 * cancel_work_sync may block.
315 */ 385 */
316 if (cmd->t_state == TRANSPORT_COMPLETE) 386 cancel_work_sync(&cmd->work);
317 cancel_work_sync(&cmd->work); 387 transport_wait_for_tasks(cmd);
318
319 spin_lock_irqsave(&cmd->t_state_lock, flags);
320 target_stop_cmd(cmd, &flags);
321
322 cmd->transport_state |= CMD_T_ABORTED;
323 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
324 388
325 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas); 389 core_tmr_handle_tas_abort(cmd, tas);
390 target_put_sess_cmd(cmd);
326 } 391 }
327} 392}
328 393
@@ -334,6 +399,7 @@ int core_tmr_lun_reset(
334{ 399{
335 struct se_node_acl *tmr_nacl = NULL; 400 struct se_node_acl *tmr_nacl = NULL;
336 struct se_portal_group *tmr_tpg = NULL; 401 struct se_portal_group *tmr_tpg = NULL;
402 struct se_session *tmr_sess = NULL;
337 int tas; 403 int tas;
338 /* 404 /*
339 * TASK_ABORTED status bit, this is configurable via ConfigFS 405 * TASK_ABORTED status bit, this is configurable via ConfigFS
@@ -352,8 +418,9 @@ int core_tmr_lun_reset(
352 * or struct se_device passthrough.. 418 * or struct se_device passthrough..
353 */ 419 */
354 if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) { 420 if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
355 tmr_nacl = tmr->task_cmd->se_sess->se_node_acl; 421 tmr_sess = tmr->task_cmd->se_sess;
356 tmr_tpg = tmr->task_cmd->se_sess->se_tpg; 422 tmr_nacl = tmr_sess->se_node_acl;
423 tmr_tpg = tmr_sess->se_tpg;
357 if (tmr_nacl && tmr_tpg) { 424 if (tmr_nacl && tmr_tpg) {
358 pr_debug("LUN_RESET: TMR caller fabric: %s" 425 pr_debug("LUN_RESET: TMR caller fabric: %s"
359 " initiator port %s\n", 426 " initiator port %s\n",
@@ -366,7 +433,7 @@ int core_tmr_lun_reset(
366 dev->transport->name, tas); 433 dev->transport->name, tas);
367 434
368 core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list); 435 core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
369 core_tmr_drain_state_list(dev, prout_cmd, tmr_nacl, tas, 436 core_tmr_drain_state_list(dev, prout_cmd, tmr_sess, tas,
370 preempt_and_abort_list); 437 preempt_and_abort_list);
371 438
372 /* 439 /*
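The target_core_tmr.c changes above funnel every abort through __target_check_io_state(): a command is only marked CMD_T_ABORTED, and only moved onto the drain lists, if kref_get_unless_zero() succeeds while the session and t_state locks are held, so commands that already completed or are shutting down get skipped. A minimal userspace analogue of that take-a-reference-only-if-not-already-dying gate, built on C11 atomics (names are illustrative, not the kernel API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj { atomic_int refs; };

/* Succeeds only while at least one reference is still held elsewhere. */
static bool get_unless_zero(struct obj *o)
{
    int v = atomic_load(&o->refs);

    while (v != 0) {
        if (atomic_compare_exchange_weak(&o->refs, &v, v + 1))
            return true;   /* reference taken, safe to abort this object */
    }
    return false;          /* already on its way out, skip it */
}

int main(void)
{
    struct obj live = { .refs = 1 };
    struct obj dead = { .refs = 0 };

    printf("live: %d dead: %d\n", get_unless_zero(&live), get_unless_zero(&dead));
    return 0;
}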
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 9f3608e10f25..867bc6d0a68a 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -534,9 +534,6 @@ void transport_deregister_session(struct se_session *se_sess)
534} 534}
535EXPORT_SYMBOL(transport_deregister_session); 535EXPORT_SYMBOL(transport_deregister_session);
536 536
537/*
538 * Called with cmd->t_state_lock held.
539 */
540static void target_remove_from_state_list(struct se_cmd *cmd) 537static void target_remove_from_state_list(struct se_cmd *cmd)
541{ 538{
542 struct se_device *dev = cmd->se_dev; 539 struct se_device *dev = cmd->se_dev;
@@ -561,10 +558,6 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
561{ 558{
562 unsigned long flags; 559 unsigned long flags;
563 560
564 spin_lock_irqsave(&cmd->t_state_lock, flags);
565 if (write_pending)
566 cmd->t_state = TRANSPORT_WRITE_PENDING;
567
568 if (remove_from_lists) { 561 if (remove_from_lists) {
569 target_remove_from_state_list(cmd); 562 target_remove_from_state_list(cmd);
570 563
@@ -574,6 +567,10 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
574 cmd->se_lun = NULL; 567 cmd->se_lun = NULL;
575 } 568 }
576 569
570 spin_lock_irqsave(&cmd->t_state_lock, flags);
571 if (write_pending)
572 cmd->t_state = TRANSPORT_WRITE_PENDING;
573
577 /* 574 /*
578 * Determine if frontend context caller is requesting the stopping of 575 * Determine if frontend context caller is requesting the stopping of
579 * this command for frontend exceptions. 576 * this command for frontend exceptions.
@@ -627,6 +624,8 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
627 624
628void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) 625void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
629{ 626{
627 bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF);
628
630 if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) 629 if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
631 transport_lun_remove_cmd(cmd); 630 transport_lun_remove_cmd(cmd);
632 /* 631 /*
@@ -638,7 +637,7 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
638 637
639 if (transport_cmd_check_stop_to_fabric(cmd)) 638 if (transport_cmd_check_stop_to_fabric(cmd))
640 return; 639 return;
641 if (remove) 640 if (remove && ack_kref)
642 transport_put_cmd(cmd); 641 transport_put_cmd(cmd);
643} 642}
644 643
@@ -694,19 +693,10 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
694 } 693 }
695 694
696 /* 695 /*
697 * See if we are waiting to complete for an exception condition.
698 */
699 if (cmd->transport_state & CMD_T_REQUEST_STOP) {
700 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
701 complete(&cmd->task_stop_comp);
702 return;
703 }
704
705 /*
706 * Check for case where an explicit ABORT_TASK has been received 696 * Check for case where an explicit ABORT_TASK has been received
707 * and transport_wait_for_tasks() will be waiting for completion.. 697 * and transport_wait_for_tasks() will be waiting for completion..
708 */ 698 */
709 if (cmd->transport_state & CMD_T_ABORTED && 699 if (cmd->transport_state & CMD_T_ABORTED ||
710 cmd->transport_state & CMD_T_STOP) { 700 cmd->transport_state & CMD_T_STOP) {
711 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 701 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
712 complete_all(&cmd->t_transport_stop_comp); 702 complete_all(&cmd->t_transport_stop_comp);
@@ -721,10 +711,10 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
721 cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE); 711 cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
722 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 712 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
723 713
724 if (cmd->cpuid == -1) 714 if (cmd->se_cmd_flags & SCF_USE_CPUID)
725 queue_work(target_completion_wq, &cmd->work);
726 else
727 queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work); 715 queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
716 else
717 queue_work(target_completion_wq, &cmd->work);
728} 718}
729EXPORT_SYMBOL(target_complete_cmd); 719EXPORT_SYMBOL(target_complete_cmd);
730 720
@@ -1203,7 +1193,6 @@ void transport_init_se_cmd(
1203 INIT_LIST_HEAD(&cmd->state_list); 1193 INIT_LIST_HEAD(&cmd->state_list);
1204 init_completion(&cmd->t_transport_stop_comp); 1194 init_completion(&cmd->t_transport_stop_comp);
1205 init_completion(&cmd->cmd_wait_comp); 1195 init_completion(&cmd->cmd_wait_comp);
1206 init_completion(&cmd->task_stop_comp);
1207 spin_lock_init(&cmd->t_state_lock); 1196 spin_lock_init(&cmd->t_state_lock);
1208 kref_init(&cmd->cmd_kref); 1197 kref_init(&cmd->cmd_kref);
1209 cmd->transport_state = CMD_T_DEV_ACTIVE; 1198 cmd->transport_state = CMD_T_DEV_ACTIVE;
@@ -1437,6 +1426,12 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
1437 */ 1426 */
1438 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 1427 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
1439 data_length, data_dir, task_attr, sense); 1428 data_length, data_dir, task_attr, sense);
1429
1430 if (flags & TARGET_SCF_USE_CPUID)
1431 se_cmd->se_cmd_flags |= SCF_USE_CPUID;
1432 else
1433 se_cmd->cpuid = WORK_CPU_UNBOUND;
1434
1440 if (flags & TARGET_SCF_UNKNOWN_SIZE) 1435 if (flags & TARGET_SCF_UNKNOWN_SIZE)
1441 se_cmd->unknown_data_length = 1; 1436 se_cmd->unknown_data_length = 1;
1442 /* 1437 /*
@@ -1635,33 +1630,6 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
1635EXPORT_SYMBOL(target_submit_tmr); 1630EXPORT_SYMBOL(target_submit_tmr);
1636 1631
1637/* 1632/*
1638 * If the cmd is active, request it to be stopped and sleep until it
1639 * has completed.
1640 */
1641bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags)
1642 __releases(&cmd->t_state_lock)
1643 __acquires(&cmd->t_state_lock)
1644{
1645 bool was_active = false;
1646
1647 if (cmd->transport_state & CMD_T_BUSY) {
1648 cmd->transport_state |= CMD_T_REQUEST_STOP;
1649 spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
1650
1651 pr_debug("cmd %p waiting to complete\n", cmd);
1652 wait_for_completion(&cmd->task_stop_comp);
1653 pr_debug("cmd %p stopped successfully\n", cmd);
1654
1655 spin_lock_irqsave(&cmd->t_state_lock, *flags);
1656 cmd->transport_state &= ~CMD_T_REQUEST_STOP;
1657 cmd->transport_state &= ~CMD_T_BUSY;
1658 was_active = true;
1659 }
1660
1661 return was_active;
1662}
1663
1664/*
1665 * Handle SAM-esque emulation for generic transport request failures. 1633 * Handle SAM-esque emulation for generic transport request failures.
1666 */ 1634 */
1667void transport_generic_request_failure(struct se_cmd *cmd, 1635void transport_generic_request_failure(struct se_cmd *cmd,
@@ -1859,19 +1827,21 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
1859 return true; 1827 return true;
1860} 1828}
1861 1829
1830static int __transport_check_aborted_status(struct se_cmd *, int);
1831
1862void target_execute_cmd(struct se_cmd *cmd) 1832void target_execute_cmd(struct se_cmd *cmd)
1863{ 1833{
1864 /* 1834 /*
1865 * If the received CDB has aleady been aborted stop processing it here.
1866 */
1867 if (transport_check_aborted_status(cmd, 1))
1868 return;
1869
1870 /*
1871 * Determine if frontend context caller is requesting the stopping of 1835 * Determine if frontend context caller is requesting the stopping of
1872 * this command for frontend exceptions. 1836 * this command for frontend exceptions.
1837 *
1838 * If the received CDB has aleady been aborted stop processing it here.
1873 */ 1839 */
1874 spin_lock_irq(&cmd->t_state_lock); 1840 spin_lock_irq(&cmd->t_state_lock);
1841 if (__transport_check_aborted_status(cmd, 1)) {
1842 spin_unlock_irq(&cmd->t_state_lock);
1843 return;
1844 }
1875 if (cmd->transport_state & CMD_T_STOP) { 1845 if (cmd->transport_state & CMD_T_STOP) {
1876 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n", 1846 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
1877 __func__, __LINE__, cmd->tag); 1847 __func__, __LINE__, cmd->tag);
@@ -2222,20 +2192,14 @@ static inline void transport_free_pages(struct se_cmd *cmd)
2222} 2192}
2223 2193
2224/** 2194/**
2225 * transport_release_cmd - free a command 2195 * transport_put_cmd - release a reference to a command
2226 * @cmd: command to free 2196 * @cmd: command to release
2227 * 2197 *
2228 * This routine unconditionally frees a command, and reference counting 2198 * This routine releases our reference to the command and frees it if possible.
2229 * or list removal must be done in the caller.
2230 */ 2199 */
2231static int transport_release_cmd(struct se_cmd *cmd) 2200static int transport_put_cmd(struct se_cmd *cmd)
2232{ 2201{
2233 BUG_ON(!cmd->se_tfo); 2202 BUG_ON(!cmd->se_tfo);
2234
2235 if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
2236 core_tmr_release_req(cmd->se_tmr_req);
2237 if (cmd->t_task_cdb != cmd->__t_task_cdb)
2238 kfree(cmd->t_task_cdb);
2239 /* 2203 /*
2240 * If this cmd has been setup with target_get_sess_cmd(), drop 2204 * If this cmd has been setup with target_get_sess_cmd(), drop
2241 * the kref and call ->release_cmd() in kref callback. 2205 * the kref and call ->release_cmd() in kref callback.
@@ -2243,18 +2207,6 @@ static int transport_release_cmd(struct se_cmd *cmd)
2243 return target_put_sess_cmd(cmd); 2207 return target_put_sess_cmd(cmd);
2244} 2208}
2245 2209
2246/**
2247 * transport_put_cmd - release a reference to a command
2248 * @cmd: command to release
2249 *
2250 * This routine releases our reference to the command and frees it if possible.
2251 */
2252static int transport_put_cmd(struct se_cmd *cmd)
2253{
2254 transport_free_pages(cmd);
2255 return transport_release_cmd(cmd);
2256}
2257
2258void *transport_kmap_data_sg(struct se_cmd *cmd) 2210void *transport_kmap_data_sg(struct se_cmd *cmd)
2259{ 2211{
2260 struct scatterlist *sg = cmd->t_data_sg; 2212 struct scatterlist *sg = cmd->t_data_sg;
@@ -2450,34 +2402,58 @@ static void transport_write_pending_qf(struct se_cmd *cmd)
2450 } 2402 }
2451} 2403}
2452 2404
2453int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) 2405static bool
2406__transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *,
2407 unsigned long *flags);
2408
2409static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
2454{ 2410{
2455 unsigned long flags; 2411 unsigned long flags;
2412
2413 spin_lock_irqsave(&cmd->t_state_lock, flags);
2414 __transport_wait_for_tasks(cmd, true, aborted, tas, &flags);
2415 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2416}
2417
2418int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
2419{
2456 int ret = 0; 2420 int ret = 0;
2421 bool aborted = false, tas = false;
2457 2422
2458 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) { 2423 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
2459 if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) 2424 if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
2460 transport_wait_for_tasks(cmd); 2425 target_wait_free_cmd(cmd, &aborted, &tas);
2461 2426
2462 ret = transport_release_cmd(cmd); 2427 if (!aborted || tas)
2428 ret = transport_put_cmd(cmd);
2463 } else { 2429 } else {
2464 if (wait_for_tasks) 2430 if (wait_for_tasks)
2465 transport_wait_for_tasks(cmd); 2431 target_wait_free_cmd(cmd, &aborted, &tas);
2466 /* 2432 /*
2467 * Handle WRITE failure case where transport_generic_new_cmd() 2433 * Handle WRITE failure case where transport_generic_new_cmd()
2468 * has already added se_cmd to state_list, but fabric has 2434 * has already added se_cmd to state_list, but fabric has
2469 * failed command before I/O submission. 2435 * failed command before I/O submission.
2470 */ 2436 */
2471 if (cmd->state_active) { 2437 if (cmd->state_active)
2472 spin_lock_irqsave(&cmd->t_state_lock, flags);
2473 target_remove_from_state_list(cmd); 2438 target_remove_from_state_list(cmd);
2474 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2475 }
2476 2439
2477 if (cmd->se_lun) 2440 if (cmd->se_lun)
2478 transport_lun_remove_cmd(cmd); 2441 transport_lun_remove_cmd(cmd);
2479 2442
2480 ret = transport_put_cmd(cmd); 2443 if (!aborted || tas)
2444 ret = transport_put_cmd(cmd);
2445 }
2446 /*
2447 * If the task has been internally aborted due to TMR ABORT_TASK
2448 * or LUN_RESET, target_core_tmr.c is responsible for performing
2449 * the remaining calls to target_put_sess_cmd(), and not the
2450 * callers of this function.
2451 */
2452 if (aborted) {
2453 pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag);
2454 wait_for_completion(&cmd->cmd_wait_comp);
2455 cmd->se_tfo->release_cmd(cmd);
2456 ret = 1;
2481 } 2457 }
2482 return ret; 2458 return ret;
2483} 2459}
@@ -2517,26 +2493,46 @@ out:
2517} 2493}
2518EXPORT_SYMBOL(target_get_sess_cmd); 2494EXPORT_SYMBOL(target_get_sess_cmd);
2519 2495
2496static void target_free_cmd_mem(struct se_cmd *cmd)
2497{
2498 transport_free_pages(cmd);
2499
2500 if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
2501 core_tmr_release_req(cmd->se_tmr_req);
2502 if (cmd->t_task_cdb != cmd->__t_task_cdb)
2503 kfree(cmd->t_task_cdb);
2504}
2505
2520static void target_release_cmd_kref(struct kref *kref) 2506static void target_release_cmd_kref(struct kref *kref)
2521{ 2507{
2522 struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); 2508 struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
2523 struct se_session *se_sess = se_cmd->se_sess; 2509 struct se_session *se_sess = se_cmd->se_sess;
2524 unsigned long flags; 2510 unsigned long flags;
2511 bool fabric_stop;
2525 2512
2526 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2513 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2527 if (list_empty(&se_cmd->se_cmd_list)) { 2514 if (list_empty(&se_cmd->se_cmd_list)) {
2528 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2515 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2516 target_free_cmd_mem(se_cmd);
2529 se_cmd->se_tfo->release_cmd(se_cmd); 2517 se_cmd->se_tfo->release_cmd(se_cmd);
2530 return; 2518 return;
2531 } 2519 }
2532 if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) { 2520
2521 spin_lock(&se_cmd->t_state_lock);
2522 fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP);
2523 spin_unlock(&se_cmd->t_state_lock);
2524
2525 if (se_cmd->cmd_wait_set || fabric_stop) {
2526 list_del_init(&se_cmd->se_cmd_list);
2533 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2527 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2528 target_free_cmd_mem(se_cmd);
2534 complete(&se_cmd->cmd_wait_comp); 2529 complete(&se_cmd->cmd_wait_comp);
2535 return; 2530 return;
2536 } 2531 }
2537 list_del(&se_cmd->se_cmd_list); 2532 list_del_init(&se_cmd->se_cmd_list);
2538 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2533 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2539 2534
2535 target_free_cmd_mem(se_cmd);
2540 se_cmd->se_tfo->release_cmd(se_cmd); 2536 se_cmd->se_tfo->release_cmd(se_cmd);
2541} 2537}
2542 2538
@@ -2548,6 +2544,7 @@ int target_put_sess_cmd(struct se_cmd *se_cmd)
2548 struct se_session *se_sess = se_cmd->se_sess; 2544 struct se_session *se_sess = se_cmd->se_sess;
2549 2545
2550 if (!se_sess) { 2546 if (!se_sess) {
2547 target_free_cmd_mem(se_cmd);
2551 se_cmd->se_tfo->release_cmd(se_cmd); 2548 se_cmd->se_tfo->release_cmd(se_cmd);
2552 return 1; 2549 return 1;
2553 } 2550 }
@@ -2564,6 +2561,7 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
2564{ 2561{
2565 struct se_cmd *se_cmd; 2562 struct se_cmd *se_cmd;
2566 unsigned long flags; 2563 unsigned long flags;
2564 int rc;
2567 2565
2568 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2566 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2569 if (se_sess->sess_tearing_down) { 2567 if (se_sess->sess_tearing_down) {
@@ -2573,8 +2571,15 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
2573 se_sess->sess_tearing_down = 1; 2571 se_sess->sess_tearing_down = 1;
2574 list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list); 2572 list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
2575 2573
2576 list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) 2574 list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) {
2577 se_cmd->cmd_wait_set = 1; 2575 rc = kref_get_unless_zero(&se_cmd->cmd_kref);
2576 if (rc) {
2577 se_cmd->cmd_wait_set = 1;
2578 spin_lock(&se_cmd->t_state_lock);
2579 se_cmd->transport_state |= CMD_T_FABRIC_STOP;
2580 spin_unlock(&se_cmd->t_state_lock);
2581 }
2582 }
2578 2583
2579 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2584 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2580} 2585}
@@ -2587,15 +2592,25 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
2587{ 2592{
2588 struct se_cmd *se_cmd, *tmp_cmd; 2593 struct se_cmd *se_cmd, *tmp_cmd;
2589 unsigned long flags; 2594 unsigned long flags;
2595 bool tas;
2590 2596
2591 list_for_each_entry_safe(se_cmd, tmp_cmd, 2597 list_for_each_entry_safe(se_cmd, tmp_cmd,
2592 &se_sess->sess_wait_list, se_cmd_list) { 2598 &se_sess->sess_wait_list, se_cmd_list) {
2593 list_del(&se_cmd->se_cmd_list); 2599 list_del_init(&se_cmd->se_cmd_list);
2594 2600
2595 pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:" 2601 pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
2596 " %d\n", se_cmd, se_cmd->t_state, 2602 " %d\n", se_cmd, se_cmd->t_state,
2597 se_cmd->se_tfo->get_cmd_state(se_cmd)); 2603 se_cmd->se_tfo->get_cmd_state(se_cmd));
2598 2604
2605 spin_lock_irqsave(&se_cmd->t_state_lock, flags);
2606 tas = (se_cmd->transport_state & CMD_T_TAS);
2607 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
2608
2609 if (!target_put_sess_cmd(se_cmd)) {
2610 if (tas)
2611 target_put_sess_cmd(se_cmd);
2612 }
2613
2599 wait_for_completion(&se_cmd->cmd_wait_comp); 2614 wait_for_completion(&se_cmd->cmd_wait_comp);
2600 pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d" 2615 pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
2601 " fabric state: %d\n", se_cmd, se_cmd->t_state, 2616 " fabric state: %d\n", se_cmd, se_cmd->t_state,
@@ -2617,53 +2632,75 @@ void transport_clear_lun_ref(struct se_lun *lun)
2617 wait_for_completion(&lun->lun_ref_comp); 2632 wait_for_completion(&lun->lun_ref_comp);
2618} 2633}
2619 2634
2620/** 2635static bool
2621 * transport_wait_for_tasks - wait for completion to occur 2636__transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
2622 * @cmd: command to wait 2637 bool *aborted, bool *tas, unsigned long *flags)
2623 * 2638 __releases(&cmd->t_state_lock)
2624 * Called from frontend fabric context to wait for storage engine 2639 __acquires(&cmd->t_state_lock)
2625 * to pause and/or release frontend generated struct se_cmd.
2626 */
2627bool transport_wait_for_tasks(struct se_cmd *cmd)
2628{ 2640{
2629 unsigned long flags;
2630 2641
2631 spin_lock_irqsave(&cmd->t_state_lock, flags); 2642 assert_spin_locked(&cmd->t_state_lock);
2643 WARN_ON_ONCE(!irqs_disabled());
2644
2645 if (fabric_stop)
2646 cmd->transport_state |= CMD_T_FABRIC_STOP;
2647
2648 if (cmd->transport_state & CMD_T_ABORTED)
2649 *aborted = true;
2650
2651 if (cmd->transport_state & CMD_T_TAS)
2652 *tas = true;
2653
2632 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && 2654 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
2633 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { 2655 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
2634 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2635 return false; 2656 return false;
2636 }
2637 2657
2638 if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && 2658 if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
2639 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { 2659 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
2640 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2641 return false; 2660 return false;
2642 }
2643 2661
2644 if (!(cmd->transport_state & CMD_T_ACTIVE)) { 2662 if (!(cmd->transport_state & CMD_T_ACTIVE))
2645 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2663 return false;
2664
2665 if (fabric_stop && *aborted)
2646 return false; 2666 return false;
2647 }
2648 2667
2649 cmd->transport_state |= CMD_T_STOP; 2668 cmd->transport_state |= CMD_T_STOP;
2650 2669
2651 pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d, t_state: %d, CMD_T_STOP\n", 2670 pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d,"
2652 cmd, cmd->tag, cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); 2671 " t_state: %d, CMD_T_STOP\n", cmd, cmd->tag,
2672 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
2653 2673
2654 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2674 spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
2655 2675
2656 wait_for_completion(&cmd->t_transport_stop_comp); 2676 wait_for_completion(&cmd->t_transport_stop_comp);
2657 2677
2658 spin_lock_irqsave(&cmd->t_state_lock, flags); 2678 spin_lock_irqsave(&cmd->t_state_lock, *flags);
2659 cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP); 2679 cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);
2660 2680
2661 pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->t_transport_stop_comp) for ITT: 0x%08llx\n", 2681 pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->"
2662 cmd->tag); 2682 "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag);
2663 2683
2684 return true;
2685}
2686
2687/**
2688 * transport_wait_for_tasks - wait for completion to occur
2689 * @cmd: command to wait
2690 *
2691 * Called from frontend fabric context to wait for storage engine
2692 * to pause and/or release frontend generated struct se_cmd.
2693 */
2694bool transport_wait_for_tasks(struct se_cmd *cmd)
2695{
2696 unsigned long flags;
2697 bool ret, aborted = false, tas = false;
2698
2699 spin_lock_irqsave(&cmd->t_state_lock, flags);
2700 ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags);
2664 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2701 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2665 2702
2666 return true; 2703 return ret;
2667} 2704}
2668EXPORT_SYMBOL(transport_wait_for_tasks); 2705EXPORT_SYMBOL(transport_wait_for_tasks);
2669 2706
@@ -2845,28 +2882,49 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd,
2845} 2882}
2846EXPORT_SYMBOL(transport_send_check_condition_and_sense); 2883EXPORT_SYMBOL(transport_send_check_condition_and_sense);
2847 2884
2848int transport_check_aborted_status(struct se_cmd *cmd, int send_status) 2885static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
2886 __releases(&cmd->t_state_lock)
2887 __acquires(&cmd->t_state_lock)
2849{ 2888{
2889 assert_spin_locked(&cmd->t_state_lock);
2890 WARN_ON_ONCE(!irqs_disabled());
2891
2850 if (!(cmd->transport_state & CMD_T_ABORTED)) 2892 if (!(cmd->transport_state & CMD_T_ABORTED))
2851 return 0; 2893 return 0;
2852
2853 /* 2894 /*
2854 * If cmd has been aborted but either no status is to be sent or it has 2895 * If cmd has been aborted but either no status is to be sent or it has
2855 * already been sent, just return 2896 * already been sent, just return
2856 */ 2897 */
2857 if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) 2898 if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) {
2899 if (send_status)
2900 cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
2858 return 1; 2901 return 1;
2902 }
2859 2903
2860 pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08llx\n", 2904 pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB:"
2861 cmd->t_task_cdb[0], cmd->tag); 2905 " 0x%02x ITT: 0x%08llx\n", cmd->t_task_cdb[0], cmd->tag);
2862 2906
2863 cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS; 2907 cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
2864 cmd->scsi_status = SAM_STAT_TASK_ABORTED; 2908 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
2865 trace_target_cmd_complete(cmd); 2909 trace_target_cmd_complete(cmd);
2910
2911 spin_unlock_irq(&cmd->t_state_lock);
2866 cmd->se_tfo->queue_status(cmd); 2912 cmd->se_tfo->queue_status(cmd);
2913 spin_lock_irq(&cmd->t_state_lock);
2867 2914
2868 return 1; 2915 return 1;
2869} 2916}
2917
2918int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
2919{
2920 int ret;
2921
2922 spin_lock_irq(&cmd->t_state_lock);
2923 ret = __transport_check_aborted_status(cmd, send_status);
2924 spin_unlock_irq(&cmd->t_state_lock);
2925
2926 return ret;
2927}
2870EXPORT_SYMBOL(transport_check_aborted_status); 2928EXPORT_SYMBOL(transport_check_aborted_status);
2871 2929
2872void transport_send_task_abort(struct se_cmd *cmd) 2930void transport_send_task_abort(struct se_cmd *cmd)
@@ -2888,11 +2946,17 @@ void transport_send_task_abort(struct se_cmd *cmd)
2888 */ 2946 */
2889 if (cmd->data_direction == DMA_TO_DEVICE) { 2947 if (cmd->data_direction == DMA_TO_DEVICE) {
2890 if (cmd->se_tfo->write_pending_status(cmd) != 0) { 2948 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
2891 cmd->transport_state |= CMD_T_ABORTED; 2949 spin_lock_irqsave(&cmd->t_state_lock, flags);
2950 if (cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS) {
2951 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2952 goto send_abort;
2953 }
2892 cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS; 2954 cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
2955 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2893 return; 2956 return;
2894 } 2957 }
2895 } 2958 }
2959send_abort:
2896 cmd->scsi_status = SAM_STAT_TASK_ABORTED; 2960 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
2897 2961
2898 transport_lun_remove_cmd(cmd); 2962 transport_lun_remove_cmd(cmd);
@@ -2909,8 +2973,17 @@ static void target_tmr_work(struct work_struct *work)
2909 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 2973 struct se_cmd *cmd = container_of(work, struct se_cmd, work);
2910 struct se_device *dev = cmd->se_dev; 2974 struct se_device *dev = cmd->se_dev;
2911 struct se_tmr_req *tmr = cmd->se_tmr_req; 2975 struct se_tmr_req *tmr = cmd->se_tmr_req;
2976 unsigned long flags;
2912 int ret; 2977 int ret;
2913 2978
2979 spin_lock_irqsave(&cmd->t_state_lock, flags);
2980 if (cmd->transport_state & CMD_T_ABORTED) {
2981 tmr->response = TMR_FUNCTION_REJECTED;
2982 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2983 goto check_stop;
2984 }
2985 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2986
2914 switch (tmr->function) { 2987 switch (tmr->function) {
2915 case TMR_ABORT_TASK: 2988 case TMR_ABORT_TASK:
2916 core_tmr_abort_task(dev, tmr, cmd->se_sess); 2989 core_tmr_abort_task(dev, tmr, cmd->se_sess);
@@ -2943,9 +3016,17 @@ static void target_tmr_work(struct work_struct *work)
2943 break; 3016 break;
2944 } 3017 }
2945 3018
3019 spin_lock_irqsave(&cmd->t_state_lock, flags);
3020 if (cmd->transport_state & CMD_T_ABORTED) {
3021 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3022 goto check_stop;
3023 }
2946 cmd->t_state = TRANSPORT_ISTATE_PROCESSING; 3024 cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
3025 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3026
2947 cmd->se_tfo->queue_tm_rsp(cmd); 3027 cmd->se_tfo->queue_tm_rsp(cmd);
2948 3028
3029check_stop:
2949 transport_cmd_check_stop_to_fabric(cmd); 3030 transport_cmd_check_stop_to_fabric(cmd);
2950} 3031}
2951 3032
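One pattern worth noting in the transport.c hunks: __transport_check_aborted_status() now runs with t_state_lock held, so the code drops the lock around the ->queue_status() fabric callback (which may sleep) and retakes it afterwards. The snippet below is a userspace caricature of that drop-the-lock-around-a-sleeping-callback shape, not the kernel code; a real caller would also have to revalidate state after reacquiring the lock. Build with -pthread.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for a fabric callback that is allowed to sleep. */
static void queue_status(void)
{
    puts("status queued (may sleep)");
}

/* Called with state_lock held; returns with it held again. */
static void check_aborted_locked(void)
{
    pthread_mutex_unlock(&state_lock);
    queue_status();
    pthread_mutex_lock(&state_lock);
}

int main(void)
{
    pthread_mutex_lock(&state_lock);
    check_aborted_locked();
    pthread_mutex_unlock(&state_lock);
    return 0;
}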
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index dd600e5ead71..94f5154ac788 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -903,7 +903,7 @@ static int tcmu_configure_device(struct se_device *dev)
903 info->version = __stringify(TCMU_MAILBOX_VERSION); 903 info->version = __stringify(TCMU_MAILBOX_VERSION);
904 904
905 info->mem[0].name = "tcm-user command & data buffer"; 905 info->mem[0].name = "tcm-user command & data buffer";
906 info->mem[0].addr = (phys_addr_t) udev->mb_addr; 906 info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr;
907 info->mem[0].size = TCMU_RING_SIZE; 907 info->mem[0].size = TCMU_RING_SIZE;
908 info->mem[0].memtype = UIO_MEM_VIRTUAL; 908 info->mem[0].memtype = UIO_MEM_VIRTUAL;
909 909
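The one-line tcmu change above adds a (uintptr_t) step before widening the virtual address into phys_addr_t. On 32-bit configurations phys_addr_t can be 64-bit, and casting a pointer directly to an integer of a different width is what the compiler's pointer-to-int-cast warning complains about; going through uintptr_t keeps the pointer-to-integer step exactly pointer-sized, and the widening becomes an ordinary integer conversion. That rationale is my reading of the change, not spelled out in the hunk. Userspace equivalent:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int x = 0;
    void *p = &x;

    /* uintptr_t is pointer-sized by definition, so the pointer-to-integer
     * conversion never changes width; only the final integer-to-integer
     * widening does. */
    uint64_t addr = (uint64_t)(uintptr_t)p;

    printf("%llu\n", (unsigned long long)addr);
    return 0;
}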
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 8cc4ac64a91c..7c92c09be213 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -195,7 +195,7 @@ config IMX_THERMAL
195 passive trip is crossed. 195 passive trip is crossed.
196 196
197config SPEAR_THERMAL 197config SPEAR_THERMAL
198 bool "SPEAr thermal sensor driver" 198 tristate "SPEAr thermal sensor driver"
199 depends on PLAT_SPEAR || COMPILE_TEST 199 depends on PLAT_SPEAR || COMPILE_TEST
200 depends on OF 200 depends on OF
201 help 201 help
@@ -237,8 +237,8 @@ config DOVE_THERMAL
237 framework. 237 framework.
238 238
239config DB8500_THERMAL 239config DB8500_THERMAL
240 bool "DB8500 thermal management" 240 tristate "DB8500 thermal management"
241 depends on ARCH_U8500 241 depends on MFD_DB8500_PRCMU
242 default y 242 default y
243 help 243 help
244 Adds DB8500 thermal management implementation according to the thermal 244 Adds DB8500 thermal management implementation according to the thermal
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index e3fbc5a5d88f..6ceac4f2d4b2 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -377,26 +377,28 @@ static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_device,
377 * get_load() - get load for a cpu since last updated 377 * get_load() - get load for a cpu since last updated
378 * @cpufreq_device: &struct cpufreq_cooling_device for this cpu 378 * @cpufreq_device: &struct cpufreq_cooling_device for this cpu
379 * @cpu: cpu number 379 * @cpu: cpu number
380 * @cpu_idx: index of the cpu in cpufreq_device->allowed_cpus
380 * 381 *
381 * Return: The average load of cpu @cpu in percentage since this 382 * Return: The average load of cpu @cpu in percentage since this
382 * function was last called. 383 * function was last called.
383 */ 384 */
384static u32 get_load(struct cpufreq_cooling_device *cpufreq_device, int cpu) 385static u32 get_load(struct cpufreq_cooling_device *cpufreq_device, int cpu,
386 int cpu_idx)
385{ 387{
386 u32 load; 388 u32 load;
387 u64 now, now_idle, delta_time, delta_idle; 389 u64 now, now_idle, delta_time, delta_idle;
388 390
389 now_idle = get_cpu_idle_time(cpu, &now, 0); 391 now_idle = get_cpu_idle_time(cpu, &now, 0);
390 delta_idle = now_idle - cpufreq_device->time_in_idle[cpu]; 392 delta_idle = now_idle - cpufreq_device->time_in_idle[cpu_idx];
391 delta_time = now - cpufreq_device->time_in_idle_timestamp[cpu]; 393 delta_time = now - cpufreq_device->time_in_idle_timestamp[cpu_idx];
392 394
393 if (delta_time <= delta_idle) 395 if (delta_time <= delta_idle)
394 load = 0; 396 load = 0;
395 else 397 else
396 load = div64_u64(100 * (delta_time - delta_idle), delta_time); 398 load = div64_u64(100 * (delta_time - delta_idle), delta_time);
397 399
398 cpufreq_device->time_in_idle[cpu] = now_idle; 400 cpufreq_device->time_in_idle[cpu_idx] = now_idle;
399 cpufreq_device->time_in_idle_timestamp[cpu] = now; 401 cpufreq_device->time_in_idle_timestamp[cpu_idx] = now;
400 402
401 return load; 403 return load;
402} 404}
@@ -598,7 +600,7 @@ static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev,
598 u32 load; 600 u32 load;
599 601
600 if (cpu_online(cpu)) 602 if (cpu_online(cpu))
601 load = get_load(cpufreq_device, cpu); 603 load = get_load(cpufreq_device, cpu, i);
602 else 604 else
603 load = 0; 605 load = 0;
604 606
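The cpu_cooling.c hunk above is an indexing fix: time_in_idle[] and time_in_idle_timestamp[] are allocated per cooling device and sized by the number of CPUs it covers, so they must be indexed by the CPU's position in allowed_cpus (cpu_idx), not by the global CPU number. A hypothetical userspace reduction of the bug:

#include <stdio.h>

#define NCPUS_IN_POLICY 2

int main(void)
{
    /* the cooling device covers global CPUs 6 and 7 */
    int policy_cpus[NCPUS_IN_POLICY] = { 6, 7 };
    unsigned long long time_in_idle[NCPUS_IN_POLICY] = { 100, 200 };

    for (int i = 0; i < NCPUS_IN_POLICY; i++) {
        int cpu = policy_cpus[i];

        /* correct: index by position i; indexing by cpu (6 or 7) would
         * read past the end of this two-entry array */
        printf("cpu %d idle=%llu\n", cpu, time_in_idle[i]);
    }
    return 0;
}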
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index be4eedcb839a..9043f8f91852 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -475,14 +475,10 @@ thermal_zone_of_sensor_register(struct device *dev, int sensor_id, void *data,
475 475
476 sensor_np = of_node_get(dev->of_node); 476 sensor_np = of_node_get(dev->of_node);
477 477
478 for_each_child_of_node(np, child) { 478 for_each_available_child_of_node(np, child) {
479 struct of_phandle_args sensor_specs; 479 struct of_phandle_args sensor_specs;
480 int ret, id; 480 int ret, id;
481 481
482 /* Check whether child is enabled or not */
483 if (!of_device_is_available(child))
484 continue;
485
486 /* For now, thermal framework supports only 1 sensor per zone */ 482 /* For now, thermal framework supports only 1 sensor per zone */
487 ret = of_parse_phandle_with_args(child, "thermal-sensors", 483 ret = of_parse_phandle_with_args(child, "thermal-sensors",
488 "#thermal-sensor-cells", 484 "#thermal-sensor-cells",
@@ -881,16 +877,12 @@ int __init of_parse_thermal_zones(void)
881 return 0; /* Run successfully on systems without thermal DT */ 877 return 0; /* Run successfully on systems without thermal DT */
882 } 878 }
883 879
884 for_each_child_of_node(np, child) { 880 for_each_available_child_of_node(np, child) {
885 struct thermal_zone_device *zone; 881 struct thermal_zone_device *zone;
886 struct thermal_zone_params *tzp; 882 struct thermal_zone_params *tzp;
887 int i, mask = 0; 883 int i, mask = 0;
888 u32 prop; 884 u32 prop;
889 885
890 /* Check whether child is enabled or not */
891 if (!of_device_is_available(child))
892 continue;
893
894 tz = thermal_of_build_thermal_zone(child); 886 tz = thermal_of_build_thermal_zone(child);
895 if (IS_ERR(tz)) { 887 if (IS_ERR(tz)) {
896 pr_err("failed to build thermal zone %s: %ld\n", 888 pr_err("failed to build thermal zone %s: %ld\n",
@@ -968,13 +960,9 @@ void of_thermal_destroy_zones(void)
968 return; 960 return;
969 } 961 }
970 962
971 for_each_child_of_node(np, child) { 963 for_each_available_child_of_node(np, child) {
972 struct thermal_zone_device *zone; 964 struct thermal_zone_device *zone;
973 965
974 /* Check whether child is enabled or not */
975 if (!of_device_is_available(child))
976 continue;
977
978 zone = thermal_zone_get_zone_by_name(child->name); 966 zone = thermal_zone_get_zone_by_name(child->name);
979 if (IS_ERR(zone)) 967 if (IS_ERR(zone))
980 continue; 968 continue;
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 44b9c485157d..0e735acea33a 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -23,6 +23,7 @@
23#include <linux/interrupt.h> 23#include <linux/interrupt.h>
24#include <linux/io.h> 24#include <linux/io.h>
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/of_device.h>
26#include <linux/platform_device.h> 27#include <linux/platform_device.h>
27#include <linux/pm_runtime.h> 28#include <linux/pm_runtime.h>
28#include <linux/reboot.h> 29#include <linux/reboot.h>
@@ -75,8 +76,10 @@ struct rcar_thermal_priv {
75#define rcar_has_irq_support(priv) ((priv)->common->base) 76#define rcar_has_irq_support(priv) ((priv)->common->base)
76#define rcar_id_to_shift(priv) ((priv)->id * 8) 77#define rcar_id_to_shift(priv) ((priv)->id * 8)
77 78
79#define USE_OF_THERMAL 1
78static const struct of_device_id rcar_thermal_dt_ids[] = { 80static const struct of_device_id rcar_thermal_dt_ids[] = {
79 { .compatible = "renesas,rcar-thermal", }, 81 { .compatible = "renesas,rcar-thermal", },
82 { .compatible = "renesas,rcar-gen2-thermal", .data = (void *)USE_OF_THERMAL },
80 {}, 83 {},
81}; 84};
82MODULE_DEVICE_TABLE(of, rcar_thermal_dt_ids); 85MODULE_DEVICE_TABLE(of, rcar_thermal_dt_ids);
@@ -200,9 +203,9 @@ err_out_unlock:
200 return ret; 203 return ret;
201} 204}
202 205
203static int rcar_thermal_get_temp(struct thermal_zone_device *zone, int *temp) 206static int rcar_thermal_get_current_temp(struct rcar_thermal_priv *priv,
207 int *temp)
204{ 208{
205 struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
206 int tmp; 209 int tmp;
207 int ret; 210 int ret;
208 211
@@ -226,6 +229,20 @@ static int rcar_thermal_get_temp(struct thermal_zone_device *zone, int *temp)
226 return 0; 229 return 0;
227} 230}
228 231
232static int rcar_thermal_of_get_temp(void *data, int *temp)
233{
234 struct rcar_thermal_priv *priv = data;
235
236 return rcar_thermal_get_current_temp(priv, temp);
237}
238
239static int rcar_thermal_get_temp(struct thermal_zone_device *zone, int *temp)
240{
241 struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
242
243 return rcar_thermal_get_current_temp(priv, temp);
244}
245
229static int rcar_thermal_get_trip_type(struct thermal_zone_device *zone, 246static int rcar_thermal_get_trip_type(struct thermal_zone_device *zone,
230 int trip, enum thermal_trip_type *type) 247 int trip, enum thermal_trip_type *type)
231{ 248{
@@ -282,6 +299,10 @@ static int rcar_thermal_notify(struct thermal_zone_device *zone,
282 return 0; 299 return 0;
283} 300}
284 301
302static const struct thermal_zone_of_device_ops rcar_thermal_zone_of_ops = {
303 .get_temp = rcar_thermal_of_get_temp,
304};
305
285static struct thermal_zone_device_ops rcar_thermal_zone_ops = { 306static struct thermal_zone_device_ops rcar_thermal_zone_ops = {
286 .get_temp = rcar_thermal_get_temp, 307 .get_temp = rcar_thermal_get_temp,
287 .get_trip_type = rcar_thermal_get_trip_type, 308 .get_trip_type = rcar_thermal_get_trip_type,
@@ -318,14 +339,20 @@ static void rcar_thermal_work(struct work_struct *work)
318 339
319 priv = container_of(work, struct rcar_thermal_priv, work.work); 340 priv = container_of(work, struct rcar_thermal_priv, work.work);
320 341
321 rcar_thermal_get_temp(priv->zone, &cctemp); 342 ret = rcar_thermal_get_current_temp(priv, &cctemp);
343 if (ret < 0)
344 return;
345
322 ret = rcar_thermal_update_temp(priv); 346 ret = rcar_thermal_update_temp(priv);
323 if (ret < 0) 347 if (ret < 0)
324 return; 348 return;
325 349
326 rcar_thermal_irq_enable(priv); 350 rcar_thermal_irq_enable(priv);
327 351
328 rcar_thermal_get_temp(priv->zone, &nctemp); 352 ret = rcar_thermal_get_current_temp(priv, &nctemp);
353 if (ret < 0)
354 return;
355
329 if (nctemp != cctemp) 356 if (nctemp != cctemp)
330 thermal_zone_device_update(priv->zone); 357 thermal_zone_device_update(priv->zone);
331} 358}
@@ -403,6 +430,8 @@ static int rcar_thermal_probe(struct platform_device *pdev)
403 struct rcar_thermal_priv *priv; 430 struct rcar_thermal_priv *priv;
404 struct device *dev = &pdev->dev; 431 struct device *dev = &pdev->dev;
405 struct resource *res, *irq; 432 struct resource *res, *irq;
433 const struct of_device_id *of_id = of_match_device(rcar_thermal_dt_ids, dev);
434 unsigned long of_data = (unsigned long)of_id->data;
406 int mres = 0; 435 int mres = 0;
407 int i; 436 int i;
408 int ret = -ENODEV; 437 int ret = -ENODEV;
@@ -463,7 +492,13 @@ static int rcar_thermal_probe(struct platform_device *pdev)
463 if (ret < 0) 492 if (ret < 0)
464 goto error_unregister; 493 goto error_unregister;
465 494
466 priv->zone = thermal_zone_device_register("rcar_thermal", 495 if (of_data == USE_OF_THERMAL)
496 priv->zone = thermal_zone_of_sensor_register(
497 dev, i, priv,
498 &rcar_thermal_zone_of_ops);
499 else
500 priv->zone = thermal_zone_device_register(
501 "rcar_thermal",
467 1, 0, priv, 502 1, 0, priv,
468 &rcar_thermal_zone_ops, NULL, 0, 503 &rcar_thermal_zone_ops, NULL, 0,
469 idle); 504 idle);
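In the rcar_thermal.c hunks, per-compatible behaviour is keyed off of_device_id::data: "renesas,rcar-gen2-thermal" carries USE_OF_THERMAL and registers through thermal_zone_of_sensor_register(), while the legacy compatible keeps thermal_zone_device_register(). Below is a plain-C sketch of that match-table-carries-driver-data pattern; the structure and lookup are illustrative, not the OF core implementation.

#include <stdio.h>
#include <string.h>

#define USE_OF_THERMAL 1UL

struct match_id {
    const char *compatible;
    unsigned long data;        /* per-compatible driver data */
};

static const struct match_id ids[] = {
    { "renesas,rcar-thermal",      0 },
    { "renesas,rcar-gen2-thermal", USE_OF_THERMAL },
    { NULL, 0 },
};

static const struct match_id *match(const char *compat)
{
    for (const struct match_id *id = ids; id->compatible; id++)
        if (!strcmp(id->compatible, compat))
            return id;
    return NULL;
}

int main(void)
{
    const struct match_id *id = match("renesas,rcar-gen2-thermal");

    printf("use OF thermal registration: %s\n",
           id && id->data == USE_OF_THERMAL ? "yes" : "no");
    return 0;
}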
diff --git a/drivers/thermal/spear_thermal.c b/drivers/thermal/spear_thermal.c
index 534dd9136662..81b35aace9de 100644
--- a/drivers/thermal/spear_thermal.c
+++ b/drivers/thermal/spear_thermal.c
@@ -54,8 +54,7 @@ static struct thermal_zone_device_ops ops = {
54 .get_temp = thermal_get_temp, 54 .get_temp = thermal_get_temp,
55}; 55};
56 56
57#ifdef CONFIG_PM 57static int __maybe_unused spear_thermal_suspend(struct device *dev)
58static int spear_thermal_suspend(struct device *dev)
59{ 58{
60 struct platform_device *pdev = to_platform_device(dev); 59 struct platform_device *pdev = to_platform_device(dev);
61 struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev); 60 struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev);
@@ -72,7 +71,7 @@ static int spear_thermal_suspend(struct device *dev)
72 return 0; 71 return 0;
73} 72}
74 73
75static int spear_thermal_resume(struct device *dev) 74static int __maybe_unused spear_thermal_resume(struct device *dev)
76{ 75{
77 struct platform_device *pdev = to_platform_device(dev); 76 struct platform_device *pdev = to_platform_device(dev);
78 struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev); 77 struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev);
@@ -94,7 +93,6 @@ static int spear_thermal_resume(struct device *dev)
94 93
95 return 0; 94 return 0;
96} 95}
97#endif
98 96
99static SIMPLE_DEV_PM_OPS(spear_thermal_pm_ops, spear_thermal_suspend, 97static SIMPLE_DEV_PM_OPS(spear_thermal_pm_ops, spear_thermal_suspend,
100 spear_thermal_resume); 98 spear_thermal_resume);
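The spear_thermal.c hunk trades the #ifdef CONFIG_PM guards for __maybe_unused on the suspend/resume callbacks: the functions are always compiled and type-checked, and the attribute keeps the build quiet when SIMPLE_DEV_PM_OPS ends up not referencing them (e.g. with PM sleep support disabled). A minimal userspace sketch of the same idiom, assuming a GCC/Clang-style attribute:

#include <stdio.h>

#define __maybe_unused __attribute__((unused))

/* Compiled in every configuration, but no "defined but not used" warning
 * when nothing references it. */
static int __maybe_unused my_suspend(void)
{
    return 0;
}

int main(void)
{
    puts("builds cleanly whether or not my_suspend() is referenced");
    return 0;
}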
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index d9a5fc28fef4..b280abaad91b 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -269,16 +269,13 @@ static void n_tty_check_throttle(struct tty_struct *tty)
269 269
270static void n_tty_check_unthrottle(struct tty_struct *tty) 270static void n_tty_check_unthrottle(struct tty_struct *tty)
271{ 271{
272 if (tty->driver->type == TTY_DRIVER_TYPE_PTY && 272 if (tty->driver->type == TTY_DRIVER_TYPE_PTY) {
273 tty->link->ldisc->ops->write_wakeup == n_tty_write_wakeup) {
274 if (chars_in_buffer(tty) > TTY_THRESHOLD_UNTHROTTLE) 273 if (chars_in_buffer(tty) > TTY_THRESHOLD_UNTHROTTLE)
275 return; 274 return;
276 if (!tty->count) 275 if (!tty->count)
277 return; 276 return;
278 n_tty_kick_worker(tty); 277 n_tty_kick_worker(tty);
279 n_tty_write_wakeup(tty->link); 278 tty_wakeup(tty->link);
280 if (waitqueue_active(&tty->link->write_wait))
281 wake_up_interruptible_poll(&tty->link->write_wait, POLLOUT);
282 return; 279 return;
283 } 280 }
284 281
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index b3110040164a..2348fa613707 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -681,7 +681,14 @@ static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
681/* this is called once with whichever end is closed last */ 681/* this is called once with whichever end is closed last */
682static void pty_unix98_shutdown(struct tty_struct *tty) 682static void pty_unix98_shutdown(struct tty_struct *tty)
683{ 683{
684 devpts_kill_index(tty->driver_data, tty->index); 684 struct inode *ptmx_inode;
685
686 if (tty->driver->subtype == PTY_TYPE_MASTER)
687 ptmx_inode = tty->driver_data;
688 else
689 ptmx_inode = tty->link->driver_data;
690 devpts_kill_index(ptmx_inode, tty->index);
691 devpts_del_ref(ptmx_inode);
685} 692}
686 693
687static const struct tty_operations ptm_unix98_ops = { 694static const struct tty_operations ptm_unix98_ops = {
@@ -773,6 +780,18 @@ static int ptmx_open(struct inode *inode, struct file *filp)
773 set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */ 780 set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
774 tty->driver_data = inode; 781 tty->driver_data = inode;
775 782
783 /*
784 * In the case where all references to ptmx inode are dropped and we
785 * still have /dev/tty opened pointing to the master/slave pair (ptmx
786 * is closed/released before /dev/tty), we must make sure that the inode
787 * is still valid when we call the final pty_unix98_shutdown, thus we
788 * hold an additional reference to the ptmx inode. For the same /dev/tty
789 * last close case, we also need to make sure the super_block isn't
790 * destroyed (devpts instance unmounted), before /dev/tty is closed and
791 * on its release devpts_kill_index is called.
792 */
793 devpts_add_ref(inode);
794
776 tty_add_file(tty, filp); 795 tty_add_file(tty, filp);
777 796
778 slave_inode = devpts_pty_new(inode, 797 slave_inode = devpts_pty_new(inode,
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 4097f3f65b3b..7cd6f9a90542 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1379,6 +1379,9 @@ ce4100_serial_setup(struct serial_private *priv,
1379#define PCI_DEVICE_ID_INTEL_BSW_UART1 0x228a 1379#define PCI_DEVICE_ID_INTEL_BSW_UART1 0x228a
1380#define PCI_DEVICE_ID_INTEL_BSW_UART2 0x228c 1380#define PCI_DEVICE_ID_INTEL_BSW_UART2 0x228c
1381 1381
1382#define PCI_DEVICE_ID_INTEL_BDW_UART1 0x9ce3
1383#define PCI_DEVICE_ID_INTEL_BDW_UART2 0x9ce4
1384
1382#define BYT_PRV_CLK 0x800 1385#define BYT_PRV_CLK 0x800
1383#define BYT_PRV_CLK_EN (1 << 0) 1386#define BYT_PRV_CLK_EN (1 << 0)
1384#define BYT_PRV_CLK_M_VAL_SHIFT 1 1387#define BYT_PRV_CLK_M_VAL_SHIFT 1
@@ -1461,11 +1464,13 @@ byt_serial_setup(struct serial_private *priv,
1461 switch (pdev->device) { 1464 switch (pdev->device) {
1462 case PCI_DEVICE_ID_INTEL_BYT_UART1: 1465 case PCI_DEVICE_ID_INTEL_BYT_UART1:
1463 case PCI_DEVICE_ID_INTEL_BSW_UART1: 1466 case PCI_DEVICE_ID_INTEL_BSW_UART1:
1467 case PCI_DEVICE_ID_INTEL_BDW_UART1:
1464 rx_param->src_id = 3; 1468 rx_param->src_id = 3;
1465 tx_param->dst_id = 2; 1469 tx_param->dst_id = 2;
1466 break; 1470 break;
1467 case PCI_DEVICE_ID_INTEL_BYT_UART2: 1471 case PCI_DEVICE_ID_INTEL_BYT_UART2:
1468 case PCI_DEVICE_ID_INTEL_BSW_UART2: 1472 case PCI_DEVICE_ID_INTEL_BSW_UART2:
1473 case PCI_DEVICE_ID_INTEL_BDW_UART2:
1469 rx_param->src_id = 5; 1474 rx_param->src_id = 5;
1470 tx_param->dst_id = 4; 1475 tx_param->dst_id = 4;
1471 break; 1476 break;
@@ -1936,6 +1941,7 @@ pci_wch_ch38x_setup(struct serial_private *priv,
1936#define PCIE_VENDOR_ID_WCH 0x1c00 1941#define PCIE_VENDOR_ID_WCH 0x1c00
1937#define PCIE_DEVICE_ID_WCH_CH382_2S1P 0x3250 1942#define PCIE_DEVICE_ID_WCH_CH382_2S1P 0x3250
1938#define PCIE_DEVICE_ID_WCH_CH384_4S 0x3470 1943#define PCIE_DEVICE_ID_WCH_CH384_4S 0x3470
1944#define PCIE_DEVICE_ID_WCH_CH382_2S 0x3253
1939 1945
1940#define PCI_VENDOR_ID_PERICOM 0x12D8 1946#define PCI_VENDOR_ID_PERICOM 0x12D8
1941#define PCI_DEVICE_ID_PERICOM_PI7C9X7951 0x7951 1947#define PCI_DEVICE_ID_PERICOM_PI7C9X7951 0x7951
@@ -2062,6 +2068,20 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
2062 .subdevice = PCI_ANY_ID, 2068 .subdevice = PCI_ANY_ID,
2063 .setup = byt_serial_setup, 2069 .setup = byt_serial_setup,
2064 }, 2070 },
2071 {
2072 .vendor = PCI_VENDOR_ID_INTEL,
2073 .device = PCI_DEVICE_ID_INTEL_BDW_UART1,
2074 .subvendor = PCI_ANY_ID,
2075 .subdevice = PCI_ANY_ID,
2076 .setup = byt_serial_setup,
2077 },
2078 {
2079 .vendor = PCI_VENDOR_ID_INTEL,
2080 .device = PCI_DEVICE_ID_INTEL_BDW_UART2,
2081 .subvendor = PCI_ANY_ID,
2082 .subdevice = PCI_ANY_ID,
2083 .setup = byt_serial_setup,
2084 },
2065 /* 2085 /*
2066 * ITE 2086 * ITE
2067 */ 2087 */
@@ -2618,6 +2638,14 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
2618 .subdevice = PCI_ANY_ID, 2638 .subdevice = PCI_ANY_ID,
2619 .setup = pci_wch_ch353_setup, 2639 .setup = pci_wch_ch353_setup,
2620 }, 2640 },
2641 /* WCH CH382 2S card (16850 clone) */
2642 {
2643 .vendor = PCIE_VENDOR_ID_WCH,
2644 .device = PCIE_DEVICE_ID_WCH_CH382_2S,
2645 .subvendor = PCI_ANY_ID,
2646 .subdevice = PCI_ANY_ID,
2647 .setup = pci_wch_ch38x_setup,
2648 },
2621 /* WCH CH382 2S1P card (16850 clone) */ 2649 /* WCH CH382 2S1P card (16850 clone) */
2622 { 2650 {
2623 .vendor = PCIE_VENDOR_ID_WCH, 2651 .vendor = PCIE_VENDOR_ID_WCH,
@@ -2936,6 +2964,7 @@ enum pci_board_num_t {
2936 pbn_fintek_4, 2964 pbn_fintek_4,
2937 pbn_fintek_8, 2965 pbn_fintek_8,
2938 pbn_fintek_12, 2966 pbn_fintek_12,
2967 pbn_wch382_2,
2939 pbn_wch384_4, 2968 pbn_wch384_4,
2940 pbn_pericom_PI7C9X7951, 2969 pbn_pericom_PI7C9X7951,
2941 pbn_pericom_PI7C9X7952, 2970 pbn_pericom_PI7C9X7952,
@@ -3756,6 +3785,13 @@ static struct pciserial_board pci_boards[] = {
3756 .base_baud = 115200, 3785 .base_baud = 115200,
3757 .first_offset = 0x40, 3786 .first_offset = 0x40,
3758 }, 3787 },
3788 [pbn_wch382_2] = {
3789 .flags = FL_BASE0,
3790 .num_ports = 2,
3791 .base_baud = 115200,
3792 .uart_offset = 8,
3793 .first_offset = 0xC0,
3794 },
3759 [pbn_wch384_4] = { 3795 [pbn_wch384_4] = {
3760 .flags = FL_BASE0, 3796 .flags = FL_BASE0,
3761 .num_ports = 4, 3797 .num_ports = 4,
@@ -5506,6 +5542,16 @@ static struct pci_device_id serial_pci_tbl[] = {
5506 PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xff0000, 5542 PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xff0000,
5507 pbn_byt }, 5543 pbn_byt },
5508 5544
5545 /* Intel Broadwell */
5546 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BDW_UART1,
5547 PCI_ANY_ID, PCI_ANY_ID,
5548 PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xff0000,
5549 pbn_byt },
5550 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BDW_UART2,
5551 PCI_ANY_ID, PCI_ANY_ID,
5552 PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xff0000,
5553 pbn_byt },
5554
5509 /* 5555 /*
5510 * Intel Quark x1000 5556 * Intel Quark x1000
5511 */ 5557 */
@@ -5545,6 +5591,10 @@ static struct pci_device_id serial_pci_tbl[] = {
5545 PCI_ANY_ID, PCI_ANY_ID, 5591 PCI_ANY_ID, PCI_ANY_ID,
5546 0, 0, pbn_b0_bt_2_115200 }, 5592 0, 0, pbn_b0_bt_2_115200 },
5547 5593
5594 { PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH382_2S,
5595 PCI_ANY_ID, PCI_ANY_ID,
5596 0, 0, pbn_wch382_2 },
5597
5548 { PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH384_4S, 5598 { PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH384_4S,
5549 PCI_ANY_ID, PCI_ANY_ID, 5599 PCI_ANY_ID, PCI_ANY_ID,
5550 0, 0, pbn_wch384_4 }, 5600 0, 0, pbn_wch384_4 },
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index b645f9228ed7..fa49eb1e2fa2 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -1165,7 +1165,7 @@ serial_omap_type(struct uart_port *port)
1165 1165
1166#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE) 1166#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
1167 1167
1168static void wait_for_xmitr(struct uart_omap_port *up) 1168static void __maybe_unused wait_for_xmitr(struct uart_omap_port *up)
1169{ 1169{
1170 unsigned int status, tmout = 10000; 1170 unsigned int status, tmout = 10000;
1171 1171
@@ -1343,7 +1343,7 @@ static inline void serial_omap_add_console_port(struct uart_omap_port *up)
1343 1343
1344/* Enable or disable the rs485 support */ 1344/* Enable or disable the rs485 support */
1345static int 1345static int
1346serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf) 1346serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485)
1347{ 1347{
1348 struct uart_omap_port *up = to_uart_omap_port(port); 1348 struct uart_omap_port *up = to_uart_omap_port(port);
1349 unsigned int mode; 1349 unsigned int mode;
@@ -1356,8 +1356,12 @@ serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
1356 up->ier = 0; 1356 up->ier = 0;
1357 serial_out(up, UART_IER, 0); 1357 serial_out(up, UART_IER, 0);
1358 1358
1359 /* Clamp the delays to [0, 100ms] */
1360 rs485->delay_rts_before_send = min(rs485->delay_rts_before_send, 100U);
1361 rs485->delay_rts_after_send = min(rs485->delay_rts_after_send, 100U);
1362
1359 /* store new config */ 1363 /* store new config */
1360 port->rs485 = *rs485conf; 1364 port->rs485 = *rs485;
1361 1365
1362 /* 1366 /*
1363 * Just as a precaution, only allow rs485 1367 * Just as a precaution, only allow rs485
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 892c92354745..a7eacef1bd22 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1463,13 +1463,13 @@ static int tty_reopen(struct tty_struct *tty)
1463{ 1463{
1464 struct tty_driver *driver = tty->driver; 1464 struct tty_driver *driver = tty->driver;
1465 1465
1466 if (!tty->count)
1467 return -EIO;
1468
1469 if (driver->type == TTY_DRIVER_TYPE_PTY && 1466 if (driver->type == TTY_DRIVER_TYPE_PTY &&
1470 driver->subtype == PTY_TYPE_MASTER) 1467 driver->subtype == PTY_TYPE_MASTER)
1471 return -EIO; 1468 return -EIO;
1472 1469
1470 if (!tty->count)
1471 return -EAGAIN;
1472
1473 if (test_bit(TTY_EXCLUSIVE, &tty->flags) && !capable(CAP_SYS_ADMIN)) 1473 if (test_bit(TTY_EXCLUSIVE, &tty->flags) && !capable(CAP_SYS_ADMIN))
1474 return -EBUSY; 1474 return -EBUSY;
1475 1475
@@ -2065,9 +2065,13 @@ retry_open:
2065 2065
2066 if (tty) { 2066 if (tty) {
2067 mutex_unlock(&tty_mutex); 2067 mutex_unlock(&tty_mutex);
2068 tty_lock(tty); 2068 retval = tty_lock_interruptible(tty);
2069 /* safe to drop the kref from tty_driver_lookup_tty() */ 2069 tty_kref_put(tty); /* drop kref from tty_driver_lookup_tty() */
2070 tty_kref_put(tty); 2070 if (retval) {
2071 if (retval == -EINTR)
2072 retval = -ERESTARTSYS;
2073 goto err_unref;
2074 }
2071 retval = tty_reopen(tty); 2075 retval = tty_reopen(tty);
2072 if (retval < 0) { 2076 if (retval < 0) {
2073 tty_unlock(tty); 2077 tty_unlock(tty);
@@ -2083,7 +2087,11 @@ retry_open:
2083 2087
2084 if (IS_ERR(tty)) { 2088 if (IS_ERR(tty)) {
2085 retval = PTR_ERR(tty); 2089 retval = PTR_ERR(tty);
2086 goto err_file; 2090 if (retval != -EAGAIN || signal_pending(current))
2091 goto err_file;
2092 tty_free_file(filp);
2093 schedule();
2094 goto retry_open;
2087 } 2095 }
2088 2096
2089 tty_add_file(tty, filp); 2097 tty_add_file(tty, filp);
@@ -2152,6 +2160,7 @@ retry_open:
2152 return 0; 2160 return 0;
2153err_unlock: 2161err_unlock:
2154 mutex_unlock(&tty_mutex); 2162 mutex_unlock(&tty_mutex);
2163err_unref:
2155 /* after locks to avoid deadlock */ 2164 /* after locks to avoid deadlock */
2156 if (!IS_ERR_OR_NULL(driver)) 2165 if (!IS_ERR_OR_NULL(driver))
2157 tty_driver_kref_put(driver); 2166 tty_driver_kref_put(driver);
@@ -2649,6 +2658,28 @@ static int tiocsetd(struct tty_struct *tty, int __user *p)
2649} 2658}
2650 2659
2651/** 2660/**
2661 * tiocgetd - get line discipline
2662 * @tty: tty device
2663 * @p: pointer to user data
2664 *
2665 * Retrieves the line discipline id directly from the ldisc.
2666 *
2667 * Locking: waits for ldisc reference (in case the line discipline
2668 * is changing or the tty is being hungup)
2669 */
2670
2671static int tiocgetd(struct tty_struct *tty, int __user *p)
2672{
2673 struct tty_ldisc *ld;
2674 int ret;
2675
2676 ld = tty_ldisc_ref_wait(tty);
2677 ret = put_user(ld->ops->num, p);
2678 tty_ldisc_deref(ld);
2679 return ret;
2680}
2681
2682/**
2652 * send_break - performed time break 2683 * send_break - performed time break
2653 * @tty: device to break on 2684 * @tty: device to break on
2654 * @duration: timeout in mS 2685 * @duration: timeout in mS
@@ -2874,7 +2905,7 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2874 case TIOCGSID: 2905 case TIOCGSID:
2875 return tiocgsid(tty, real_tty, p); 2906 return tiocgsid(tty, real_tty, p);
2876 case TIOCGETD: 2907 case TIOCGETD:
2877 return put_user(tty->ldisc->ops->num, (int __user *)p); 2908 return tiocgetd(tty, p);
2878 case TIOCSETD: 2909 case TIOCSETD:
2879 return tiocsetd(tty, p); 2910 return tiocsetd(tty, p);
2880 case TIOCVHANGUP: 2911 case TIOCVHANGUP:
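The new tiocgetd() helper above takes an ldisc reference before reading the discipline number, so TIOCGETD no longer races with a concurrent TIOCSETD or hangup. The user-space interface is unchanged; a minimal, hypothetical caller for illustration:

#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
        int ldisc;

        /* TIOCGETD returns the current line discipline number (0 == N_TTY) */
        if (ioctl(STDIN_FILENO, TIOCGETD, &ldisc) < 0) {
                perror("TIOCGETD");
                return 1;
        }
        printf("line discipline: %d\n", ldisc);
        return 0;
}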
diff --git a/drivers/tty/tty_mutex.c b/drivers/tty/tty_mutex.c
index 77703a391207..dfa9ec03fa8e 100644
--- a/drivers/tty/tty_mutex.c
+++ b/drivers/tty/tty_mutex.c
@@ -19,6 +19,19 @@ void __lockfunc tty_lock(struct tty_struct *tty)
19} 19}
20EXPORT_SYMBOL(tty_lock); 20EXPORT_SYMBOL(tty_lock);
21 21
22int tty_lock_interruptible(struct tty_struct *tty)
23{
24 int ret;
25
26 if (WARN(tty->magic != TTY_MAGIC, "L Bad %p\n", tty))
27 return -EIO;
28 tty_kref_get(tty);
29 ret = mutex_lock_interruptible(&tty->legacy_mutex);
30 if (ret)
31 tty_kref_put(tty);
32 return ret;
33}
34
22void __lockfunc tty_unlock(struct tty_struct *tty) 35void __lockfunc tty_unlock(struct tty_struct *tty)
23{ 36{
24 if (WARN(tty->magic != TTY_MAGIC, "U Bad %p\n", tty)) 37 if (WARN(tty->magic != TTY_MAGIC, "U Bad %p\n", tty))
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index e7cbc44eef57..bd51bdd0a7bf 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -4250,6 +4250,7 @@ unsigned short *screen_pos(struct vc_data *vc, int w_offset, int viewed)
4250{ 4250{
4251 return screenpos(vc, 2 * w_offset, viewed); 4251 return screenpos(vc, 2 * w_offset, viewed);
4252} 4252}
4253EXPORT_SYMBOL_GPL(screen_pos);
4253 4254
4254void getconsxy(struct vc_data *vc, unsigned char *p) 4255void getconsxy(struct vc_data *vc, unsigned char *p)
4255{ 4256{
diff --git a/drivers/usb/chipidea/ci_hdrc_pci.c b/drivers/usb/chipidea/ci_hdrc_pci.c
index b59195edf636..b635ab67490d 100644
--- a/drivers/usb/chipidea/ci_hdrc_pci.c
+++ b/drivers/usb/chipidea/ci_hdrc_pci.c
@@ -85,8 +85,8 @@ static int ci_hdrc_pci_probe(struct pci_dev *pdev,
85 85
86 /* register a nop PHY */ 86 /* register a nop PHY */
87 ci->phy = usb_phy_generic_register(); 87 ci->phy = usb_phy_generic_register();
88 if (!ci->phy) 88 if (IS_ERR(ci->phy))
89 return -ENOMEM; 89 return PTR_ERR(ci->phy);
90 90
91 memset(res, 0, sizeof(res)); 91 memset(res, 0, sizeof(res));
92 res[0].start = pci_resource_start(pdev, 0); 92 res[0].start = pci_resource_start(pdev, 0);
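The ci_hdrc_pci fix above matters because usb_phy_generic_register() reports failure via ERR_PTR(), never NULL, so the result must be tested with IS_ERR()/PTR_ERR(). A short sketch of that convention, using a hypothetical foo_dev container (assumes the declaration in linux/usb/usb_phy_generic.h):

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/usb/usb_phy_generic.h>

struct foo_dev {
        struct platform_device *phy;
};

static int foo_register_phy(struct foo_dev *foo)
{
        /* returns a valid pointer or ERR_PTR(-E...), never NULL */
        foo->phy = usb_phy_generic_register();
        if (IS_ERR(foo->phy))
                return PTR_ERR(foo->phy);

        return 0;
}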
diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c
index a4f7db2e18dd..df47110bad2d 100644
--- a/drivers/usb/chipidea/debug.c
+++ b/drivers/usb/chipidea/debug.c
@@ -100,6 +100,9 @@ static ssize_t ci_port_test_write(struct file *file, const char __user *ubuf,
100 if (sscanf(buf, "%u", &mode) != 1) 100 if (sscanf(buf, "%u", &mode) != 1)
101 return -EINVAL; 101 return -EINVAL;
102 102
103 if (mode > 255)
104 return -EBADRQC;
105
103 pm_runtime_get_sync(ci->dev); 106 pm_runtime_get_sync(ci->dev);
104 spin_lock_irqsave(&ci->lock, flags); 107 spin_lock_irqsave(&ci->lock, flags);
105 ret = hw_port_test_set(ci, mode); 108 ret = hw_port_test_set(ci, mode);
diff --git a/drivers/usb/chipidea/otg.c b/drivers/usb/chipidea/otg.c
index 45f86da1d6d3..03b6743461d1 100644
--- a/drivers/usb/chipidea/otg.c
+++ b/drivers/usb/chipidea/otg.c
@@ -158,7 +158,7 @@ static void ci_otg_work(struct work_struct *work)
158int ci_hdrc_otg_init(struct ci_hdrc *ci) 158int ci_hdrc_otg_init(struct ci_hdrc *ci)
159{ 159{
160 INIT_WORK(&ci->work, ci_otg_work); 160 INIT_WORK(&ci->work, ci_otg_work);
161 ci->wq = create_singlethread_workqueue("ci_otg"); 161 ci->wq = create_freezable_workqueue("ci_otg");
162 if (!ci->wq) { 162 if (!ci->wq) {
163 dev_err(ci->dev, "can't create workqueue\n"); 163 dev_err(ci->dev, "can't create workqueue\n");
164 return -ENODEV; 164 return -ENODEV;
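The one-line chipidea change above switches the OTG workqueue to a freezable one, so queued OTG work is held back across system suspend instead of touching the controller while it powers down. A minimal sketch of the pattern, with hypothetical foo_* names:

#include <linux/workqueue.h>

static struct workqueue_struct *foo_wq;
static struct work_struct foo_work;

static void foo_work_fn(struct work_struct *work)
{
        /* OTG state-machine step; deferred while the system is suspended */
}

static int foo_init(void)
{
        /* frozen during suspend, thawed again on resume */
        foo_wq = create_freezable_workqueue("foo_otg");
        if (!foo_wq)
                return -ENOMEM;

        INIT_WORK(&foo_work, foo_work_fn);
        return 0;
}

static void foo_kick(void)
{
        queue_work(foo_wq, &foo_work);
}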
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 26ca4f910cb0..fa4e23930614 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -428,7 +428,8 @@ static void acm_read_bulk_callback(struct urb *urb)
428 set_bit(rb->index, &acm->read_urbs_free); 428 set_bit(rb->index, &acm->read_urbs_free);
429 dev_dbg(&acm->data->dev, "%s - non-zero urb status: %d\n", 429 dev_dbg(&acm->data->dev, "%s - non-zero urb status: %d\n",
430 __func__, status); 430 __func__, status);
431 return; 431 if ((status != -ENOENT) || (urb->actual_length == 0))
432 return;
432 } 433 }
433 434
434 usb_mark_last_busy(acm->dev); 435 usb_mark_last_busy(acm->dev);
@@ -1404,6 +1405,8 @@ made_compressed_probe:
1404 usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress), 1405 usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress),
1405 NULL, acm->writesize, acm_write_bulk, snd); 1406 NULL, acm->writesize, acm_write_bulk, snd);
1406 snd->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; 1407 snd->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
1408 if (quirks & SEND_ZERO_PACKET)
1409 snd->urb->transfer_flags |= URB_ZERO_PACKET;
1407 snd->instance = acm; 1410 snd->instance = acm;
1408 } 1411 }
1409 1412
@@ -1838,6 +1841,11 @@ static const struct usb_device_id acm_ids[] = {
1838 }, 1841 },
1839#endif 1842#endif
1840 1843
1844 /*Samsung phone in firmware update mode */
1845 { USB_DEVICE(0x04e8, 0x685d),
1846 .driver_info = IGNORE_DEVICE,
1847 },
1848
1841 /* Exclude Infineon Flash Loader utility */ 1849 /* Exclude Infineon Flash Loader utility */
1842 { USB_DEVICE(0x058b, 0x0041), 1850 { USB_DEVICE(0x058b, 0x0041),
1843 .driver_info = IGNORE_DEVICE, 1851 .driver_info = IGNORE_DEVICE,
@@ -1861,6 +1869,10 @@ static const struct usb_device_id acm_ids[] = {
1861 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, 1869 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
1862 USB_CDC_ACM_PROTO_AT_CDMA) }, 1870 USB_CDC_ACM_PROTO_AT_CDMA) },
1863 1871
1872 { USB_DEVICE(0x1519, 0x0452), /* Intel 7260 modem */
1873 .driver_info = SEND_ZERO_PACKET,
1874 },
1875
1864 { } 1876 { }
1865}; 1877};
1866 1878
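The SEND_ZERO_PACKET quirk added above sets URB_ZERO_PACKET on the write urbs so that a transfer which is an exact multiple of the endpoint's wMaxPacketSize is terminated with a zero-length packet, which the Intel 7260 modem needs to recognize end-of-transfer. A generic sketch of the flag, using a hypothetical helper rather than cdc-acm code:

#include <linux/usb.h>

static int foo_bulk_send(struct usb_device *udev, unsigned int pipe,
                         void *buf, int len, usb_complete_t done, void *ctx)
{
        struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);

        if (!urb)
                return -ENOMEM;

        usb_fill_bulk_urb(urb, udev, pipe, buf, len, done, ctx);
        /* append a zero-length packet when len is a multiple of wMaxPacketSize */
        urb->transfer_flags |= URB_ZERO_PACKET;

        return usb_submit_urb(urb, GFP_KERNEL);
}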
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index dd9af38e7cda..ccfaba9ab4e4 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -134,3 +134,4 @@ struct acm {
134#define IGNORE_DEVICE BIT(5) 134#define IGNORE_DEVICE BIT(5)
135#define QUIRK_CONTROL_LINE_STATE BIT(6) 135#define QUIRK_CONTROL_LINE_STATE BIT(6)
136#define CLEAR_HALT_CONDITIONS BIT(7) 136#define CLEAR_HALT_CONDITIONS BIT(7)
137#define SEND_ZERO_PACKET BIT(8)
diff --git a/drivers/usb/dwc2/Kconfig b/drivers/usb/dwc2/Kconfig
index fd95ba6ec317..f0decc0d69b5 100644
--- a/drivers/usb/dwc2/Kconfig
+++ b/drivers/usb/dwc2/Kconfig
@@ -1,5 +1,6 @@
1config USB_DWC2 1config USB_DWC2
2 tristate "DesignWare USB2 DRD Core Support" 2 tristate "DesignWare USB2 DRD Core Support"
3 depends on HAS_DMA
3 depends on USB || USB_GADGET 4 depends on USB || USB_GADGET
4 help 5 help
5 Say Y here if your system has a Dual Role Hi-Speed USB 6 Say Y here if your system has a Dual Role Hi-Speed USB
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index 39a0fa8a4c0a..46c4ba75dc2a 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -572,12 +572,6 @@ static bool dwc2_force_mode(struct dwc2_hsotg *hsotg, bool host)
572 set = host ? GUSBCFG_FORCEHOSTMODE : GUSBCFG_FORCEDEVMODE; 572 set = host ? GUSBCFG_FORCEHOSTMODE : GUSBCFG_FORCEDEVMODE;
573 clear = host ? GUSBCFG_FORCEDEVMODE : GUSBCFG_FORCEHOSTMODE; 573 clear = host ? GUSBCFG_FORCEDEVMODE : GUSBCFG_FORCEHOSTMODE;
574 574
575 /*
576 * If the force mode bit is already set, don't set it.
577 */
578 if ((gusbcfg & set) && !(gusbcfg & clear))
579 return false;
580
581 gusbcfg &= ~clear; 575 gusbcfg &= ~clear;
582 gusbcfg |= set; 576 gusbcfg |= set;
583 dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG); 577 dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG);
@@ -625,6 +619,12 @@ void dwc2_force_dr_mode(struct dwc2_hsotg *hsotg)
625 __func__, hsotg->dr_mode); 619 __func__, hsotg->dr_mode);
626 break; 620 break;
627 } 621 }
622
623 /*
624 * NOTE: This is required for some rockchip soc based
625 * platforms.
626 */
627 msleep(50);
628} 628}
629 629
630/* 630/*
@@ -3278,9 +3278,6 @@ static void dwc2_get_dev_hwparams(struct dwc2_hsotg *hsotg)
3278/** 3278/**
3279 * During device initialization, read various hardware configuration 3279 * During device initialization, read various hardware configuration
3280 * registers and interpret the contents. 3280 * registers and interpret the contents.
3281 *
3282 * This should be called during driver probe. It will perform a core
3283 * soft reset in order to get the reset values of the parameters.
3284 */ 3281 */
3285int dwc2_get_hwparams(struct dwc2_hsotg *hsotg) 3282int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
3286{ 3283{
@@ -3288,7 +3285,6 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
3288 unsigned width; 3285 unsigned width;
3289 u32 hwcfg1, hwcfg2, hwcfg3, hwcfg4; 3286 u32 hwcfg1, hwcfg2, hwcfg3, hwcfg4;
3290 u32 grxfsiz; 3287 u32 grxfsiz;
3291 int retval;
3292 3288
3293 /* 3289 /*
3294 * Attempt to ensure this device is really a DWC_otg Controller. 3290 * Attempt to ensure this device is really a DWC_otg Controller.
@@ -3308,10 +3304,6 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
3308 hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf, 3304 hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf,
3309 hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid); 3305 hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid);
3310 3306
3311 retval = dwc2_core_reset(hsotg);
3312 if (retval)
3313 return retval;
3314
3315 hwcfg1 = dwc2_readl(hsotg->regs + GHWCFG1); 3307 hwcfg1 = dwc2_readl(hsotg->regs + GHWCFG1);
3316 hwcfg2 = dwc2_readl(hsotg->regs + GHWCFG2); 3308 hwcfg2 = dwc2_readl(hsotg->regs + GHWCFG2);
3317 hwcfg3 = dwc2_readl(hsotg->regs + GHWCFG3); 3309 hwcfg3 = dwc2_readl(hsotg->regs + GHWCFG3);
diff --git a/drivers/usb/dwc2/hcd_ddma.c b/drivers/usb/dwc2/hcd_ddma.c
index 36606fc33c0d..a41274aa52ad 100644
--- a/drivers/usb/dwc2/hcd_ddma.c
+++ b/drivers/usb/dwc2/hcd_ddma.c
@@ -1174,14 +1174,11 @@ static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg,
1174 failed = dwc2_update_non_isoc_urb_state_ddma(hsotg, chan, qtd, dma_desc, 1174 failed = dwc2_update_non_isoc_urb_state_ddma(hsotg, chan, qtd, dma_desc,
1175 halt_status, n_bytes, 1175 halt_status, n_bytes,
1176 xfer_done); 1176 xfer_done);
1177 if (*xfer_done && urb->status != -EINPROGRESS) 1177 if (failed || (*xfer_done && urb->status != -EINPROGRESS)) {
1178 failed = 1;
1179
1180 if (failed) {
1181 dwc2_host_complete(hsotg, qtd, urb->status); 1178 dwc2_host_complete(hsotg, qtd, urb->status);
1182 dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh); 1179 dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
1183 dev_vdbg(hsotg->dev, "failed=%1x xfer_done=%1x status=%08x\n", 1180 dev_vdbg(hsotg->dev, "failed=%1x xfer_done=%1x\n",
1184 failed, *xfer_done, urb->status); 1181 failed, *xfer_done);
1185 return failed; 1182 return failed;
1186 } 1183 }
1187 1184
@@ -1236,21 +1233,23 @@ static void dwc2_complete_non_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
1236 1233
1237 list_for_each_safe(qtd_item, qtd_tmp, &qh->qtd_list) { 1234 list_for_each_safe(qtd_item, qtd_tmp, &qh->qtd_list) {
1238 int i; 1235 int i;
1236 int qtd_desc_count;
1239 1237
1240 qtd = list_entry(qtd_item, struct dwc2_qtd, qtd_list_entry); 1238 qtd = list_entry(qtd_item, struct dwc2_qtd, qtd_list_entry);
1241 xfer_done = 0; 1239 xfer_done = 0;
1240 qtd_desc_count = qtd->n_desc;
1242 1241
1243 for (i = 0; i < qtd->n_desc; i++) { 1242 for (i = 0; i < qtd_desc_count; i++) {
1244 if (dwc2_process_non_isoc_desc(hsotg, chan, chnum, qtd, 1243 if (dwc2_process_non_isoc_desc(hsotg, chan, chnum, qtd,
1245 desc_num, halt_status, 1244 desc_num, halt_status,
1246 &xfer_done)) { 1245 &xfer_done))
1247 qtd = NULL; 1246 goto stop_scan;
1248 break; 1247
1249 }
1250 desc_num++; 1248 desc_num++;
1251 } 1249 }
1252 } 1250 }
1253 1251
1252stop_scan:
1254 if (qh->ep_type != USB_ENDPOINT_XFER_CONTROL) { 1253 if (qh->ep_type != USB_ENDPOINT_XFER_CONTROL) {
1255 /* 1254 /*
1256 * Resetting the data toggle for bulk and interrupt endpoints 1255 * Resetting the data toggle for bulk and interrupt endpoints
@@ -1258,7 +1257,7 @@ static void dwc2_complete_non_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
1258 */ 1257 */
1259 if (halt_status == DWC2_HC_XFER_STALL) 1258 if (halt_status == DWC2_HC_XFER_STALL)
1260 qh->data_toggle = DWC2_HC_PID_DATA0; 1259 qh->data_toggle = DWC2_HC_PID_DATA0;
1261 else if (qtd) 1260 else
1262 dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd); 1261 dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1263 } 1262 }
1264 1263
diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
index f8253803a050..cadba8b13c48 100644
--- a/drivers/usb/dwc2/hcd_intr.c
+++ b/drivers/usb/dwc2/hcd_intr.c
@@ -525,11 +525,19 @@ void dwc2_hcd_save_data_toggle(struct dwc2_hsotg *hsotg,
525 u32 pid = (hctsiz & TSIZ_SC_MC_PID_MASK) >> TSIZ_SC_MC_PID_SHIFT; 525 u32 pid = (hctsiz & TSIZ_SC_MC_PID_MASK) >> TSIZ_SC_MC_PID_SHIFT;
526 526
527 if (chan->ep_type != USB_ENDPOINT_XFER_CONTROL) { 527 if (chan->ep_type != USB_ENDPOINT_XFER_CONTROL) {
528 if (WARN(!chan || !chan->qh,
529 "chan->qh must be specified for non-control eps\n"))
530 return;
531
528 if (pid == TSIZ_SC_MC_PID_DATA0) 532 if (pid == TSIZ_SC_MC_PID_DATA0)
529 chan->qh->data_toggle = DWC2_HC_PID_DATA0; 533 chan->qh->data_toggle = DWC2_HC_PID_DATA0;
530 else 534 else
531 chan->qh->data_toggle = DWC2_HC_PID_DATA1; 535 chan->qh->data_toggle = DWC2_HC_PID_DATA1;
532 } else { 536 } else {
537 if (WARN(!qtd,
538 "qtd must be specified for control eps\n"))
539 return;
540
533 if (pid == TSIZ_SC_MC_PID_DATA0) 541 if (pid == TSIZ_SC_MC_PID_DATA0)
534 qtd->data_toggle = DWC2_HC_PID_DATA0; 542 qtd->data_toggle = DWC2_HC_PID_DATA0;
535 else 543 else
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 510f787434b3..690b9fd98b55 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -530,7 +530,13 @@ static int dwc2_driver_probe(struct platform_device *dev)
530 if (retval) 530 if (retval)
531 return retval; 531 return retval;
532 532
533 /* Reset the controller and detect hardware config values */ 533 /*
 534 * Reset before dwc2_get_hwparams() so it reads the power-on reset
 535 * values from the registers.
536 */
537 dwc2_core_reset_and_force_dr_mode(hsotg);
538
539 /* Detect config values from hardware */
534 retval = dwc2_get_hwparams(hsotg); 540 retval = dwc2_get_hwparams(hsotg);
535 if (retval) 541 if (retval)
536 goto error; 542 goto error;
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 29130682e547..e4f8b90d9627 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -856,7 +856,6 @@ struct dwc3 {
856 unsigned pullups_connected:1; 856 unsigned pullups_connected:1;
857 unsigned resize_fifos:1; 857 unsigned resize_fifos:1;
858 unsigned setup_packet_pending:1; 858 unsigned setup_packet_pending:1;
859 unsigned start_config_issued:1;
860 unsigned three_stage_setup:1; 859 unsigned three_stage_setup:1;
861 unsigned usb3_lpm_capable:1; 860 unsigned usb3_lpm_capable:1;
862 861
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 3a9354abcb68..8d6b75c2f53b 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -555,7 +555,6 @@ static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
555 int ret; 555 int ret;
556 u32 reg; 556 u32 reg;
557 557
558 dwc->start_config_issued = false;
559 cfg = le16_to_cpu(ctrl->wValue); 558 cfg = le16_to_cpu(ctrl->wValue);
560 559
561 switch (state) { 560 switch (state) {
@@ -737,10 +736,6 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
737 dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY"); 736 dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY");
738 ret = dwc3_ep0_set_isoch_delay(dwc, ctrl); 737 ret = dwc3_ep0_set_isoch_delay(dwc, ctrl);
739 break; 738 break;
740 case USB_REQ_SET_INTERFACE:
741 dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_INTERFACE");
742 dwc->start_config_issued = false;
743 /* Fall through */
744 default: 739 default:
745 dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver"); 740 dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver");
746 ret = dwc3_ep0_delegate_req(dwc, ctrl); 741 ret = dwc3_ep0_delegate_req(dwc, ctrl);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index af023a81a0b0..2363bad45af8 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -385,24 +385,66 @@ static void dwc3_free_trb_pool(struct dwc3_ep *dep)
385 dep->trb_pool_dma = 0; 385 dep->trb_pool_dma = 0;
386} 386}
387 387
388static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep);
389
390/**
391 * dwc3_gadget_start_config - Configure EP resources
392 * @dwc: pointer to our controller context structure
393 * @dep: endpoint that is being enabled
394 *
395 * The assignment of transfer resources cannot perfectly follow the
396 * data book due to the fact that the controller driver does not have
397 * all knowledge of the configuration in advance. It is given this
398 * information piecemeal by the composite gadget framework after every
399 * SET_CONFIGURATION and SET_INTERFACE. Trying to follow the databook
400 * programming model in this scenario can cause errors. For two
401 * reasons:
402 *
403 * 1) The databook says to do DEPSTARTCFG for every SET_CONFIGURATION
404 * and SET_INTERFACE (8.1.5). This is incorrect in the scenario of
405 * multiple interfaces.
406 *
407 * 2) The databook does not mention doing more DEPXFERCFG for new
408 * endpoint on alt setting (8.1.6).
409 *
410 * The following simplified method is used instead:
411 *
412 * All hardware endpoints can be assigned a transfer resource and this
413 * setting will stay persistent until either a core reset or
414 * hibernation. So whenever we do a DEPSTARTCFG(0) we can go ahead and
415 * do DEPXFERCFG for every hardware endpoint as well. We are
416 * guaranteed that there are as many transfer resources as endpoints.
417 *
418 * This function is called for each endpoint when it is being enabled
419 * but is triggered only when called for EP0-out, which always happens
420 * first, and which should only happen in one of the above conditions.
421 */
388static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep) 422static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
389{ 423{
390 struct dwc3_gadget_ep_cmd_params params; 424 struct dwc3_gadget_ep_cmd_params params;
391 u32 cmd; 425 u32 cmd;
426 int i;
427 int ret;
428
429 if (dep->number)
430 return 0;
392 431
393 memset(&params, 0x00, sizeof(params)); 432 memset(&params, 0x00, sizeof(params));
433 cmd = DWC3_DEPCMD_DEPSTARTCFG;
394 434
395 if (dep->number != 1) { 435 ret = dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
396 cmd = DWC3_DEPCMD_DEPSTARTCFG; 436 if (ret)
397 /* XferRscIdx == 0 for ep0 and 2 for the remaining */ 437 return ret;
398 if (dep->number > 1) {
399 if (dwc->start_config_issued)
400 return 0;
401 dwc->start_config_issued = true;
402 cmd |= DWC3_DEPCMD_PARAM(2);
403 }
404 438
405 return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params); 439 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
440 struct dwc3_ep *dep = dwc->eps[i];
441
442 if (!dep)
443 continue;
444
445 ret = dwc3_gadget_set_xfer_resource(dwc, dep);
446 if (ret)
447 return ret;
406 } 448 }
407 449
408 return 0; 450 return 0;
@@ -516,10 +558,6 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
516 struct dwc3_trb *trb_st_hw; 558 struct dwc3_trb *trb_st_hw;
517 struct dwc3_trb *trb_link; 559 struct dwc3_trb *trb_link;
518 560
519 ret = dwc3_gadget_set_xfer_resource(dwc, dep);
520 if (ret)
521 return ret;
522
523 dep->endpoint.desc = desc; 561 dep->endpoint.desc = desc;
524 dep->comp_desc = comp_desc; 562 dep->comp_desc = comp_desc;
525 dep->type = usb_endpoint_type(desc); 563 dep->type = usb_endpoint_type(desc);
@@ -1636,8 +1674,6 @@ static int dwc3_gadget_start(struct usb_gadget *g,
1636 } 1674 }
1637 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 1675 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1638 1676
1639 dwc->start_config_issued = false;
1640
1641 /* Start with SuperSpeed Default */ 1677 /* Start with SuperSpeed Default */
1642 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); 1678 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
1643 1679
@@ -2237,7 +2273,6 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
2237 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2273 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2238 2274
2239 dwc3_disconnect_gadget(dwc); 2275 dwc3_disconnect_gadget(dwc);
2240 dwc->start_config_issued = false;
2241 2276
2242 dwc->gadget.speed = USB_SPEED_UNKNOWN; 2277 dwc->gadget.speed = USB_SPEED_UNKNOWN;
2243 dwc->setup_packet_pending = false; 2278 dwc->setup_packet_pending = false;
@@ -2288,7 +2323,6 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
2288 2323
2289 dwc3_stop_active_transfers(dwc); 2324 dwc3_stop_active_transfers(dwc);
2290 dwc3_clear_stall_all_ep(dwc); 2325 dwc3_clear_stall_all_ep(dwc);
2291 dwc->start_config_issued = false;
2292 2326
2293 /* Reset device address to zero */ 2327 /* Reset device address to zero */
2294 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 2328 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
@@ -2789,6 +2823,7 @@ int dwc3_gadget_init(struct dwc3 *dwc)
2789 dwc->gadget.speed = USB_SPEED_UNKNOWN; 2823 dwc->gadget.speed = USB_SPEED_UNKNOWN;
2790 dwc->gadget.sg_supported = true; 2824 dwc->gadget.sg_supported = true;
2791 dwc->gadget.name = "dwc3-gadget"; 2825 dwc->gadget.name = "dwc3-gadget";
2826 dwc->gadget.is_otg = dwc->dr_mode == USB_DR_MODE_OTG;
2792 2827
2793 /* 2828 /*
2794 * FIXME We might be setting max_speed to <SUPER, however versions 2829 * FIXME We might be setting max_speed to <SUPER, however versions
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 7e179f81d05c..87fb0fd6aaab 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -130,7 +130,8 @@ struct dev_data {
130 setup_can_stall : 1, 130 setup_can_stall : 1,
131 setup_out_ready : 1, 131 setup_out_ready : 1,
132 setup_out_error : 1, 132 setup_out_error : 1,
133 setup_abort : 1; 133 setup_abort : 1,
134 gadget_registered : 1;
134 unsigned setup_wLength; 135 unsigned setup_wLength;
135 136
136 /* the rest is basically write-once */ 137 /* the rest is basically write-once */
@@ -1179,7 +1180,8 @@ dev_release (struct inode *inode, struct file *fd)
1179 1180
1180 /* closing ep0 === shutdown all */ 1181 /* closing ep0 === shutdown all */
1181 1182
1182 usb_gadget_unregister_driver (&gadgetfs_driver); 1183 if (dev->gadget_registered)
1184 usb_gadget_unregister_driver (&gadgetfs_driver);
1183 1185
1184 /* at this point "good" hardware has disconnected the 1186 /* at this point "good" hardware has disconnected the
1185 * device from USB; the host won't see it any more. 1187 * device from USB; the host won't see it any more.
@@ -1847,6 +1849,7 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1847 * kick in after the ep0 descriptor is closed. 1849 * kick in after the ep0 descriptor is closed.
1848 */ 1850 */
1849 value = len; 1851 value = len;
1852 dev->gadget_registered = true;
1850 } 1853 }
1851 return value; 1854 return value;
1852 1855
diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.c b/drivers/usb/gadget/udc/fsl_qe_udc.c
index 53c0692f1b09..93d28cb00b76 100644
--- a/drivers/usb/gadget/udc/fsl_qe_udc.c
+++ b/drivers/usb/gadget/udc/fsl_qe_udc.c
@@ -2340,7 +2340,7 @@ static struct qe_udc *qe_udc_config(struct platform_device *ofdev)
2340{ 2340{
2341 struct qe_udc *udc; 2341 struct qe_udc *udc;
2342 struct device_node *np = ofdev->dev.of_node; 2342 struct device_node *np = ofdev->dev.of_node;
2343 unsigned int tmp_addr = 0; 2343 unsigned long tmp_addr = 0;
2344 struct usb_device_para __iomem *usbpram; 2344 struct usb_device_para __iomem *usbpram;
2345 unsigned int i; 2345 unsigned int i;
2346 u64 size; 2346 u64 size;
diff --git a/drivers/usb/gadget/udc/net2280.h b/drivers/usb/gadget/udc/net2280.h
index 4dff60d34f73..0d32052bf16f 100644
--- a/drivers/usb/gadget/udc/net2280.h
+++ b/drivers/usb/gadget/udc/net2280.h
@@ -369,9 +369,20 @@ static inline void set_max_speed(struct net2280_ep *ep, u32 max)
369 static const u32 ep_enhanced[9] = { 0x10, 0x60, 0x30, 0x80, 369 static const u32 ep_enhanced[9] = { 0x10, 0x60, 0x30, 0x80,
370 0x50, 0x20, 0x70, 0x40, 0x90 }; 370 0x50, 0x20, 0x70, 0x40, 0x90 };
371 371
372 if (ep->dev->enhanced_mode) 372 if (ep->dev->enhanced_mode) {
373 reg = ep_enhanced[ep->num]; 373 reg = ep_enhanced[ep->num];
374 else{ 374 switch (ep->dev->gadget.speed) {
375 case USB_SPEED_SUPER:
376 reg += 2;
377 break;
378 case USB_SPEED_FULL:
379 reg += 1;
380 break;
381 case USB_SPEED_HIGH:
382 default:
383 break;
384 }
385 } else {
375 reg = (ep->num + 1) * 0x10; 386 reg = (ep->num + 1) * 0x10;
376 if (ep->dev->gadget.speed != USB_SPEED_HIGH) 387 if (ep->dev->gadget.speed != USB_SPEED_HIGH)
377 reg += 1; 388 reg += 1;
diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c
index fd73a3ea07c2..b86a6f03592e 100644
--- a/drivers/usb/gadget/udc/udc-core.c
+++ b/drivers/usb/gadget/udc/udc-core.c
@@ -413,9 +413,10 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
413 if (!driver->udc_name || strcmp(driver->udc_name, 413 if (!driver->udc_name || strcmp(driver->udc_name,
414 dev_name(&udc->dev)) == 0) { 414 dev_name(&udc->dev)) == 0) {
415 ret = udc_bind_to_driver(udc, driver); 415 ret = udc_bind_to_driver(udc, driver);
416 if (ret != -EPROBE_DEFER)
417 list_del(&driver->pending);
416 if (ret) 418 if (ret)
417 goto err4; 419 goto err4;
418 list_del(&driver->pending);
419 break; 420 break;
420 } 421 }
421 } 422 }
diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h
index 04ce6b156b35..e0244fb3903d 100644
--- a/drivers/usb/host/xhci-ext-caps.h
+++ b/drivers/usb/host/xhci-ext-caps.h
@@ -112,12 +112,16 @@ static inline int xhci_find_next_ext_cap(void __iomem *base, u32 start, int id)
112 offset = start; 112 offset = start;
113 if (!start || start == XHCI_HCC_PARAMS_OFFSET) { 113 if (!start || start == XHCI_HCC_PARAMS_OFFSET) {
114 val = readl(base + XHCI_HCC_PARAMS_OFFSET); 114 val = readl(base + XHCI_HCC_PARAMS_OFFSET);
115 if (val == ~0)
116 return 0;
115 offset = XHCI_HCC_EXT_CAPS(val) << 2; 117 offset = XHCI_HCC_EXT_CAPS(val) << 2;
116 if (!offset) 118 if (!offset)
117 return 0; 119 return 0;
118 }; 120 };
119 do { 121 do {
120 val = readl(base + offset); 122 val = readl(base + offset);
123 if (val == ~0)
124 return 0;
121 if (XHCI_EXT_CAPS_ID(val) == id && offset != start) 125 if (XHCI_EXT_CAPS_ID(val) == id && offset != start)
122 return offset; 126 return offset;
123 127
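The added val == ~0 checks above are the usual guard against a surprise-removed PCI device: MMIO reads from hardware that has gone away return all ones, so the extended-capability walk bails out instead of chasing a bogus offset. A generic sketch of the guard, with a hypothetical register layout:

#include <linux/io.h>
#include <linux/types.h>

static bool foo_dev_vanished(void __iomem *regs, unsigned int offset)
{
        u32 val = readl(regs + offset);

        /* all-ones usually means the PCI device was hot-removed */
        return val == ~0;
}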
diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c
index c30de7c39f44..73f763c4f5f5 100644
--- a/drivers/usb/host/xhci-mtk-sch.c
+++ b/drivers/usb/host/xhci-mtk-sch.c
@@ -275,8 +275,9 @@ static bool need_bw_sch(struct usb_host_endpoint *ep,
275 return false; 275 return false;
276 276
277 /* 277 /*
278 * for LS & FS periodic endpoints which its device don't attach 278 * for LS & FS periodic endpoints which its device is not behind
279 * to TT are also ignored, root-hub will schedule them directly 279 * a TT are also ignored, root-hub will schedule them directly,
280 * but need set @bpkts field of endpoint context to 1.
280 */ 281 */
281 if (is_fs_or_ls(speed) && !has_tt) 282 if (is_fs_or_ls(speed) && !has_tt)
282 return false; 283 return false;
@@ -339,8 +340,17 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
339 GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc)), 340 GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc)),
340 usb_endpoint_dir_in(&ep->desc), ep); 341 usb_endpoint_dir_in(&ep->desc), ep);
341 342
342 if (!need_bw_sch(ep, udev->speed, slot_ctx->tt_info & TT_SLOT)) 343 if (!need_bw_sch(ep, udev->speed, slot_ctx->tt_info & TT_SLOT)) {
344 /*
 345 * set @bpkts to 1 if it is an LS or FS periodic endpoint whose
 346 * device is not connected through an external HS hub
347 */
348 if (usb_endpoint_xfer_int(&ep->desc)
349 || usb_endpoint_xfer_isoc(&ep->desc))
350 ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(1));
351
343 return 0; 352 return 0;
353 }
344 354
345 bw_index = get_bw_index(xhci, udev, ep); 355 bw_index = get_bw_index(xhci, udev, ep);
346 sch_bw = &sch_array[bw_index]; 356 sch_bw = &sch_array[bw_index];
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index c9ab6a44c34a..9532f5aef71b 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -696,9 +696,24 @@ static int xhci_mtk_remove(struct platform_device *dev)
696} 696}
697 697
698#ifdef CONFIG_PM_SLEEP 698#ifdef CONFIG_PM_SLEEP
699/*
 700 * If IP sleep fails while all clocks are disabled, register access will hang
 701 * the AHB bus, so stop polling the roothubs to avoid register access during
 702 * bus suspend. There is no need to check whether IP sleep failed: if it did,
 703 * SPM wakes the system up immediately after system suspend completes, which
 704 * is exactly what we want.
705 */
699static int xhci_mtk_suspend(struct device *dev) 706static int xhci_mtk_suspend(struct device *dev)
700{ 707{
701 struct xhci_hcd_mtk *mtk = dev_get_drvdata(dev); 708 struct xhci_hcd_mtk *mtk = dev_get_drvdata(dev);
709 struct usb_hcd *hcd = mtk->hcd;
710 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
711
712 xhci_dbg(xhci, "%s: stop port polling\n", __func__);
713 clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
714 del_timer_sync(&hcd->rh_timer);
715 clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
716 del_timer_sync(&xhci->shared_hcd->rh_timer);
702 717
703 xhci_mtk_host_disable(mtk); 718 xhci_mtk_host_disable(mtk);
704 xhci_mtk_phy_power_off(mtk); 719 xhci_mtk_phy_power_off(mtk);
@@ -710,11 +725,19 @@ static int xhci_mtk_suspend(struct device *dev)
710static int xhci_mtk_resume(struct device *dev) 725static int xhci_mtk_resume(struct device *dev)
711{ 726{
712 struct xhci_hcd_mtk *mtk = dev_get_drvdata(dev); 727 struct xhci_hcd_mtk *mtk = dev_get_drvdata(dev);
728 struct usb_hcd *hcd = mtk->hcd;
729 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
713 730
714 usb_wakeup_disable(mtk); 731 usb_wakeup_disable(mtk);
715 xhci_mtk_clks_enable(mtk); 732 xhci_mtk_clks_enable(mtk);
716 xhci_mtk_phy_power_on(mtk); 733 xhci_mtk_phy_power_on(mtk);
717 xhci_mtk_host_enable(mtk); 734 xhci_mtk_host_enable(mtk);
735
736 xhci_dbg(xhci, "%s: restart port polling\n", __func__);
737 set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
738 usb_hcd_poll_rh_status(hcd);
739 set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
740 usb_hcd_poll_rh_status(xhci->shared_hcd);
718 return 0; 741 return 0;
719} 742}
720 743
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 58c43ed7ff3b..f0640b7a1c42 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -28,7 +28,9 @@
28#include "xhci.h" 28#include "xhci.h"
29#include "xhci-trace.h" 29#include "xhci-trace.h"
30 30
31#define PORT2_SSIC_CONFIG_REG2 0x883c 31#define SSIC_PORT_NUM 2
32#define SSIC_PORT_CFG2 0x880c
33#define SSIC_PORT_CFG2_OFFSET 0x30
32#define PROG_DONE (1 << 30) 34#define PROG_DONE (1 << 30)
33#define SSIC_PORT_UNUSED (1 << 31) 35#define SSIC_PORT_UNUSED (1 << 31)
34 36
@@ -45,6 +47,7 @@
45#define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI 0x22b5 47#define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI 0x22b5
46#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f 48#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f
47#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f 49#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f
50#define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI 0x0aa8
48 51
49static const char hcd_name[] = "xhci_hcd"; 52static const char hcd_name[] = "xhci_hcd";
50 53
@@ -151,9 +154,14 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
151 if (pdev->vendor == PCI_VENDOR_ID_INTEL && 154 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
152 (pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI || 155 (pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
153 pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI || 156 pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
154 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI)) { 157 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
158 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI)) {
155 xhci->quirks |= XHCI_PME_STUCK_QUIRK; 159 xhci->quirks |= XHCI_PME_STUCK_QUIRK;
156 } 160 }
161 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
162 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
163 xhci->quirks |= XHCI_SSIC_PORT_UNUSED;
164 }
157 if (pdev->vendor == PCI_VENDOR_ID_ETRON && 165 if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
158 pdev->device == PCI_DEVICE_ID_EJ168) { 166 pdev->device == PCI_DEVICE_ID_EJ168) {
159 xhci->quirks |= XHCI_RESET_ON_RESUME; 167 xhci->quirks |= XHCI_RESET_ON_RESUME;
@@ -312,22 +320,20 @@ static void xhci_pci_remove(struct pci_dev *dev)
312 * SSIC PORT need to be marked as "unused" before putting xHCI 320 * SSIC PORT need to be marked as "unused" before putting xHCI
313 * into D3. After D3 exit, the SSIC port need to be marked as "used". 321 * into D3. After D3 exit, the SSIC port need to be marked as "used".
314 * Without this change, xHCI might not enter D3 state. 322 * Without this change, xHCI might not enter D3 state.
315 * Make sure PME works on some Intel xHCI controllers by writing 1 to clear
316 * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4
317 */ 323 */
318static void xhci_pme_quirk(struct usb_hcd *hcd, bool suspend) 324static void xhci_ssic_port_unused_quirk(struct usb_hcd *hcd, bool suspend)
319{ 325{
320 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 326 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
321 struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
322 u32 val; 327 u32 val;
323 void __iomem *reg; 328 void __iomem *reg;
329 int i;
324 330
325 if (pdev->vendor == PCI_VENDOR_ID_INTEL && 331 for (i = 0; i < SSIC_PORT_NUM; i++) {
326 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) { 332 reg = (void __iomem *) xhci->cap_regs +
327 333 SSIC_PORT_CFG2 +
328 reg = (void __iomem *) xhci->cap_regs + PORT2_SSIC_CONFIG_REG2; 334 i * SSIC_PORT_CFG2_OFFSET;
329 335
330 /* Notify SSIC that SSIC profile programming is not done */ 336 /* Notify SSIC that SSIC profile programming is not done. */
331 val = readl(reg) & ~PROG_DONE; 337 val = readl(reg) & ~PROG_DONE;
332 writel(val, reg); 338 writel(val, reg);
333 339
@@ -344,6 +350,17 @@ static void xhci_pme_quirk(struct usb_hcd *hcd, bool suspend)
344 writel(val, reg); 350 writel(val, reg);
345 readl(reg); 351 readl(reg);
346 } 352 }
353}
354
355/*
356 * Make sure PME works on some Intel xHCI controllers by writing 1 to clear
357 * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4
358 */
359static void xhci_pme_quirk(struct usb_hcd *hcd)
360{
361 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
362 void __iomem *reg;
363 u32 val;
347 364
348 reg = (void __iomem *) xhci->cap_regs + 0x80a4; 365 reg = (void __iomem *) xhci->cap_regs + 0x80a4;
349 val = readl(reg); 366 val = readl(reg);
@@ -355,6 +372,7 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
355{ 372{
356 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 373 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
357 struct pci_dev *pdev = to_pci_dev(hcd->self.controller); 374 struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
375 int ret;
358 376
359 /* 377 /*
360 * Systems with the TI redriver that loses port status change events 378 * Systems with the TI redriver that loses port status change events
@@ -364,9 +382,16 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
364 pdev->no_d3cold = true; 382 pdev->no_d3cold = true;
365 383
366 if (xhci->quirks & XHCI_PME_STUCK_QUIRK) 384 if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
367 xhci_pme_quirk(hcd, true); 385 xhci_pme_quirk(hcd);
386
387 if (xhci->quirks & XHCI_SSIC_PORT_UNUSED)
388 xhci_ssic_port_unused_quirk(hcd, true);
368 389
369 return xhci_suspend(xhci, do_wakeup); 390 ret = xhci_suspend(xhci, do_wakeup);
391 if (ret && (xhci->quirks & XHCI_SSIC_PORT_UNUSED))
392 xhci_ssic_port_unused_quirk(hcd, false);
393
394 return ret;
370} 395}
371 396
372static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated) 397static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
@@ -396,8 +421,11 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
396 if (pdev->vendor == PCI_VENDOR_ID_INTEL) 421 if (pdev->vendor == PCI_VENDOR_ID_INTEL)
397 usb_enable_intel_xhci_ports(pdev); 422 usb_enable_intel_xhci_ports(pdev);
398 423
424 if (xhci->quirks & XHCI_SSIC_PORT_UNUSED)
425 xhci_ssic_port_unused_quirk(hcd, false);
426
399 if (xhci->quirks & XHCI_PME_STUCK_QUIRK) 427 if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
400 xhci_pme_quirk(hcd, false); 428 xhci_pme_quirk(hcd);
401 429
402 retval = xhci_resume(xhci, hibernated); 430 retval = xhci_resume(xhci, hibernated);
403 return retval; 431 return retval;
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 770b6b088797..d39d6bf1d090 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -184,7 +184,8 @@ static int xhci_plat_probe(struct platform_device *pdev)
184 struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd); 184 struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
185 185
186 /* Just copy data for now */ 186 /* Just copy data for now */
187 *priv = *priv_match; 187 if (priv_match)
188 *priv = *priv_match;
188 } 189 }
189 190
190 if (xhci_plat_type_is(hcd, XHCI_PLAT_TYPE_MARVELL_ARMADA)) { 191 if (xhci_plat_type_is(hcd, XHCI_PLAT_TYPE_MARVELL_ARMADA)) {
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index f1c21c40b4a6..3915657e6078 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -2193,10 +2193,6 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
2193 } 2193 }
2194 /* Fast path - was this the last TRB in the TD for this URB? */ 2194 /* Fast path - was this the last TRB in the TD for this URB? */
2195 } else if (event_trb == td->last_trb) { 2195 } else if (event_trb == td->last_trb) {
2196 if (td->urb_length_set && trb_comp_code == COMP_SHORT_TX)
2197 return finish_td(xhci, td, event_trb, event, ep,
2198 status, false);
2199
2200 if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) { 2196 if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
2201 td->urb->actual_length = 2197 td->urb->actual_length =
2202 td->urb->transfer_buffer_length - 2198 td->urb->transfer_buffer_length -
@@ -2248,12 +2244,6 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
2248 td->urb->actual_length += 2244 td->urb->actual_length +=
2249 TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) - 2245 TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
2250 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); 2246 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2251
2252 if (trb_comp_code == COMP_SHORT_TX) {
2253 xhci_dbg(xhci, "mid bulk/intr SP, wait for last TRB event\n");
2254 td->urb_length_set = true;
2255 return 0;
2256 }
2257 } 2247 }
2258 2248
2259 return finish_td(xhci, td, event_trb, event, ep, status, false); 2249 return finish_td(xhci, td, event_trb, event, ep, status, false);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 26a44c0e969e..0c8087d3c313 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1554,7 +1554,9 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1554 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, 1554 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1555 "HW died, freeing TD."); 1555 "HW died, freeing TD.");
1556 urb_priv = urb->hcpriv; 1556 urb_priv = urb->hcpriv;
1557 for (i = urb_priv->td_cnt; i < urb_priv->length; i++) { 1557 for (i = urb_priv->td_cnt;
1558 i < urb_priv->length && xhci->devs[urb->dev->slot_id];
1559 i++) {
1558 td = urb_priv->td[i]; 1560 td = urb_priv->td[i];
1559 if (!list_empty(&td->td_list)) 1561 if (!list_empty(&td->td_list))
1560 list_del_init(&td->td_list); 1562 list_del_init(&td->td_list);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 9be7348872ba..cc651383ce5a 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1631,6 +1631,7 @@ struct xhci_hcd {
1631#define XHCI_BROKEN_STREAMS (1 << 19) 1631#define XHCI_BROKEN_STREAMS (1 << 19)
1632#define XHCI_PME_STUCK_QUIRK (1 << 20) 1632#define XHCI_PME_STUCK_QUIRK (1 << 20)
1633#define XHCI_MTK_HOST (1 << 21) 1633#define XHCI_MTK_HOST (1 << 21)
1634#define XHCI_SSIC_PORT_UNUSED (1 << 22)
1634 unsigned int num_active_eps; 1635 unsigned int num_active_eps;
1635 unsigned int limit_active_eps; 1636 unsigned int limit_active_eps;
1636 /* There are two roothubs to keep track of bus suspend info for */ 1637 /* There are two roothubs to keep track of bus suspend info for */
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 795a45b1b25b..58487a473521 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -662,7 +662,7 @@ static int musb_tx_dma_set_mode_mentor(struct dma_controller *dma,
662 csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE); 662 csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
663 csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */ 663 csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */
664 } 664 }
665 channel->desired_mode = mode; 665 channel->desired_mode = *mode;
666 musb_writew(epio, MUSB_TXCSR, csr); 666 musb_writew(epio, MUSB_TXCSR, csr);
667 667
668 return 0; 668 return 0;
@@ -2003,10 +2003,8 @@ void musb_host_rx(struct musb *musb, u8 epnum)
2003 qh->offset, 2003 qh->offset,
2004 urb->transfer_buffer_length); 2004 urb->transfer_buffer_length);
2005 2005
2006 done = musb_rx_dma_in_inventra_cppi41(c, hw_ep, qh, 2006 if (musb_rx_dma_in_inventra_cppi41(c, hw_ep, qh, urb,
2007 urb, xfer_len, 2007 xfer_len, iso_err))
2008 iso_err);
2009 if (done)
2010 goto finish; 2008 goto finish;
2011 else 2009 else
2012 dev_err(musb->controller, "error: rx_dma failed\n"); 2010 dev_err(musb->controller, "error: rx_dma failed\n");
diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c
index b2685e75a683..3eaa4ba6867d 100644
--- a/drivers/usb/musb/ux500.c
+++ b/drivers/usb/musb/ux500.c
@@ -348,7 +348,9 @@ static int ux500_suspend(struct device *dev)
348 struct ux500_glue *glue = dev_get_drvdata(dev); 348 struct ux500_glue *glue = dev_get_drvdata(dev);
349 struct musb *musb = glue_to_musb(glue); 349 struct musb *musb = glue_to_musb(glue);
350 350
351 usb_phy_set_suspend(musb->xceiv, 1); 351 if (musb)
352 usb_phy_set_suspend(musb->xceiv, 1);
353
352 clk_disable_unprepare(glue->clk); 354 clk_disable_unprepare(glue->clk);
353 355
354 return 0; 356 return 0;
@@ -366,7 +368,8 @@ static int ux500_resume(struct device *dev)
366 return ret; 368 return ret;
367 } 369 }
368 370
369 usb_phy_set_suspend(musb->xceiv, 0); 371 if (musb)
372 usb_phy_set_suspend(musb->xceiv, 0);
370 373
371 return 0; 374 return 0;
372} 375}
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
index 0d19a6d61a71..72b387d592c2 100644
--- a/drivers/usb/phy/phy-msm-usb.c
+++ b/drivers/usb/phy/phy-msm-usb.c
@@ -757,14 +757,8 @@ static int msm_otg_set_host(struct usb_otg *otg, struct usb_bus *host)
757 otg->host = host; 757 otg->host = host;
758 dev_dbg(otg->usb_phy->dev, "host driver registered w/ tranceiver\n"); 758 dev_dbg(otg->usb_phy->dev, "host driver registered w/ tranceiver\n");
759 759
760 /* 760 pm_runtime_get_sync(otg->usb_phy->dev);
761 * Kick the state machine work, if peripheral is not supported 761 schedule_work(&motg->sm_work);
762 * or peripheral is already registered with us.
763 */
764 if (motg->pdata->mode == USB_DR_MODE_HOST || otg->gadget) {
765 pm_runtime_get_sync(otg->usb_phy->dev);
766 schedule_work(&motg->sm_work);
767 }
768 762
769 return 0; 763 return 0;
770} 764}
@@ -827,14 +821,8 @@ static int msm_otg_set_peripheral(struct usb_otg *otg,
827 dev_dbg(otg->usb_phy->dev, 821 dev_dbg(otg->usb_phy->dev,
828 "peripheral driver registered w/ tranceiver\n"); 822 "peripheral driver registered w/ tranceiver\n");
829 823
830 /* 824 pm_runtime_get_sync(otg->usb_phy->dev);
831 * Kick the state machine work, if host is not supported 825 schedule_work(&motg->sm_work);
832 * or host is already registered with us.
833 */
834 if (motg->pdata->mode == USB_DR_MODE_PERIPHERAL || otg->host) {
835 pm_runtime_get_sync(otg->usb_phy->dev);
836 schedule_work(&motg->sm_work);
837 }
838 826
839 return 0; 827 return 0;
840} 828}
@@ -1599,6 +1587,8 @@ static int msm_otg_read_dt(struct platform_device *pdev, struct msm_otg *motg)
1599 &motg->id.nb); 1587 &motg->id.nb);
1600 if (ret < 0) { 1588 if (ret < 0) {
1601 dev_err(&pdev->dev, "register ID notifier failed\n"); 1589 dev_err(&pdev->dev, "register ID notifier failed\n");
1590 extcon_unregister_notifier(motg->vbus.extcon,
1591 EXTCON_USB, &motg->vbus.nb);
1602 return ret; 1592 return ret;
1603 } 1593 }
1604 1594
@@ -1660,15 +1650,6 @@ static int msm_otg_probe(struct platform_device *pdev)
1660 if (!motg) 1650 if (!motg)
1661 return -ENOMEM; 1651 return -ENOMEM;
1662 1652
1663 pdata = dev_get_platdata(&pdev->dev);
1664 if (!pdata) {
1665 if (!np)
1666 return -ENXIO;
1667 ret = msm_otg_read_dt(pdev, motg);
1668 if (ret)
1669 return ret;
1670 }
1671
1672 motg->phy.otg = devm_kzalloc(&pdev->dev, sizeof(struct usb_otg), 1653 motg->phy.otg = devm_kzalloc(&pdev->dev, sizeof(struct usb_otg),
1673 GFP_KERNEL); 1654 GFP_KERNEL);
1674 if (!motg->phy.otg) 1655 if (!motg->phy.otg)
@@ -1710,6 +1691,15 @@ static int msm_otg_probe(struct platform_device *pdev)
1710 if (!motg->regs) 1691 if (!motg->regs)
1711 return -ENOMEM; 1692 return -ENOMEM;
1712 1693
1694 pdata = dev_get_platdata(&pdev->dev);
1695 if (!pdata) {
1696 if (!np)
1697 return -ENXIO;
1698 ret = msm_otg_read_dt(pdev, motg);
1699 if (ret)
1700 return ret;
1701 }
1702
1713 /* 1703 /*
1714 * NOTE: The PHYs can be multiplexed between the chipidea controller 1704 * NOTE: The PHYs can be multiplexed between the chipidea controller
1715 * and the dwc3 controller, using a single bit. It is important that 1705 * and the dwc3 controller, using a single bit. It is important that
@@ -1717,8 +1707,10 @@ static int msm_otg_probe(struct platform_device *pdev)
1717 */ 1707 */
1718 if (motg->phy_number) { 1708 if (motg->phy_number) {
1719 phy_select = devm_ioremap_nocache(&pdev->dev, USB2_PHY_SEL, 4); 1709 phy_select = devm_ioremap_nocache(&pdev->dev, USB2_PHY_SEL, 4);
1720 if (!phy_select) 1710 if (!phy_select) {
1721 return -ENOMEM; 1711 ret = -ENOMEM;
1712 goto unregister_extcon;
1713 }
1722 /* Enable second PHY with the OTG port */ 1714 /* Enable second PHY with the OTG port */
1723 writel(0x1, phy_select); 1715 writel(0x1, phy_select);
1724 } 1716 }
@@ -1728,7 +1720,8 @@ static int msm_otg_probe(struct platform_device *pdev)
1728 motg->irq = platform_get_irq(pdev, 0); 1720 motg->irq = platform_get_irq(pdev, 0);
1729 if (motg->irq < 0) { 1721 if (motg->irq < 0) {
1730 dev_err(&pdev->dev, "platform_get_irq failed\n"); 1722 dev_err(&pdev->dev, "platform_get_irq failed\n");
1731 return motg->irq; 1723 ret = motg->irq;
1724 goto unregister_extcon;
1732 } 1725 }
1733 1726
1734 regs[0].supply = "vddcx"; 1727 regs[0].supply = "vddcx";
@@ -1737,7 +1730,7 @@ static int msm_otg_probe(struct platform_device *pdev)
1737 1730
1738 ret = devm_regulator_bulk_get(motg->phy.dev, ARRAY_SIZE(regs), regs); 1731 ret = devm_regulator_bulk_get(motg->phy.dev, ARRAY_SIZE(regs), regs);
1739 if (ret) 1732 if (ret)
1740 return ret; 1733 goto unregister_extcon;
1741 1734
1742 motg->vddcx = regs[0].consumer; 1735 motg->vddcx = regs[0].consumer;
1743 motg->v3p3 = regs[1].consumer; 1736 motg->v3p3 = regs[1].consumer;
@@ -1834,6 +1827,12 @@ disable_clks:
1834 clk_disable_unprepare(motg->clk); 1827 clk_disable_unprepare(motg->clk);
1835 if (!IS_ERR(motg->core_clk)) 1828 if (!IS_ERR(motg->core_clk))
1836 clk_disable_unprepare(motg->core_clk); 1829 clk_disable_unprepare(motg->core_clk);
1830unregister_extcon:
1831 extcon_unregister_notifier(motg->id.extcon,
1832 EXTCON_USB_HOST, &motg->id.nb);
1833 extcon_unregister_notifier(motg->vbus.extcon,
1834 EXTCON_USB, &motg->vbus.nb);
1835
1837 return ret; 1836 return ret;
1838} 1837}
1839 1838
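
The phy-msm-usb changes above move the DT parsing (which registers two extcon notifiers) after the register mapping and convert the later early returns into gotos, so the notifiers are unregistered on every failure path of probe. The sketch below shows only the general acquire-then-unwind shape with a goto ladder; the resource names are illustrative, not the driver's real ones.

#include <stdio.h>

/* Illustrative stand-ins for two resources that must be released in
 * reverse order if a later step fails (e.g. a pair of notifiers). */
static int acquire_a(void) { puts("acquire A"); return 0; }
static int acquire_b(void) { puts("acquire B"); return 0; }
static void release_a(void) { puts("release A"); }
static void release_b(void) { puts("release B"); }
static int do_setup(int fail) { return fail ? -1 : 0; }

static int probe(int fail_setup)
{
	int ret;

	ret = acquire_a();
	if (ret)
		return ret;
	ret = acquire_b();
	if (ret)
		goto err_a;

	ret = do_setup(fail_setup);
	if (ret)
		goto err_b;		/* unwind everything acquired so far */

	return 0;

err_b:
	release_b();
err_a:
	release_a();
	return ret;
}

int main(void)
{
	printf("probe -> %d\n", probe(1));
	return 0;
}
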
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
index c2936dc48ca7..00bfea01be65 100644
--- a/drivers/usb/phy/phy-mxs-usb.c
+++ b/drivers/usb/phy/phy-mxs-usb.c
@@ -220,7 +220,7 @@ static int mxs_phy_hw_init(struct mxs_phy *mxs_phy)
220/* Return true if the vbus is there */ 220/* Return true if the vbus is there */
221static bool mxs_phy_get_vbus_status(struct mxs_phy *mxs_phy) 221static bool mxs_phy_get_vbus_status(struct mxs_phy *mxs_phy)
222{ 222{
223 unsigned int vbus_value; 223 unsigned int vbus_value = 0;
224 224
225 if (!mxs_phy->regmap_anatop) 225 if (!mxs_phy->regmap_anatop)
226 return false; 226 return false;
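
Initializing vbus_value in the mxs-phy hunk matters because the register read that fills it can fail and leave the variable untouched, after which it is tested against a bit mask. A minimal sketch of the hazard and the fix, using a stand-in for the possibly failing read rather than the real regmap call:

#include <stdio.h>

#define VBUS_VALID_BIT 0x08

/* Stand-in for a register read that may fail without writing *val. */
static int read_reg(unsigned int *val, int fail)
{
	if (fail)
		return -5;	/* error: *val left untouched */
	*val = VBUS_VALID_BIT;
	return 0;
}

static int vbus_present(int fail)
{
	unsigned int vbus_value = 0;	/* defined even if the read fails */

	read_reg(&vbus_value, fail);	/* return value not checked, as in the driver */
	return !!(vbus_value & VBUS_VALID_BIT);
}

int main(void)
{
	printf("read ok:   %d\n", vbus_present(0));
	printf("read fail: %d\n", vbus_present(1));
	return 0;
}
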
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index f612dda9c977..56ecb8b5115d 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -475,22 +475,6 @@ config USB_SERIAL_MOS7840
475 To compile this driver as a module, choose M here: the 475 To compile this driver as a module, choose M here: the
476 module will be called mos7840. If unsure, choose N. 476 module will be called mos7840. If unsure, choose N.
477 477
478config USB_SERIAL_MXUPORT11
479 tristate "USB Moxa UPORT 11x0 Serial Driver"
480 ---help---
481 Say Y here if you want to use a MOXA UPort 11x0 Serial hub.
482
483 This driver supports:
484
485 - UPort 1110 : 1 port RS-232 USB to Serial Hub.
486 - UPort 1130 : 1 port RS-422/485 USB to Serial Hub.
487 - UPort 1130I : 1 port RS-422/485 USB to Serial Hub with Isolation.
488 - UPort 1150 : 1 port RS-232/422/485 USB to Serial Hub.
489 - UPort 1150I : 1 port RS-232/422/485 USB to Serial Hub with Isolation.
490
491 To compile this driver as a module, choose M here: the
492 module will be called mxu11x0.
493
494config USB_SERIAL_MXUPORT 478config USB_SERIAL_MXUPORT
495 tristate "USB Moxa UPORT Serial Driver" 479 tristate "USB Moxa UPORT Serial Driver"
496 ---help--- 480 ---help---
diff --git a/drivers/usb/serial/Makefile b/drivers/usb/serial/Makefile
index f3fa5e53702d..349d9df0895f 100644
--- a/drivers/usb/serial/Makefile
+++ b/drivers/usb/serial/Makefile
@@ -38,7 +38,6 @@ obj-$(CONFIG_USB_SERIAL_METRO) += metro-usb.o
38obj-$(CONFIG_USB_SERIAL_MOS7720) += mos7720.o 38obj-$(CONFIG_USB_SERIAL_MOS7720) += mos7720.o
39obj-$(CONFIG_USB_SERIAL_MOS7840) += mos7840.o 39obj-$(CONFIG_USB_SERIAL_MOS7840) += mos7840.o
40obj-$(CONFIG_USB_SERIAL_MXUPORT) += mxuport.o 40obj-$(CONFIG_USB_SERIAL_MXUPORT) += mxuport.o
41obj-$(CONFIG_USB_SERIAL_MXUPORT11) += mxu11x0.o
42obj-$(CONFIG_USB_SERIAL_NAVMAN) += navman.o 41obj-$(CONFIG_USB_SERIAL_NAVMAN) += navman.o
43obj-$(CONFIG_USB_SERIAL_OMNINET) += omninet.o 42obj-$(CONFIG_USB_SERIAL_OMNINET) += omninet.o
44obj-$(CONFIG_USB_SERIAL_OPTICON) += opticon.o 43obj-$(CONFIG_USB_SERIAL_OPTICON) += opticon.o
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 9b90ad747d87..73a366de5102 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -99,6 +99,7 @@ static const struct usb_device_id id_table[] = {
99 { USB_DEVICE(0x10C4, 0x81AC) }, /* MSD Dash Hawk */ 99 { USB_DEVICE(0x10C4, 0x81AC) }, /* MSD Dash Hawk */
100 { USB_DEVICE(0x10C4, 0x81AD) }, /* INSYS USB Modem */ 100 { USB_DEVICE(0x10C4, 0x81AD) }, /* INSYS USB Modem */
101 { USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */ 101 { USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */
102 { USB_DEVICE(0x10C4, 0x81D7) }, /* IAI Corp. RCB-CV-USB USB to RS485 Adaptor */
102 { USB_DEVICE(0x10C4, 0x81E2) }, /* Lipowsky Industrie Elektronik GmbH, Baby-LIN */ 103 { USB_DEVICE(0x10C4, 0x81E2) }, /* Lipowsky Industrie Elektronik GmbH, Baby-LIN */
103 { USB_DEVICE(0x10C4, 0x81E7) }, /* Aerocomm Radio */ 104 { USB_DEVICE(0x10C4, 0x81E7) }, /* Aerocomm Radio */
104 { USB_DEVICE(0x10C4, 0x81E8) }, /* Zephyr Bioharness */ 105 { USB_DEVICE(0x10C4, 0x81E8) }, /* Zephyr Bioharness */
@@ -162,6 +163,9 @@ static const struct usb_device_id id_table[] = {
162 { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ 163 { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
163 { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ 164 { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
164 { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */ 165 { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
166 { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
167 { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
168 { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
165 { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ 169 { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
166 { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */ 170 { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
167 { USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */ 171 { USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */
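
New hardware support in cp210x (and likewise in the ftdi_sio, option and qcserial additions further down) is just extra rows in the driver's USB ID table: the core matches on vendor/product ID and binds the same driver. A hedged userspace sketch of such table matching, reusing two of the IDs added above; the table layout and helper are simplified stand-ins, not the kernel's usb_device_id machinery.

#include <stdio.h>

struct usb_id { unsigned short vid, pid; };

/* Illustrative table in the spirit of cp210x's id_table. */
static const struct usb_id id_table[] = {
	{ 0x10C4, 0x81D7 },	/* IAI RCB-CV-USB adaptor added above */
	{ 0x1901, 0x0190 },	/* GE B850 CP2105 recorder interface  */
	{ 0, 0 }		/* terminator */
};

static int id_match(unsigned short vid, unsigned short pid)
{
	const struct usb_id *id;

	for (id = id_table; id->vid; id++)
		if (id->vid == vid && id->pid == pid)
			return 1;
	return 0;
}

int main(void)
{
	printf("0x10C4:0x81D7 -> %d\n", id_match(0x10C4, 0x81D7));
	printf("0x10C4:0xFFFF -> %d\n", id_match(0x10C4, 0xFFFF));
	return 0;
}
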
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index a5a0376bbd48..8c660ae401d8 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -824,6 +824,7 @@ static const struct usb_device_id id_table_combined[] = {
824 { USB_DEVICE(FTDI_VID, FTDI_TURTELIZER_PID), 824 { USB_DEVICE(FTDI_VID, FTDI_TURTELIZER_PID),
825 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 825 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
826 { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) }, 826 { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) },
827 { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_SCU18) },
827 { USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) }, 828 { USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) },
828 829
829 /* Papouch devices based on FTDI chip */ 830 /* Papouch devices based on FTDI chip */
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 67c6d4469730..a84df2513994 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -615,6 +615,7 @@
615 */ 615 */
616#define RATOC_VENDOR_ID 0x0584 616#define RATOC_VENDOR_ID 0x0584
617#define RATOC_PRODUCT_ID_USB60F 0xb020 617#define RATOC_PRODUCT_ID_USB60F 0xb020
618#define RATOC_PRODUCT_ID_SCU18 0xb03a
618 619
619/* 620/*
620 * Infineon Technologies 621 * Infineon Technologies
diff --git a/drivers/usb/serial/mxu11x0.c b/drivers/usb/serial/mxu11x0.c
deleted file mode 100644
index e3c3f57c2d82..000000000000
--- a/drivers/usb/serial/mxu11x0.c
+++ /dev/null
@@ -1,986 +0,0 @@
1/*
2 * USB Moxa UPORT 11x0 Serial Driver
3 *
4 * Copyright (C) 2007 MOXA Technologies Co., Ltd.
5 * Copyright (C) 2015 Mathieu Othacehe <m.othacehe@gmail.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 *
13 * Supports the following Moxa USB to serial converters:
14 * UPort 1110, 1 port RS-232 USB to Serial Hub.
15 * UPort 1130, 1 port RS-422/485 USB to Serial Hub.
16 * UPort 1130I, 1 port RS-422/485 USB to Serial Hub with isolation
17 * protection.
18 * UPort 1150, 1 port RS-232/422/485 USB to Serial Hub.
19 * UPort 1150I, 1 port RS-232/422/485 USB to Serial Hub with isolation
20 * protection.
21 */
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/firmware.h>
26#include <linux/jiffies.h>
27#include <linux/serial.h>
28#include <linux/serial_reg.h>
29#include <linux/slab.h>
30#include <linux/spinlock.h>
31#include <linux/mutex.h>
32#include <linux/tty.h>
33#include <linux/tty_driver.h>
34#include <linux/tty_flip.h>
35#include <linux/uaccess.h>
36#include <linux/usb.h>
37#include <linux/usb/serial.h>
38
39/* Vendor and product ids */
40#define MXU1_VENDOR_ID 0x110a
41#define MXU1_1110_PRODUCT_ID 0x1110
42#define MXU1_1130_PRODUCT_ID 0x1130
43#define MXU1_1150_PRODUCT_ID 0x1150
44#define MXU1_1151_PRODUCT_ID 0x1151
45#define MXU1_1131_PRODUCT_ID 0x1131
46
47/* Commands */
48#define MXU1_GET_VERSION 0x01
49#define MXU1_GET_PORT_STATUS 0x02
50#define MXU1_GET_PORT_DEV_INFO 0x03
51#define MXU1_GET_CONFIG 0x04
52#define MXU1_SET_CONFIG 0x05
53#define MXU1_OPEN_PORT 0x06
54#define MXU1_CLOSE_PORT 0x07
55#define MXU1_START_PORT 0x08
56#define MXU1_STOP_PORT 0x09
57#define MXU1_TEST_PORT 0x0A
58#define MXU1_PURGE_PORT 0x0B
59#define MXU1_RESET_EXT_DEVICE 0x0C
60#define MXU1_GET_OUTQUEUE 0x0D
61#define MXU1_WRITE_DATA 0x80
62#define MXU1_READ_DATA 0x81
63#define MXU1_REQ_TYPE_CLASS 0x82
64
65/* Module identifiers */
66#define MXU1_I2C_PORT 0x01
67#define MXU1_IEEE1284_PORT 0x02
68#define MXU1_UART1_PORT 0x03
69#define MXU1_UART2_PORT 0x04
70#define MXU1_RAM_PORT 0x05
71
72/* Modem status */
73#define MXU1_MSR_DELTA_CTS 0x01
74#define MXU1_MSR_DELTA_DSR 0x02
75#define MXU1_MSR_DELTA_RI 0x04
76#define MXU1_MSR_DELTA_CD 0x08
77#define MXU1_MSR_CTS 0x10
78#define MXU1_MSR_DSR 0x20
79#define MXU1_MSR_RI 0x40
80#define MXU1_MSR_CD 0x80
81#define MXU1_MSR_DELTA_MASK 0x0F
82#define MXU1_MSR_MASK 0xF0
83
84/* Line status */
85#define MXU1_LSR_OVERRUN_ERROR 0x01
86#define MXU1_LSR_PARITY_ERROR 0x02
87#define MXU1_LSR_FRAMING_ERROR 0x04
88#define MXU1_LSR_BREAK 0x08
89#define MXU1_LSR_ERROR 0x0F
90#define MXU1_LSR_RX_FULL 0x10
91#define MXU1_LSR_TX_EMPTY 0x20
92
93/* Modem control */
94#define MXU1_MCR_LOOP 0x04
95#define MXU1_MCR_DTR 0x10
96#define MXU1_MCR_RTS 0x20
97
98/* Mask settings */
99#define MXU1_UART_ENABLE_RTS_IN 0x0001
100#define MXU1_UART_DISABLE_RTS 0x0002
101#define MXU1_UART_ENABLE_PARITY_CHECKING 0x0008
102#define MXU1_UART_ENABLE_DSR_OUT 0x0010
103#define MXU1_UART_ENABLE_CTS_OUT 0x0020
104#define MXU1_UART_ENABLE_X_OUT 0x0040
105#define MXU1_UART_ENABLE_XA_OUT 0x0080
106#define MXU1_UART_ENABLE_X_IN 0x0100
107#define MXU1_UART_ENABLE_DTR_IN 0x0800
108#define MXU1_UART_DISABLE_DTR 0x1000
109#define MXU1_UART_ENABLE_MS_INTS 0x2000
110#define MXU1_UART_ENABLE_AUTO_START_DMA 0x4000
111#define MXU1_UART_SEND_BREAK_SIGNAL 0x8000
112
113/* Parity */
114#define MXU1_UART_NO_PARITY 0x00
115#define MXU1_UART_ODD_PARITY 0x01
116#define MXU1_UART_EVEN_PARITY 0x02
117#define MXU1_UART_MARK_PARITY 0x03
118#define MXU1_UART_SPACE_PARITY 0x04
119
120/* Stop bits */
121#define MXU1_UART_1_STOP_BITS 0x00
122#define MXU1_UART_1_5_STOP_BITS 0x01
123#define MXU1_UART_2_STOP_BITS 0x02
124
125/* Bits per character */
126#define MXU1_UART_5_DATA_BITS 0x00
127#define MXU1_UART_6_DATA_BITS 0x01
128#define MXU1_UART_7_DATA_BITS 0x02
129#define MXU1_UART_8_DATA_BITS 0x03
130
131/* Operation modes */
132#define MXU1_UART_232 0x00
133#define MXU1_UART_485_RECEIVER_DISABLED 0x01
134#define MXU1_UART_485_RECEIVER_ENABLED 0x02
135
136/* Pipe transfer mode and timeout */
137#define MXU1_PIPE_MODE_CONTINUOUS 0x01
138#define MXU1_PIPE_MODE_MASK 0x03
139#define MXU1_PIPE_TIMEOUT_MASK 0x7C
140#define MXU1_PIPE_TIMEOUT_ENABLE 0x80
141
142/* Config struct */
143struct mxu1_uart_config {
144 __be16 wBaudRate;
145 __be16 wFlags;
146 u8 bDataBits;
147 u8 bParity;
148 u8 bStopBits;
149 char cXon;
150 char cXoff;
151 u8 bUartMode;
152} __packed;
153
154/* Purge modes */
155#define MXU1_PURGE_OUTPUT 0x00
156#define MXU1_PURGE_INPUT 0x80
157
158/* Read/Write data */
159#define MXU1_RW_DATA_ADDR_SFR 0x10
160#define MXU1_RW_DATA_ADDR_IDATA 0x20
161#define MXU1_RW_DATA_ADDR_XDATA 0x30
162#define MXU1_RW_DATA_ADDR_CODE 0x40
163#define MXU1_RW_DATA_ADDR_GPIO 0x50
164#define MXU1_RW_DATA_ADDR_I2C 0x60
165#define MXU1_RW_DATA_ADDR_FLASH 0x70
166#define MXU1_RW_DATA_ADDR_DSP 0x80
167
168#define MXU1_RW_DATA_UNSPECIFIED 0x00
169#define MXU1_RW_DATA_BYTE 0x01
170#define MXU1_RW_DATA_WORD 0x02
171#define MXU1_RW_DATA_DOUBLE_WORD 0x04
172
173struct mxu1_write_data_bytes {
174 u8 bAddrType;
175 u8 bDataType;
176 u8 bDataCounter;
177 __be16 wBaseAddrHi;
178 __be16 wBaseAddrLo;
179 u8 bData[0];
180} __packed;
181
182/* Interrupt codes */
183#define MXU1_CODE_HARDWARE_ERROR 0xFF
184#define MXU1_CODE_DATA_ERROR 0x03
185#define MXU1_CODE_MODEM_STATUS 0x04
186
187static inline int mxu1_get_func_from_code(unsigned char code)
188{
189 return code & 0x0f;
190}
191
192/* Download firmware max packet size */
193#define MXU1_DOWNLOAD_MAX_PACKET_SIZE 64
194
195/* Firmware image header */
196struct mxu1_firmware_header {
197 __le16 wLength;
198 u8 bCheckSum;
199} __packed;
200
201#define MXU1_UART_BASE_ADDR 0xFFA0
202#define MXU1_UART_OFFSET_MCR 0x0004
203
204#define MXU1_BAUD_BASE 923077
205
206#define MXU1_TRANSFER_TIMEOUT 2
207#define MXU1_DOWNLOAD_TIMEOUT 1000
208#define MXU1_DEFAULT_CLOSING_WAIT 4000 /* in .01 secs */
209
210struct mxu1_port {
211 u8 msr;
212 u8 mcr;
213 u8 uart_mode;
214 spinlock_t spinlock; /* Protects msr */
215 struct mutex mutex; /* Protects mcr */
216 bool send_break;
217};
218
219struct mxu1_device {
220 u16 mxd_model;
221};
222
223static const struct usb_device_id mxu1_idtable[] = {
224 { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1110_PRODUCT_ID) },
225 { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1130_PRODUCT_ID) },
226 { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1150_PRODUCT_ID) },
227 { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1151_PRODUCT_ID) },
228 { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1131_PRODUCT_ID) },
229 { }
230};
231
232MODULE_DEVICE_TABLE(usb, mxu1_idtable);
233
234/* Write the given buffer out to the control pipe. */
235static int mxu1_send_ctrl_data_urb(struct usb_serial *serial,
236 u8 request,
237 u16 value, u16 index,
238 void *data, size_t size)
239{
240 int status;
241
242 status = usb_control_msg(serial->dev,
243 usb_sndctrlpipe(serial->dev, 0),
244 request,
245 (USB_DIR_OUT | USB_TYPE_VENDOR |
246 USB_RECIP_DEVICE), value, index,
247 data, size,
248 USB_CTRL_SET_TIMEOUT);
249 if (status < 0) {
250 dev_err(&serial->interface->dev,
251 "%s - usb_control_msg failed: %d\n",
252 __func__, status);
253 return status;
254 }
255
256 if (status != size) {
257 dev_err(&serial->interface->dev,
258 "%s - short write (%d / %zd)\n",
259 __func__, status, size);
260 return -EIO;
261 }
262
263 return 0;
264}
265
266/* Send a vendor request without any data */
267static int mxu1_send_ctrl_urb(struct usb_serial *serial,
268 u8 request, u16 value, u16 index)
269{
270 return mxu1_send_ctrl_data_urb(serial, request, value, index,
271 NULL, 0);
272}
273
274static int mxu1_download_firmware(struct usb_serial *serial,
275 const struct firmware *fw_p)
276{
277 int status = 0;
278 int buffer_size;
279 int pos;
280 int len;
281 int done;
282 u8 cs = 0;
283 u8 *buffer;
284 struct usb_device *dev = serial->dev;
285 struct mxu1_firmware_header *header;
286 unsigned int pipe;
287
288 pipe = usb_sndbulkpipe(dev, serial->port[0]->bulk_out_endpointAddress);
289
290 buffer_size = fw_p->size + sizeof(*header);
291 buffer = kmalloc(buffer_size, GFP_KERNEL);
292 if (!buffer)
293 return -ENOMEM;
294
295 memcpy(buffer, fw_p->data, fw_p->size);
296 memset(buffer + fw_p->size, 0xff, buffer_size - fw_p->size);
297
298 for (pos = sizeof(*header); pos < buffer_size; pos++)
299 cs = (u8)(cs + buffer[pos]);
300
301 header = (struct mxu1_firmware_header *)buffer;
302 header->wLength = cpu_to_le16(buffer_size - sizeof(*header));
303 header->bCheckSum = cs;
304
305 dev_dbg(&dev->dev, "%s - downloading firmware\n", __func__);
306
307 for (pos = 0; pos < buffer_size; pos += done) {
308 len = min(buffer_size - pos, MXU1_DOWNLOAD_MAX_PACKET_SIZE);
309
310 status = usb_bulk_msg(dev, pipe, buffer + pos, len, &done,
311 MXU1_DOWNLOAD_TIMEOUT);
312 if (status)
313 break;
314 }
315
316 kfree(buffer);
317
318 if (status) {
319 dev_err(&dev->dev, "failed to download firmware: %d\n", status);
320 return status;
321 }
322
323 msleep_interruptible(100);
324 usb_reset_device(dev);
325
326 dev_dbg(&dev->dev, "%s - download successful\n", __func__);
327
328 return 0;
329}
330
331static int mxu1_port_probe(struct usb_serial_port *port)
332{
333 struct mxu1_port *mxport;
334 struct mxu1_device *mxdev;
335
336 if (!port->interrupt_in_urb) {
337 dev_err(&port->dev, "no interrupt urb\n");
338 return -ENODEV;
339 }
340
341 mxport = kzalloc(sizeof(struct mxu1_port), GFP_KERNEL);
342 if (!mxport)
343 return -ENOMEM;
344
345 spin_lock_init(&mxport->spinlock);
346 mutex_init(&mxport->mutex);
347
348 mxdev = usb_get_serial_data(port->serial);
349
350 switch (mxdev->mxd_model) {
351 case MXU1_1110_PRODUCT_ID:
352 case MXU1_1150_PRODUCT_ID:
353 case MXU1_1151_PRODUCT_ID:
354 mxport->uart_mode = MXU1_UART_232;
355 break;
356 case MXU1_1130_PRODUCT_ID:
357 case MXU1_1131_PRODUCT_ID:
358 mxport->uart_mode = MXU1_UART_485_RECEIVER_DISABLED;
359 break;
360 }
361
362 usb_set_serial_port_data(port, mxport);
363
364 port->port.closing_wait =
365 msecs_to_jiffies(MXU1_DEFAULT_CLOSING_WAIT * 10);
366 port->port.drain_delay = 1;
367
368 return 0;
369}
370
371static int mxu1_startup(struct usb_serial *serial)
372{
373 struct mxu1_device *mxdev;
374 struct usb_device *dev = serial->dev;
375 struct usb_host_interface *cur_altsetting;
376 char fw_name[32];
377 const struct firmware *fw_p = NULL;
378 int err;
379
380 dev_dbg(&serial->interface->dev, "%s - product 0x%04X, num configurations %d, configuration value %d\n",
381 __func__, le16_to_cpu(dev->descriptor.idProduct),
382 dev->descriptor.bNumConfigurations,
383 dev->actconfig->desc.bConfigurationValue);
384
385 /* create device structure */
386 mxdev = kzalloc(sizeof(struct mxu1_device), GFP_KERNEL);
387 if (!mxdev)
388 return -ENOMEM;
389
390 usb_set_serial_data(serial, mxdev);
391
392 mxdev->mxd_model = le16_to_cpu(dev->descriptor.idProduct);
393
394 cur_altsetting = serial->interface->cur_altsetting;
395
396 /* if we have only 1 configuration, download firmware */
397 if (cur_altsetting->desc.bNumEndpoints == 1) {
398
399 snprintf(fw_name,
400 sizeof(fw_name),
401 "moxa/moxa-%04x.fw",
402 mxdev->mxd_model);
403
404 err = request_firmware(&fw_p, fw_name, &serial->interface->dev);
405 if (err) {
406 dev_err(&serial->interface->dev, "failed to request firmware: %d\n",
407 err);
408 goto err_free_mxdev;
409 }
410
411 err = mxu1_download_firmware(serial, fw_p);
412 if (err)
413 goto err_release_firmware;
414
415 /* device is being reset */
416 err = -ENODEV;
417 goto err_release_firmware;
418 }
419
420 return 0;
421
422err_release_firmware:
423 release_firmware(fw_p);
424err_free_mxdev:
425 kfree(mxdev);
426
427 return err;
428}
429
430static int mxu1_write_byte(struct usb_serial_port *port, u32 addr,
431 u8 mask, u8 byte)
432{
433 int status;
434 size_t size;
435 struct mxu1_write_data_bytes *data;
436
437 dev_dbg(&port->dev, "%s - addr 0x%08X, mask 0x%02X, byte 0x%02X\n",
438 __func__, addr, mask, byte);
439
440 size = sizeof(struct mxu1_write_data_bytes) + 2;
441 data = kzalloc(size, GFP_KERNEL);
442 if (!data)
443 return -ENOMEM;
444
445 data->bAddrType = MXU1_RW_DATA_ADDR_XDATA;
446 data->bDataType = MXU1_RW_DATA_BYTE;
447 data->bDataCounter = 1;
448 data->wBaseAddrHi = cpu_to_be16(addr >> 16);
449 data->wBaseAddrLo = cpu_to_be16(addr);
450 data->bData[0] = mask;
451 data->bData[1] = byte;
452
453 status = mxu1_send_ctrl_data_urb(port->serial, MXU1_WRITE_DATA, 0,
454 MXU1_RAM_PORT, data, size);
455 if (status < 0)
456 dev_err(&port->dev, "%s - failed: %d\n", __func__, status);
457
458 kfree(data);
459
460 return status;
461}
462
463static int mxu1_set_mcr(struct usb_serial_port *port, unsigned int mcr)
464{
465 int status;
466
467 status = mxu1_write_byte(port,
468 MXU1_UART_BASE_ADDR + MXU1_UART_OFFSET_MCR,
469 MXU1_MCR_RTS | MXU1_MCR_DTR | MXU1_MCR_LOOP,
470 mcr);
471 return status;
472}
473
474static void mxu1_set_termios(struct tty_struct *tty,
475 struct usb_serial_port *port,
476 struct ktermios *old_termios)
477{
478 struct mxu1_port *mxport = usb_get_serial_port_data(port);
479 struct mxu1_uart_config *config;
480 tcflag_t cflag, iflag;
481 speed_t baud;
482 int status;
483 unsigned int mcr;
484
485 cflag = tty->termios.c_cflag;
486 iflag = tty->termios.c_iflag;
487
488 if (old_termios &&
489 !tty_termios_hw_change(&tty->termios, old_termios) &&
490 tty->termios.c_iflag == old_termios->c_iflag) {
491 dev_dbg(&port->dev, "%s - nothing to change\n", __func__);
492 return;
493 }
494
495 dev_dbg(&port->dev,
496 "%s - cflag 0x%08x, iflag 0x%08x\n", __func__, cflag, iflag);
497
498 if (old_termios) {
499 dev_dbg(&port->dev, "%s - old cflag 0x%08x, old iflag 0x%08x\n",
500 __func__,
501 old_termios->c_cflag,
502 old_termios->c_iflag);
503 }
504
505 config = kzalloc(sizeof(*config), GFP_KERNEL);
506 if (!config)
507 return;
508
509 /* these flags must be set */
510 config->wFlags |= MXU1_UART_ENABLE_MS_INTS;
511 config->wFlags |= MXU1_UART_ENABLE_AUTO_START_DMA;
512 if (mxport->send_break)
513 config->wFlags |= MXU1_UART_SEND_BREAK_SIGNAL;
514 config->bUartMode = mxport->uart_mode;
515
516 switch (C_CSIZE(tty)) {
517 case CS5:
518 config->bDataBits = MXU1_UART_5_DATA_BITS;
519 break;
520 case CS6:
521 config->bDataBits = MXU1_UART_6_DATA_BITS;
522 break;
523 case CS7:
524 config->bDataBits = MXU1_UART_7_DATA_BITS;
525 break;
526 default:
527 case CS8:
528 config->bDataBits = MXU1_UART_8_DATA_BITS;
529 break;
530 }
531
532 if (C_PARENB(tty)) {
533 config->wFlags |= MXU1_UART_ENABLE_PARITY_CHECKING;
534 if (C_CMSPAR(tty)) {
535 if (C_PARODD(tty))
536 config->bParity = MXU1_UART_MARK_PARITY;
537 else
538 config->bParity = MXU1_UART_SPACE_PARITY;
539 } else {
540 if (C_PARODD(tty))
541 config->bParity = MXU1_UART_ODD_PARITY;
542 else
543 config->bParity = MXU1_UART_EVEN_PARITY;
544 }
545 } else {
546 config->bParity = MXU1_UART_NO_PARITY;
547 }
548
549 if (C_CSTOPB(tty))
550 config->bStopBits = MXU1_UART_2_STOP_BITS;
551 else
552 config->bStopBits = MXU1_UART_1_STOP_BITS;
553
554 if (C_CRTSCTS(tty)) {
555 /* RTS flow control must be off to drop RTS for baud rate B0 */
556 if (C_BAUD(tty) != B0)
557 config->wFlags |= MXU1_UART_ENABLE_RTS_IN;
558 config->wFlags |= MXU1_UART_ENABLE_CTS_OUT;
559 }
560
561 if (I_IXOFF(tty) || I_IXON(tty)) {
562 config->cXon = START_CHAR(tty);
563 config->cXoff = STOP_CHAR(tty);
564
565 if (I_IXOFF(tty))
566 config->wFlags |= MXU1_UART_ENABLE_X_IN;
567
568 if (I_IXON(tty))
569 config->wFlags |= MXU1_UART_ENABLE_X_OUT;
570 }
571
572 baud = tty_get_baud_rate(tty);
573 if (!baud)
574 baud = 9600;
575 config->wBaudRate = MXU1_BAUD_BASE / baud;
576
577 dev_dbg(&port->dev, "%s - BaudRate=%d, wBaudRate=%d, wFlags=0x%04X, bDataBits=%d, bParity=%d, bStopBits=%d, cXon=%d, cXoff=%d, bUartMode=%d\n",
578 __func__, baud, config->wBaudRate, config->wFlags,
579 config->bDataBits, config->bParity, config->bStopBits,
580 config->cXon, config->cXoff, config->bUartMode);
581
582 cpu_to_be16s(&config->wBaudRate);
583 cpu_to_be16s(&config->wFlags);
584
585 status = mxu1_send_ctrl_data_urb(port->serial, MXU1_SET_CONFIG, 0,
586 MXU1_UART1_PORT, config,
587 sizeof(*config));
588 if (status)
589 dev_err(&port->dev, "cannot set config: %d\n", status);
590
591 mutex_lock(&mxport->mutex);
592 mcr = mxport->mcr;
593
594 if (C_BAUD(tty) == B0)
595 mcr &= ~(MXU1_MCR_DTR | MXU1_MCR_RTS);
596 else if (old_termios && (old_termios->c_cflag & CBAUD) == B0)
597 mcr |= MXU1_MCR_DTR | MXU1_MCR_RTS;
598
599 status = mxu1_set_mcr(port, mcr);
600 if (status)
601 dev_err(&port->dev, "cannot set modem control: %d\n", status);
602 else
603 mxport->mcr = mcr;
604
605 mutex_unlock(&mxport->mutex);
606
607 kfree(config);
608}
609
610static int mxu1_get_serial_info(struct usb_serial_port *port,
611 struct serial_struct __user *ret_arg)
612{
613 struct serial_struct ret_serial;
614 unsigned cwait;
615
616 if (!ret_arg)
617 return -EFAULT;
618
619 cwait = port->port.closing_wait;
620 if (cwait != ASYNC_CLOSING_WAIT_NONE)
621 cwait = jiffies_to_msecs(cwait) / 10;
622
623 memset(&ret_serial, 0, sizeof(ret_serial));
624
625 ret_serial.type = PORT_16550A;
626 ret_serial.line = port->minor;
627 ret_serial.port = 0;
628 ret_serial.xmit_fifo_size = port->bulk_out_size;
629 ret_serial.baud_base = MXU1_BAUD_BASE;
630 ret_serial.close_delay = 5*HZ;
631 ret_serial.closing_wait = cwait;
632
633 if (copy_to_user(ret_arg, &ret_serial, sizeof(*ret_arg)))
634 return -EFAULT;
635
636 return 0;
637}
638
639
640static int mxu1_set_serial_info(struct usb_serial_port *port,
641 struct serial_struct __user *new_arg)
642{
643 struct serial_struct new_serial;
644 unsigned cwait;
645
646 if (copy_from_user(&new_serial, new_arg, sizeof(new_serial)))
647 return -EFAULT;
648
649 cwait = new_serial.closing_wait;
650 if (cwait != ASYNC_CLOSING_WAIT_NONE)
651 cwait = msecs_to_jiffies(10 * new_serial.closing_wait);
652
653 port->port.closing_wait = cwait;
654
655 return 0;
656}
657
658static int mxu1_ioctl(struct tty_struct *tty,
659 unsigned int cmd, unsigned long arg)
660{
661 struct usb_serial_port *port = tty->driver_data;
662
663 switch (cmd) {
664 case TIOCGSERIAL:
665 return mxu1_get_serial_info(port,
666 (struct serial_struct __user *)arg);
667 case TIOCSSERIAL:
668 return mxu1_set_serial_info(port,
669 (struct serial_struct __user *)arg);
670 }
671
672 return -ENOIOCTLCMD;
673}
674
675static int mxu1_tiocmget(struct tty_struct *tty)
676{
677 struct usb_serial_port *port = tty->driver_data;
678 struct mxu1_port *mxport = usb_get_serial_port_data(port);
679 unsigned int result;
680 unsigned int msr;
681 unsigned int mcr;
682 unsigned long flags;
683
684 mutex_lock(&mxport->mutex);
685 spin_lock_irqsave(&mxport->spinlock, flags);
686
687 msr = mxport->msr;
688 mcr = mxport->mcr;
689
690 spin_unlock_irqrestore(&mxport->spinlock, flags);
691 mutex_unlock(&mxport->mutex);
692
693 result = ((mcr & MXU1_MCR_DTR) ? TIOCM_DTR : 0) |
694 ((mcr & MXU1_MCR_RTS) ? TIOCM_RTS : 0) |
695 ((mcr & MXU1_MCR_LOOP) ? TIOCM_LOOP : 0) |
696 ((msr & MXU1_MSR_CTS) ? TIOCM_CTS : 0) |
697 ((msr & MXU1_MSR_CD) ? TIOCM_CAR : 0) |
698 ((msr & MXU1_MSR_RI) ? TIOCM_RI : 0) |
699 ((msr & MXU1_MSR_DSR) ? TIOCM_DSR : 0);
700
701 dev_dbg(&port->dev, "%s - 0x%04X\n", __func__, result);
702
703 return result;
704}
705
706static int mxu1_tiocmset(struct tty_struct *tty,
707 unsigned int set, unsigned int clear)
708{
709 struct usb_serial_port *port = tty->driver_data;
710 struct mxu1_port *mxport = usb_get_serial_port_data(port);
711 int err;
712 unsigned int mcr;
713
714 mutex_lock(&mxport->mutex);
715 mcr = mxport->mcr;
716
717 if (set & TIOCM_RTS)
718 mcr |= MXU1_MCR_RTS;
719 if (set & TIOCM_DTR)
720 mcr |= MXU1_MCR_DTR;
721 if (set & TIOCM_LOOP)
722 mcr |= MXU1_MCR_LOOP;
723
724 if (clear & TIOCM_RTS)
725 mcr &= ~MXU1_MCR_RTS;
726 if (clear & TIOCM_DTR)
727 mcr &= ~MXU1_MCR_DTR;
728 if (clear & TIOCM_LOOP)
729 mcr &= ~MXU1_MCR_LOOP;
730
731 err = mxu1_set_mcr(port, mcr);
732 if (!err)
733 mxport->mcr = mcr;
734
735 mutex_unlock(&mxport->mutex);
736
737 return err;
738}
739
740static void mxu1_break(struct tty_struct *tty, int break_state)
741{
742 struct usb_serial_port *port = tty->driver_data;
743 struct mxu1_port *mxport = usb_get_serial_port_data(port);
744
745 if (break_state == -1)
746 mxport->send_break = true;
747 else
748 mxport->send_break = false;
749
750 mxu1_set_termios(tty, port, NULL);
751}
752
753static int mxu1_open(struct tty_struct *tty, struct usb_serial_port *port)
754{
755 struct mxu1_port *mxport = usb_get_serial_port_data(port);
756 struct usb_serial *serial = port->serial;
757 int status;
758 u16 open_settings;
759
760 open_settings = (MXU1_PIPE_MODE_CONTINUOUS |
761 MXU1_PIPE_TIMEOUT_ENABLE |
762 (MXU1_TRANSFER_TIMEOUT << 2));
763
764 mxport->msr = 0;
765
766 status = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
767 if (status) {
768 dev_err(&port->dev, "failed to submit interrupt urb: %d\n",
769 status);
770 return status;
771 }
772
773 if (tty)
774 mxu1_set_termios(tty, port, NULL);
775
776 status = mxu1_send_ctrl_urb(serial, MXU1_OPEN_PORT,
777 open_settings, MXU1_UART1_PORT);
778 if (status) {
779 dev_err(&port->dev, "cannot send open command: %d\n", status);
780 goto unlink_int_urb;
781 }
782
783 status = mxu1_send_ctrl_urb(serial, MXU1_START_PORT,
784 0, MXU1_UART1_PORT);
785 if (status) {
786 dev_err(&port->dev, "cannot send start command: %d\n", status);
787 goto unlink_int_urb;
788 }
789
790 status = mxu1_send_ctrl_urb(serial, MXU1_PURGE_PORT,
791 MXU1_PURGE_INPUT, MXU1_UART1_PORT);
792 if (status) {
793 dev_err(&port->dev, "cannot clear input buffers: %d\n",
794 status);
795
796 goto unlink_int_urb;
797 }
798
799 status = mxu1_send_ctrl_urb(serial, MXU1_PURGE_PORT,
800 MXU1_PURGE_OUTPUT, MXU1_UART1_PORT);
801 if (status) {
802 dev_err(&port->dev, "cannot clear output buffers: %d\n",
803 status);
804
805 goto unlink_int_urb;
806 }
807
808 /*
809 * reset the data toggle on the bulk endpoints to work around bug in
810 * host controllers where things get out of sync some times
811 */
812 usb_clear_halt(serial->dev, port->write_urb->pipe);
813 usb_clear_halt(serial->dev, port->read_urb->pipe);
814
815 if (tty)
816 mxu1_set_termios(tty, port, NULL);
817
818 status = mxu1_send_ctrl_urb(serial, MXU1_OPEN_PORT,
819 open_settings, MXU1_UART1_PORT);
820 if (status) {
821 dev_err(&port->dev, "cannot send open command: %d\n", status);
822 goto unlink_int_urb;
823 }
824
825 status = mxu1_send_ctrl_urb(serial, MXU1_START_PORT,
826 0, MXU1_UART1_PORT);
827 if (status) {
828 dev_err(&port->dev, "cannot send start command: %d\n", status);
829 goto unlink_int_urb;
830 }
831
832 status = usb_serial_generic_open(tty, port);
833 if (status)
834 goto unlink_int_urb;
835
836 return 0;
837
838unlink_int_urb:
839 usb_kill_urb(port->interrupt_in_urb);
840
841 return status;
842}
843
844static void mxu1_close(struct usb_serial_port *port)
845{
846 int status;
847
848 usb_serial_generic_close(port);
849 usb_kill_urb(port->interrupt_in_urb);
850
851 status = mxu1_send_ctrl_urb(port->serial, MXU1_CLOSE_PORT,
852 0, MXU1_UART1_PORT);
853 if (status) {
854 dev_err(&port->dev, "failed to send close port command: %d\n",
855 status);
856 }
857}
858
859static void mxu1_handle_new_msr(struct usb_serial_port *port, u8 msr)
860{
861 struct mxu1_port *mxport = usb_get_serial_port_data(port);
862 struct async_icount *icount;
863 unsigned long flags;
864
865 dev_dbg(&port->dev, "%s - msr 0x%02X\n", __func__, msr);
866
867 spin_lock_irqsave(&mxport->spinlock, flags);
868 mxport->msr = msr & MXU1_MSR_MASK;
869 spin_unlock_irqrestore(&mxport->spinlock, flags);
870
871 if (msr & MXU1_MSR_DELTA_MASK) {
872 icount = &port->icount;
873 if (msr & MXU1_MSR_DELTA_CTS)
874 icount->cts++;
875 if (msr & MXU1_MSR_DELTA_DSR)
876 icount->dsr++;
877 if (msr & MXU1_MSR_DELTA_CD)
878 icount->dcd++;
879 if (msr & MXU1_MSR_DELTA_RI)
880 icount->rng++;
881
882 wake_up_interruptible(&port->port.delta_msr_wait);
883 }
884}
885
886static void mxu1_interrupt_callback(struct urb *urb)
887{
888 struct usb_serial_port *port = urb->context;
889 unsigned char *data = urb->transfer_buffer;
890 int length = urb->actual_length;
891 int function;
892 int status;
893 u8 msr;
894
895 switch (urb->status) {
896 case 0:
897 break;
898 case -ECONNRESET:
899 case -ENOENT:
900 case -ESHUTDOWN:
901 dev_dbg(&port->dev, "%s - urb shutting down: %d\n",
902 __func__, urb->status);
903 return;
904 default:
905 dev_dbg(&port->dev, "%s - nonzero urb status: %d\n",
906 __func__, urb->status);
907 goto exit;
908 }
909
910 if (length != 2) {
911 dev_dbg(&port->dev, "%s - bad packet size: %d\n",
912 __func__, length);
913 goto exit;
914 }
915
916 if (data[0] == MXU1_CODE_HARDWARE_ERROR) {
917 dev_err(&port->dev, "hardware error: %d\n", data[1]);
918 goto exit;
919 }
920
921 function = mxu1_get_func_from_code(data[0]);
922
923 dev_dbg(&port->dev, "%s - function %d, data 0x%02X\n",
924 __func__, function, data[1]);
925
926 switch (function) {
927 case MXU1_CODE_DATA_ERROR:
928 dev_dbg(&port->dev, "%s - DATA ERROR, data 0x%02X\n",
929 __func__, data[1]);
930 break;
931
932 case MXU1_CODE_MODEM_STATUS:
933 msr = data[1];
934 mxu1_handle_new_msr(port, msr);
935 break;
936
937 default:
938 dev_err(&port->dev, "unknown interrupt code: 0x%02X\n",
939 data[1]);
940 break;
941 }
942
943exit:
944 status = usb_submit_urb(urb, GFP_ATOMIC);
945 if (status) {
946 dev_err(&port->dev, "resubmit interrupt urb failed: %d\n",
947 status);
948 }
949}
950
951static struct usb_serial_driver mxu11x0_device = {
952 .driver = {
953 .owner = THIS_MODULE,
954 .name = "mxu11x0",
955 },
956 .description = "MOXA UPort 11x0",
957 .id_table = mxu1_idtable,
958 .num_ports = 1,
959 .port_probe = mxu1_port_probe,
960 .attach = mxu1_startup,
961 .open = mxu1_open,
962 .close = mxu1_close,
963 .ioctl = mxu1_ioctl,
964 .set_termios = mxu1_set_termios,
965 .tiocmget = mxu1_tiocmget,
966 .tiocmset = mxu1_tiocmset,
967 .tiocmiwait = usb_serial_generic_tiocmiwait,
968 .get_icount = usb_serial_generic_get_icount,
969 .break_ctl = mxu1_break,
970 .read_int_callback = mxu1_interrupt_callback,
971};
972
973static struct usb_serial_driver *const serial_drivers[] = {
974 &mxu11x0_device, NULL
975};
976
977module_usb_serial_driver(serial_drivers, mxu1_idtable);
978
979MODULE_AUTHOR("Mathieu Othacehe <m.othacehe@gmail.com>");
980MODULE_DESCRIPTION("MOXA UPort 11x0 USB to Serial Hub Driver");
981MODULE_LICENSE("GPL");
982MODULE_FIRMWARE("moxa/moxa-1110.fw");
983MODULE_FIRMWARE("moxa/moxa-1130.fw");
984MODULE_FIRMWARE("moxa/moxa-1131.fw");
985MODULE_FIRMWARE("moxa/moxa-1150.fw");
986MODULE_FIRMWARE("moxa/moxa-1151.fw");
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index f2280606b73c..348e19834b83 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -268,6 +268,9 @@ static void option_instat_callback(struct urb *urb);
268#define TELIT_PRODUCT_CC864_SINGLE 0x1006 268#define TELIT_PRODUCT_CC864_SINGLE 0x1006
269#define TELIT_PRODUCT_DE910_DUAL 0x1010 269#define TELIT_PRODUCT_DE910_DUAL 0x1010
270#define TELIT_PRODUCT_UE910_V2 0x1012 270#define TELIT_PRODUCT_UE910_V2 0x1012
271#define TELIT_PRODUCT_LE922_USBCFG0 0x1042
272#define TELIT_PRODUCT_LE922_USBCFG3 0x1043
273#define TELIT_PRODUCT_LE922_USBCFG5 0x1045
271#define TELIT_PRODUCT_LE920 0x1200 274#define TELIT_PRODUCT_LE920 0x1200
272#define TELIT_PRODUCT_LE910 0x1201 275#define TELIT_PRODUCT_LE910 0x1201
273 276
@@ -313,6 +316,7 @@ static void option_instat_callback(struct urb *urb);
313#define TOSHIBA_PRODUCT_G450 0x0d45 316#define TOSHIBA_PRODUCT_G450 0x0d45
314 317
315#define ALINK_VENDOR_ID 0x1e0e 318#define ALINK_VENDOR_ID 0x1e0e
319#define SIMCOM_PRODUCT_SIM7100E 0x9001 /* Yes, ALINK_VENDOR_ID */
316#define ALINK_PRODUCT_PH300 0x9100 320#define ALINK_PRODUCT_PH300 0x9100
317#define ALINK_PRODUCT_3GU 0x9200 321#define ALINK_PRODUCT_3GU 0x9200
318 322
@@ -605,6 +609,10 @@ static const struct option_blacklist_info zte_1255_blacklist = {
605 .reserved = BIT(3) | BIT(4), 609 .reserved = BIT(3) | BIT(4),
606}; 610};
607 611
612static const struct option_blacklist_info simcom_sim7100e_blacklist = {
613 .reserved = BIT(5) | BIT(6),
614};
615
608static const struct option_blacklist_info telit_le910_blacklist = { 616static const struct option_blacklist_info telit_le910_blacklist = {
609 .sendsetup = BIT(0), 617 .sendsetup = BIT(0),
610 .reserved = BIT(1) | BIT(2), 618 .reserved = BIT(1) | BIT(2),
@@ -615,6 +623,16 @@ static const struct option_blacklist_info telit_le920_blacklist = {
615 .reserved = BIT(1) | BIT(5), 623 .reserved = BIT(1) | BIT(5),
616}; 624};
617 625
626static const struct option_blacklist_info telit_le922_blacklist_usbcfg0 = {
627 .sendsetup = BIT(2),
628 .reserved = BIT(0) | BIT(1) | BIT(3),
629};
630
631static const struct option_blacklist_info telit_le922_blacklist_usbcfg3 = {
632 .sendsetup = BIT(0),
633 .reserved = BIT(1) | BIT(2) | BIT(3),
634};
635
618static const struct usb_device_id option_ids[] = { 636static const struct usb_device_id option_ids[] = {
619 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, 637 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
620 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, 638 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -1110,9 +1128,13 @@ static const struct usb_device_id option_ids[] = {
1110 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) }, 1128 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) },
1111 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, 1129 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
1112 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */ 1130 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
1131 { USB_DEVICE_AND_INTERFACE_INFO(QUALCOMM_VENDOR_ID, 0x6001, 0xff, 0xff, 0xff), /* 4G LTE usb-modem U901 */
1132 .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
1113 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ 1133 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
1114 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */ 1134 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
1115 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ 1135 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
1136 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9003), /* Quectel UC20 */
1137 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1116 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, 1138 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
1117 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, 1139 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
1118 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003), 1140 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
@@ -1160,6 +1182,12 @@ static const struct usb_device_id option_ids[] = {
1160 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) }, 1182 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) },
1161 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) }, 1183 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
1162 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) }, 1184 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
1185 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0),
1186 .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
1187 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3),
1188 .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
1189 { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff),
1190 .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
1163 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910), 1191 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
1164 .driver_info = (kernel_ulong_t)&telit_le910_blacklist }, 1192 .driver_info = (kernel_ulong_t)&telit_le910_blacklist },
1165 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920), 1193 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
@@ -1629,6 +1657,8 @@ static const struct usb_device_id option_ids[] = {
1629 { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) }, 1657 { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) },
1630 { USB_DEVICE(ALINK_VENDOR_ID, ALINK_PRODUCT_PH300) }, 1658 { USB_DEVICE(ALINK_VENDOR_ID, ALINK_PRODUCT_PH300) },
1631 { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) }, 1659 { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
1660 { USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
1661 .driver_info = (kernel_ulong_t)&simcom_sim7100e_blacklist },
1632 { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200), 1662 { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
1633 .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist 1663 .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
1634 }, 1664 },
@@ -1679,7 +1709,7 @@ static const struct usb_device_id option_ids[] = {
1679 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) }, 1709 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
1680 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8), 1710 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8),
1681 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1711 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1682 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX) }, 1712 { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX, 0xff) },
1683 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX), 1713 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX),
1684 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1714 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1685 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, 1715 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
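
The option.c additions pair each new modem ID with a blacklist describing which interfaces carry setup traffic (.sendsetup) and which the serial driver should leave alone (.reserved), typically because they are network functions. A small sketch of testing an interface number against such a bitmap; the struct mirrors the option_blacklist_info shape visible above, and the helper name is illustrative.

#include <stdio.h>

#define BIT(n) (1UL << (n))

/* Mirrors the shape of option_blacklist_info in the hunk above. */
struct blacklist_info {
	unsigned long sendsetup;
	unsigned long reserved;
};

/* e.g. the SIM7100E entry reserves interfaces 5 and 6. */
static const struct blacklist_info sim7100e = {
	.reserved = BIT(5) | BIT(6),
};

static int iface_is_reserved(const struct blacklist_info *bl, int ifnum)
{
	return !!(bl->reserved & BIT(ifnum));
}

int main(void)
{
	int i;

	for (i = 0; i < 8; i++)
		printf("interface %d reserved: %d\n", i,
		       iface_is_reserved(&sim7100e, i));
	return 0;
}
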
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 9919d2a9faf2..1bc6089b9008 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -157,14 +157,17 @@ static const struct usb_device_id id_table[] = {
157 {DEVICE_SWI(0x1199, 0x9056)}, /* Sierra Wireless Modem */ 157 {DEVICE_SWI(0x1199, 0x9056)}, /* Sierra Wireless Modem */
158 {DEVICE_SWI(0x1199, 0x9060)}, /* Sierra Wireless Modem */ 158 {DEVICE_SWI(0x1199, 0x9060)}, /* Sierra Wireless Modem */
159 {DEVICE_SWI(0x1199, 0x9061)}, /* Sierra Wireless Modem */ 159 {DEVICE_SWI(0x1199, 0x9061)}, /* Sierra Wireless Modem */
160 {DEVICE_SWI(0x1199, 0x9070)}, /* Sierra Wireless MC74xx/EM74xx */ 160 {DEVICE_SWI(0x1199, 0x9070)}, /* Sierra Wireless MC74xx */
161 {DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx/EM74xx */ 161 {DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx */
162 {DEVICE_SWI(0x1199, 0x9078)}, /* Sierra Wireless EM74xx */
163 {DEVICE_SWI(0x1199, 0x9079)}, /* Sierra Wireless EM74xx */
162 {DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */ 164 {DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
163 {DEVICE_SWI(0x413c, 0x81a3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */ 165 {DEVICE_SWI(0x413c, 0x81a3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
164 {DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ 166 {DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
165 {DEVICE_SWI(0x413c, 0x81a8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */ 167 {DEVICE_SWI(0x413c, 0x81a8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
166 {DEVICE_SWI(0x413c, 0x81a9)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ 168 {DEVICE_SWI(0x413c, 0x81a9)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
167 {DEVICE_SWI(0x413c, 0x81b1)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */ 169 {DEVICE_SWI(0x413c, 0x81b1)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
170 {DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
168 171
169 /* Huawei devices */ 172 /* Huawei devices */
170 {DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */ 173 {DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index 60afb39eb73c..337a0be89fcf 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -544,6 +544,11 @@ static int treo_attach(struct usb_serial *serial)
544 (serial->num_interrupt_in == 0)) 544 (serial->num_interrupt_in == 0))
545 return 0; 545 return 0;
546 546
547 if (serial->num_bulk_in < 2 || serial->num_interrupt_in < 2) {
548 dev_err(&serial->interface->dev, "missing endpoints\n");
549 return -ENODEV;
550 }
551
547 /* 552 /*
548 * It appears that Treos and Kyoceras want to use the 553 * It appears that Treos and Kyoceras want to use the
549 * 1st bulk in endpoint to communicate with the 2nd bulk out endpoint, 554 * 1st bulk in endpoint to communicate with the 2nd bulk out endpoint,
@@ -597,8 +602,10 @@ static int clie_5_attach(struct usb_serial *serial)
597 */ 602 */
598 603
599 /* some sanity check */ 604 /* some sanity check */
600 if (serial->num_ports < 2) 605 if (serial->num_bulk_out < 2) {
601 return -1; 606 dev_err(&serial->interface->dev, "missing bulk out endpoints\n");
607 return -ENODEV;
608 }
602 609
603 /* port 0 now uses the modified endpoint Address */ 610 /* port 0 now uses the modified endpoint Address */
604 port = serial->port[0]; 611 port = serial->port[0];
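
The visor changes refuse to bind when the expected bulk/interrupt endpoints are missing, instead of indexing into the port array and crashing on a malformed or malicious device descriptor. A minimal sketch of the same check-before-index pattern, with deliberately simplified types that conflate endpoint and port counts:

#include <stdio.h>
#include <errno.h>

struct port { int id; };
struct serial {
	int num_bulk_in;
	int num_interrupt_in;
	struct port *port[4];
};

static int attach(struct serial *s)
{
	/* Refuse devices that do not expose what is dereferenced below. */
	if (s->num_bulk_in < 2 || s->num_interrupt_in < 2)
		return -ENODEV;

	/* Safe here: both entries used below are known to exist. */
	printf("swapping endpoints of ports %d and %d\n",
	       s->port[0]->id, s->port[1]->id);
	return 0;
}

int main(void)
{
	struct port p0 = { 0 }, p1 = { 1 };
	struct serial bad = { .num_bulk_in = 1, .num_interrupt_in = 1 };
	struct serial good = { .num_bulk_in = 2, .num_interrupt_in = 2,
			       .port = { &p0, &p1 } };

	printf("bad:  %d\n", attach(&bad));
	printf("good: %d\n", attach(&good));
	return 0;
}
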
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 2760a7ba3f30..8c80a48e3233 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -446,7 +446,8 @@ static long vfio_pci_ioctl(void *device_data,
446 info.num_regions = VFIO_PCI_NUM_REGIONS; 446 info.num_regions = VFIO_PCI_NUM_REGIONS;
447 info.num_irqs = VFIO_PCI_NUM_IRQS; 447 info.num_irqs = VFIO_PCI_NUM_IRQS;
448 448
449 return copy_to_user((void __user *)arg, &info, minsz); 449 return copy_to_user((void __user *)arg, &info, minsz) ?
450 -EFAULT : 0;
450 451
451 } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) { 452 } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
452 struct pci_dev *pdev = vdev->pdev; 453 struct pci_dev *pdev = vdev->pdev;
@@ -520,7 +521,8 @@ static long vfio_pci_ioctl(void *device_data,
520 return -EINVAL; 521 return -EINVAL;
521 } 522 }
522 523
523 return copy_to_user((void __user *)arg, &info, minsz); 524 return copy_to_user((void __user *)arg, &info, minsz) ?
525 -EFAULT : 0;
524 526
525 } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) { 527 } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
526 struct vfio_irq_info info; 528 struct vfio_irq_info info;
@@ -555,7 +557,8 @@ static long vfio_pci_ioctl(void *device_data,
555 else 557 else
556 info.flags |= VFIO_IRQ_INFO_NORESIZE; 558 info.flags |= VFIO_IRQ_INFO_NORESIZE;
557 559
558 return copy_to_user((void __user *)arg, &info, minsz); 560 return copy_to_user((void __user *)arg, &info, minsz) ?
561 -EFAULT : 0;
559 562
560 } else if (cmd == VFIO_DEVICE_SET_IRQS) { 563 } else if (cmd == VFIO_DEVICE_SET_IRQS) {
561 struct vfio_irq_set hdr; 564 struct vfio_irq_set hdr;
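
copy_to_user() returns the number of bytes it could not copy, not a negative errno, so returning its value straight from an ioctl hands userspace a bogus positive "error" on partial copies and can be mistaken for success handling. The vfio_pci hunks above (and the matching ones in vfio_platform and vfio_iommu_type1 further down) convert that into -EFAULT on partial copy and 0 on success. A userspace-style sketch of the wrong and right return conventions, with a stand-in for copy_to_user:

#include <stdio.h>
#include <errno.h>
#include <string.h>

/* Stand-in for copy_to_user(): returns bytes NOT copied (0 on success). */
static unsigned long fake_copy_to_user(void *dst, const void *src,
				       unsigned long n, unsigned long fail)
{
	unsigned long copied = n > fail ? n - fail : 0;

	memcpy(dst, src, copied);
	return n - copied;
}

static long ioctl_wrong(void *dst, const void *src, unsigned long n,
			unsigned long fail)
{
	return fake_copy_to_user(dst, src, n, fail);	/* leaks byte count */
}

static long ioctl_right(void *dst, const void *src, unsigned long n,
			unsigned long fail)
{
	return fake_copy_to_user(dst, src, n, fail) ? -EFAULT : 0;
}

int main(void)
{
	char info[16] = "region info", out[16];

	printf("wrong, partial copy: %ld\n",
	       ioctl_wrong(out, info, sizeof(info), 4));
	printf("right, partial copy: %ld\n",
	       ioctl_right(out, info, sizeof(info), 4));
	printf("right, full copy:    %ld\n",
	       ioctl_right(out, info, sizeof(info), 0));
	return 0;
}
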
diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
index 418cdd9ba3f4..e65b142d3422 100644
--- a/drivers/vfio/platform/vfio_platform_common.c
+++ b/drivers/vfio/platform/vfio_platform_common.c
@@ -219,7 +219,8 @@ static long vfio_platform_ioctl(void *device_data,
219 info.num_regions = vdev->num_regions; 219 info.num_regions = vdev->num_regions;
220 info.num_irqs = vdev->num_irqs; 220 info.num_irqs = vdev->num_irqs;
221 221
222 return copy_to_user((void __user *)arg, &info, minsz); 222 return copy_to_user((void __user *)arg, &info, minsz) ?
223 -EFAULT : 0;
223 224
224 } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) { 225 } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
225 struct vfio_region_info info; 226 struct vfio_region_info info;
@@ -240,7 +241,8 @@ static long vfio_platform_ioctl(void *device_data,
240 info.size = vdev->regions[info.index].size; 241 info.size = vdev->regions[info.index].size;
241 info.flags = vdev->regions[info.index].flags; 242 info.flags = vdev->regions[info.index].flags;
242 243
243 return copy_to_user((void __user *)arg, &info, minsz); 244 return copy_to_user((void __user *)arg, &info, minsz) ?
245 -EFAULT : 0;
244 246
245 } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) { 247 } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
246 struct vfio_irq_info info; 248 struct vfio_irq_info info;
@@ -259,7 +261,8 @@ static long vfio_platform_ioctl(void *device_data,
259 info.flags = vdev->irqs[info.index].flags; 261 info.flags = vdev->irqs[info.index].flags;
260 info.count = vdev->irqs[info.index].count; 262 info.count = vdev->irqs[info.index].count;
261 263
262 return copy_to_user((void __user *)arg, &info, minsz); 264 return copy_to_user((void __user *)arg, &info, minsz) ?
265 -EFAULT : 0;
263 266
264 } else if (cmd == VFIO_DEVICE_SET_IRQS) { 267 } else if (cmd == VFIO_DEVICE_SET_IRQS) {
265 struct vfio_irq_set hdr; 268 struct vfio_irq_set hdr;
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 82f25cc1c460..ecca316386f5 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -123,8 +123,8 @@ struct iommu_group *vfio_iommu_group_get(struct device *dev)
123 /* 123 /*
124 * With noiommu enabled, an IOMMU group will be created for a device 124 * With noiommu enabled, an IOMMU group will be created for a device
125 * that doesn't already have one and doesn't have an iommu_ops on their 125 * that doesn't already have one and doesn't have an iommu_ops on their
126 * bus. We use iommu_present() again in the main code to detect these 126 * bus. We set iommudata simply to be able to identify these groups
127 * fake groups. 127 * as special use and for reclamation later.
128 */ 128 */
129 if (group || !noiommu || iommu_present(dev->bus)) 129 if (group || !noiommu || iommu_present(dev->bus))
130 return group; 130 return group;
@@ -134,6 +134,7 @@ struct iommu_group *vfio_iommu_group_get(struct device *dev)
134 return NULL; 134 return NULL;
135 135
136 iommu_group_set_name(group, "vfio-noiommu"); 136 iommu_group_set_name(group, "vfio-noiommu");
137 iommu_group_set_iommudata(group, &noiommu, NULL);
137 ret = iommu_group_add_device(group, dev); 138 ret = iommu_group_add_device(group, dev);
138 iommu_group_put(group); 139 iommu_group_put(group);
139 if (ret) 140 if (ret)
@@ -158,7 +159,7 @@ EXPORT_SYMBOL_GPL(vfio_iommu_group_get);
158void vfio_iommu_group_put(struct iommu_group *group, struct device *dev) 159void vfio_iommu_group_put(struct iommu_group *group, struct device *dev)
159{ 160{
160#ifdef CONFIG_VFIO_NOIOMMU 161#ifdef CONFIG_VFIO_NOIOMMU
161 if (!iommu_present(dev->bus)) 162 if (iommu_group_get_iommudata(group) == &noiommu)
162 iommu_group_remove_device(dev); 163 iommu_group_remove_device(dev);
163#endif 164#endif
164 165
@@ -190,16 +191,10 @@ static long vfio_noiommu_ioctl(void *iommu_data,
190 return -ENOTTY; 191 return -ENOTTY;
191} 192}
192 193
193static int vfio_iommu_present(struct device *dev, void *unused)
194{
195 return iommu_present(dev->bus) ? 1 : 0;
196}
197
198static int vfio_noiommu_attach_group(void *iommu_data, 194static int vfio_noiommu_attach_group(void *iommu_data,
199 struct iommu_group *iommu_group) 195 struct iommu_group *iommu_group)
200{ 196{
201 return iommu_group_for_each_dev(iommu_group, NULL, 197 return iommu_group_get_iommudata(iommu_group) == &noiommu ? 0 : -EINVAL;
202 vfio_iommu_present) ? -EINVAL : 0;
203} 198}
204 199
205static void vfio_noiommu_detach_group(void *iommu_data, 200static void vfio_noiommu_detach_group(void *iommu_data,
@@ -323,8 +318,7 @@ static void vfio_group_unlock_and_free(struct vfio_group *group)
323/** 318/**
324 * Group objects - create, release, get, put, search 319 * Group objects - create, release, get, put, search
325 */ 320 */
326static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group, 321static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
327 bool iommu_present)
328{ 322{
329 struct vfio_group *group, *tmp; 323 struct vfio_group *group, *tmp;
330 struct device *dev; 324 struct device *dev;
@@ -342,7 +336,9 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group,
342 atomic_set(&group->container_users, 0); 336 atomic_set(&group->container_users, 0);
343 atomic_set(&group->opened, 0); 337 atomic_set(&group->opened, 0);
344 group->iommu_group = iommu_group; 338 group->iommu_group = iommu_group;
345 group->noiommu = !iommu_present; 339#ifdef CONFIG_VFIO_NOIOMMU
340 group->noiommu = (iommu_group_get_iommudata(iommu_group) == &noiommu);
341#endif
346 342
347 group->nb.notifier_call = vfio_iommu_group_notifier; 343 group->nb.notifier_call = vfio_iommu_group_notifier;
348 344
@@ -767,7 +763,7 @@ int vfio_add_group_dev(struct device *dev,
767 763
768 group = vfio_group_get_from_iommu(iommu_group); 764 group = vfio_group_get_from_iommu(iommu_group);
769 if (!group) { 765 if (!group) {
770 group = vfio_create_group(iommu_group, iommu_present(dev->bus)); 766 group = vfio_create_group(iommu_group);
771 if (IS_ERR(group)) { 767 if (IS_ERR(group)) {
772 iommu_group_put(iommu_group); 768 iommu_group_put(iommu_group);
773 return PTR_ERR(group); 769 return PTR_ERR(group);
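
The vfio.c rework above stops inferring "fake" noiommu groups from iommu_present() and instead tags them explicitly through the group's iommudata pointer when they are created, then compares that pointer later (in put, attach and group creation). A sketch of the tagging idea with plain pointers; the group structure and helpers here are simplified stand-ins for the iommu_group API.

#include <stdio.h>

/* Sentinel whose address tags groups created by the noiommu path. */
static int noiommu = 1;

struct group { void *iommudata; };

static void group_set_data(struct group *g, void *data) { g->iommudata = data; }
static void *group_get_data(struct group *g) { return g->iommudata; }

static int group_is_noiommu(struct group *g)
{
	return group_get_data(g) == &noiommu;
}

int main(void)
{
	struct group fake = { 0 }, real = { 0 };

	group_set_data(&fake, &noiommu);	/* done at creation time */

	printf("fake group noiommu: %d\n", group_is_noiommu(&fake));
	printf("real group noiommu: %d\n", group_is_noiommu(&real));
	return 0;
}
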
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 6f1ea3dddbad..75b24e93cedb 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -999,7 +999,8 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
999 999
1000 info.iova_pgsizes = vfio_pgsize_bitmap(iommu); 1000 info.iova_pgsizes = vfio_pgsize_bitmap(iommu);
1001 1001
1002 return copy_to_user((void __user *)arg, &info, minsz); 1002 return copy_to_user((void __user *)arg, &info, minsz) ?
1003 -EFAULT : 0;
1003 1004
1004 } else if (cmd == VFIO_IOMMU_MAP_DMA) { 1005 } else if (cmd == VFIO_IOMMU_MAP_DMA) {
1005 struct vfio_iommu_type1_dma_map map; 1006 struct vfio_iommu_type1_dma_map map;
@@ -1032,7 +1033,8 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
1032 if (ret) 1033 if (ret)
1033 return ret; 1034 return ret;
1034 1035
1035 return copy_to_user((void __user *)arg, &unmap, minsz); 1036 return copy_to_user((void __user *)arg, &unmap, minsz) ?
1037 -EFAULT : 0;
1036 } 1038 }
1037 1039
1038 return -ENOTTY; 1040 return -ENOTTY;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index ad2146a9ab2d..236553e81027 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -1156,6 +1156,8 @@ int vhost_init_used(struct vhost_virtqueue *vq)
1156{ 1156{
1157 __virtio16 last_used_idx; 1157 __virtio16 last_used_idx;
1158 int r; 1158 int r;
1159 bool is_le = vq->is_le;
1160
1159 if (!vq->private_data) { 1161 if (!vq->private_data) {
1160 vq->is_le = virtio_legacy_is_little_endian(); 1162 vq->is_le = virtio_legacy_is_little_endian();
1161 return 0; 1163 return 0;
@@ -1165,15 +1167,20 @@ int vhost_init_used(struct vhost_virtqueue *vq)
1165 1167
1166 r = vhost_update_used_flags(vq); 1168 r = vhost_update_used_flags(vq);
1167 if (r) 1169 if (r)
1168 return r; 1170 goto err;
1169 vq->signalled_used_valid = false; 1171 vq->signalled_used_valid = false;
1170 if (!access_ok(VERIFY_READ, &vq->used->idx, sizeof vq->used->idx)) 1172 if (!access_ok(VERIFY_READ, &vq->used->idx, sizeof vq->used->idx)) {
1171 return -EFAULT; 1173 r = -EFAULT;
1174 goto err;
1175 }
1172 r = __get_user(last_used_idx, &vq->used->idx); 1176 r = __get_user(last_used_idx, &vq->used->idx);
1173 if (r) 1177 if (r)
1174 return r; 1178 goto err;
1175 vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx); 1179 vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx);
1176 return 0; 1180 return 0;
1181err:
1182 vq->is_le = is_le;
1183 return r;
1177} 1184}
1178EXPORT_SYMBOL_GPL(vhost_init_used); 1185EXPORT_SYMBOL_GPL(vhost_init_used);
1179 1186
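
vhost_init_used() may flip vq->is_le for legacy/cross-endian setups, so the early returns are funneled through a single err label that restores the value saved at entry. The control-flow shape, reduced to a self-contained sketch with hypothetical names:

#include <linux/types.h>
#include <linux/errno.h>

struct demo_ctx {
	bool is_le;
	int flags;
};

static int demo_step(struct demo_ctx *ctx)
{
	return ctx->flags ? 0 : -EFAULT;
}

static int demo_init(struct demo_ctx *ctx, bool target_le)
{
	bool saved_le = ctx->is_le;	/* snapshot before mutating */
	int r;

	ctx->is_le = target_le;		/* may differ from the saved value */

	r = demo_step(ctx);
	if (r)
		goto err;
	return 0;

err:
	ctx->is_le = saved_le;		/* caller sees no state change on failure */
	return r;
}
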
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 92f394927f24..6e92917ba77a 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -709,6 +709,7 @@ static int con2fb_acquire_newinfo(struct vc_data *vc, struct fb_info *info,
709 } 709 }
710 710
711 if (!err) { 711 if (!err) {
712 ops->cur_blink_jiffies = HZ / 5;
712 info->fbcon_par = ops; 713 info->fbcon_par = ops;
713 714
714 if (vc) 715 if (vc)
@@ -956,6 +957,7 @@ static const char *fbcon_startup(void)
956 ops->currcon = -1; 957 ops->currcon = -1;
957 ops->graphics = 1; 958 ops->graphics = 1;
958 ops->cur_rotate = -1; 959 ops->cur_rotate = -1;
960 ops->cur_blink_jiffies = HZ / 5;
959 info->fbcon_par = ops; 961 info->fbcon_par = ops;
960 p->con_rotate = initial_rotation; 962 p->con_rotate = initial_rotation;
961 set_blitting_type(vc, info); 963 set_blitting_type(vc, info);
diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c
index 0081725c6b5b..6b2a06d09f2b 100644
--- a/drivers/video/fbdev/da8xx-fb.c
+++ b/drivers/video/fbdev/da8xx-fb.c
@@ -152,7 +152,7 @@ static void lcdc_write(unsigned int val, unsigned int addr)
152 152
153struct da8xx_fb_par { 153struct da8xx_fb_par {
154 struct device *dev; 154 struct device *dev;
155 resource_size_t p_palette_base; 155 dma_addr_t p_palette_base;
156 unsigned char *v_palette_base; 156 unsigned char *v_palette_base;
157 dma_addr_t vram_phys; 157 dma_addr_t vram_phys;
158 unsigned long vram_size; 158 unsigned long vram_size;
@@ -1428,7 +1428,7 @@ static int fb_probe(struct platform_device *device)
1428 1428
1429 par->vram_virt = dma_alloc_coherent(NULL, 1429 par->vram_virt = dma_alloc_coherent(NULL,
1430 par->vram_size, 1430 par->vram_size,
1431 (resource_size_t *) &par->vram_phys, 1431 &par->vram_phys,
1432 GFP_KERNEL | GFP_DMA); 1432 GFP_KERNEL | GFP_DMA);
1433 if (!par->vram_virt) { 1433 if (!par->vram_virt) {
1434 dev_err(&device->dev, 1434 dev_err(&device->dev,
@@ -1448,7 +1448,7 @@ static int fb_probe(struct platform_device *device)
1448 1448
1449 /* allocate palette buffer */ 1449 /* allocate palette buffer */
1450 par->v_palette_base = dma_zalloc_coherent(NULL, PALETTE_SIZE, 1450 par->v_palette_base = dma_zalloc_coherent(NULL, PALETTE_SIZE,
1451 (resource_size_t *)&par->p_palette_base, 1451 &par->p_palette_base,
1452 GFP_KERNEL | GFP_DMA); 1452 GFP_KERNEL | GFP_DMA);
1453 if (!par->v_palette_base) { 1453 if (!par->v_palette_base) {
1454 dev_err(&device->dev, 1454 dev_err(&device->dev,
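
dma_alloc_coherent() and dma_zalloc_coherent() write the bus address through a dma_addr_t *, and dma_addr_t is not guaranteed to be the same width as resource_size_t, so the casts above were hiding a genuine type mismatch; giving p_palette_base the proper type removes them. A minimal correct call, wrapped in a hypothetical helper:

#include <linux/dma-mapping.h>

/* Hypothetical helper: allocate a coherent buffer and report its DMA address.
 * The handle must be a real dma_addr_t, never another integer type punned
 * through a cast, because the widths are not guaranteed to match.
 */
static void *demo_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_handle)
{
	return dma_alloc_coherent(dev, size, dma_handle, GFP_KERNEL);
}
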
diff --git a/drivers/video/fbdev/exynos/s6e8ax0.c b/drivers/video/fbdev/exynos/s6e8ax0.c
index 95873f26e39c..de2f3e793786 100644
--- a/drivers/video/fbdev/exynos/s6e8ax0.c
+++ b/drivers/video/fbdev/exynos/s6e8ax0.c
@@ -829,8 +829,7 @@ static int s6e8ax0_probe(struct mipi_dsim_lcd_device *dsim_dev)
829 return 0; 829 return 0;
830} 830}
831 831
832#ifdef CONFIG_PM 832static int __maybe_unused s6e8ax0_suspend(struct mipi_dsim_lcd_device *dsim_dev)
833static int s6e8ax0_suspend(struct mipi_dsim_lcd_device *dsim_dev)
834{ 833{
835 struct s6e8ax0 *lcd = dev_get_drvdata(&dsim_dev->dev); 834 struct s6e8ax0 *lcd = dev_get_drvdata(&dsim_dev->dev);
836 835
@@ -843,7 +842,7 @@ static int s6e8ax0_suspend(struct mipi_dsim_lcd_device *dsim_dev)
843 return 0; 842 return 0;
844} 843}
845 844
846static int s6e8ax0_resume(struct mipi_dsim_lcd_device *dsim_dev) 845static int __maybe_unused s6e8ax0_resume(struct mipi_dsim_lcd_device *dsim_dev)
847{ 846{
848 struct s6e8ax0 *lcd = dev_get_drvdata(&dsim_dev->dev); 847 struct s6e8ax0 *lcd = dev_get_drvdata(&dsim_dev->dev);
849 848
@@ -855,10 +854,6 @@ static int s6e8ax0_resume(struct mipi_dsim_lcd_device *dsim_dev)
855 854
856 return 0; 855 return 0;
857} 856}
858#else
859#define s6e8ax0_suspend NULL
860#define s6e8ax0_resume NULL
861#endif
862 857
863static struct mipi_dsim_lcd_driver s6e8ax0_dsim_ddi_driver = { 858static struct mipi_dsim_lcd_driver s6e8ax0_dsim_ddi_driver = {
864 .name = "s6e8ax0", 859 .name = "s6e8ax0",
@@ -867,8 +862,8 @@ static struct mipi_dsim_lcd_driver s6e8ax0_dsim_ddi_driver = {
867 .power_on = s6e8ax0_power_on, 862 .power_on = s6e8ax0_power_on,
868 .set_sequence = s6e8ax0_set_sequence, 863 .set_sequence = s6e8ax0_set_sequence,
869 .probe = s6e8ax0_probe, 864 .probe = s6e8ax0_probe,
870 .suspend = s6e8ax0_suspend, 865 .suspend = IS_ENABLED(CONFIG_PM) ? s6e8ax0_suspend : NULL,
871 .resume = s6e8ax0_resume, 866 .resume = IS_ENABLED(CONFIG_PM) ? s6e8ax0_resume : NULL,
872}; 867};
873 868
874static int s6e8ax0_init(void) 869static int s6e8ax0_init(void)
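
Rather than hiding the PM callbacks behind #ifdef CONFIG_PM with NULL fallbacks, the handlers are marked __maybe_unused and the ops table selects them with IS_ENABLED(CONFIG_PM), so both configurations go through the compiler. A condensed sketch of the idiom, using hypothetical demo_* names:

#include <linux/kernel.h>

struct demo_ops {
	int (*suspend)(void);
	int (*resume)(void);
};

static int __maybe_unused demo_suspend(void) { return 0; }
static int __maybe_unused demo_resume(void)  { return 0; }

/* IS_ENABLED() folds to 0 or 1 at compile time, so with CONFIG_PM=n the
 * handlers are still parsed and type-checked, then dropped as unreferenced,
 * instead of disappearing behind preprocessor conditionals.
 */
static const struct demo_ops demo_pm_ops = {
	.suspend = IS_ENABLED(CONFIG_PM) ? demo_suspend : NULL,
	.resume  = IS_ENABLED(CONFIG_PM) ? demo_resume  : NULL,
};
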
diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c
index cee88603efc9..bb2f1e866020 100644
--- a/drivers/video/fbdev/imxfb.c
+++ b/drivers/video/fbdev/imxfb.c
@@ -902,6 +902,21 @@ static int imxfb_probe(struct platform_device *pdev)
902 goto failed_getclock; 902 goto failed_getclock;
903 } 903 }
904 904
905 /*
906 * The LCDC controller does not have an enable bit. The
907 * controller starts directly when the clocks are enabled.
906 * If the clocks are enabled while the controller has not yet been
907 * programmed with proper register values (for example when the
908 * bootloader left them enabled), it goes into an undefined
909 * state.
912 * To avoid this issue, let's enable and disable LCDC IPG clock
913 * so that we force some kind of 'reset' to the LCDC block.
914 */
915 ret = clk_prepare_enable(fbi->clk_ipg);
916 if (ret)
917 goto failed_getclock;
918 clk_disable_unprepare(fbi->clk_ipg);
919
905 fbi->clk_ahb = devm_clk_get(&pdev->dev, "ahb"); 920 fbi->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
906 if (IS_ERR(fbi->clk_ahb)) { 921 if (IS_ERR(fbi->clk_ahb)) {
907 ret = PTR_ERR(fbi->clk_ahb); 922 ret = PTR_ERR(fbi->clk_ahb);
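
The comment added above explains the trick: the LCDC has no enable bit, so pulsing the IPG clock once forces the block through a defined reset before the driver programs it. The clk API calls involved, reduced to a hypothetical helper with error handling:

#include <linux/clk.h>

/* Hypothetical helper: pulse a gate clock once so the downstream block
 * starts from a known state before it is configured.
 */
static int demo_pulse_clock(struct clk *clk)
{
	int ret;

	ret = clk_prepare_enable(clk);	/* prepare + enable in one call */
	if (ret)
		return ret;

	clk_disable_unprepare(clk);	/* immediately gate it again */
	return 0;
}
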
diff --git a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
index de54a4748065..b6f83d5df9fd 100644
--- a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
+++ b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
@@ -503,8 +503,7 @@ static int mmphw_probe(struct platform_device *pdev)
503 ctrl->reg_base = devm_ioremap_nocache(ctrl->dev, 503 ctrl->reg_base = devm_ioremap_nocache(ctrl->dev,
504 res->start, resource_size(res)); 504 res->start, resource_size(res));
505 if (ctrl->reg_base == NULL) { 505 if (ctrl->reg_base == NULL) {
506 dev_err(ctrl->dev, "%s: res %x - %x map failed\n", __func__, 506 dev_err(ctrl->dev, "%s: res %pR map failed\n", __func__, res);
507 res->start, res->end);
508 ret = -ENOMEM; 507 ret = -ENOMEM;
509 goto failed; 508 goto failed;
510 } 509 }
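
%pR prints a struct resource (range, type and flags) with a single specifier and handles 64-bit resource_size_t correctly on every architecture, whereas the old %x pair truncates wide addresses. A minimal usage example with a hypothetical helper:

#include <linux/device.h>
#include <linux/ioport.h>

/* Hypothetical logging helper: %pR takes a struct resource pointer and
 * prints something like "[mem 0x... -0x... flags ...]" at the right width.
 */
static void demo_report_resource(struct device *dev, struct resource *res)
{
	dev_info(dev, "mapping %pR\n", res);
}
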
diff --git a/drivers/video/fbdev/ocfb.c b/drivers/video/fbdev/ocfb.c
index c9293aea8ec3..a970edc2a6f8 100644
--- a/drivers/video/fbdev/ocfb.c
+++ b/drivers/video/fbdev/ocfb.c
@@ -123,11 +123,11 @@ static int ocfb_setupfb(struct ocfb_dev *fbdev)
123 123
124 /* Horizontal timings */ 124 /* Horizontal timings */
125 ocfb_writereg(fbdev, OCFB_HTIM, (var->hsync_len - 1) << 24 | 125 ocfb_writereg(fbdev, OCFB_HTIM, (var->hsync_len - 1) << 24 |
126 (var->right_margin - 1) << 16 | (var->xres - 1)); 126 (var->left_margin - 1) << 16 | (var->xres - 1));
127 127
128 /* Vertical timings */ 128 /* Vertical timings */
129 ocfb_writereg(fbdev, OCFB_VTIM, (var->vsync_len - 1) << 24 | 129 ocfb_writereg(fbdev, OCFB_VTIM, (var->vsync_len - 1) << 24 |
130 (var->lower_margin - 1) << 16 | (var->yres - 1)); 130 (var->upper_margin - 1) << 16 | (var->yres - 1));
131 131
132 /* Total length of frame */ 132 /* Total length of frame */
133 hlen = var->left_margin + var->right_margin + var->hsync_len + 133 hlen = var->left_margin + var->right_margin + var->hsync_len +
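
In fbdev timing terms, left_margin and upper_margin are the back porch (the gap between the sync pulse and the active area), which is what these timing registers expect; the old code programmed the front porch (right_margin/lower_margin) instead. For reference, the total line length is the sum of all four components, as the driver's own hlen/vlen computation shows; a small helper restating that:

#include <linux/fb.h>

/* Total horizontal line length in pixels from the fbdev timing fields:
 * sync pulse + back porch (left_margin) + active pixels + front porch
 * (right_margin). The register write above packs sync length, back porch
 * and active width, each minus one.
 */
static u32 demo_htotal(const struct fb_var_screeninfo *var)
{
	return var->hsync_len + var->left_margin + var->xres +
	       var->right_margin;
}
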
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index 36205c27c4d0..f6bed86c17f9 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -545,6 +545,7 @@ err_enable_device:
545static void virtio_pci_remove(struct pci_dev *pci_dev) 545static void virtio_pci_remove(struct pci_dev *pci_dev)
546{ 546{
547 struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); 547 struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
548 struct device *dev = get_device(&vp_dev->vdev.dev);
548 549
549 unregister_virtio_device(&vp_dev->vdev); 550 unregister_virtio_device(&vp_dev->vdev);
550 551
@@ -554,6 +555,7 @@ static void virtio_pci_remove(struct pci_dev *pci_dev)
554 virtio_pci_modern_remove(vp_dev); 555 virtio_pci_modern_remove(vp_dev);
555 556
556 pci_disable_device(pci_dev); 557 pci_disable_device(pci_dev);
558 put_device(dev);
557} 559}
558 560
559static struct pci_driver virtio_pci_driver = { 561static struct pci_driver virtio_pci_driver = {
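
unregister_virtio_device() can drop the last reference to the embedded struct device, which would free vp_dev while the rest of the remove path still uses it; taking a reference first and dropping it after pci_disable_device() keeps the structure alive. The same pattern in isolation, with a hypothetical teardown function:

#include <linux/device.h>

/* Hypothetical teardown: pin the device across its own unregistration so
 * the containing structure stays valid for the rest of the cleanup path.
 */
static void demo_teardown(struct device *child)
{
	struct device *dev = get_device(child);	/* +1 reference */

	device_unregister(child);	/* may drop what was the last reference */

	/* ...cleanup that still dereferences the containing object... */

	put_device(dev);		/* now the object may really be freed */
}
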
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index c0c11fad4611..7760fc1a2218 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -679,7 +679,7 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
679 679
680 pci_read_config_dword(pci_dev, 680 pci_read_config_dword(pci_dev,
681 notify + offsetof(struct virtio_pci_notify_cap, 681 notify + offsetof(struct virtio_pci_notify_cap,
682 cap.length), 682 cap.offset),
683 &notify_offset); 683 &notify_offset);
684 684
685 /* We don't know how many VQs we'll map, ahead of the time. 685 /* We don't know how many VQs we'll map, ahead of the time.
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 4f0e7be0da34..80825a7e8e48 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -145,7 +145,8 @@ config MENF21BMC_WATCHDOG
145config TANGOX_WATCHDOG 145config TANGOX_WATCHDOG
146 tristate "Sigma Designs SMP86xx/SMP87xx watchdog" 146 tristate "Sigma Designs SMP86xx/SMP87xx watchdog"
147 select WATCHDOG_CORE 147 select WATCHDOG_CORE
148 depends on ARCH_TANGOX || COMPILE_TEST 148 depends on ARCH_TANGO || COMPILE_TEST
149 depends on HAS_IOMEM
149 help 150 help
150 Support for the watchdog in Sigma Designs SMP86xx (tango3) 151 Support for the watchdog in Sigma Designs SMP86xx (tango3)
151 and SMP87xx (tango4) family chips. 152 and SMP87xx (tango4) family chips.
@@ -618,6 +619,7 @@ config DIGICOLOR_WATCHDOG
618config LPC18XX_WATCHDOG 619config LPC18XX_WATCHDOG
619 tristate "LPC18xx/43xx Watchdog" 620 tristate "LPC18xx/43xx Watchdog"
620 depends on ARCH_LPC18XX || COMPILE_TEST 621 depends on ARCH_LPC18XX || COMPILE_TEST
622 depends on HAS_IOMEM
621 select WATCHDOG_CORE 623 select WATCHDOG_CORE
622 help 624 help
623 Say Y here if to include support for the watchdog timer 625 Say Y here if to include support for the watchdog timer
@@ -1374,6 +1376,7 @@ config BCM_KONA_WDT_DEBUG
1374config BCM7038_WDT 1376config BCM7038_WDT
1375 tristate "BCM7038 Watchdog" 1377 tristate "BCM7038 Watchdog"
1376 select WATCHDOG_CORE 1378 select WATCHDOG_CORE
1379 depends on HAS_IOMEM
1377 help 1380 help
1378 Watchdog driver for the built-in hardware in Broadcom 7038 SoCs. 1381 Watchdog driver for the built-in hardware in Broadcom 7038 SoCs.
1379 1382
@@ -1383,6 +1386,7 @@ config IMGPDC_WDT
1383 tristate "Imagination Technologies PDC Watchdog Timer" 1386 tristate "Imagination Technologies PDC Watchdog Timer"
1384 depends on HAS_IOMEM 1387 depends on HAS_IOMEM
1385 depends on METAG || MIPS || COMPILE_TEST 1388 depends on METAG || MIPS || COMPILE_TEST
1389 select WATCHDOG_CORE
1386 help 1390 help
1387 Driver for Imagination Technologies PowerDown Controller 1391 Driver for Imagination Technologies PowerDown Controller
1388 Watchdog Timer. 1392 Watchdog Timer.
@@ -1565,6 +1569,17 @@ config WATCHDOG_RIO
1565 machines. The watchdog timeout period is normally one minute but 1569 machines. The watchdog timeout period is normally one minute but
1566 can be changed with a boot-time parameter. 1570 can be changed with a boot-time parameter.
1567 1571
1572config WATCHDOG_SUN4V
1573 tristate "Sun4v Watchdog support"
1574 select WATCHDOG_CORE
1575 depends on SPARC64
1576 help
1577 Say Y here to support the hypervisor watchdog capability embedded
1578 in the SPARC sun4v architecture.
1579
1580 To compile this driver as a module, choose M here. The module will
1581 be called sun4v_wdt.
1582
1568# XTENSA Architecture 1583# XTENSA Architecture
1569 1584
1570# Xen Architecture 1585# Xen Architecture
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index f566753256ab..f6a6a387c6c7 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -179,6 +179,7 @@ obj-$(CONFIG_SH_WDT) += shwdt.o
179 179
180obj-$(CONFIG_WATCHDOG_RIO) += riowd.o 180obj-$(CONFIG_WATCHDOG_RIO) += riowd.o
181obj-$(CONFIG_WATCHDOG_CP1XXX) += cpwd.o 181obj-$(CONFIG_WATCHDOG_CP1XXX) += cpwd.o
182obj-$(CONFIG_WATCHDOG_SUN4V) += sun4v_wdt.o
182 183
183# XTENSA Architecture 184# XTENSA Architecture
184 185
diff --git a/drivers/watchdog/max63xx_wdt.c b/drivers/watchdog/max63xx_wdt.c
index f36ca4be0720..ac5840d9689a 100644
--- a/drivers/watchdog/max63xx_wdt.c
+++ b/drivers/watchdog/max63xx_wdt.c
@@ -292,4 +292,4 @@ MODULE_PARM_DESC(nodelay,
292 "Force selection of a timeout setting without initial delay " 292 "Force selection of a timeout setting without initial delay "
293 "(max6373/74 only, default=0)"); 293 "(max6373/74 only, default=0)");
294 294
295MODULE_LICENSE("GPL"); 295MODULE_LICENSE("GPL v2");
diff --git a/drivers/watchdog/pcwd_usb.c b/drivers/watchdog/pcwd_usb.c
index 1a11aedc4fe8..68952d9ccf83 100644
--- a/drivers/watchdog/pcwd_usb.c
+++ b/drivers/watchdog/pcwd_usb.c
@@ -608,7 +608,7 @@ static int usb_pcwd_probe(struct usb_interface *interface,
608 struct usb_host_interface *iface_desc; 608 struct usb_host_interface *iface_desc;
609 struct usb_endpoint_descriptor *endpoint; 609 struct usb_endpoint_descriptor *endpoint;
610 struct usb_pcwd_private *usb_pcwd = NULL; 610 struct usb_pcwd_private *usb_pcwd = NULL;
611 int pipe, maxp; 611 int pipe;
612 int retval = -ENOMEM; 612 int retval = -ENOMEM;
613 int got_fw_rev; 613 int got_fw_rev;
614 unsigned char fw_rev_major, fw_rev_minor; 614 unsigned char fw_rev_major, fw_rev_minor;
@@ -641,7 +641,6 @@ static int usb_pcwd_probe(struct usb_interface *interface,
641 641
642 /* get a handle to the interrupt data pipe */ 642 /* get a handle to the interrupt data pipe */
643 pipe = usb_rcvintpipe(udev, endpoint->bEndpointAddress); 643 pipe = usb_rcvintpipe(udev, endpoint->bEndpointAddress);
644 maxp = usb_maxpacket(udev, pipe, usb_pipeout(pipe));
645 644
646 /* allocate memory for our device and initialize it */ 645 /* allocate memory for our device and initialize it */
647 usb_pcwd = kzalloc(sizeof(struct usb_pcwd_private), GFP_KERNEL); 646 usb_pcwd = kzalloc(sizeof(struct usb_pcwd_private), GFP_KERNEL);
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index 01d816251302..e7a715e82021 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -139,12 +139,11 @@ static int wdt_config(struct watchdog_device *wdd, bool ping)
139 139
140 writel_relaxed(UNLOCK, wdt->base + WDTLOCK); 140 writel_relaxed(UNLOCK, wdt->base + WDTLOCK);
141 writel_relaxed(wdt->load_val, wdt->base + WDTLOAD); 141 writel_relaxed(wdt->load_val, wdt->base + WDTLOAD);
142 writel_relaxed(INT_MASK, wdt->base + WDTINTCLR);
142 143
143 if (!ping) { 144 if (!ping)
144 writel_relaxed(INT_MASK, wdt->base + WDTINTCLR);
145 writel_relaxed(INT_ENABLE | RESET_ENABLE, wdt->base + 145 writel_relaxed(INT_ENABLE | RESET_ENABLE, wdt->base +
146 WDTCONTROL); 146 WDTCONTROL);
147 }
148 147
149 writel_relaxed(LOCK, wdt->base + WDTLOCK); 148 writel_relaxed(LOCK, wdt->base + WDTLOCK);
150 149
diff --git a/drivers/watchdog/sun4v_wdt.c b/drivers/watchdog/sun4v_wdt.c
new file mode 100644
index 000000000000..1467fe50a76f
--- /dev/null
+++ b/drivers/watchdog/sun4v_wdt.c
@@ -0,0 +1,191 @@
1/*
2 * sun4v watchdog timer
3 * (c) Copyright 2016 Oracle Corporation
4 *
5 * Implement a simple watchdog driver using the built-in sun4v hypervisor
6 * watchdog support. If time expires, the hypervisor stops or bounces
7 * the guest domain.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17#include <linux/errno.h>
18#include <linux/init.h>
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/moduleparam.h>
22#include <linux/watchdog.h>
23#include <asm/hypervisor.h>
24#include <asm/mdesc.h>
25
26#define WDT_TIMEOUT 60
27#define WDT_MAX_TIMEOUT 31536000
28#define WDT_MIN_TIMEOUT 1
29#define WDT_DEFAULT_RESOLUTION_MS 1000 /* 1 second */
30
31static unsigned int timeout;
32module_param(timeout, uint, 0);
33MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds (default="
34 __MODULE_STRING(WDT_TIMEOUT) ")");
35
36static bool nowayout = WATCHDOG_NOWAYOUT;
37module_param(nowayout, bool, S_IRUGO);
38MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
39 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
40
41static int sun4v_wdt_stop(struct watchdog_device *wdd)
42{
43 sun4v_mach_set_watchdog(0, NULL);
44
45 return 0;
46}
47
48static int sun4v_wdt_ping(struct watchdog_device *wdd)
49{
50 int hverr;
51
52 /*
53 * HV watchdog timer will round up the timeout
54 * passed in to the nearest multiple of the
55 * watchdog resolution in milliseconds.
56 */
57 hverr = sun4v_mach_set_watchdog(wdd->timeout * 1000, NULL);
58 if (hverr == HV_EINVAL)
59 return -EINVAL;
60
61 return 0;
62}
63
64static int sun4v_wdt_set_timeout(struct watchdog_device *wdd,
65 unsigned int timeout)
66{
67 wdd->timeout = timeout;
68
69 return 0;
70}
71
72static const struct watchdog_info sun4v_wdt_ident = {
73 .options = WDIOF_SETTIMEOUT |
74 WDIOF_MAGICCLOSE |
75 WDIOF_KEEPALIVEPING,
76 .identity = "sun4v hypervisor watchdog",
77 .firmware_version = 0,
78};
79
80static struct watchdog_ops sun4v_wdt_ops = {
81 .owner = THIS_MODULE,
82 .start = sun4v_wdt_ping,
83 .stop = sun4v_wdt_stop,
84 .ping = sun4v_wdt_ping,
85 .set_timeout = sun4v_wdt_set_timeout,
86};
87
88static struct watchdog_device wdd = {
89 .info = &sun4v_wdt_ident,
90 .ops = &sun4v_wdt_ops,
91 .min_timeout = WDT_MIN_TIMEOUT,
92 .max_timeout = WDT_MAX_TIMEOUT,
93 .timeout = WDT_TIMEOUT,
94};
95
96static int __init sun4v_wdt_init(void)
97{
98 struct mdesc_handle *handle;
99 u64 node;
100 const u64 *value;
101 int err = 0;
102 unsigned long major = 1, minor = 1;
103
104 /*
105 * There are 2 properties that can be set from the control
106 * domain for the watchdog.
107 * watchdog-resolution
108 * watchdog-max-timeout
109 *
 110 * We expect a handle to be returned; otherwise something is
 111 * seriously wrong, so returning -ENODEV here is correct.
112 */
113
114 handle = mdesc_grab();
115 if (!handle)
116 return -ENODEV;
117
118 node = mdesc_node_by_name(handle, MDESC_NODE_NULL, "platform");
119 err = -ENODEV;
120 if (node == MDESC_NODE_NULL)
121 goto out_release;
122
123 /*
124 * This is a safe way to validate if we are on the right
125 * platform.
126 */
127 if (sun4v_hvapi_register(HV_GRP_CORE, major, &minor))
128 goto out_hv_unreg;
129
130 /* Allow value of watchdog-resolution up to 1s (default) */
131 value = mdesc_get_property(handle, node, "watchdog-resolution", NULL);
132 err = -EINVAL;
133 if (value) {
134 if (*value == 0 ||
135 *value > WDT_DEFAULT_RESOLUTION_MS)
136 goto out_hv_unreg;
137 }
138
139 value = mdesc_get_property(handle, node, "watchdog-max-timeout", NULL);
140 if (value) {
141 /*
142 * If the property value (in ms) is smaller than
143 * min_timeout, return -EINVAL.
144 */
145 if (*value < wdd.min_timeout * 1000)
146 goto out_hv_unreg;
147
148 /*
149 * If the property value is smaller than
150 * default max_timeout then set watchdog max_timeout to
151 * the value of the property in seconds.
152 */
153 if (*value < wdd.max_timeout * 1000)
154 wdd.max_timeout = *value / 1000;
155 }
156
157 watchdog_init_timeout(&wdd, timeout, NULL);
158
159 watchdog_set_nowayout(&wdd, nowayout);
160
161 err = watchdog_register_device(&wdd);
162 if (err)
163 goto out_hv_unreg;
164
165 pr_info("initialized (timeout=%ds, nowayout=%d)\n",
166 wdd.timeout, nowayout);
167
168 mdesc_release(handle);
169
170 return 0;
171
172out_hv_unreg:
173 sun4v_hvapi_unregister(HV_GRP_CORE);
174
175out_release:
176 mdesc_release(handle);
177 return err;
178}
179
180static void __exit sun4v_wdt_exit(void)
181{
182 sun4v_hvapi_unregister(HV_GRP_CORE);
183 watchdog_unregister_device(&wdd);
184}
185
186module_init(sun4v_wdt_init);
187module_exit(sun4v_wdt_exit);
188
189MODULE_AUTHOR("Wim Coekaerts <wim.coekaerts@oracle.com>");
190MODULE_DESCRIPTION("sun4v watchdog driver");
191MODULE_LICENSE("GPL");
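
Because the driver registers through the watchdog core, it is driven from userspace via the standard /dev/watchdog chardev. The sketch below is a hypothetical keepalive loop exercising the set_timeout and ping callbacks above, ending with the magic close that WDIOF_MAGICCLOSE advertises (note that nowayout defeats the magic close):

/* Hypothetical userspace keepalive loop for a WATCHDOG_CORE driver such as
 * the one above; the device node is assumed to be /dev/watchdog.
 * Build: cc -o wdt-ping wdt-ping.c
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int fd, i, timeout = 60;

	fd = open("/dev/watchdog", O_WRONLY);
	if (fd < 0) {
		perror("open /dev/watchdog");
		return 1;
	}

	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	/* ends up in .set_timeout */

	for (i = 0; i < 10; i++) {
		ioctl(fd, WDIOC_KEEPALIVE, 0);	/* ends up in .ping */
		sleep(timeout / 2);
	}

	write(fd, "V", 1);	/* magic close: permit the final stop */
	close(fd);
	return 0;
}
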
diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c
index 945fc4327201..4ac2ca8a7656 100644
--- a/drivers/xen/tmem.c
+++ b/drivers/xen/tmem.c
@@ -242,7 +242,7 @@ static int tmem_cleancache_init_shared_fs(char *uuid, size_t pagesize)
242 return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize); 242 return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize);
243} 243}
244 244
245static struct cleancache_ops tmem_cleancache_ops = { 245static const struct cleancache_ops tmem_cleancache_ops = {
246 .put_page = tmem_cleancache_put_page, 246 .put_page = tmem_cleancache_put_page,
247 .get_page = tmem_cleancache_get_page, 247 .get_page = tmem_cleancache_get_page,
248 .invalidate_page = tmem_cleancache_flush_page, 248 .invalidate_page = tmem_cleancache_flush_page,
diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
index 73dafdc494aa..fb0221434f81 100644
--- a/drivers/xen/xen-pciback/pciback_ops.c
+++ b/drivers/xen/xen-pciback/pciback_ops.c
@@ -227,8 +227,9 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
227 /* 227 /*
228 * PCI_COMMAND_MEMORY must be enabled, otherwise we may not be able 228 * PCI_COMMAND_MEMORY must be enabled, otherwise we may not be able
229 * to access the BARs where the MSI-X entries reside. 229 * to access the BARs where the MSI-X entries reside.
 230 * But for VF devices it is the PF's command register that needs to be checked.
230 */ 231 */
231 pci_read_config_word(dev, PCI_COMMAND, &cmd); 232 pci_read_config_word(pci_physfn(dev), PCI_COMMAND, &cmd);
232 if (dev->msi_enabled || !(cmd & PCI_COMMAND_MEMORY)) 233 if (dev->msi_enabled || !(cmd & PCI_COMMAND_MEMORY))
233 return -ENXIO; 234 return -ENXIO;
234 235
@@ -332,6 +333,9 @@ void xen_pcibk_do_op(struct work_struct *data)
332 struct xen_pcibk_dev_data *dev_data = NULL; 333 struct xen_pcibk_dev_data *dev_data = NULL;
333 struct xen_pci_op *op = &pdev->op; 334 struct xen_pci_op *op = &pdev->op;
334 int test_intx = 0; 335 int test_intx = 0;
336#ifdef CONFIG_PCI_MSI
337 unsigned int nr = 0;
338#endif
335 339
336 *op = pdev->sh_info->op; 340 *op = pdev->sh_info->op;
337 barrier(); 341 barrier();
@@ -360,6 +364,7 @@ void xen_pcibk_do_op(struct work_struct *data)
360 op->err = xen_pcibk_disable_msi(pdev, dev, op); 364 op->err = xen_pcibk_disable_msi(pdev, dev, op);
361 break; 365 break;
362 case XEN_PCI_OP_enable_msix: 366 case XEN_PCI_OP_enable_msix:
367 nr = op->value;
363 op->err = xen_pcibk_enable_msix(pdev, dev, op); 368 op->err = xen_pcibk_enable_msix(pdev, dev, op);
364 break; 369 break;
365 case XEN_PCI_OP_disable_msix: 370 case XEN_PCI_OP_disable_msix:
@@ -382,7 +387,7 @@ void xen_pcibk_do_op(struct work_struct *data)
382 if (op->cmd == XEN_PCI_OP_enable_msix && op->err == 0) { 387 if (op->cmd == XEN_PCI_OP_enable_msix && op->err == 0) {
383 unsigned int i; 388 unsigned int i;
384 389
385 for (i = 0; i < op->value; i++) 390 for (i = 0; i < nr; i++)
386 pdev->sh_info->op.msix_entries[i].vector = 391 pdev->sh_info->op.msix_entries[i].vector =
387 op->msix_entries[i].vector; 392 op->msix_entries[i].vector;
388 } 393 }
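
op->value is rewritten by the MSI-X enable path itself, so the copy-back loop now runs over nr, the request size captured before the call, rather than whatever value ends up there afterwards. A related defensive habit when a loop bound lives in memory that the other end (or a callee) can modify is to snapshot it exactly once, sketched here with hypothetical names:

#include <linux/compiler.h>

/* Hypothetical shared layout; the other side may rewrite it at any time. */
struct demo_shared_op {
	unsigned int value;
	unsigned int vector[64];
};

static void demo_copy_vectors(struct demo_shared_op *shared,
			      unsigned int *dst, unsigned int dst_len)
{
	/* Snapshot once; never re-read the shared field for the bound. */
	unsigned int nr = READ_ONCE(shared->value);
	unsigned int i;

	if (nr > dst_len)
		nr = dst_len;
	for (i = 0; i < nr; i++)
		dst[i] = shared->vector[i];
}
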
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index ad4eb1024d1f..c46ee189466f 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -849,15 +849,31 @@ static int scsiback_map(struct vscsibk_info *info)
849} 849}
850 850
851/* 851/*
852 Check for a translation entry being present
853*/
854static struct v2p_entry *scsiback_chk_translation_entry(
855 struct vscsibk_info *info, struct ids_tuple *v)
856{
857 struct list_head *head = &(info->v2p_entry_lists);
858 struct v2p_entry *entry;
859
860 list_for_each_entry(entry, head, l)
861 if ((entry->v.chn == v->chn) &&
862 (entry->v.tgt == v->tgt) &&
863 (entry->v.lun == v->lun))
864 return entry;
865
866 return NULL;
867}
868
869/*
852 Add a new translation entry 870 Add a new translation entry
853*/ 871*/
854static int scsiback_add_translation_entry(struct vscsibk_info *info, 872static int scsiback_add_translation_entry(struct vscsibk_info *info,
855 char *phy, struct ids_tuple *v) 873 char *phy, struct ids_tuple *v)
856{ 874{
857 int err = 0; 875 int err = 0;
858 struct v2p_entry *entry;
859 struct v2p_entry *new; 876 struct v2p_entry *new;
860 struct list_head *head = &(info->v2p_entry_lists);
861 unsigned long flags; 877 unsigned long flags;
862 char *lunp; 878 char *lunp;
863 unsigned long long unpacked_lun; 879 unsigned long long unpacked_lun;
@@ -917,15 +933,10 @@ static int scsiback_add_translation_entry(struct vscsibk_info *info,
917 spin_lock_irqsave(&info->v2p_lock, flags); 933 spin_lock_irqsave(&info->v2p_lock, flags);
918 934
919 /* Check double assignment to identical virtual ID */ 935 /* Check double assignment to identical virtual ID */
920 list_for_each_entry(entry, head, l) { 936 if (scsiback_chk_translation_entry(info, v)) {
921 if ((entry->v.chn == v->chn) && 937 pr_warn("Virtual ID is already used. Assignment was not performed.\n");
922 (entry->v.tgt == v->tgt) && 938 err = -EEXIST;
923 (entry->v.lun == v->lun)) { 939 goto out;
924 pr_warn("Virtual ID is already used. Assignment was not performed.\n");
925 err = -EEXIST;
926 goto out;
927 }
928
929 } 940 }
930 941
931 /* Create a new translation entry and add to the list */ 942 /* Create a new translation entry and add to the list */
@@ -933,18 +944,18 @@ static int scsiback_add_translation_entry(struct vscsibk_info *info,
933 new->v = *v; 944 new->v = *v;
934 new->tpg = tpg; 945 new->tpg = tpg;
935 new->lun = unpacked_lun; 946 new->lun = unpacked_lun;
936 list_add_tail(&new->l, head); 947 list_add_tail(&new->l, &info->v2p_entry_lists);
937 948
938out: 949out:
939 spin_unlock_irqrestore(&info->v2p_lock, flags); 950 spin_unlock_irqrestore(&info->v2p_lock, flags);
940 951
941out_free: 952out_free:
942 mutex_lock(&tpg->tv_tpg_mutex); 953 if (err) {
943 tpg->tv_tpg_fe_count--; 954 mutex_lock(&tpg->tv_tpg_mutex);
944 mutex_unlock(&tpg->tv_tpg_mutex); 955 tpg->tv_tpg_fe_count--;
945 956 mutex_unlock(&tpg->tv_tpg_mutex);
946 if (err)
947 kfree(new); 957 kfree(new);
958 }
948 959
949 return err; 960 return err;
950} 961}
@@ -956,39 +967,40 @@ static void __scsiback_del_translation_entry(struct v2p_entry *entry)
956} 967}
957 968
958/* 969/*
959 Delete the translation entry specfied 970 Delete the translation entry specified
960*/ 971*/
961static int scsiback_del_translation_entry(struct vscsibk_info *info, 972static int scsiback_del_translation_entry(struct vscsibk_info *info,
962 struct ids_tuple *v) 973 struct ids_tuple *v)
963{ 974{
964 struct v2p_entry *entry; 975 struct v2p_entry *entry;
965 struct list_head *head = &(info->v2p_entry_lists);
966 unsigned long flags; 976 unsigned long flags;
977 int ret = 0;
967 978
968 spin_lock_irqsave(&info->v2p_lock, flags); 979 spin_lock_irqsave(&info->v2p_lock, flags);
969 /* Find out the translation entry specified */ 980 /* Find out the translation entry specified */
970 list_for_each_entry(entry, head, l) { 981 entry = scsiback_chk_translation_entry(info, v);
971 if ((entry->v.chn == v->chn) && 982 if (entry)
972 (entry->v.tgt == v->tgt) && 983 __scsiback_del_translation_entry(entry);
973 (entry->v.lun == v->lun)) { 984 else
974 goto found; 985 ret = -ENOENT;
975 }
976 }
977
978 spin_unlock_irqrestore(&info->v2p_lock, flags);
979 return 1;
980
981found:
982 /* Delete the translation entry specfied */
983 __scsiback_del_translation_entry(entry);
984 986
985 spin_unlock_irqrestore(&info->v2p_lock, flags); 987 spin_unlock_irqrestore(&info->v2p_lock, flags);
986 return 0; 988 return ret;
987} 989}
988 990
989static void scsiback_do_add_lun(struct vscsibk_info *info, const char *state, 991static void scsiback_do_add_lun(struct vscsibk_info *info, const char *state,
990 char *phy, struct ids_tuple *vir, int try) 992 char *phy, struct ids_tuple *vir, int try)
991{ 993{
994 struct v2p_entry *entry;
995 unsigned long flags;
996
997 if (try) {
998 spin_lock_irqsave(&info->v2p_lock, flags);
999 entry = scsiback_chk_translation_entry(info, vir);
1000 spin_unlock_irqrestore(&info->v2p_lock, flags);
1001 if (entry)
1002 return;
1003 }
992 if (!scsiback_add_translation_entry(info, phy, vir)) { 1004 if (!scsiback_add_translation_entry(info, phy, vir)) {
993 if (xenbus_printf(XBT_NIL, info->dev->nodename, state, 1005 if (xenbus_printf(XBT_NIL, info->dev->nodename, state,
994 "%d", XenbusStateInitialised)) { 1006 "%d", XenbusStateInitialised)) {
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 9433e46518c8..912b64edb42b 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -188,6 +188,8 @@ static int queue_reply(struct list_head *queue, const void *data, size_t len)
188 188
189 if (len == 0) 189 if (len == 0)
190 return 0; 190 return 0;
191 if (len > XENSTORE_PAYLOAD_MAX)
192 return -EINVAL;
191 193
192 rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL); 194 rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL);
193 if (rb == NULL) 195 if (rb == NULL)